Commit f5caadbb3d8fc0b71533e880c684b2230bdb76ac
Exists in master and in 6 other branches.

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6

Showing 15 changed files (inline diff, added lines marked with "+"):
- include/linux/audit.h
- include/linux/netfilter/ipset/ip_set_ahash.h
- include/linux/netfilter/nfnetlink.h
- include/linux/netfilter/nfnetlink_queue.h
- kernel/audit.c
- net/netfilter/ipset/ip_set_hash_ip.c
- net/netfilter/ipset/ip_set_hash_ipport.c
- net/netfilter/ipset/ip_set_hash_ipportip.c
- net/netfilter/ipset/ip_set_hash_ipportnet.c
- net/netfilter/ipset/ip_set_hash_net.c
- net/netfilter/ipset/ip_set_hash_netiface.c
- net/netfilter/ipset/ip_set_hash_netport.c
- net/netfilter/nfnetlink.c
- net/netfilter/nfnetlink_queue.c
- net/netfilter/xt_AUDIT.c
include/linux/audit.h
1 | /* audit.h -- Auditing support | 1 | /* audit.h -- Auditing support |
2 | * | 2 | * |
3 | * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. | 3 | * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. |
4 | * All Rights Reserved. | 4 | * All Rights Reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | * | 19 | * |
20 | * Written by Rickard E. (Rik) Faith <faith@redhat.com> | 20 | * Written by Rickard E. (Rik) Faith <faith@redhat.com> |
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #ifndef _LINUX_AUDIT_H_ | 24 | #ifndef _LINUX_AUDIT_H_ |
25 | #define _LINUX_AUDIT_H_ | 25 | #define _LINUX_AUDIT_H_ |
26 | 26 | ||
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | #include <linux/elf-em.h> | 28 | #include <linux/elf-em.h> |
29 | 29 | ||
30 | /* The netlink messages for the audit system is divided into blocks: | 30 | /* The netlink messages for the audit system is divided into blocks: |
31 | * 1000 - 1099 are for commanding the audit system | 31 | * 1000 - 1099 are for commanding the audit system |
32 | * 1100 - 1199 user space trusted application messages | 32 | * 1100 - 1199 user space trusted application messages |
33 | * 1200 - 1299 messages internal to the audit daemon | 33 | * 1200 - 1299 messages internal to the audit daemon |
34 | * 1300 - 1399 audit event messages | 34 | * 1300 - 1399 audit event messages |
35 | * 1400 - 1499 SE Linux use | 35 | * 1400 - 1499 SE Linux use |
36 | * 1500 - 1599 kernel LSPP events | 36 | * 1500 - 1599 kernel LSPP events |
37 | * 1600 - 1699 kernel crypto events | 37 | * 1600 - 1699 kernel crypto events |
38 | * 1700 - 1799 kernel anomaly records | 38 | * 1700 - 1799 kernel anomaly records |
39 | * 1800 - 1899 kernel integrity events | 39 | * 1800 - 1899 kernel integrity events |
40 | * 1900 - 1999 future kernel use | 40 | * 1900 - 1999 future kernel use |
41 | * 2000 is for otherwise unclassified kernel audit messages (legacy) | 41 | * 2000 is for otherwise unclassified kernel audit messages (legacy) |
42 | * 2001 - 2099 unused (kernel) | 42 | * 2001 - 2099 unused (kernel) |
43 | * 2100 - 2199 user space anomaly records | 43 | * 2100 - 2199 user space anomaly records |
44 | * 2200 - 2299 user space actions taken in response to anomalies | 44 | * 2200 - 2299 user space actions taken in response to anomalies |
45 | * 2300 - 2399 user space generated LSPP events | 45 | * 2300 - 2399 user space generated LSPP events |
46 | * 2400 - 2499 user space crypto events | 46 | * 2400 - 2499 user space crypto events |
47 | * 2500 - 2999 future user space (maybe integrity labels and related events) | 47 | * 2500 - 2999 future user space (maybe integrity labels and related events) |
48 | * | 48 | * |
49 | * Messages from 1000-1199 are bi-directional. 1200-1299 & 2100 - 2999 are | 49 | * Messages from 1000-1199 are bi-directional. 1200-1299 & 2100 - 2999 are |
50 | * exclusively user space. 1300-2099 is kernel --> user space | 50 | * exclusively user space. 1300-2099 is kernel --> user space |
51 | * communication. | 51 | * communication. |
52 | */ | 52 | */ |
53 | #define AUDIT_GET 1000 /* Get status */ | 53 | #define AUDIT_GET 1000 /* Get status */ |
54 | #define AUDIT_SET 1001 /* Set status (enable/disable/auditd) */ | 54 | #define AUDIT_SET 1001 /* Set status (enable/disable/auditd) */ |
55 | #define AUDIT_LIST 1002 /* List syscall rules -- deprecated */ | 55 | #define AUDIT_LIST 1002 /* List syscall rules -- deprecated */ |
56 | #define AUDIT_ADD 1003 /* Add syscall rule -- deprecated */ | 56 | #define AUDIT_ADD 1003 /* Add syscall rule -- deprecated */ |
57 | #define AUDIT_DEL 1004 /* Delete syscall rule -- deprecated */ | 57 | #define AUDIT_DEL 1004 /* Delete syscall rule -- deprecated */ |
58 | #define AUDIT_USER 1005 /* Message from userspace -- deprecated */ | 58 | #define AUDIT_USER 1005 /* Message from userspace -- deprecated */ |
59 | #define AUDIT_LOGIN 1006 /* Define the login id and information */ | 59 | #define AUDIT_LOGIN 1006 /* Define the login id and information */ |
60 | #define AUDIT_WATCH_INS 1007 /* Insert file/dir watch entry */ | 60 | #define AUDIT_WATCH_INS 1007 /* Insert file/dir watch entry */ |
61 | #define AUDIT_WATCH_REM 1008 /* Remove file/dir watch entry */ | 61 | #define AUDIT_WATCH_REM 1008 /* Remove file/dir watch entry */ |
62 | #define AUDIT_WATCH_LIST 1009 /* List all file/dir watches */ | 62 | #define AUDIT_WATCH_LIST 1009 /* List all file/dir watches */ |
63 | #define AUDIT_SIGNAL_INFO 1010 /* Get info about sender of signal to auditd */ | 63 | #define AUDIT_SIGNAL_INFO 1010 /* Get info about sender of signal to auditd */ |
64 | #define AUDIT_ADD_RULE 1011 /* Add syscall filtering rule */ | 64 | #define AUDIT_ADD_RULE 1011 /* Add syscall filtering rule */ |
65 | #define AUDIT_DEL_RULE 1012 /* Delete syscall filtering rule */ | 65 | #define AUDIT_DEL_RULE 1012 /* Delete syscall filtering rule */ |
66 | #define AUDIT_LIST_RULES 1013 /* List syscall filtering rules */ | 66 | #define AUDIT_LIST_RULES 1013 /* List syscall filtering rules */ |
67 | #define AUDIT_TRIM 1014 /* Trim junk from watched tree */ | 67 | #define AUDIT_TRIM 1014 /* Trim junk from watched tree */ |
68 | #define AUDIT_MAKE_EQUIV 1015 /* Append to watched tree */ | 68 | #define AUDIT_MAKE_EQUIV 1015 /* Append to watched tree */ |
69 | #define AUDIT_TTY_GET 1016 /* Get TTY auditing status */ | 69 | #define AUDIT_TTY_GET 1016 /* Get TTY auditing status */ |
70 | #define AUDIT_TTY_SET 1017 /* Set TTY auditing status */ | 70 | #define AUDIT_TTY_SET 1017 /* Set TTY auditing status */ |
71 | 71 | ||
72 | #define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */ | 72 | #define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */ |
73 | #define AUDIT_USER_AVC 1107 /* We filter this differently */ | 73 | #define AUDIT_USER_AVC 1107 /* We filter this differently */ |
74 | #define AUDIT_USER_TTY 1124 /* Non-ICANON TTY input meaning */ | 74 | #define AUDIT_USER_TTY 1124 /* Non-ICANON TTY input meaning */ |
75 | #define AUDIT_LAST_USER_MSG 1199 | 75 | #define AUDIT_LAST_USER_MSG 1199 |
76 | #define AUDIT_FIRST_USER_MSG2 2100 /* More user space messages */ | 76 | #define AUDIT_FIRST_USER_MSG2 2100 /* More user space messages */ |
77 | #define AUDIT_LAST_USER_MSG2 2999 | 77 | #define AUDIT_LAST_USER_MSG2 2999 |
78 | 78 | ||
79 | #define AUDIT_DAEMON_START 1200 /* Daemon startup record */ | 79 | #define AUDIT_DAEMON_START 1200 /* Daemon startup record */ |
80 | #define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */ | 80 | #define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */ |
81 | #define AUDIT_DAEMON_ABORT 1202 /* Daemon error stop record */ | 81 | #define AUDIT_DAEMON_ABORT 1202 /* Daemon error stop record */ |
82 | #define AUDIT_DAEMON_CONFIG 1203 /* Daemon config change */ | 82 | #define AUDIT_DAEMON_CONFIG 1203 /* Daemon config change */ |
83 | 83 | ||
84 | #define AUDIT_SYSCALL 1300 /* Syscall event */ | 84 | #define AUDIT_SYSCALL 1300 /* Syscall event */ |
85 | /* #define AUDIT_FS_WATCH 1301 * Deprecated */ | 85 | /* #define AUDIT_FS_WATCH 1301 * Deprecated */ |
86 | #define AUDIT_PATH 1302 /* Filename path information */ | 86 | #define AUDIT_PATH 1302 /* Filename path information */ |
87 | #define AUDIT_IPC 1303 /* IPC record */ | 87 | #define AUDIT_IPC 1303 /* IPC record */ |
88 | #define AUDIT_SOCKETCALL 1304 /* sys_socketcall arguments */ | 88 | #define AUDIT_SOCKETCALL 1304 /* sys_socketcall arguments */ |
89 | #define AUDIT_CONFIG_CHANGE 1305 /* Audit system configuration change */ | 89 | #define AUDIT_CONFIG_CHANGE 1305 /* Audit system configuration change */ |
90 | #define AUDIT_SOCKADDR 1306 /* sockaddr copied as syscall arg */ | 90 | #define AUDIT_SOCKADDR 1306 /* sockaddr copied as syscall arg */ |
91 | #define AUDIT_CWD 1307 /* Current working directory */ | 91 | #define AUDIT_CWD 1307 /* Current working directory */ |
92 | #define AUDIT_EXECVE 1309 /* execve arguments */ | 92 | #define AUDIT_EXECVE 1309 /* execve arguments */ |
93 | #define AUDIT_IPC_SET_PERM 1311 /* IPC new permissions record type */ | 93 | #define AUDIT_IPC_SET_PERM 1311 /* IPC new permissions record type */ |
94 | #define AUDIT_MQ_OPEN 1312 /* POSIX MQ open record type */ | 94 | #define AUDIT_MQ_OPEN 1312 /* POSIX MQ open record type */ |
95 | #define AUDIT_MQ_SENDRECV 1313 /* POSIX MQ send/receive record type */ | 95 | #define AUDIT_MQ_SENDRECV 1313 /* POSIX MQ send/receive record type */ |
96 | #define AUDIT_MQ_NOTIFY 1314 /* POSIX MQ notify record type */ | 96 | #define AUDIT_MQ_NOTIFY 1314 /* POSIX MQ notify record type */ |
97 | #define AUDIT_MQ_GETSETATTR 1315 /* POSIX MQ get/set attribute record type */ | 97 | #define AUDIT_MQ_GETSETATTR 1315 /* POSIX MQ get/set attribute record type */ |
98 | #define AUDIT_KERNEL_OTHER 1316 /* For use by 3rd party modules */ | 98 | #define AUDIT_KERNEL_OTHER 1316 /* For use by 3rd party modules */ |
99 | #define AUDIT_FD_PAIR 1317 /* audit record for pipe/socketpair */ | 99 | #define AUDIT_FD_PAIR 1317 /* audit record for pipe/socketpair */ |
100 | #define AUDIT_OBJ_PID 1318 /* ptrace target */ | 100 | #define AUDIT_OBJ_PID 1318 /* ptrace target */ |
101 | #define AUDIT_TTY 1319 /* Input on an administrative TTY */ | 101 | #define AUDIT_TTY 1319 /* Input on an administrative TTY */ |
102 | #define AUDIT_EOE 1320 /* End of multi-record event */ | 102 | #define AUDIT_EOE 1320 /* End of multi-record event */ |
103 | #define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */ | 103 | #define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */ |
104 | #define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */ | 104 | #define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */ |
105 | #define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */ | 105 | #define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */ |
106 | #define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */ | 106 | #define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */ |
107 | #define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */ | 107 | #define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */ |
108 | 108 | ||
109 | #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ | 109 | #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ |
110 | #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ | 110 | #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ |
111 | #define AUDIT_AVC_PATH 1402 /* dentry, vfsmount pair from avc */ | 111 | #define AUDIT_AVC_PATH 1402 /* dentry, vfsmount pair from avc */ |
112 | #define AUDIT_MAC_POLICY_LOAD 1403 /* Policy file load */ | 112 | #define AUDIT_MAC_POLICY_LOAD 1403 /* Policy file load */ |
113 | #define AUDIT_MAC_STATUS 1404 /* Changed enforcing,permissive,off */ | 113 | #define AUDIT_MAC_STATUS 1404 /* Changed enforcing,permissive,off */ |
114 | #define AUDIT_MAC_CONFIG_CHANGE 1405 /* Changes to booleans */ | 114 | #define AUDIT_MAC_CONFIG_CHANGE 1405 /* Changes to booleans */ |
115 | #define AUDIT_MAC_UNLBL_ALLOW 1406 /* NetLabel: allow unlabeled traffic */ | 115 | #define AUDIT_MAC_UNLBL_ALLOW 1406 /* NetLabel: allow unlabeled traffic */ |
116 | #define AUDIT_MAC_CIPSOV4_ADD 1407 /* NetLabel: add CIPSOv4 DOI entry */ | 116 | #define AUDIT_MAC_CIPSOV4_ADD 1407 /* NetLabel: add CIPSOv4 DOI entry */ |
117 | #define AUDIT_MAC_CIPSOV4_DEL 1408 /* NetLabel: del CIPSOv4 DOI entry */ | 117 | #define AUDIT_MAC_CIPSOV4_DEL 1408 /* NetLabel: del CIPSOv4 DOI entry */ |
118 | #define AUDIT_MAC_MAP_ADD 1409 /* NetLabel: add LSM domain mapping */ | 118 | #define AUDIT_MAC_MAP_ADD 1409 /* NetLabel: add LSM domain mapping */ |
119 | #define AUDIT_MAC_MAP_DEL 1410 /* NetLabel: del LSM domain mapping */ | 119 | #define AUDIT_MAC_MAP_DEL 1410 /* NetLabel: del LSM domain mapping */ |
120 | #define AUDIT_MAC_IPSEC_ADDSA 1411 /* Not used */ | 120 | #define AUDIT_MAC_IPSEC_ADDSA 1411 /* Not used */ |
121 | #define AUDIT_MAC_IPSEC_DELSA 1412 /* Not used */ | 121 | #define AUDIT_MAC_IPSEC_DELSA 1412 /* Not used */ |
122 | #define AUDIT_MAC_IPSEC_ADDSPD 1413 /* Not used */ | 122 | #define AUDIT_MAC_IPSEC_ADDSPD 1413 /* Not used */ |
123 | #define AUDIT_MAC_IPSEC_DELSPD 1414 /* Not used */ | 123 | #define AUDIT_MAC_IPSEC_DELSPD 1414 /* Not used */ |
124 | #define AUDIT_MAC_IPSEC_EVENT 1415 /* Audit an IPSec event */ | 124 | #define AUDIT_MAC_IPSEC_EVENT 1415 /* Audit an IPSec event */ |
125 | #define AUDIT_MAC_UNLBL_STCADD 1416 /* NetLabel: add a static label */ | 125 | #define AUDIT_MAC_UNLBL_STCADD 1416 /* NetLabel: add a static label */ |
126 | #define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */ | 126 | #define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */ |
127 | 127 | ||
128 | #define AUDIT_FIRST_KERN_ANOM_MSG 1700 | 128 | #define AUDIT_FIRST_KERN_ANOM_MSG 1700 |
129 | #define AUDIT_LAST_KERN_ANOM_MSG 1799 | 129 | #define AUDIT_LAST_KERN_ANOM_MSG 1799 |
130 | #define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */ | 130 | #define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */ |
131 | #define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */ | 131 | #define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */ |
132 | #define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */ | 132 | #define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */ |
133 | #define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */ | 133 | #define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */ |
134 | #define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */ | 134 | #define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */ |
135 | #define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */ | 135 | #define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */ |
136 | #define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */ | 136 | #define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */ |
137 | #define AUDIT_INTEGRITY_RULE 1805 /* policy rule */ | 137 | #define AUDIT_INTEGRITY_RULE 1805 /* policy rule */ |
138 | 138 | ||
139 | #define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */ | 139 | #define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */ |
140 | 140 | ||
141 | /* Rule flags */ | 141 | /* Rule flags */ |
142 | #define AUDIT_FILTER_USER 0x00 /* Apply rule to user-generated messages */ | 142 | #define AUDIT_FILTER_USER 0x00 /* Apply rule to user-generated messages */ |
143 | #define AUDIT_FILTER_TASK 0x01 /* Apply rule at task creation (not syscall) */ | 143 | #define AUDIT_FILTER_TASK 0x01 /* Apply rule at task creation (not syscall) */ |
144 | #define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */ | 144 | #define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */ |
145 | #define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */ | 145 | #define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */ |
146 | #define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */ | 146 | #define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */ |
147 | #define AUDIT_FILTER_TYPE 0x05 /* Apply rule at audit_log_start */ | 147 | #define AUDIT_FILTER_TYPE 0x05 /* Apply rule at audit_log_start */ |
148 | 148 | ||
149 | #define AUDIT_NR_FILTERS 6 | 149 | #define AUDIT_NR_FILTERS 6 |
150 | 150 | ||
151 | #define AUDIT_FILTER_PREPEND 0x10 /* Prepend to front of list */ | 151 | #define AUDIT_FILTER_PREPEND 0x10 /* Prepend to front of list */ |
152 | 152 | ||
153 | /* Rule actions */ | 153 | /* Rule actions */ |
154 | #define AUDIT_NEVER 0 /* Do not build context if rule matches */ | 154 | #define AUDIT_NEVER 0 /* Do not build context if rule matches */ |
155 | #define AUDIT_POSSIBLE 1 /* Build context if rule matches */ | 155 | #define AUDIT_POSSIBLE 1 /* Build context if rule matches */ |
156 | #define AUDIT_ALWAYS 2 /* Generate audit record if rule matches */ | 156 | #define AUDIT_ALWAYS 2 /* Generate audit record if rule matches */ |
157 | 157 | ||
158 | /* Rule structure sizes -- if these change, different AUDIT_ADD and | 158 | /* Rule structure sizes -- if these change, different AUDIT_ADD and |
159 | * AUDIT_LIST commands must be implemented. */ | 159 | * AUDIT_LIST commands must be implemented. */ |
160 | #define AUDIT_MAX_FIELDS 64 | 160 | #define AUDIT_MAX_FIELDS 64 |
161 | #define AUDIT_MAX_KEY_LEN 256 | 161 | #define AUDIT_MAX_KEY_LEN 256 |
162 | #define AUDIT_BITMASK_SIZE 64 | 162 | #define AUDIT_BITMASK_SIZE 64 |
163 | #define AUDIT_WORD(nr) ((__u32)((nr)/32)) | 163 | #define AUDIT_WORD(nr) ((__u32)((nr)/32)) |
164 | #define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32)) | 164 | #define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32)) |
165 | 165 | ||
166 | #define AUDIT_SYSCALL_CLASSES 16 | 166 | #define AUDIT_SYSCALL_CLASSES 16 |
167 | #define AUDIT_CLASS_DIR_WRITE 0 | 167 | #define AUDIT_CLASS_DIR_WRITE 0 |
168 | #define AUDIT_CLASS_DIR_WRITE_32 1 | 168 | #define AUDIT_CLASS_DIR_WRITE_32 1 |
169 | #define AUDIT_CLASS_CHATTR 2 | 169 | #define AUDIT_CLASS_CHATTR 2 |
170 | #define AUDIT_CLASS_CHATTR_32 3 | 170 | #define AUDIT_CLASS_CHATTR_32 3 |
171 | #define AUDIT_CLASS_READ 4 | 171 | #define AUDIT_CLASS_READ 4 |
172 | #define AUDIT_CLASS_READ_32 5 | 172 | #define AUDIT_CLASS_READ_32 5 |
173 | #define AUDIT_CLASS_WRITE 6 | 173 | #define AUDIT_CLASS_WRITE 6 |
174 | #define AUDIT_CLASS_WRITE_32 7 | 174 | #define AUDIT_CLASS_WRITE_32 7 |
175 | #define AUDIT_CLASS_SIGNAL 8 | 175 | #define AUDIT_CLASS_SIGNAL 8 |
176 | #define AUDIT_CLASS_SIGNAL_32 9 | 176 | #define AUDIT_CLASS_SIGNAL_32 9 |
177 | 177 | ||
178 | /* This bitmask is used to validate user input. It represents all bits that | 178 | /* This bitmask is used to validate user input. It represents all bits that |
179 | * are currently used in an audit field constant understood by the kernel. | 179 | * are currently used in an audit field constant understood by the kernel. |
180 | * If you are adding a new #define AUDIT_<whatever>, please ensure that | 180 | * If you are adding a new #define AUDIT_<whatever>, please ensure that |
181 | * AUDIT_UNUSED_BITS is updated if need be. */ | 181 | * AUDIT_UNUSED_BITS is updated if need be. */ |
182 | #define AUDIT_UNUSED_BITS 0x07FFFC00 | 182 | #define AUDIT_UNUSED_BITS 0x07FFFC00 |
183 | 183 | ||
184 | 184 | ||
185 | /* Rule fields */ | 185 | /* Rule fields */ |
186 | /* These are useful when checking the | 186 | /* These are useful when checking the |
187 | * task structure at task creation time | 187 | * task structure at task creation time |
188 | * (AUDIT_PER_TASK). */ | 188 | * (AUDIT_PER_TASK). */ |
189 | #define AUDIT_PID 0 | 189 | #define AUDIT_PID 0 |
190 | #define AUDIT_UID 1 | 190 | #define AUDIT_UID 1 |
191 | #define AUDIT_EUID 2 | 191 | #define AUDIT_EUID 2 |
192 | #define AUDIT_SUID 3 | 192 | #define AUDIT_SUID 3 |
193 | #define AUDIT_FSUID 4 | 193 | #define AUDIT_FSUID 4 |
194 | #define AUDIT_GID 5 | 194 | #define AUDIT_GID 5 |
195 | #define AUDIT_EGID 6 | 195 | #define AUDIT_EGID 6 |
196 | #define AUDIT_SGID 7 | 196 | #define AUDIT_SGID 7 |
197 | #define AUDIT_FSGID 8 | 197 | #define AUDIT_FSGID 8 |
198 | #define AUDIT_LOGINUID 9 | 198 | #define AUDIT_LOGINUID 9 |
199 | #define AUDIT_PERS 10 | 199 | #define AUDIT_PERS 10 |
200 | #define AUDIT_ARCH 11 | 200 | #define AUDIT_ARCH 11 |
201 | #define AUDIT_MSGTYPE 12 | 201 | #define AUDIT_MSGTYPE 12 |
202 | #define AUDIT_SUBJ_USER 13 /* security label user */ | 202 | #define AUDIT_SUBJ_USER 13 /* security label user */ |
203 | #define AUDIT_SUBJ_ROLE 14 /* security label role */ | 203 | #define AUDIT_SUBJ_ROLE 14 /* security label role */ |
204 | #define AUDIT_SUBJ_TYPE 15 /* security label type */ | 204 | #define AUDIT_SUBJ_TYPE 15 /* security label type */ |
205 | #define AUDIT_SUBJ_SEN 16 /* security label sensitivity label */ | 205 | #define AUDIT_SUBJ_SEN 16 /* security label sensitivity label */ |
206 | #define AUDIT_SUBJ_CLR 17 /* security label clearance label */ | 206 | #define AUDIT_SUBJ_CLR 17 /* security label clearance label */ |
207 | #define AUDIT_PPID 18 | 207 | #define AUDIT_PPID 18 |
208 | #define AUDIT_OBJ_USER 19 | 208 | #define AUDIT_OBJ_USER 19 |
209 | #define AUDIT_OBJ_ROLE 20 | 209 | #define AUDIT_OBJ_ROLE 20 |
210 | #define AUDIT_OBJ_TYPE 21 | 210 | #define AUDIT_OBJ_TYPE 21 |
211 | #define AUDIT_OBJ_LEV_LOW 22 | 211 | #define AUDIT_OBJ_LEV_LOW 22 |
212 | #define AUDIT_OBJ_LEV_HIGH 23 | 212 | #define AUDIT_OBJ_LEV_HIGH 23 |
213 | 213 | ||
214 | /* These are ONLY useful when checking | 214 | /* These are ONLY useful when checking |
215 | * at syscall exit time (AUDIT_AT_EXIT). */ | 215 | * at syscall exit time (AUDIT_AT_EXIT). */ |
216 | #define AUDIT_DEVMAJOR 100 | 216 | #define AUDIT_DEVMAJOR 100 |
217 | #define AUDIT_DEVMINOR 101 | 217 | #define AUDIT_DEVMINOR 101 |
218 | #define AUDIT_INODE 102 | 218 | #define AUDIT_INODE 102 |
219 | #define AUDIT_EXIT 103 | 219 | #define AUDIT_EXIT 103 |
220 | #define AUDIT_SUCCESS 104 /* exit >= 0; value ignored */ | 220 | #define AUDIT_SUCCESS 104 /* exit >= 0; value ignored */ |
221 | #define AUDIT_WATCH 105 | 221 | #define AUDIT_WATCH 105 |
222 | #define AUDIT_PERM 106 | 222 | #define AUDIT_PERM 106 |
223 | #define AUDIT_DIR 107 | 223 | #define AUDIT_DIR 107 |
224 | #define AUDIT_FILETYPE 108 | 224 | #define AUDIT_FILETYPE 108 |
225 | 225 | ||
226 | #define AUDIT_ARG0 200 | 226 | #define AUDIT_ARG0 200 |
227 | #define AUDIT_ARG1 (AUDIT_ARG0+1) | 227 | #define AUDIT_ARG1 (AUDIT_ARG0+1) |
228 | #define AUDIT_ARG2 (AUDIT_ARG0+2) | 228 | #define AUDIT_ARG2 (AUDIT_ARG0+2) |
229 | #define AUDIT_ARG3 (AUDIT_ARG0+3) | 229 | #define AUDIT_ARG3 (AUDIT_ARG0+3) |
230 | 230 | ||
231 | #define AUDIT_FILTERKEY 210 | 231 | #define AUDIT_FILTERKEY 210 |
232 | 232 | ||
233 | #define AUDIT_NEGATE 0x80000000 | 233 | #define AUDIT_NEGATE 0x80000000 |
234 | 234 | ||
235 | /* These are the supported operators. | 235 | /* These are the supported operators. |
236 | * 4 2 1 8 | 236 | * 4 2 1 8 |
237 | * = > < ? | 237 | * = > < ? |
238 | * ---------- | 238 | * ---------- |
239 | * 0 0 0 0 00 nonsense | 239 | * 0 0 0 0 00 nonsense |
240 | * 0 0 0 1 08 & bit mask | 240 | * 0 0 0 1 08 & bit mask |
241 | * 0 0 1 0 10 < | 241 | * 0 0 1 0 10 < |
242 | * 0 1 0 0 20 > | 242 | * 0 1 0 0 20 > |
243 | * 0 1 1 0 30 != | 243 | * 0 1 1 0 30 != |
244 | * 1 0 0 0 40 = | 244 | * 1 0 0 0 40 = |
245 | * 1 0 0 1 48 &= bit test | 245 | * 1 0 0 1 48 &= bit test |
246 | * 1 0 1 0 50 <= | 246 | * 1 0 1 0 50 <= |
247 | * 1 1 0 0 60 >= | 247 | * 1 1 0 0 60 >= |
248 | * 1 1 1 1 78 all operators | 248 | * 1 1 1 1 78 all operators |
249 | */ | 249 | */ |
250 | #define AUDIT_BIT_MASK 0x08000000 | 250 | #define AUDIT_BIT_MASK 0x08000000 |
251 | #define AUDIT_LESS_THAN 0x10000000 | 251 | #define AUDIT_LESS_THAN 0x10000000 |
252 | #define AUDIT_GREATER_THAN 0x20000000 | 252 | #define AUDIT_GREATER_THAN 0x20000000 |
253 | #define AUDIT_NOT_EQUAL 0x30000000 | 253 | #define AUDIT_NOT_EQUAL 0x30000000 |
254 | #define AUDIT_EQUAL 0x40000000 | 254 | #define AUDIT_EQUAL 0x40000000 |
255 | #define AUDIT_BIT_TEST (AUDIT_BIT_MASK|AUDIT_EQUAL) | 255 | #define AUDIT_BIT_TEST (AUDIT_BIT_MASK|AUDIT_EQUAL) |
256 | #define AUDIT_LESS_THAN_OR_EQUAL (AUDIT_LESS_THAN|AUDIT_EQUAL) | 256 | #define AUDIT_LESS_THAN_OR_EQUAL (AUDIT_LESS_THAN|AUDIT_EQUAL) |
257 | #define AUDIT_GREATER_THAN_OR_EQUAL (AUDIT_GREATER_THAN|AUDIT_EQUAL) | 257 | #define AUDIT_GREATER_THAN_OR_EQUAL (AUDIT_GREATER_THAN|AUDIT_EQUAL) |
258 | #define AUDIT_OPERATORS (AUDIT_EQUAL|AUDIT_NOT_EQUAL|AUDIT_BIT_MASK) | 258 | #define AUDIT_OPERATORS (AUDIT_EQUAL|AUDIT_NOT_EQUAL|AUDIT_BIT_MASK) |
259 | 259 | ||
260 | enum { | 260 | enum { |
261 | Audit_equal, | 261 | Audit_equal, |
262 | Audit_not_equal, | 262 | Audit_not_equal, |
263 | Audit_bitmask, | 263 | Audit_bitmask, |
264 | Audit_bittest, | 264 | Audit_bittest, |
265 | Audit_lt, | 265 | Audit_lt, |
266 | Audit_gt, | 266 | Audit_gt, |
267 | Audit_le, | 267 | Audit_le, |
268 | Audit_ge, | 268 | Audit_ge, |
269 | Audit_bad | 269 | Audit_bad |
270 | }; | 270 | }; |
271 | 271 | ||
272 | /* Status symbols */ | 272 | /* Status symbols */ |
273 | /* Mask values */ | 273 | /* Mask values */ |
274 | #define AUDIT_STATUS_ENABLED 0x0001 | 274 | #define AUDIT_STATUS_ENABLED 0x0001 |
275 | #define AUDIT_STATUS_FAILURE 0x0002 | 275 | #define AUDIT_STATUS_FAILURE 0x0002 |
276 | #define AUDIT_STATUS_PID 0x0004 | 276 | #define AUDIT_STATUS_PID 0x0004 |
277 | #define AUDIT_STATUS_RATE_LIMIT 0x0008 | 277 | #define AUDIT_STATUS_RATE_LIMIT 0x0008 |
278 | #define AUDIT_STATUS_BACKLOG_LIMIT 0x0010 | 278 | #define AUDIT_STATUS_BACKLOG_LIMIT 0x0010 |
279 | /* Failure-to-log actions */ | 279 | /* Failure-to-log actions */ |
280 | #define AUDIT_FAIL_SILENT 0 | 280 | #define AUDIT_FAIL_SILENT 0 |
281 | #define AUDIT_FAIL_PRINTK 1 | 281 | #define AUDIT_FAIL_PRINTK 1 |
282 | #define AUDIT_FAIL_PANIC 2 | 282 | #define AUDIT_FAIL_PANIC 2 |
283 | 283 | ||
284 | /* distinguish syscall tables */ | 284 | /* distinguish syscall tables */ |
285 | #define __AUDIT_ARCH_64BIT 0x80000000 | 285 | #define __AUDIT_ARCH_64BIT 0x80000000 |
286 | #define __AUDIT_ARCH_LE 0x40000000 | 286 | #define __AUDIT_ARCH_LE 0x40000000 |
287 | #define AUDIT_ARCH_ALPHA (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) | 287 | #define AUDIT_ARCH_ALPHA (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) |
288 | #define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE) | 288 | #define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE) |
289 | #define AUDIT_ARCH_ARMEB (EM_ARM) | 289 | #define AUDIT_ARCH_ARMEB (EM_ARM) |
290 | #define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE) | 290 | #define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE) |
291 | #define AUDIT_ARCH_FRV (EM_FRV) | 291 | #define AUDIT_ARCH_FRV (EM_FRV) |
292 | #define AUDIT_ARCH_H8300 (EM_H8_300) | 292 | #define AUDIT_ARCH_H8300 (EM_H8_300) |
293 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) | 293 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) |
294 | #define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) | 294 | #define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) |
295 | #define AUDIT_ARCH_M32R (EM_M32R) | 295 | #define AUDIT_ARCH_M32R (EM_M32R) |
296 | #define AUDIT_ARCH_M68K (EM_68K) | 296 | #define AUDIT_ARCH_M68K (EM_68K) |
297 | #define AUDIT_ARCH_MIPS (EM_MIPS) | 297 | #define AUDIT_ARCH_MIPS (EM_MIPS) |
298 | #define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE) | 298 | #define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE) |
299 | #define AUDIT_ARCH_MIPS64 (EM_MIPS|__AUDIT_ARCH_64BIT) | 299 | #define AUDIT_ARCH_MIPS64 (EM_MIPS|__AUDIT_ARCH_64BIT) |
300 | #define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) | 300 | #define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) |
301 | #define AUDIT_ARCH_PARISC (EM_PARISC) | 301 | #define AUDIT_ARCH_PARISC (EM_PARISC) |
302 | #define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT) | 302 | #define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT) |
303 | #define AUDIT_ARCH_PPC (EM_PPC) | 303 | #define AUDIT_ARCH_PPC (EM_PPC) |
304 | #define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT) | 304 | #define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT) |
305 | #define AUDIT_ARCH_S390 (EM_S390) | 305 | #define AUDIT_ARCH_S390 (EM_S390) |
306 | #define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT) | 306 | #define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT) |
307 | #define AUDIT_ARCH_SH (EM_SH) | 307 | #define AUDIT_ARCH_SH (EM_SH) |
308 | #define AUDIT_ARCH_SHEL (EM_SH|__AUDIT_ARCH_LE) | 308 | #define AUDIT_ARCH_SHEL (EM_SH|__AUDIT_ARCH_LE) |
309 | #define AUDIT_ARCH_SH64 (EM_SH|__AUDIT_ARCH_64BIT) | 309 | #define AUDIT_ARCH_SH64 (EM_SH|__AUDIT_ARCH_64BIT) |
310 | #define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) | 310 | #define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) |
311 | #define AUDIT_ARCH_SPARC (EM_SPARC) | 311 | #define AUDIT_ARCH_SPARC (EM_SPARC) |
312 | #define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT) | 312 | #define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT) |
313 | #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) | 313 | #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) |
314 | 314 | ||
315 | #define AUDIT_PERM_EXEC 1 | 315 | #define AUDIT_PERM_EXEC 1 |
316 | #define AUDIT_PERM_WRITE 2 | 316 | #define AUDIT_PERM_WRITE 2 |
317 | #define AUDIT_PERM_READ 4 | 317 | #define AUDIT_PERM_READ 4 |
318 | #define AUDIT_PERM_ATTR 8 | 318 | #define AUDIT_PERM_ATTR 8 |
319 | 319 | ||
320 | struct audit_status { | 320 | struct audit_status { |
321 | __u32 mask; /* Bit mask for valid entries */ | 321 | __u32 mask; /* Bit mask for valid entries */ |
322 | __u32 enabled; /* 1 = enabled, 0 = disabled */ | 322 | __u32 enabled; /* 1 = enabled, 0 = disabled */ |
323 | __u32 failure; /* Failure-to-log action */ | 323 | __u32 failure; /* Failure-to-log action */ |
324 | __u32 pid; /* pid of auditd process */ | 324 | __u32 pid; /* pid of auditd process */ |
325 | __u32 rate_limit; /* messages rate limit (per second) */ | 325 | __u32 rate_limit; /* messages rate limit (per second) */ |
326 | __u32 backlog_limit; /* waiting messages limit */ | 326 | __u32 backlog_limit; /* waiting messages limit */ |
327 | __u32 lost; /* messages lost */ | 327 | __u32 lost; /* messages lost */ |
328 | __u32 backlog; /* messages waiting in queue */ | 328 | __u32 backlog; /* messages waiting in queue */ |
329 | }; | 329 | }; |
330 | 330 | ||
331 | struct audit_tty_status { | 331 | struct audit_tty_status { |
332 | __u32 enabled; /* 1 = enabled, 0 = disabled */ | 332 | __u32 enabled; /* 1 = enabled, 0 = disabled */ |
333 | }; | 333 | }; |
334 | 334 | ||
335 | /* audit_rule_data supports filter rules with both integer and string | 335 | /* audit_rule_data supports filter rules with both integer and string |
336 | * fields. It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and | 336 | * fields. It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and |
337 | * AUDIT_LIST_RULES requests. | 337 | * AUDIT_LIST_RULES requests. |
338 | */ | 338 | */ |
339 | struct audit_rule_data { | 339 | struct audit_rule_data { |
340 | __u32 flags; /* AUDIT_PER_{TASK,CALL}, AUDIT_PREPEND */ | 340 | __u32 flags; /* AUDIT_PER_{TASK,CALL}, AUDIT_PREPEND */ |
341 | __u32 action; /* AUDIT_NEVER, AUDIT_POSSIBLE, AUDIT_ALWAYS */ | 341 | __u32 action; /* AUDIT_NEVER, AUDIT_POSSIBLE, AUDIT_ALWAYS */ |
342 | __u32 field_count; | 342 | __u32 field_count; |
343 | __u32 mask[AUDIT_BITMASK_SIZE]; /* syscall(s) affected */ | 343 | __u32 mask[AUDIT_BITMASK_SIZE]; /* syscall(s) affected */ |
344 | __u32 fields[AUDIT_MAX_FIELDS]; | 344 | __u32 fields[AUDIT_MAX_FIELDS]; |
345 | __u32 values[AUDIT_MAX_FIELDS]; | 345 | __u32 values[AUDIT_MAX_FIELDS]; |
346 | __u32 fieldflags[AUDIT_MAX_FIELDS]; | 346 | __u32 fieldflags[AUDIT_MAX_FIELDS]; |
347 | __u32 buflen; /* total length of string fields */ | 347 | __u32 buflen; /* total length of string fields */ |
348 | char buf[0]; /* string fields buffer */ | 348 | char buf[0]; /* string fields buffer */ |
349 | }; | 349 | }; |
350 | 350 | ||
351 | /* audit_rule is supported to maintain backward compatibility with | 351 | /* audit_rule is supported to maintain backward compatibility with |
352 | * userspace. It supports integer fields only and corresponds to | 352 | * userspace. It supports integer fields only and corresponds to |
353 | * AUDIT_ADD, AUDIT_DEL and AUDIT_LIST requests. | 353 | * AUDIT_ADD, AUDIT_DEL and AUDIT_LIST requests. |
354 | */ | 354 | */ |
355 | struct audit_rule { /* for AUDIT_LIST, AUDIT_ADD, and AUDIT_DEL */ | 355 | struct audit_rule { /* for AUDIT_LIST, AUDIT_ADD, and AUDIT_DEL */ |
356 | __u32 flags; /* AUDIT_PER_{TASK,CALL}, AUDIT_PREPEND */ | 356 | __u32 flags; /* AUDIT_PER_{TASK,CALL}, AUDIT_PREPEND */ |
357 | __u32 action; /* AUDIT_NEVER, AUDIT_POSSIBLE, AUDIT_ALWAYS */ | 357 | __u32 action; /* AUDIT_NEVER, AUDIT_POSSIBLE, AUDIT_ALWAYS */ |
358 | __u32 field_count; | 358 | __u32 field_count; |
359 | __u32 mask[AUDIT_BITMASK_SIZE]; | 359 | __u32 mask[AUDIT_BITMASK_SIZE]; |
360 | __u32 fields[AUDIT_MAX_FIELDS]; | 360 | __u32 fields[AUDIT_MAX_FIELDS]; |
361 | __u32 values[AUDIT_MAX_FIELDS]; | 361 | __u32 values[AUDIT_MAX_FIELDS]; |
362 | }; | 362 | }; |
363 | 363 | ||
364 | #ifdef __KERNEL__ | 364 | #ifdef __KERNEL__ |
365 | #include <linux/sched.h> | 365 | #include <linux/sched.h> |
366 | 366 | ||
367 | struct audit_sig_info { | 367 | struct audit_sig_info { |
368 | uid_t uid; | 368 | uid_t uid; |
369 | pid_t pid; | 369 | pid_t pid; |
370 | char ctx[0]; | 370 | char ctx[0]; |
371 | }; | 371 | }; |
372 | 372 | ||
373 | struct audit_buffer; | 373 | struct audit_buffer; |
374 | struct audit_context; | 374 | struct audit_context; |
375 | struct inode; | 375 | struct inode; |
376 | struct netlink_skb_parms; | 376 | struct netlink_skb_parms; |
377 | struct path; | 377 | struct path; |
378 | struct linux_binprm; | 378 | struct linux_binprm; |
379 | struct mq_attr; | 379 | struct mq_attr; |
380 | struct mqstat; | 380 | struct mqstat; |
381 | struct audit_watch; | 381 | struct audit_watch; |
382 | struct audit_tree; | 382 | struct audit_tree; |
383 | 383 | ||
384 | struct audit_krule { | 384 | struct audit_krule { |
385 | int vers_ops; | 385 | int vers_ops; |
386 | u32 flags; | 386 | u32 flags; |
387 | u32 listnr; | 387 | u32 listnr; |
388 | u32 action; | 388 | u32 action; |
389 | u32 mask[AUDIT_BITMASK_SIZE]; | 389 | u32 mask[AUDIT_BITMASK_SIZE]; |
390 | u32 buflen; /* for data alloc on list rules */ | 390 | u32 buflen; /* for data alloc on list rules */ |
391 | u32 field_count; | 391 | u32 field_count; |
392 | char *filterkey; /* ties events to rules */ | 392 | char *filterkey; /* ties events to rules */ |
393 | struct audit_field *fields; | 393 | struct audit_field *fields; |
394 | struct audit_field *arch_f; /* quick access to arch field */ | 394 | struct audit_field *arch_f; /* quick access to arch field */ |
395 | struct audit_field *inode_f; /* quick access to an inode field */ | 395 | struct audit_field *inode_f; /* quick access to an inode field */ |
396 | struct audit_watch *watch; /* associated watch */ | 396 | struct audit_watch *watch; /* associated watch */ |
397 | struct audit_tree *tree; /* associated watched tree */ | 397 | struct audit_tree *tree; /* associated watched tree */ |
398 | struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ | 398 | struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ |
399 | struct list_head list; /* for AUDIT_LIST* purposes only */ | 399 | struct list_head list; /* for AUDIT_LIST* purposes only */ |
400 | u64 prio; | 400 | u64 prio; |
401 | }; | 401 | }; |
402 | 402 | ||
403 | struct audit_field { | 403 | struct audit_field { |
404 | u32 type; | 404 | u32 type; |
405 | u32 val; | 405 | u32 val; |
406 | u32 op; | 406 | u32 op; |
407 | char *lsm_str; | 407 | char *lsm_str; |
408 | void *lsm_rule; | 408 | void *lsm_rule; |
409 | }; | 409 | }; |
410 | 410 | ||
411 | #define AUDITSC_INVALID 0 | 411 | #define AUDITSC_INVALID 0 |
412 | #define AUDITSC_SUCCESS 1 | 412 | #define AUDITSC_SUCCESS 1 |
413 | #define AUDITSC_FAILURE 2 | 413 | #define AUDITSC_FAILURE 2 |
414 | #define AUDITSC_RESULT(x) ( ((long)(x))<0?AUDITSC_FAILURE:AUDITSC_SUCCESS ) | 414 | #define AUDITSC_RESULT(x) ( ((long)(x))<0?AUDITSC_FAILURE:AUDITSC_SUCCESS ) |
415 | extern int __init audit_register_class(int class, unsigned *list); | 415 | extern int __init audit_register_class(int class, unsigned *list); |
416 | extern int audit_classify_syscall(int abi, unsigned syscall); | 416 | extern int audit_classify_syscall(int abi, unsigned syscall); |
417 | extern int audit_classify_arch(int arch); | 417 | extern int audit_classify_arch(int arch); |
418 | #ifdef CONFIG_AUDITSYSCALL | 418 | #ifdef CONFIG_AUDITSYSCALL |
419 | /* These are defined in auditsc.c */ | 419 | /* These are defined in auditsc.c */ |
420 | /* Public API */ | 420 | /* Public API */ |
421 | extern void audit_finish_fork(struct task_struct *child); | 421 | extern void audit_finish_fork(struct task_struct *child); |
422 | extern int audit_alloc(struct task_struct *task); | 422 | extern int audit_alloc(struct task_struct *task); |
423 | extern void audit_free(struct task_struct *task); | 423 | extern void audit_free(struct task_struct *task); |
424 | extern void audit_syscall_entry(int arch, | 424 | extern void audit_syscall_entry(int arch, |
425 | int major, unsigned long a0, unsigned long a1, | 425 | int major, unsigned long a0, unsigned long a1, |
426 | unsigned long a2, unsigned long a3); | 426 | unsigned long a2, unsigned long a3); |
427 | extern void audit_syscall_exit(int failed, long return_code); | 427 | extern void audit_syscall_exit(int failed, long return_code); |
428 | extern void __audit_getname(const char *name); | 428 | extern void __audit_getname(const char *name); |
429 | extern void audit_putname(const char *name); | 429 | extern void audit_putname(const char *name); |
430 | extern void __audit_inode(const char *name, const struct dentry *dentry); | 430 | extern void __audit_inode(const char *name, const struct dentry *dentry); |
431 | extern void __audit_inode_child(const struct dentry *dentry, | 431 | extern void __audit_inode_child(const struct dentry *dentry, |
432 | const struct inode *parent); | 432 | const struct inode *parent); |
433 | extern void __audit_ptrace(struct task_struct *t); | 433 | extern void __audit_ptrace(struct task_struct *t); |
434 | 434 | ||
435 | static inline int audit_dummy_context(void) | 435 | static inline int audit_dummy_context(void) |
436 | { | 436 | { |
437 | void *p = current->audit_context; | 437 | void *p = current->audit_context; |
438 | return !p || *(int *)p; | 438 | return !p || *(int *)p; |
439 | } | 439 | } |
440 | static inline void audit_getname(const char *name) | 440 | static inline void audit_getname(const char *name) |
441 | { | 441 | { |
442 | if (unlikely(!audit_dummy_context())) | 442 | if (unlikely(!audit_dummy_context())) |
443 | __audit_getname(name); | 443 | __audit_getname(name); |
444 | } | 444 | } |
445 | static inline void audit_inode(const char *name, const struct dentry *dentry) { | 445 | static inline void audit_inode(const char *name, const struct dentry *dentry) { |
446 | if (unlikely(!audit_dummy_context())) | 446 | if (unlikely(!audit_dummy_context())) |
447 | __audit_inode(name, dentry); | 447 | __audit_inode(name, dentry); |
448 | } | 448 | } |
449 | static inline void audit_inode_child(const struct dentry *dentry, | 449 | static inline void audit_inode_child(const struct dentry *dentry, |
450 | const struct inode *parent) { | 450 | const struct inode *parent) { |
451 | if (unlikely(!audit_dummy_context())) | 451 | if (unlikely(!audit_dummy_context())) |
452 | __audit_inode_child(dentry, parent); | 452 | __audit_inode_child(dentry, parent); |
453 | } | 453 | } |
454 | void audit_core_dumps(long signr); | 454 | void audit_core_dumps(long signr); |
455 | 455 | ||
456 | static inline void audit_ptrace(struct task_struct *t) | 456 | static inline void audit_ptrace(struct task_struct *t) |
457 | { | 457 | { |
458 | if (unlikely(!audit_dummy_context())) | 458 | if (unlikely(!audit_dummy_context())) |
459 | __audit_ptrace(t); | 459 | __audit_ptrace(t); |
460 | } | 460 | } |
461 | 461 | ||
462 | /* Private API (for audit.c only) */ | 462 | /* Private API (for audit.c only) */ |
463 | extern unsigned int audit_serial(void); | 463 | extern unsigned int audit_serial(void); |
464 | extern int auditsc_get_stamp(struct audit_context *ctx, | 464 | extern int auditsc_get_stamp(struct audit_context *ctx, |
465 | struct timespec *t, unsigned int *serial); | 465 | struct timespec *t, unsigned int *serial); |
466 | extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); | 466 | extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); |
467 | #define audit_get_loginuid(t) ((t)->loginuid) | 467 | #define audit_get_loginuid(t) ((t)->loginuid) |
468 | #define audit_get_sessionid(t) ((t)->sessionid) | 468 | #define audit_get_sessionid(t) ((t)->sessionid) |
469 | extern void audit_log_task_context(struct audit_buffer *ab); | 469 | extern void audit_log_task_context(struct audit_buffer *ab); |
470 | extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); | 470 | extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); |
471 | extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); | 471 | extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); |
472 | extern int audit_bprm(struct linux_binprm *bprm); | 472 | extern int audit_bprm(struct linux_binprm *bprm); |
473 | extern void audit_socketcall(int nargs, unsigned long *args); | 473 | extern void audit_socketcall(int nargs, unsigned long *args); |
474 | extern int audit_sockaddr(int len, void *addr); | 474 | extern int audit_sockaddr(int len, void *addr); |
475 | extern void __audit_fd_pair(int fd1, int fd2); | 475 | extern void __audit_fd_pair(int fd1, int fd2); |
476 | extern int audit_set_macxattr(const char *name); | 476 | extern int audit_set_macxattr(const char *name); |
477 | extern void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr); | 477 | extern void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr); |
478 | extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout); | 478 | extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout); |
479 | extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); | 479 | extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); |
480 | extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); | 480 | extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); |
481 | extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, | 481 | extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, |
482 | const struct cred *new, | 482 | const struct cred *new, |
483 | const struct cred *old); | 483 | const struct cred *old); |
484 | extern void __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old); | 484 | extern void __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old); |
485 | extern void __audit_mmap_fd(int fd, int flags); | 485 | extern void __audit_mmap_fd(int fd, int flags); |
486 | 486 | ||
487 | static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) | 487 | static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) |
488 | { | 488 | { |
489 | if (unlikely(!audit_dummy_context())) | 489 | if (unlikely(!audit_dummy_context())) |
490 | __audit_ipc_obj(ipcp); | 490 | __audit_ipc_obj(ipcp); |
491 | } | 491 | } |
492 | static inline void audit_fd_pair(int fd1, int fd2) | 492 | static inline void audit_fd_pair(int fd1, int fd2) |
493 | { | 493 | { |
494 | if (unlikely(!audit_dummy_context())) | 494 | if (unlikely(!audit_dummy_context())) |
495 | __audit_fd_pair(fd1, fd2); | 495 | __audit_fd_pair(fd1, fd2); |
496 | } | 496 | } |
497 | static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) | 497 | static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) |
498 | { | 498 | { |
499 | if (unlikely(!audit_dummy_context())) | 499 | if (unlikely(!audit_dummy_context())) |
500 | __audit_ipc_set_perm(qbytes, uid, gid, mode); | 500 | __audit_ipc_set_perm(qbytes, uid, gid, mode); |
501 | } | 501 | } |
502 | static inline void audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr) | 502 | static inline void audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr) |
503 | { | 503 | { |
504 | if (unlikely(!audit_dummy_context())) | 504 | if (unlikely(!audit_dummy_context())) |
505 | __audit_mq_open(oflag, mode, attr); | 505 | __audit_mq_open(oflag, mode, attr); |
506 | } | 506 | } |
507 | static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) | 507 | static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) |
508 | { | 508 | { |
509 | if (unlikely(!audit_dummy_context())) | 509 | if (unlikely(!audit_dummy_context())) |
510 | __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); | 510 | __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); |
511 | } | 511 | } |
512 | static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) | 512 | static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) |
513 | { | 513 | { |
514 | if (unlikely(!audit_dummy_context())) | 514 | if (unlikely(!audit_dummy_context())) |
515 | __audit_mq_notify(mqdes, notification); | 515 | __audit_mq_notify(mqdes, notification); |
516 | } | 516 | } |
517 | static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) | 517 | static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) |
518 | { | 518 | { |
519 | if (unlikely(!audit_dummy_context())) | 519 | if (unlikely(!audit_dummy_context())) |
520 | __audit_mq_getsetattr(mqdes, mqstat); | 520 | __audit_mq_getsetattr(mqdes, mqstat); |
521 | } | 521 | } |
522 | 522 | ||
523 | static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, | 523 | static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, |
524 | const struct cred *new, | 524 | const struct cred *new, |
525 | const struct cred *old) | 525 | const struct cred *old) |
526 | { | 526 | { |
527 | if (unlikely(!audit_dummy_context())) | 527 | if (unlikely(!audit_dummy_context())) |
528 | return __audit_log_bprm_fcaps(bprm, new, old); | 528 | return __audit_log_bprm_fcaps(bprm, new, old); |
529 | return 0; | 529 | return 0; |
530 | } | 530 | } |
531 | 531 | ||
532 | static inline void audit_log_capset(pid_t pid, const struct cred *new, | 532 | static inline void audit_log_capset(pid_t pid, const struct cred *new, |
533 | const struct cred *old) | 533 | const struct cred *old) |
534 | { | 534 | { |
535 | if (unlikely(!audit_dummy_context())) | 535 | if (unlikely(!audit_dummy_context())) |
536 | __audit_log_capset(pid, new, old); | 536 | __audit_log_capset(pid, new, old); |
537 | } | 537 | } |
538 | 538 | ||
539 | static inline void audit_mmap_fd(int fd, int flags) | 539 | static inline void audit_mmap_fd(int fd, int flags) |
540 | { | 540 | { |
541 | if (unlikely(!audit_dummy_context())) | 541 | if (unlikely(!audit_dummy_context())) |
542 | __audit_mmap_fd(fd, flags); | 542 | __audit_mmap_fd(fd, flags); |
543 | } | 543 | } |
544 | 544 | ||
545 | extern int audit_n_rules; | 545 | extern int audit_n_rules; |
546 | extern int audit_signals; | 546 | extern int audit_signals; |
547 | #else | 547 | #else |
548 | #define audit_finish_fork(t) | 548 | #define audit_finish_fork(t) |
549 | #define audit_alloc(t) ({ 0; }) | 549 | #define audit_alloc(t) ({ 0; }) |
550 | #define audit_free(t) do { ; } while (0) | 550 | #define audit_free(t) do { ; } while (0) |
551 | #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0) | 551 | #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0) |
552 | #define audit_syscall_exit(f,r) do { ; } while (0) | 552 | #define audit_syscall_exit(f,r) do { ; } while (0) |
553 | #define audit_dummy_context() 1 | 553 | #define audit_dummy_context() 1 |
554 | #define audit_getname(n) do { ; } while (0) | 554 | #define audit_getname(n) do { ; } while (0) |
555 | #define audit_putname(n) do { ; } while (0) | 555 | #define audit_putname(n) do { ; } while (0) |
556 | #define __audit_inode(n,d) do { ; } while (0) | 556 | #define __audit_inode(n,d) do { ; } while (0) |
557 | #define __audit_inode_child(i,p) do { ; } while (0) | 557 | #define __audit_inode_child(i,p) do { ; } while (0) |
558 | #define audit_inode(n,d) do { (void)(d); } while (0) | 558 | #define audit_inode(n,d) do { (void)(d); } while (0) |
559 | #define audit_inode_child(i,p) do { ; } while (0) | 559 | #define audit_inode_child(i,p) do { ; } while (0) |
560 | #define audit_core_dumps(i) do { ; } while (0) | 560 | #define audit_core_dumps(i) do { ; } while (0) |
561 | #define auditsc_get_stamp(c,t,s) (0) | 561 | #define auditsc_get_stamp(c,t,s) (0) |
562 | #define audit_get_loginuid(t) (-1) | 562 | #define audit_get_loginuid(t) (-1) |
563 | #define audit_get_sessionid(t) (-1) | 563 | #define audit_get_sessionid(t) (-1) |
564 | #define audit_log_task_context(b) do { ; } while (0) | 564 | #define audit_log_task_context(b) do { ; } while (0) |
565 | #define audit_ipc_obj(i) ((void)0) | 565 | #define audit_ipc_obj(i) ((void)0) |
566 | #define audit_ipc_set_perm(q,u,g,m) ((void)0) | 566 | #define audit_ipc_set_perm(q,u,g,m) ((void)0) |
567 | #define audit_bprm(p) ({ 0; }) | 567 | #define audit_bprm(p) ({ 0; }) |
568 | #define audit_socketcall(n,a) ((void)0) | 568 | #define audit_socketcall(n,a) ((void)0) |
569 | #define audit_fd_pair(n,a) ((void)0) | 569 | #define audit_fd_pair(n,a) ((void)0) |
570 | #define audit_sockaddr(len, addr) ({ 0; }) | 570 | #define audit_sockaddr(len, addr) ({ 0; }) |
571 | #define audit_set_macxattr(n) do { ; } while (0) | 571 | #define audit_set_macxattr(n) do { ; } while (0) |
572 | #define audit_mq_open(o,m,a) ((void)0) | 572 | #define audit_mq_open(o,m,a) ((void)0) |
573 | #define audit_mq_sendrecv(d,l,p,t) ((void)0) | 573 | #define audit_mq_sendrecv(d,l,p,t) ((void)0) |
574 | #define audit_mq_notify(d,n) ((void)0) | 574 | #define audit_mq_notify(d,n) ((void)0) |
575 | #define audit_mq_getsetattr(d,s) ((void)0) | 575 | #define audit_mq_getsetattr(d,s) ((void)0) |
576 | #define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; }) | 576 | #define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; }) |
577 | #define audit_log_capset(pid, ncr, ocr) ((void)0) | 577 | #define audit_log_capset(pid, ncr, ocr) ((void)0) |
578 | #define audit_mmap_fd(fd, flags) ((void)0) | 578 | #define audit_mmap_fd(fd, flags) ((void)0) |
579 | #define audit_ptrace(t) ((void)0) | 579 | #define audit_ptrace(t) ((void)0) |
580 | #define audit_n_rules 0 | 580 | #define audit_n_rules 0 |
581 | #define audit_signals 0 | 581 | #define audit_signals 0 |
582 | #endif | 582 | #endif |
583 | 583 | ||
584 | #ifdef CONFIG_AUDIT | 584 | #ifdef CONFIG_AUDIT |
585 | /* These are defined in audit.c */ | 585 | /* These are defined in audit.c */ |
586 | /* Public API */ | 586 | /* Public API */ |
587 | extern void audit_log(struct audit_context *ctx, gfp_t gfp_mask, | 587 | extern void audit_log(struct audit_context *ctx, gfp_t gfp_mask, |
588 | int type, const char *fmt, ...) | 588 | int type, const char *fmt, ...) |
589 | __attribute__((format(printf,4,5))); | 589 | __attribute__((format(printf,4,5))); |
590 | 590 | ||
591 | extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); | 591 | extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); |
592 | extern void audit_log_format(struct audit_buffer *ab, | 592 | extern void audit_log_format(struct audit_buffer *ab, |
593 | const char *fmt, ...) | 593 | const char *fmt, ...) |
594 | __attribute__((format(printf,2,3))); | 594 | __attribute__((format(printf,2,3))); |
595 | extern void audit_log_end(struct audit_buffer *ab); | 595 | extern void audit_log_end(struct audit_buffer *ab); |
596 | extern int audit_string_contains_control(const char *string, | 596 | extern int audit_string_contains_control(const char *string, |
597 | size_t len); | 597 | size_t len); |
598 | extern void audit_log_n_hex(struct audit_buffer *ab, | 598 | extern void audit_log_n_hex(struct audit_buffer *ab, |
599 | const unsigned char *buf, | 599 | const unsigned char *buf, |
600 | size_t len); | 600 | size_t len); |
601 | extern void audit_log_n_string(struct audit_buffer *ab, | 601 | extern void audit_log_n_string(struct audit_buffer *ab, |
602 | const char *buf, | 602 | const char *buf, |
603 | size_t n); | 603 | size_t n); |
604 | #define audit_log_string(a,b) audit_log_n_string(a, b, strlen(b)); | 604 | #define audit_log_string(a,b) audit_log_n_string(a, b, strlen(b)); |
605 | extern void audit_log_n_untrustedstring(struct audit_buffer *ab, | 605 | extern void audit_log_n_untrustedstring(struct audit_buffer *ab, |
606 | const char *string, | 606 | const char *string, |
607 | size_t n); | 607 | size_t n); |
608 | extern void audit_log_untrustedstring(struct audit_buffer *ab, | 608 | extern void audit_log_untrustedstring(struct audit_buffer *ab, |
609 | const char *string); | 609 | const char *string); |
610 | extern void audit_log_d_path(struct audit_buffer *ab, | 610 | extern void audit_log_d_path(struct audit_buffer *ab, |
611 | const char *prefix, | 611 | const char *prefix, |
612 | struct path *path); | 612 | struct path *path); |
613 | extern void audit_log_key(struct audit_buffer *ab, | 613 | extern void audit_log_key(struct audit_buffer *ab, |
614 | char *key); | 614 | char *key); |
615 | extern void audit_log_lost(const char *message); | 615 | extern void audit_log_lost(const char *message); |
616 | #ifdef CONFIG_SECURITY | ||
617 | extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); | ||
618 | #else | ||
619 | #define audit_log_secctx(b,s) do { ; } while (0) | ||
620 | #endif | ||
621 | |||
616 | extern int audit_update_lsm_rules(void); | 622 | extern int audit_update_lsm_rules(void); |
617 | 623 | ||
618 | /* Private API (for audit.c only) */ | 624 | /* Private API (for audit.c only) */ |
619 | extern int audit_filter_user(struct netlink_skb_parms *cb); | 625 | extern int audit_filter_user(struct netlink_skb_parms *cb); |
620 | extern int audit_filter_type(int type); | 626 | extern int audit_filter_type(int type); |
621 | extern int audit_receive_filter(int type, int pid, int uid, int seq, | 627 | extern int audit_receive_filter(int type, int pid, int uid, int seq, |
622 | void *data, size_t datasz, uid_t loginuid, | 628 | void *data, size_t datasz, uid_t loginuid, |
623 | u32 sessionid, u32 sid); | 629 | u32 sessionid, u32 sid); |
624 | extern int audit_enabled; | 630 | extern int audit_enabled; |
625 | #else | 631 | #else |
626 | #define audit_log(c,g,t,f,...) do { ; } while (0) | 632 | #define audit_log(c,g,t,f,...) do { ; } while (0) |
627 | #define audit_log_start(c,g,t) ({ NULL; }) | 633 | #define audit_log_start(c,g,t) ({ NULL; }) |
628 | #define audit_log_vformat(b,f,a) do { ; } while (0) | 634 | #define audit_log_vformat(b,f,a) do { ; } while (0) |
629 | #define audit_log_format(b,f,...) do { ; } while (0) | 635 | #define audit_log_format(b,f,...) do { ; } while (0) |
630 | #define audit_log_end(b) do { ; } while (0) | 636 | #define audit_log_end(b) do { ; } while (0) |
631 | #define audit_log_n_hex(a,b,l) do { ; } while (0) | 637 | #define audit_log_n_hex(a,b,l) do { ; } while (0) |
632 | #define audit_log_n_string(a,c,l) do { ; } while (0) | 638 | #define audit_log_n_string(a,c,l) do { ; } while (0) |
633 | #define audit_log_string(a,c) do { ; } while (0) | 639 | #define audit_log_string(a,c) do { ; } while (0) |
634 | #define audit_log_n_untrustedstring(a,n,s) do { ; } while (0) | 640 | #define audit_log_n_untrustedstring(a,n,s) do { ; } while (0) |
635 | #define audit_log_untrustedstring(a,s) do { ; } while (0) | 641 | #define audit_log_untrustedstring(a,s) do { ; } while (0) |
636 | #define audit_log_d_path(b, p, d) do { ; } while (0) | 642 | #define audit_log_d_path(b, p, d) do { ; } while (0) |
637 | #define audit_log_key(b, k) do { ; } while (0) | 643 | #define audit_log_key(b, k) do { ; } while (0) |
644 | #define audit_log_secctx(b,s) do { ; } while (0) | ||
638 | #define audit_enabled 0 | 645 | #define audit_enabled 0 |
639 | #endif | 646 | #endif |
640 | #endif | 647 | #endif |
641 | #endif | 648 | #endif |
642 | 649 |
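The audit_log_secctx() declaration added above slots into the usual audit_buffer lifecycle: start a record, format fields, append the security context resolved from the secid, end the record. A minimal sketch of a caller, assuming an atomic context; AUDIT_NETFILTER_PKT is used purely as an illustrative record type, and when CONFIG_SECURITY or audit support is compiled out the calls collapse to the do-nothing macros above:

	#include <linux/audit.h>
	#include <linux/gfp.h>

	static void log_skb_secctx(u32 secid)
	{
		struct audit_buffer *ab;

		ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
		if (!ab)
			return;		/* audit disabled or allocation failed */
		audit_log_format(ab, "secid=%u", secid);
		audit_log_secctx(ab, secid);	/* appends the LSM context string for secid */
		audit_log_end(ab);
	}
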
include/linux/netfilter/ipset/ip_set_ahash.h
1 | #ifndef _IP_SET_AHASH_H | 1 | #ifndef _IP_SET_AHASH_H |
2 | #define _IP_SET_AHASH_H | 2 | #define _IP_SET_AHASH_H |
3 | 3 | ||
4 | #include <linux/rcupdate.h> | 4 | #include <linux/rcupdate.h> |
5 | #include <linux/jhash.h> | 5 | #include <linux/jhash.h> |
6 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 6 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
7 | 7 | ||
8 | #define CONCAT(a, b, c) a##b##c | 8 | #define CONCAT(a, b, c) a##b##c |
9 | #define TOKEN(a, b, c) CONCAT(a, b, c) | 9 | #define TOKEN(a, b, c) CONCAT(a, b, c) |
10 | 10 | ||
11 | #define type_pf_next TOKEN(TYPE, PF, _elem) | 11 | #define type_pf_next TOKEN(TYPE, PF, _elem) |
12 | 12 | ||
13 | /* Hashing which uses arrays to resolve clashing. The hash table is resized | 13 | /* Hashing which uses arrays to resolve clashing. The hash table is resized |
14 | * (doubled) when searching becomes too long. | 14 | * (doubled) when searching becomes too long. |
15 | * Internally jhash is used with the assumption that the size of the | 15 | * Internally jhash is used with the assumption that the size of the |
16 | * stored data is a multiple of sizeof(u32). If storage supports timeout, | 16 | * stored data is a multiple of sizeof(u32). If storage supports timeout, |
17 | * the timeout field must be the last one in the data structure - that field | 17 | * the timeout field must be the last one in the data structure - that field |
18 | * is ignored when computing the hash key. | 18 | * is ignored when computing the hash key. |
19 | * | 19 | * |
20 | * Readers and resizing | 20 | * Readers and resizing |
21 | * | 21 | * |
22 | * Resizing can be triggered by userspace command only, and those | 22 | * Resizing can be triggered by userspace command only, and those |
23 | * are serialized by the nfnl mutex. During resizing the set is | 23 | * are serialized by the nfnl mutex. During resizing the set is |
24 | * read-locked, so the only possible concurrent operations are | 24 | * read-locked, so the only possible concurrent operations are |
25 | * the kernel side readers. Those must be protected by proper RCU locking. | 25 | * the kernel side readers. Those must be protected by proper RCU locking. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | /* Number of elements to store in an initial array block */ | 28 | /* Number of elements to store in an initial array block */ |
29 | #define AHASH_INIT_SIZE 4 | 29 | #define AHASH_INIT_SIZE 4 |
30 | /* Max number of elements to store in an array block */ | 30 | /* Max number of elements to store in an array block */ |
31 | #define AHASH_MAX_SIZE (3*4) | 31 | #define AHASH_MAX_SIZE (3*AHASH_INIT_SIZE) |
32 | 32 | ||
33 | /* Max number of elements can be tuned */ | ||
34 | #ifdef IP_SET_HASH_WITH_MULTI | ||
35 | #define AHASH_MAX(h) ((h)->ahash_max) | ||
36 | |||
37 | static inline u8 | ||
38 | tune_ahash_max(u8 curr, u32 multi) | ||
39 | { | ||
40 | u32 n; | ||
41 | |||
42 | if (multi < curr) | ||
43 | return curr; | ||
44 | |||
45 | n = curr + AHASH_INIT_SIZE; | ||
46 | /* Currently, at listing one hash bucket must fit into a message. | ||
47 | * Therefore we have a hard limit here. | ||
48 | */ | ||
49 | return n > curr && n <= 64 ? n : curr; | ||
50 | } | ||
51 | #define TUNE_AHASH_MAX(h, multi) \ | ||
52 | ((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi)) | ||
53 | #else | ||
54 | #define AHASH_MAX(h) AHASH_MAX_SIZE | ||
55 | #define TUNE_AHASH_MAX(h, multi) | ||
56 | #endif | ||
57 | |||
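The tuning helper above only ever widens the per-bucket limit by AHASH_INIT_SIZE and never past 64, so one bucket still fits into a single netlink message when the set is listed. A standalone plain-C sketch of the resulting growth sequence, assuming the counter passed in as multi has caught up with the current limit on every call:

	#include <stdio.h>

	#define AHASH_INIT_SIZE 4
	#define AHASH_MAX_SIZE  (3 * AHASH_INIT_SIZE)

	/* Mirror of tune_ahash_max(): the limit grows only when the probe
	 * counter has caught up with it, and never beyond 64. */
	static unsigned char tune(unsigned char curr, unsigned int multi)
	{
		unsigned int n;

		if (multi < curr)
			return curr;
		n = curr + AHASH_INIT_SIZE;
		return n > curr && n <= 64 ? n : curr;
	}

	int main(void)
	{
		unsigned char max = AHASH_MAX_SIZE;
		int i;

		/* Pretend every bucket keeps saturating: 12 16 20 ... 60 64 64 */
		for (i = 0; i < 16; i++) {
			printf("%u ", max);
			max = tune(max, max);
		}
		printf("\n");
		return 0;
	}
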
33 | /* A hash bucket */ | 58 | /* A hash bucket */ |
34 | struct hbucket { | 59 | struct hbucket { |
35 | void *value; /* the array of the values */ | 60 | void *value; /* the array of the values */ |
36 | u8 size; /* size of the array */ | 61 | u8 size; /* size of the array */ |
37 | u8 pos; /* position of the first free entry */ | 62 | u8 pos; /* position of the first free entry */ |
38 | }; | 63 | }; |
39 | 64 | ||
40 | /* The hash table: the table size stored here in order to make resizing easy */ | 65 | /* The hash table: the table size stored here in order to make resizing easy */ |
41 | struct htable { | 66 | struct htable { |
42 | u8 htable_bits; /* size of hash table == 2^htable_bits */ | 67 | u8 htable_bits; /* size of hash table == 2^htable_bits */ |
43 | struct hbucket bucket[0]; /* hashtable buckets */ | 68 | struct hbucket bucket[0]; /* hashtable buckets */ |
44 | }; | 69 | }; |
45 | 70 | ||
46 | #define hbucket(h, i) (&((h)->bucket[i])) | 71 | #define hbucket(h, i) (&((h)->bucket[i])) |
47 | 72 | ||
48 | /* Book-keeping of the prefixes added to the set */ | 73 | /* Book-keeping of the prefixes added to the set */ |
49 | struct ip_set_hash_nets { | 74 | struct ip_set_hash_nets { |
50 | u8 cidr; /* the different cidr values in the set */ | 75 | u8 cidr; /* the different cidr values in the set */ |
51 | u32 nets; /* number of elements per cidr */ | 76 | u32 nets; /* number of elements per cidr */ |
52 | }; | 77 | }; |
53 | 78 | ||
54 | /* The generic ip_set hash structure */ | 79 | /* The generic ip_set hash structure */ |
55 | struct ip_set_hash { | 80 | struct ip_set_hash { |
56 | struct htable *table; /* the hash table */ | 81 | struct htable *table; /* the hash table */ |
57 | u32 maxelem; /* max elements in the hash */ | 82 | u32 maxelem; /* max elements in the hash */ |
58 | u32 elements; /* current element (vs timeout) */ | 83 | u32 elements; /* current element (vs timeout) */ |
59 | u32 initval; /* random jhash init value */ | 84 | u32 initval; /* random jhash init value */ |
60 | u32 timeout; /* timeout value, if enabled */ | 85 | u32 timeout; /* timeout value, if enabled */ |
61 | struct timer_list gc; /* garbage collection when timeout enabled */ | 86 | struct timer_list gc; /* garbage collection when timeout enabled */ |
62 | struct type_pf_next next; /* temporary storage for uadd */ | 87 | struct type_pf_next next; /* temporary storage for uadd */ |
88 | #ifdef IP_SET_HASH_WITH_MULTI | ||
89 | u8 ahash_max; /* max elements in an array block */ | ||
90 | #endif | ||
63 | #ifdef IP_SET_HASH_WITH_NETMASK | 91 | #ifdef IP_SET_HASH_WITH_NETMASK |
64 | u8 netmask; /* netmask value for subnets to store */ | 92 | u8 netmask; /* netmask value for subnets to store */ |
65 | #endif | 93 | #endif |
66 | #ifdef IP_SET_HASH_WITH_RBTREE | 94 | #ifdef IP_SET_HASH_WITH_RBTREE |
67 | struct rb_root rbtree; | 95 | struct rb_root rbtree; |
68 | #endif | 96 | #endif |
69 | #ifdef IP_SET_HASH_WITH_NETS | 97 | #ifdef IP_SET_HASH_WITH_NETS |
70 | struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */ | 98 | struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */ |
71 | #endif | 99 | #endif |
72 | }; | 100 | }; |
73 | 101 | ||
74 | /* Compute htable_bits from the user input parameter hashsize */ | 102 | /* Compute htable_bits from the user input parameter hashsize */ |
75 | static u8 | 103 | static u8 |
76 | htable_bits(u32 hashsize) | 104 | htable_bits(u32 hashsize) |
77 | { | 105 | { |
78 | /* Assume that hashsize == 2^htable_bits */ | 106 | /* Assume that hashsize == 2^htable_bits */ |
79 | u8 bits = fls(hashsize - 1); | 107 | u8 bits = fls(hashsize - 1); |
80 | if (jhash_size(bits) != hashsize) | 108 | if (jhash_size(bits) != hashsize) |
81 | /* Round up to the first 2^n value */ | 109 | /* Round up to the first 2^n value */ |
82 | bits = fls(hashsize); | 110 | bits = fls(hashsize); |
83 | 111 | ||
84 | return bits; | 112 | return bits; |
85 | } | 113 | } |
86 | 114 | ||
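htable_bits() rounds the user-supplied hashsize up to the next power of two, expressed as a bit count. A plain-C sketch of the same rounding with a trivial fls() stand-in, just to show a few concrete values:

	#include <stdio.h>

	/* Stand-ins: fls() is the index of the highest set bit (fls(0) == 0),
	 * jhash_size(n) is 1 << n. */
	static unsigned int fls_(unsigned int x)
	{
		unsigned int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	static unsigned char htable_bits_(unsigned int hashsize)
	{
		unsigned char bits = fls_(hashsize - 1);

		if ((1u << bits) != hashsize)
			bits = fls_(hashsize);	/* round up to the next 2^n */
		return bits;
	}

	int main(void)
	{
		printf("%u %u %u\n",
		       htable_bits_(1000),	/* 10 -> table of 1024 buckets */
		       htable_bits_(1024),	/* 10, already a power of two */
		       htable_bits_(1025));	/* 11 -> table of 2048 buckets */
		return 0;
	}
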
87 | #ifdef IP_SET_HASH_WITH_NETS | 115 | #ifdef IP_SET_HASH_WITH_NETS |
88 | 116 | ||
89 | #define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128) | 117 | #define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128) |
90 | 118 | ||
91 | /* Network cidr size book keeping when the hash stores different | 119 | /* Network cidr size book keeping when the hash stores different |
92 | * sized networks */ | 120 | * sized networks */ |
93 | static void | 121 | static void |
94 | add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) | 122 | add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) |
95 | { | 123 | { |
96 | u8 i; | 124 | u8 i; |
97 | 125 | ||
98 | ++h->nets[cidr-1].nets; | 126 | ++h->nets[cidr-1].nets; |
99 | 127 | ||
100 | pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets); | 128 | pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets); |
101 | 129 | ||
102 | if (h->nets[cidr-1].nets > 1) | 130 | if (h->nets[cidr-1].nets > 1) |
103 | return; | 131 | return; |
104 | 132 | ||
105 | /* New cidr size */ | 133 | /* New cidr size */ |
106 | for (i = 0; i < host_mask && h->nets[i].cidr; i++) { | 134 | for (i = 0; i < host_mask && h->nets[i].cidr; i++) { |
107 | /* Add in increasing prefix order, so larger cidr first */ | 135 | /* Add in increasing prefix order, so larger cidr first */ |
108 | if (h->nets[i].cidr < cidr) | 136 | if (h->nets[i].cidr < cidr) |
109 | swap(h->nets[i].cidr, cidr); | 137 | swap(h->nets[i].cidr, cidr); |
110 | } | 138 | } |
111 | if (i < host_mask) | 139 | if (i < host_mask) |
112 | h->nets[i].cidr = cidr; | 140 | h->nets[i].cidr = cidr; |
113 | } | 141 | } |
114 | 142 | ||
115 | static void | 143 | static void |
116 | del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) | 144 | del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) |
117 | { | 145 | { |
118 | u8 i; | 146 | u8 i; |
119 | 147 | ||
120 | --h->nets[cidr-1].nets; | 148 | --h->nets[cidr-1].nets; |
121 | 149 | ||
122 | pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets); | 150 | pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets); |
123 | 151 | ||
124 | if (h->nets[cidr-1].nets != 0) | 152 | if (h->nets[cidr-1].nets != 0) |
125 | return; | 153 | return; |
126 | 154 | ||
127 | /* All entries with this cidr size deleted, so cleanup h->cidr[] */ | 155 | /* All entries with this cidr size deleted, so cleanup h->cidr[] */ |
128 | for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) { | 156 | for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) { |
129 | if (h->nets[i].cidr == cidr) | 157 | if (h->nets[i].cidr == cidr) |
130 | h->nets[i].cidr = cidr = h->nets[i+1].cidr; | 158 | h->nets[i].cidr = cidr = h->nets[i+1].cidr; |
131 | } | 159 | } |
132 | h->nets[i - 1].cidr = 0; | 160 | h->nets[i - 1].cidr = 0; |
133 | } | 161 | } |
134 | #endif | 162 | #endif |
135 | 163 | ||
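add_cidr() keeps the distinct prefix lengths sorted with the largest (most specific) cidr first, which is what lets type_pf_test_cidrs() further down probe longest-prefix first. A standalone sketch of the same ordering logic, assuming an IPv4 host mask of 32:

	#include <stdio.h>

	#define HOST_MASK 32

	static struct {
		unsigned char cidr;	/* distinct prefix lengths, longest first */
		unsigned int nets;	/* how many elements use this prefix */
	} nets[HOST_MASK];

	/* Same ordering logic as add_cidr() above. */
	static void add_cidr_(unsigned char cidr)
	{
		unsigned char i;

		if (++nets[cidr - 1].nets > 1)
			return;		/* prefix length already recorded */
		for (i = 0; i < HOST_MASK && nets[i].cidr; i++)
			if (nets[i].cidr < cidr) {
				unsigned char tmp = nets[i].cidr;

				nets[i].cidr = cidr;
				cidr = tmp;
			}
		if (i < HOST_MASK)
			nets[i].cidr = cidr;
	}

	int main(void)
	{
		add_cidr_(24);
		add_cidr_(16);
		add_cidr_(28);
		printf("%u %u %u\n", nets[0].cidr, nets[1].cidr, nets[2].cidr);
		/* prints: 28 24 16 -> longest prefix probed first */
		return 0;
	}
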
136 | /* Destroy the hashtable part of the set */ | 164 | /* Destroy the hashtable part of the set */ |
137 | static void | 165 | static void |
138 | ahash_destroy(struct htable *t) | 166 | ahash_destroy(struct htable *t) |
139 | { | 167 | { |
140 | struct hbucket *n; | 168 | struct hbucket *n; |
141 | u32 i; | 169 | u32 i; |
142 | 170 | ||
143 | for (i = 0; i < jhash_size(t->htable_bits); i++) { | 171 | for (i = 0; i < jhash_size(t->htable_bits); i++) { |
144 | n = hbucket(t, i); | 172 | n = hbucket(t, i); |
145 | if (n->size) | 173 | if (n->size) |
146 | /* FIXME: use slab cache */ | 174 | /* FIXME: use slab cache */ |
147 | kfree(n->value); | 175 | kfree(n->value); |
148 | } | 176 | } |
149 | 177 | ||
150 | ip_set_free(t); | 178 | ip_set_free(t); |
151 | } | 179 | } |
152 | 180 | ||
153 | /* Calculate the actual memory size of the set data */ | 181 | /* Calculate the actual memory size of the set data */ |
154 | static size_t | 182 | static size_t |
155 | ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask) | 183 | ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask) |
156 | { | 184 | { |
157 | u32 i; | 185 | u32 i; |
158 | struct htable *t = h->table; | 186 | struct htable *t = h->table; |
159 | size_t memsize = sizeof(*h) | 187 | size_t memsize = sizeof(*h) |
160 | + sizeof(*t) | 188 | + sizeof(*t) |
161 | #ifdef IP_SET_HASH_WITH_NETS | 189 | #ifdef IP_SET_HASH_WITH_NETS |
162 | + sizeof(struct ip_set_hash_nets) * host_mask | 190 | + sizeof(struct ip_set_hash_nets) * host_mask |
163 | #endif | 191 | #endif |
164 | + jhash_size(t->htable_bits) * sizeof(struct hbucket); | 192 | + jhash_size(t->htable_bits) * sizeof(struct hbucket); |
165 | 193 | ||
166 | for (i = 0; i < jhash_size(t->htable_bits); i++) | 194 | for (i = 0; i < jhash_size(t->htable_bits); i++) |
167 | memsize += t->bucket[i].size * dsize; | 195 | memsize += t->bucket[i].size * dsize; |
168 | 196 | ||
169 | return memsize; | 197 | return memsize; |
170 | } | 198 | } |
171 | 199 | ||
172 | /* Flush a hash type of set: destroy all elements */ | 200 | /* Flush a hash type of set: destroy all elements */ |
173 | static void | 201 | static void |
174 | ip_set_hash_flush(struct ip_set *set) | 202 | ip_set_hash_flush(struct ip_set *set) |
175 | { | 203 | { |
176 | struct ip_set_hash *h = set->data; | 204 | struct ip_set_hash *h = set->data; |
177 | struct htable *t = h->table; | 205 | struct htable *t = h->table; |
178 | struct hbucket *n; | 206 | struct hbucket *n; |
179 | u32 i; | 207 | u32 i; |
180 | 208 | ||
181 | for (i = 0; i < jhash_size(t->htable_bits); i++) { | 209 | for (i = 0; i < jhash_size(t->htable_bits); i++) { |
182 | n = hbucket(t, i); | 210 | n = hbucket(t, i); |
183 | if (n->size) { | 211 | if (n->size) { |
184 | n->size = n->pos = 0; | 212 | n->size = n->pos = 0; |
185 | /* FIXME: use slab cache */ | 213 | /* FIXME: use slab cache */ |
186 | kfree(n->value); | 214 | kfree(n->value); |
187 | } | 215 | } |
188 | } | 216 | } |
189 | #ifdef IP_SET_HASH_WITH_NETS | 217 | #ifdef IP_SET_HASH_WITH_NETS |
190 | memset(h->nets, 0, sizeof(struct ip_set_hash_nets) | 218 | memset(h->nets, 0, sizeof(struct ip_set_hash_nets) |
191 | * SET_HOST_MASK(set->family)); | 219 | * SET_HOST_MASK(set->family)); |
192 | #endif | 220 | #endif |
193 | h->elements = 0; | 221 | h->elements = 0; |
194 | } | 222 | } |
195 | 223 | ||
196 | /* Destroy a hash type of set */ | 224 | /* Destroy a hash type of set */ |
197 | static void | 225 | static void |
198 | ip_set_hash_destroy(struct ip_set *set) | 226 | ip_set_hash_destroy(struct ip_set *set) |
199 | { | 227 | { |
200 | struct ip_set_hash *h = set->data; | 228 | struct ip_set_hash *h = set->data; |
201 | 229 | ||
202 | if (with_timeout(h->timeout)) | 230 | if (with_timeout(h->timeout)) |
203 | del_timer_sync(&h->gc); | 231 | del_timer_sync(&h->gc); |
204 | 232 | ||
205 | ahash_destroy(h->table); | 233 | ahash_destroy(h->table); |
206 | #ifdef IP_SET_HASH_WITH_RBTREE | 234 | #ifdef IP_SET_HASH_WITH_RBTREE |
207 | rbtree_destroy(&h->rbtree); | 235 | rbtree_destroy(&h->rbtree); |
208 | #endif | 236 | #endif |
209 | kfree(h); | 237 | kfree(h); |
210 | 238 | ||
211 | set->data = NULL; | 239 | set->data = NULL; |
212 | } | 240 | } |
213 | 241 | ||
214 | #define HKEY(data, initval, htable_bits) \ | ||
215 | (jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \ | ||
216 | & jhash_mask(htable_bits)) | ||
217 | |||
218 | #endif /* _IP_SET_AHASH_H */ | 242 | #endif /* _IP_SET_AHASH_H */ |
219 | 243 | ||
244 | #ifndef HKEY_DATALEN | ||
245 | #define HKEY_DATALEN sizeof(struct type_pf_elem) | ||
246 | #endif | ||
247 | |||
248 | #define HKEY(data, initval, htable_bits) \ | ||
249 | (jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \ | ||
250 | & jhash_mask(htable_bits)) | ||
251 | |||
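HKEY now hashes HKEY_DATALEN bytes instead of the whole element, and a set type may override that length before including this header so that trailing bookkeeping fields do not perturb the key. A hedged sketch of the idea with a made-up element; the struct, the field names and the offsetof choice are illustrative only and HKEY_DATALEN must stay a multiple of sizeof(u32):

	/* Hypothetical element: only ip, port and proto take part in the key. */
	struct foo4_elem {
		__be32 ip;
		__be16 port;
		u8 proto;
		u8 padding;
		u8 nomatch;		/* excluded from the hash key */
		u8 reserved[3];
	};

	#define HKEY_DATALEN	offsetof(struct foo4_elem, nomatch)	/* 8 bytes */
	/* ... then define TYPE/PF and include the ahash header as usual ... */
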
220 | #define CONCAT(a, b, c) a##b##c | 252 | #define CONCAT(a, b, c) a##b##c |
221 | #define TOKEN(a, b, c) CONCAT(a, b, c) | 253 | #define TOKEN(a, b, c) CONCAT(a, b, c) |
222 | 254 | ||
223 | /* Type/family dependent function prototypes */ | 255 | /* Type/family dependent function prototypes */ |
224 | 256 | ||
225 | #define type_pf_data_equal TOKEN(TYPE, PF, _data_equal) | 257 | #define type_pf_data_equal TOKEN(TYPE, PF, _data_equal) |
226 | #define type_pf_data_isnull TOKEN(TYPE, PF, _data_isnull) | 258 | #define type_pf_data_isnull TOKEN(TYPE, PF, _data_isnull) |
227 | #define type_pf_data_copy TOKEN(TYPE, PF, _data_copy) | 259 | #define type_pf_data_copy TOKEN(TYPE, PF, _data_copy) |
228 | #define type_pf_data_zero_out TOKEN(TYPE, PF, _data_zero_out) | 260 | #define type_pf_data_zero_out TOKEN(TYPE, PF, _data_zero_out) |
229 | #define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask) | 261 | #define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask) |
230 | #define type_pf_data_list TOKEN(TYPE, PF, _data_list) | 262 | #define type_pf_data_list TOKEN(TYPE, PF, _data_list) |
231 | #define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist) | 263 | #define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist) |
232 | #define type_pf_data_next TOKEN(TYPE, PF, _data_next) | 264 | #define type_pf_data_next TOKEN(TYPE, PF, _data_next) |
233 | 265 | ||
234 | #define type_pf_elem TOKEN(TYPE, PF, _elem) | 266 | #define type_pf_elem TOKEN(TYPE, PF, _elem) |
235 | #define type_pf_telem TOKEN(TYPE, PF, _telem) | 267 | #define type_pf_telem TOKEN(TYPE, PF, _telem) |
236 | #define type_pf_data_timeout TOKEN(TYPE, PF, _data_timeout) | 268 | #define type_pf_data_timeout TOKEN(TYPE, PF, _data_timeout) |
237 | #define type_pf_data_expired TOKEN(TYPE, PF, _data_expired) | 269 | #define type_pf_data_expired TOKEN(TYPE, PF, _data_expired) |
238 | #define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set) | 270 | #define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set) |
239 | 271 | ||
240 | #define type_pf_elem_add TOKEN(TYPE, PF, _elem_add) | 272 | #define type_pf_elem_add TOKEN(TYPE, PF, _elem_add) |
241 | #define type_pf_add TOKEN(TYPE, PF, _add) | 273 | #define type_pf_add TOKEN(TYPE, PF, _add) |
242 | #define type_pf_del TOKEN(TYPE, PF, _del) | 274 | #define type_pf_del TOKEN(TYPE, PF, _del) |
243 | #define type_pf_test_cidrs TOKEN(TYPE, PF, _test_cidrs) | 275 | #define type_pf_test_cidrs TOKEN(TYPE, PF, _test_cidrs) |
244 | #define type_pf_test TOKEN(TYPE, PF, _test) | 276 | #define type_pf_test TOKEN(TYPE, PF, _test) |
245 | 277 | ||
246 | #define type_pf_elem_tadd TOKEN(TYPE, PF, _elem_tadd) | 278 | #define type_pf_elem_tadd TOKEN(TYPE, PF, _elem_tadd) |
247 | #define type_pf_del_telem TOKEN(TYPE, PF, _ahash_del_telem) | 279 | #define type_pf_del_telem TOKEN(TYPE, PF, _ahash_del_telem) |
248 | #define type_pf_expire TOKEN(TYPE, PF, _expire) | 280 | #define type_pf_expire TOKEN(TYPE, PF, _expire) |
249 | #define type_pf_tadd TOKEN(TYPE, PF, _tadd) | 281 | #define type_pf_tadd TOKEN(TYPE, PF, _tadd) |
250 | #define type_pf_tdel TOKEN(TYPE, PF, _tdel) | 282 | #define type_pf_tdel TOKEN(TYPE, PF, _tdel) |
251 | #define type_pf_ttest_cidrs TOKEN(TYPE, PF, _ahash_ttest_cidrs) | 283 | #define type_pf_ttest_cidrs TOKEN(TYPE, PF, _ahash_ttest_cidrs) |
252 | #define type_pf_ttest TOKEN(TYPE, PF, _ahash_ttest) | 284 | #define type_pf_ttest TOKEN(TYPE, PF, _ahash_ttest) |
253 | 285 | ||
254 | #define type_pf_resize TOKEN(TYPE, PF, _resize) | 286 | #define type_pf_resize TOKEN(TYPE, PF, _resize) |
255 | #define type_pf_tresize TOKEN(TYPE, PF, _tresize) | 287 | #define type_pf_tresize TOKEN(TYPE, PF, _tresize) |
256 | #define type_pf_flush ip_set_hash_flush | 288 | #define type_pf_flush ip_set_hash_flush |
257 | #define type_pf_destroy ip_set_hash_destroy | 289 | #define type_pf_destroy ip_set_hash_destroy |
258 | #define type_pf_head TOKEN(TYPE, PF, _head) | 290 | #define type_pf_head TOKEN(TYPE, PF, _head) |
259 | #define type_pf_list TOKEN(TYPE, PF, _list) | 291 | #define type_pf_list TOKEN(TYPE, PF, _list) |
260 | #define type_pf_tlist TOKEN(TYPE, PF, _tlist) | 292 | #define type_pf_tlist TOKEN(TYPE, PF, _tlist) |
261 | #define type_pf_same_set TOKEN(TYPE, PF, _same_set) | 293 | #define type_pf_same_set TOKEN(TYPE, PF, _same_set) |
262 | #define type_pf_kadt TOKEN(TYPE, PF, _kadt) | 294 | #define type_pf_kadt TOKEN(TYPE, PF, _kadt) |
263 | #define type_pf_uadt TOKEN(TYPE, PF, _uadt) | 295 | #define type_pf_uadt TOKEN(TYPE, PF, _uadt) |
264 | #define type_pf_gc TOKEN(TYPE, PF, _gc) | 296 | #define type_pf_gc TOKEN(TYPE, PF, _gc) |
265 | #define type_pf_gc_init TOKEN(TYPE, PF, _gc_init) | 297 | #define type_pf_gc_init TOKEN(TYPE, PF, _gc_init) |
266 | #define type_pf_variant TOKEN(TYPE, PF, _variant) | 298 | #define type_pf_variant TOKEN(TYPE, PF, _variant) |
267 | #define type_pf_tvariant TOKEN(TYPE, PF, _tvariant) | 299 | #define type_pf_tvariant TOKEN(TYPE, PF, _tvariant) |
268 | 300 | ||
269 | /* Flavour without timeout */ | 301 | /* Flavour without timeout */ |
270 | 302 | ||
271 | /* Get the ith element from the array block n */ | 303 | /* Get the ith element from the array block n */ |
272 | #define ahash_data(n, i) \ | 304 | #define ahash_data(n, i) \ |
273 | ((struct type_pf_elem *)((n)->value) + (i)) | 305 | ((struct type_pf_elem *)((n)->value) + (i)) |
274 | 306 | ||
275 | /* Add an element to the hash table when resizing the set: | 307 | /* Add an element to the hash table when resizing the set: |
276 | * we spare the maintenance of the internal counters. */ | 308 | * we spare the maintenance of the internal counters. */ |
277 | static int | 309 | static int |
278 | type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value) | 310 | type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value, |
311 | u8 ahash_max) | ||
279 | { | 312 | { |
280 | if (n->pos >= n->size) { | 313 | if (n->pos >= n->size) { |
281 | void *tmp; | 314 | void *tmp; |
282 | 315 | ||
283 | if (n->size >= AHASH_MAX_SIZE) | 316 | if (n->size >= ahash_max) |
284 | /* Trigger rehashing */ | 317 | /* Trigger rehashing */ |
285 | return -EAGAIN; | 318 | return -EAGAIN; |
286 | 319 | ||
287 | tmp = kzalloc((n->size + AHASH_INIT_SIZE) | 320 | tmp = kzalloc((n->size + AHASH_INIT_SIZE) |
288 | * sizeof(struct type_pf_elem), | 321 | * sizeof(struct type_pf_elem), |
289 | GFP_ATOMIC); | 322 | GFP_ATOMIC); |
290 | if (!tmp) | 323 | if (!tmp) |
291 | return -ENOMEM; | 324 | return -ENOMEM; |
292 | if (n->size) { | 325 | if (n->size) { |
293 | memcpy(tmp, n->value, | 326 | memcpy(tmp, n->value, |
294 | sizeof(struct type_pf_elem) * n->size); | 327 | sizeof(struct type_pf_elem) * n->size); |
295 | kfree(n->value); | 328 | kfree(n->value); |
296 | } | 329 | } |
297 | n->value = tmp; | 330 | n->value = tmp; |
298 | n->size += AHASH_INIT_SIZE; | 331 | n->size += AHASH_INIT_SIZE; |
299 | } | 332 | } |
300 | type_pf_data_copy(ahash_data(n, n->pos++), value); | 333 | type_pf_data_copy(ahash_data(n, n->pos++), value); |
301 | return 0; | 334 | return 0; |
302 | } | 335 | } |
303 | 336 | ||
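Inside one bucket the value array grows in AHASH_INIT_SIZE steps until it reaches ahash_max, at which point -EAGAIN tells the caller to rehash into a larger table instead. A plain-C sketch of just that growth pattern (the kernel path reallocates with kzalloc/memcpy, elided here):

	#include <errno.h>
	#include <stdio.h>

	#define AHASH_INIT_SIZE 4

	/* Growth of one bucket as in type_pf_elem_add(): extend the array in
	 * AHASH_INIT_SIZE steps until ahash_max, then report -EAGAIN so the
	 * caller rehashes into a bigger table. */
	static int push(unsigned char *size, unsigned char *pos, unsigned char ahash_max)
	{
		if (*pos >= *size) {
			if (*size >= ahash_max)
				return -EAGAIN;
			*size += AHASH_INIT_SIZE;	/* kzalloc + memcpy in the kernel */
		}
		(*pos)++;
		return 0;
	}

	int main(void)
	{
		unsigned char size = 0, pos = 0;

		while (push(&size, &pos, 3 * AHASH_INIT_SIZE) == 0)
			;
		printf("bucket saturated: pos=%u size=%u, time to resize\n", pos, size);
		return 0;
	}
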
304 | /* Resize a hash: create a new hash table with doubling the hashsize | 337 | /* Resize a hash: create a new hash table with doubling the hashsize |
305 | * and inserting the elements to it. Repeat until we succeed or | 338 | * and inserting the elements to it. Repeat until we succeed or |
306 | * fail due to memory pressures. */ | 339 | * fail due to memory pressures. */ |
307 | static int | 340 | static int |
308 | type_pf_resize(struct ip_set *set, bool retried) | 341 | type_pf_resize(struct ip_set *set, bool retried) |
309 | { | 342 | { |
310 | struct ip_set_hash *h = set->data; | 343 | struct ip_set_hash *h = set->data; |
311 | struct htable *t, *orig = h->table; | 344 | struct htable *t, *orig = h->table; |
312 | u8 htable_bits = orig->htable_bits; | 345 | u8 htable_bits = orig->htable_bits; |
313 | const struct type_pf_elem *data; | 346 | const struct type_pf_elem *data; |
314 | struct hbucket *n, *m; | 347 | struct hbucket *n, *m; |
315 | u32 i, j; | 348 | u32 i, j; |
316 | int ret; | 349 | int ret; |
317 | 350 | ||
318 | retry: | 351 | retry: |
319 | ret = 0; | 352 | ret = 0; |
320 | htable_bits++; | 353 | htable_bits++; |
321 | pr_debug("attempt to resize set %s from %u to %u, t %p\n", | 354 | pr_debug("attempt to resize set %s from %u to %u, t %p\n", |
322 | set->name, orig->htable_bits, htable_bits, orig); | 355 | set->name, orig->htable_bits, htable_bits, orig); |
323 | if (!htable_bits) | 356 | if (!htable_bits) |
324 | /* In case we have plenty of memory :-) */ | 357 | /* In case we have plenty of memory :-) */ |
325 | return -IPSET_ERR_HASH_FULL; | 358 | return -IPSET_ERR_HASH_FULL; |
326 | t = ip_set_alloc(sizeof(*t) | 359 | t = ip_set_alloc(sizeof(*t) |
327 | + jhash_size(htable_bits) * sizeof(struct hbucket)); | 360 | + jhash_size(htable_bits) * sizeof(struct hbucket)); |
328 | if (!t) | 361 | if (!t) |
329 | return -ENOMEM; | 362 | return -ENOMEM; |
330 | t->htable_bits = htable_bits; | 363 | t->htable_bits = htable_bits; |
331 | 364 | ||
332 | read_lock_bh(&set->lock); | 365 | read_lock_bh(&set->lock); |
333 | for (i = 0; i < jhash_size(orig->htable_bits); i++) { | 366 | for (i = 0; i < jhash_size(orig->htable_bits); i++) { |
334 | n = hbucket(orig, i); | 367 | n = hbucket(orig, i); |
335 | for (j = 0; j < n->pos; j++) { | 368 | for (j = 0; j < n->pos; j++) { |
336 | data = ahash_data(n, j); | 369 | data = ahash_data(n, j); |
337 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); | 370 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); |
338 | ret = type_pf_elem_add(m, data); | 371 | ret = type_pf_elem_add(m, data, AHASH_MAX(h)); |
339 | if (ret < 0) { | 372 | if (ret < 0) { |
340 | read_unlock_bh(&set->lock); | 373 | read_unlock_bh(&set->lock); |
341 | ahash_destroy(t); | 374 | ahash_destroy(t); |
342 | if (ret == -EAGAIN) | 375 | if (ret == -EAGAIN) |
343 | goto retry; | 376 | goto retry; |
344 | return ret; | 377 | return ret; |
345 | } | 378 | } |
346 | } | 379 | } |
347 | } | 380 | } |
348 | 381 | ||
349 | rcu_assign_pointer(h->table, t); | 382 | rcu_assign_pointer(h->table, t); |
350 | read_unlock_bh(&set->lock); | 383 | read_unlock_bh(&set->lock); |
351 | 384 | ||
352 | /* Give time to other readers of the set */ | 385 | /* Give time to other readers of the set */ |
353 | synchronize_rcu_bh(); | 386 | synchronize_rcu_bh(); |
354 | 387 | ||
355 | pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name, | 388 | pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name, |
356 | orig->htable_bits, orig, t->htable_bits, t); | 389 | orig->htable_bits, orig, t->htable_bits, t); |
357 | ahash_destroy(orig); | 390 | ahash_destroy(orig); |
358 | 391 | ||
359 | return 0; | 392 | return 0; |
360 | } | 393 | } |
361 | 394 | ||
362 | static void | 395 | static inline void |
363 | type_pf_data_next(struct ip_set_hash *h, const struct type_pf_elem *d); | 396 | type_pf_data_next(struct ip_set_hash *h, const struct type_pf_elem *d); |
364 | 397 | ||
365 | /* Add an element to a hash and update the internal counters when succeeded, | 398 | /* Add an element to a hash and update the internal counters when succeeded, |
366 | * otherwise report the proper error code. */ | 399 | * otherwise report the proper error code. */ |
367 | static int | 400 | static int |
368 | type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags) | 401 | type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags) |
369 | { | 402 | { |
370 | struct ip_set_hash *h = set->data; | 403 | struct ip_set_hash *h = set->data; |
371 | struct htable *t; | 404 | struct htable *t; |
372 | const struct type_pf_elem *d = value; | 405 | const struct type_pf_elem *d = value; |
373 | struct hbucket *n; | 406 | struct hbucket *n; |
374 | int i, ret = 0; | 407 | int i, ret = 0; |
375 | u32 key; | 408 | u32 key, multi = 0; |
376 | 409 | ||
377 | if (h->elements >= h->maxelem) | 410 | if (h->elements >= h->maxelem) |
378 | return -IPSET_ERR_HASH_FULL; | 411 | return -IPSET_ERR_HASH_FULL; |
379 | 412 | ||
380 | rcu_read_lock_bh(); | 413 | rcu_read_lock_bh(); |
381 | t = rcu_dereference_bh(h->table); | 414 | t = rcu_dereference_bh(h->table); |
382 | key = HKEY(value, h->initval, t->htable_bits); | 415 | key = HKEY(value, h->initval, t->htable_bits); |
383 | n = hbucket(t, key); | 416 | n = hbucket(t, key); |
384 | for (i = 0; i < n->pos; i++) | 417 | for (i = 0; i < n->pos; i++) |
385 | if (type_pf_data_equal(ahash_data(n, i), d)) { | 418 | if (type_pf_data_equal(ahash_data(n, i), d, &multi)) { |
386 | ret = -IPSET_ERR_EXIST; | 419 | ret = -IPSET_ERR_EXIST; |
387 | goto out; | 420 | goto out; |
388 | } | 421 | } |
389 | 422 | TUNE_AHASH_MAX(h, multi); | |
390 | ret = type_pf_elem_add(n, value); | 423 | ret = type_pf_elem_add(n, value, AHASH_MAX(h)); |
391 | if (ret != 0) { | 424 | if (ret != 0) { |
392 | if (ret == -EAGAIN) | 425 | if (ret == -EAGAIN) |
393 | type_pf_data_next(h, d); | 426 | type_pf_data_next(h, d); |
394 | goto out; | 427 | goto out; |
395 | } | 428 | } |
396 | 429 | ||
397 | #ifdef IP_SET_HASH_WITH_NETS | 430 | #ifdef IP_SET_HASH_WITH_NETS |
398 | add_cidr(h, d->cidr, HOST_MASK); | 431 | add_cidr(h, d->cidr, HOST_MASK); |
399 | #endif | 432 | #endif |
400 | h->elements++; | 433 | h->elements++; |
401 | out: | 434 | out: |
402 | rcu_read_unlock_bh(); | 435 | rcu_read_unlock_bh(); |
403 | return ret; | 436 | return ret; |
404 | } | 437 | } |
405 | 438 | ||
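The extra &multi argument threaded through type_pf_data_equal() lets a type count partial matches within the probed bucket; type_pf_add() above then feeds that count to TUNE_AHASH_MAX() so a bucket holding many near-identical entries is widened instead of forcing a rehash. A hedged sketch of how such a comparison might look for a hypothetical network-plus-interface element (struct and field names are illustrative, not the signature of any type in this patch):

	struct foo4_elem {
		__be32 ip;
		u8 cidr;
		u8 physdev;
		const char *iface;	/* interned interface name */
	};

	static inline bool
	foo4_data_equal(const struct foo4_elem *e1, const struct foo4_elem *e2,
			u32 *multi)
	{
		return e1->ip == e2->ip &&
		       e1->cidr == e2->cidr &&
		       (++*multi) &&	/* same net: count it towards bucket tuning */
		       e1->physdev == e2->physdev &&
		       e1->iface == e2->iface;
	}
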
406 | /* Delete an element from the hash: swap it with the last element | 439 | /* Delete an element from the hash: swap it with the last element |
407 | * and free up space if possible. | 440 | * and free up space if possible. |
408 | */ | 441 | */ |
409 | static int | 442 | static int |
410 | type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags) | 443 | type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags) |
411 | { | 444 | { |
412 | struct ip_set_hash *h = set->data; | 445 | struct ip_set_hash *h = set->data; |
413 | struct htable *t = h->table; | 446 | struct htable *t = h->table; |
414 | const struct type_pf_elem *d = value; | 447 | const struct type_pf_elem *d = value; |
415 | struct hbucket *n; | 448 | struct hbucket *n; |
416 | int i; | 449 | int i; |
417 | struct type_pf_elem *data; | 450 | struct type_pf_elem *data; |
418 | u32 key; | 451 | u32 key, multi = 0; |
419 | 452 | ||
420 | key = HKEY(value, h->initval, t->htable_bits); | 453 | key = HKEY(value, h->initval, t->htable_bits); |
421 | n = hbucket(t, key); | 454 | n = hbucket(t, key); |
422 | for (i = 0; i < n->pos; i++) { | 455 | for (i = 0; i < n->pos; i++) { |
423 | data = ahash_data(n, i); | 456 | data = ahash_data(n, i); |
424 | if (!type_pf_data_equal(data, d)) | 457 | if (!type_pf_data_equal(data, d, &multi)) |
425 | continue; | 458 | continue; |
426 | if (i != n->pos - 1) | 459 | if (i != n->pos - 1) |
427 | /* Not last one */ | 460 | /* Not last one */ |
428 | type_pf_data_copy(data, ahash_data(n, n->pos - 1)); | 461 | type_pf_data_copy(data, ahash_data(n, n->pos - 1)); |
429 | 462 | ||
430 | n->pos--; | 463 | n->pos--; |
431 | h->elements--; | 464 | h->elements--; |
432 | #ifdef IP_SET_HASH_WITH_NETS | 465 | #ifdef IP_SET_HASH_WITH_NETS |
433 | del_cidr(h, d->cidr, HOST_MASK); | 466 | del_cidr(h, d->cidr, HOST_MASK); |
434 | #endif | 467 | #endif |
435 | if (n->pos + AHASH_INIT_SIZE < n->size) { | 468 | if (n->pos + AHASH_INIT_SIZE < n->size) { |
436 | void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) | 469 | void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) |
437 | * sizeof(struct type_pf_elem), | 470 | * sizeof(struct type_pf_elem), |
438 | GFP_ATOMIC); | 471 | GFP_ATOMIC); |
439 | if (!tmp) | 472 | if (!tmp) |
440 | return 0; | 473 | return 0; |
441 | n->size -= AHASH_INIT_SIZE; | 474 | n->size -= AHASH_INIT_SIZE; |
442 | memcpy(tmp, n->value, | 475 | memcpy(tmp, n->value, |
443 | n->size * sizeof(struct type_pf_elem)); | 476 | n->size * sizeof(struct type_pf_elem)); |
444 | kfree(n->value); | 477 | kfree(n->value); |
445 | n->value = tmp; | 478 | n->value = tmp; |
446 | } | 479 | } |
447 | return 0; | 480 | return 0; |
448 | } | 481 | } |
449 | 482 | ||
450 | return -IPSET_ERR_EXIST; | 483 | return -IPSET_ERR_EXIST; |
451 | } | 484 | } |
452 | 485 | ||
453 | #ifdef IP_SET_HASH_WITH_NETS | 486 | #ifdef IP_SET_HASH_WITH_NETS |
454 | 487 | ||
455 | /* Special test function which takes into account the different network | 488 | /* Special test function which takes into account the different network |
456 | * sizes added to the set */ | 489 | * sizes added to the set */ |
457 | static int | 490 | static int |
458 | type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) | 491 | type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) |
459 | { | 492 | { |
460 | struct ip_set_hash *h = set->data; | 493 | struct ip_set_hash *h = set->data; |
461 | struct htable *t = h->table; | 494 | struct htable *t = h->table; |
462 | struct hbucket *n; | 495 | struct hbucket *n; |
463 | const struct type_pf_elem *data; | 496 | const struct type_pf_elem *data; |
464 | int i, j = 0; | 497 | int i, j = 0; |
465 | u32 key; | 498 | u32 key, multi = 0; |
466 | u8 host_mask = SET_HOST_MASK(set->family); | 499 | u8 host_mask = SET_HOST_MASK(set->family); |
467 | 500 | ||
468 | pr_debug("test by nets\n"); | 501 | pr_debug("test by nets\n"); |
469 | for (; j < host_mask && h->nets[j].cidr; j++) { | 502 | for (; j < host_mask && h->nets[j].cidr && !multi; j++) { |
470 | type_pf_data_netmask(d, h->nets[j].cidr); | 503 | type_pf_data_netmask(d, h->nets[j].cidr); |
471 | key = HKEY(d, h->initval, t->htable_bits); | 504 | key = HKEY(d, h->initval, t->htable_bits); |
472 | n = hbucket(t, key); | 505 | n = hbucket(t, key); |
473 | for (i = 0; i < n->pos; i++) { | 506 | for (i = 0; i < n->pos; i++) { |
474 | data = ahash_data(n, i); | 507 | data = ahash_data(n, i); |
475 | if (type_pf_data_equal(data, d)) | 508 | if (type_pf_data_equal(data, d, &multi)) |
476 | return 1; | 509 | return 1; |
477 | } | 510 | } |
478 | } | 511 | } |
479 | return 0; | 512 | return 0; |
480 | } | 513 | } |
481 | #endif | 514 | #endif |
482 | 515 | ||
483 | /* Test whether the element is added to the set */ | 516 | /* Test whether the element is added to the set */ |
484 | static int | 517 | static int |
485 | type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags) | 518 | type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags) |
486 | { | 519 | { |
487 | struct ip_set_hash *h = set->data; | 520 | struct ip_set_hash *h = set->data; |
488 | struct htable *t = h->table; | 521 | struct htable *t = h->table; |
489 | struct type_pf_elem *d = value; | 522 | struct type_pf_elem *d = value; |
490 | struct hbucket *n; | 523 | struct hbucket *n; |
491 | const struct type_pf_elem *data; | 524 | const struct type_pf_elem *data; |
492 | int i; | 525 | int i; |
493 | u32 key; | 526 | u32 key, multi = 0; |
494 | 527 | ||
495 | #ifdef IP_SET_HASH_WITH_NETS | 528 | #ifdef IP_SET_HASH_WITH_NETS |
496 | /* If we test an IP address and not a network address, | 529 | /* If we test an IP address and not a network address, |
497 | * try all possible network sizes */ | 530 | * try all possible network sizes */ |
498 | if (d->cidr == SET_HOST_MASK(set->family)) | 531 | if (d->cidr == SET_HOST_MASK(set->family)) |
499 | return type_pf_test_cidrs(set, d, timeout); | 532 | return type_pf_test_cidrs(set, d, timeout); |
500 | #endif | 533 | #endif |
501 | 534 | ||
502 | key = HKEY(d, h->initval, t->htable_bits); | 535 | key = HKEY(d, h->initval, t->htable_bits); |
503 | n = hbucket(t, key); | 536 | n = hbucket(t, key); |
504 | for (i = 0; i < n->pos; i++) { | 537 | for (i = 0; i < n->pos; i++) { |
505 | data = ahash_data(n, i); | 538 | data = ahash_data(n, i); |
506 | if (type_pf_data_equal(data, d)) | 539 | if (type_pf_data_equal(data, d, &multi)) |
507 | return 1; | 540 | return 1; |
508 | } | 541 | } |
509 | return 0; | 542 | return 0; |
510 | } | 543 | } |
511 | 544 | ||
512 | /* Reply a HEADER request: fill out the header part of the set */ | 545 | /* Reply a HEADER request: fill out the header part of the set */ |
513 | static int | 546 | static int |
514 | type_pf_head(struct ip_set *set, struct sk_buff *skb) | 547 | type_pf_head(struct ip_set *set, struct sk_buff *skb) |
515 | { | 548 | { |
516 | const struct ip_set_hash *h = set->data; | 549 | const struct ip_set_hash *h = set->data; |
517 | struct nlattr *nested; | 550 | struct nlattr *nested; |
518 | size_t memsize; | 551 | size_t memsize; |
519 | 552 | ||
520 | read_lock_bh(&set->lock); | 553 | read_lock_bh(&set->lock); |
521 | memsize = ahash_memsize(h, with_timeout(h->timeout) | 554 | memsize = ahash_memsize(h, with_timeout(h->timeout) |
522 | ? sizeof(struct type_pf_telem) | 555 | ? sizeof(struct type_pf_telem) |
523 | : sizeof(struct type_pf_elem), | 556 | : sizeof(struct type_pf_elem), |
524 | set->family == AF_INET ? 32 : 128); | 557 | set->family == AF_INET ? 32 : 128); |
525 | read_unlock_bh(&set->lock); | 558 | read_unlock_bh(&set->lock); |
526 | 559 | ||
527 | nested = ipset_nest_start(skb, IPSET_ATTR_DATA); | 560 | nested = ipset_nest_start(skb, IPSET_ATTR_DATA); |
528 | if (!nested) | 561 | if (!nested) |
529 | goto nla_put_failure; | 562 | goto nla_put_failure; |
530 | NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE, | 563 | NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE, |
531 | htonl(jhash_size(h->table->htable_bits))); | 564 | htonl(jhash_size(h->table->htable_bits))); |
532 | NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)); | 565 | NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)); |
533 | #ifdef IP_SET_HASH_WITH_NETMASK | 566 | #ifdef IP_SET_HASH_WITH_NETMASK |
534 | if (h->netmask != HOST_MASK) | 567 | if (h->netmask != HOST_MASK) |
535 | NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask); | 568 | NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask); |
536 | #endif | 569 | #endif |
537 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); | 570 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
538 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)); | 571 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)); |
539 | if (with_timeout(h->timeout)) | 572 | if (with_timeout(h->timeout)) |
540 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)); | 573 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)); |
541 | ipset_nest_end(skb, nested); | 574 | ipset_nest_end(skb, nested); |
542 | 575 | ||
543 | return 0; | 576 | return 0; |
544 | nla_put_failure: | 577 | nla_put_failure: |
545 | return -EMSGSIZE; | 578 | return -EMSGSIZE; |
546 | } | 579 | } |
547 | 580 | ||
548 | /* Reply a LIST/SAVE request: dump the elements of the specified set */ | 581 | /* Reply a LIST/SAVE request: dump the elements of the specified set */ |
549 | static int | 582 | static int |
550 | type_pf_list(const struct ip_set *set, | 583 | type_pf_list(const struct ip_set *set, |
551 | struct sk_buff *skb, struct netlink_callback *cb) | 584 | struct sk_buff *skb, struct netlink_callback *cb) |
552 | { | 585 | { |
553 | const struct ip_set_hash *h = set->data; | 586 | const struct ip_set_hash *h = set->data; |
554 | const struct htable *t = h->table; | 587 | const struct htable *t = h->table; |
555 | struct nlattr *atd, *nested; | 588 | struct nlattr *atd, *nested; |
556 | const struct hbucket *n; | 589 | const struct hbucket *n; |
557 | const struct type_pf_elem *data; | 590 | const struct type_pf_elem *data; |
558 | u32 first = cb->args[2]; | 591 | u32 first = cb->args[2]; |
559 | /* We assume that one hash bucket fills into one page */ | 592 | /* We assume that one hash bucket fills into one page */ |
560 | void *incomplete; | 593 | void *incomplete; |
561 | int i; | 594 | int i; |
562 | 595 | ||
563 | atd = ipset_nest_start(skb, IPSET_ATTR_ADT); | 596 | atd = ipset_nest_start(skb, IPSET_ATTR_ADT); |
564 | if (!atd) | 597 | if (!atd) |
565 | return -EMSGSIZE; | 598 | return -EMSGSIZE; |
566 | pr_debug("list hash set %s\n", set->name); | 599 | pr_debug("list hash set %s\n", set->name); |
567 | for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { | 600 | for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { |
568 | incomplete = skb_tail_pointer(skb); | 601 | incomplete = skb_tail_pointer(skb); |
569 | n = hbucket(t, cb->args[2]); | 602 | n = hbucket(t, cb->args[2]); |
570 | pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n); | 603 | pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n); |
571 | for (i = 0; i < n->pos; i++) { | 604 | for (i = 0; i < n->pos; i++) { |
572 | data = ahash_data(n, i); | 605 | data = ahash_data(n, i); |
573 | pr_debug("list hash %lu hbucket %p i %u, data %p\n", | 606 | pr_debug("list hash %lu hbucket %p i %u, data %p\n", |
574 | cb->args[2], n, i, data); | 607 | cb->args[2], n, i, data); |
575 | nested = ipset_nest_start(skb, IPSET_ATTR_DATA); | 608 | nested = ipset_nest_start(skb, IPSET_ATTR_DATA); |
576 | if (!nested) { | 609 | if (!nested) { |
577 | if (cb->args[2] == first) { | 610 | if (cb->args[2] == first) { |
578 | nla_nest_cancel(skb, atd); | 611 | nla_nest_cancel(skb, atd); |
579 | return -EMSGSIZE; | 612 | return -EMSGSIZE; |
580 | } else | 613 | } else |
581 | goto nla_put_failure; | 614 | goto nla_put_failure; |
582 | } | 615 | } |
583 | if (type_pf_data_list(skb, data)) | 616 | if (type_pf_data_list(skb, data)) |
584 | goto nla_put_failure; | 617 | goto nla_put_failure; |
585 | ipset_nest_end(skb, nested); | 618 | ipset_nest_end(skb, nested); |
586 | } | 619 | } |
587 | } | 620 | } |
588 | ipset_nest_end(skb, atd); | 621 | ipset_nest_end(skb, atd); |
589 | /* Set listing finished */ | 622 | /* Set listing finished */ |
590 | cb->args[2] = 0; | 623 | cb->args[2] = 0; |
591 | 624 | ||
592 | return 0; | 625 | return 0; |
593 | 626 | ||
594 | nla_put_failure: | 627 | nla_put_failure: |
595 | nlmsg_trim(skb, incomplete); | 628 | nlmsg_trim(skb, incomplete); |
596 | ipset_nest_end(skb, atd); | 629 | ipset_nest_end(skb, atd); |
597 | if (unlikely(first == cb->args[2])) { | 630 | if (unlikely(first == cb->args[2])) { |
598 | pr_warning("Can't list set %s: one bucket does not fit into " | 631 | pr_warning("Can't list set %s: one bucket does not fit into " |
599 | "a message. Please report it!\n", set->name); | 632 | "a message. Please report it!\n", set->name); |
600 | cb->args[2] = 0; | 633 | cb->args[2] = 0; |
601 | return -EMSGSIZE; | 634 | return -EMSGSIZE; |
602 | } | 635 | } |
603 | return 0; | 636 | return 0; |
604 | } | 637 | } |
605 | 638 | ||
606 | static int | 639 | static int |
607 | type_pf_kadt(struct ip_set *set, const struct sk_buff * skb, | 640 | type_pf_kadt(struct ip_set *set, const struct sk_buff * skb, |
608 | const struct xt_action_param *par, | 641 | const struct xt_action_param *par, |
609 | enum ipset_adt adt, const struct ip_set_adt_opt *opt); | 642 | enum ipset_adt adt, const struct ip_set_adt_opt *opt); |
610 | static int | 643 | static int |
611 | type_pf_uadt(struct ip_set *set, struct nlattr *tb[], | 644 | type_pf_uadt(struct ip_set *set, struct nlattr *tb[], |
612 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); | 645 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); |
613 | 646 | ||
614 | static const struct ip_set_type_variant type_pf_variant = { | 647 | static const struct ip_set_type_variant type_pf_variant = { |
615 | .kadt = type_pf_kadt, | 648 | .kadt = type_pf_kadt, |
616 | .uadt = type_pf_uadt, | 649 | .uadt = type_pf_uadt, |
617 | .adt = { | 650 | .adt = { |
618 | [IPSET_ADD] = type_pf_add, | 651 | [IPSET_ADD] = type_pf_add, |
619 | [IPSET_DEL] = type_pf_del, | 652 | [IPSET_DEL] = type_pf_del, |
620 | [IPSET_TEST] = type_pf_test, | 653 | [IPSET_TEST] = type_pf_test, |
621 | }, | 654 | }, |
622 | .destroy = type_pf_destroy, | 655 | .destroy = type_pf_destroy, |
623 | .flush = type_pf_flush, | 656 | .flush = type_pf_flush, |
624 | .head = type_pf_head, | 657 | .head = type_pf_head, |
625 | .list = type_pf_list, | 658 | .list = type_pf_list, |
626 | .resize = type_pf_resize, | 659 | .resize = type_pf_resize, |
627 | .same_set = type_pf_same_set, | 660 | .same_set = type_pf_same_set, |
628 | }; | 661 | }; |
629 | 662 | ||
630 | /* Flavour with timeout support */ | 663 | /* Flavour with timeout support */ |
631 | 664 | ||
632 | #define ahash_tdata(n, i) \ | 665 | #define ahash_tdata(n, i) \ |
633 | (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i)) | 666 | (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i)) |
634 | 667 | ||
635 | static inline u32 | 668 | static inline u32 |
636 | type_pf_data_timeout(const struct type_pf_elem *data) | 669 | type_pf_data_timeout(const struct type_pf_elem *data) |
637 | { | 670 | { |
638 | const struct type_pf_telem *tdata = | 671 | const struct type_pf_telem *tdata = |
639 | (const struct type_pf_telem *) data; | 672 | (const struct type_pf_telem *) data; |
640 | 673 | ||
641 | return tdata->timeout; | 674 | return tdata->timeout; |
642 | } | 675 | } |
643 | 676 | ||
644 | static inline bool | 677 | static inline bool |
645 | type_pf_data_expired(const struct type_pf_elem *data) | 678 | type_pf_data_expired(const struct type_pf_elem *data) |
646 | { | 679 | { |
647 | const struct type_pf_telem *tdata = | 680 | const struct type_pf_telem *tdata = |
648 | (const struct type_pf_telem *) data; | 681 | (const struct type_pf_telem *) data; |
649 | 682 | ||
650 | return ip_set_timeout_expired(tdata->timeout); | 683 | return ip_set_timeout_expired(tdata->timeout); |
651 | } | 684 | } |
652 | 685 | ||
653 | static inline void | 686 | static inline void |
654 | type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout) | 687 | type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout) |
655 | { | 688 | { |
656 | struct type_pf_telem *tdata = (struct type_pf_telem *) data; | 689 | struct type_pf_telem *tdata = (struct type_pf_telem *) data; |
657 | 690 | ||
658 | tdata->timeout = ip_set_timeout_set(timeout); | 691 | tdata->timeout = ip_set_timeout_set(timeout); |
659 | } | 692 | } |
660 | 693 | ||
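These three accessors simply cast type_pf_elem to type_pf_telem, which is only safe because a timeout-capable element is, by convention, the plain element with one extra timeout word appended at the end (as the comment at the top of this header requires). A hypothetical pair of layouts, purely to illustrate the convention:

	struct foo4_elem {
		__be32 ip;
	};

	struct foo4_telem {
		__be32 ip;		/* must mirror struct foo4_elem exactly */
		unsigned long timeout;	/* trailing field, excluded from HKEY */
	};
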
661 | static int | 694 | static int |
662 | type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value, | 695 | type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value, |
663 | u32 timeout) | 696 | u8 ahash_max, u32 timeout) |
664 | { | 697 | { |
665 | struct type_pf_elem *data; | 698 | struct type_pf_elem *data; |
666 | 699 | ||
667 | if (n->pos >= n->size) { | 700 | if (n->pos >= n->size) { |
668 | void *tmp; | 701 | void *tmp; |
669 | 702 | ||
670 | if (n->size >= AHASH_MAX_SIZE) | 703 | if (n->size >= ahash_max) |
671 | /* Trigger rehashing */ | 704 | /* Trigger rehashing */ |
672 | return -EAGAIN; | 705 | return -EAGAIN; |
673 | 706 | ||
674 | tmp = kzalloc((n->size + AHASH_INIT_SIZE) | 707 | tmp = kzalloc((n->size + AHASH_INIT_SIZE) |
675 | * sizeof(struct type_pf_telem), | 708 | * sizeof(struct type_pf_telem), |
676 | GFP_ATOMIC); | 709 | GFP_ATOMIC); |
677 | if (!tmp) | 710 | if (!tmp) |
678 | return -ENOMEM; | 711 | return -ENOMEM; |
679 | if (n->size) { | 712 | if (n->size) { |
680 | memcpy(tmp, n->value, | 713 | memcpy(tmp, n->value, |
681 | sizeof(struct type_pf_telem) * n->size); | 714 | sizeof(struct type_pf_telem) * n->size); |
682 | kfree(n->value); | 715 | kfree(n->value); |
683 | } | 716 | } |
684 | n->value = tmp; | 717 | n->value = tmp; |
685 | n->size += AHASH_INIT_SIZE; | 718 | n->size += AHASH_INIT_SIZE; |
686 | } | 719 | } |
687 | data = ahash_tdata(n, n->pos++); | 720 | data = ahash_tdata(n, n->pos++); |
688 | type_pf_data_copy(data, value); | 721 | type_pf_data_copy(data, value); |
689 | type_pf_data_timeout_set(data, timeout); | 722 | type_pf_data_timeout_set(data, timeout); |
690 | return 0; | 723 | return 0; |
691 | } | 724 | } |
692 | 725 | ||
693 | /* Delete expired elements from the hashtable */ | 726 | /* Delete expired elements from the hashtable */ |
694 | static void | 727 | static void |
695 | type_pf_expire(struct ip_set_hash *h) | 728 | type_pf_expire(struct ip_set_hash *h) |
696 | { | 729 | { |
697 | struct htable *t = h->table; | 730 | struct htable *t = h->table; |
698 | struct hbucket *n; | 731 | struct hbucket *n; |
699 | struct type_pf_elem *data; | 732 | struct type_pf_elem *data; |
700 | u32 i; | 733 | u32 i; |
701 | int j; | 734 | int j; |
702 | 735 | ||
703 | for (i = 0; i < jhash_size(t->htable_bits); i++) { | 736 | for (i = 0; i < jhash_size(t->htable_bits); i++) { |
704 | n = hbucket(t, i); | 737 | n = hbucket(t, i); |
705 | for (j = 0; j < n->pos; j++) { | 738 | for (j = 0; j < n->pos; j++) { |
706 | data = ahash_tdata(n, j); | 739 | data = ahash_tdata(n, j); |
707 | if (type_pf_data_expired(data)) { | 740 | if (type_pf_data_expired(data)) { |
708 | pr_debug("expired %u/%u\n", i, j); | 741 | pr_debug("expired %u/%u\n", i, j); |
709 | #ifdef IP_SET_HASH_WITH_NETS | 742 | #ifdef IP_SET_HASH_WITH_NETS |
710 | del_cidr(h, data->cidr, HOST_MASK); | 743 | del_cidr(h, data->cidr, HOST_MASK); |
711 | #endif | 744 | #endif |
712 | if (j != n->pos - 1) | 745 | if (j != n->pos - 1) |
713 | /* Not last one */ | 746 | /* Not last one */ |
714 | type_pf_data_copy(data, | 747 | type_pf_data_copy(data, |
715 | ahash_tdata(n, n->pos - 1)); | 748 | ahash_tdata(n, n->pos - 1)); |
716 | n->pos--; | 749 | n->pos--; |
717 | h->elements--; | 750 | h->elements--; |
718 | } | 751 | } |
719 | } | 752 | } |
720 | if (n->pos + AHASH_INIT_SIZE < n->size) { | 753 | if (n->pos + AHASH_INIT_SIZE < n->size) { |
721 | void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) | 754 | void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) |
722 | * sizeof(struct type_pf_telem), | 755 | * sizeof(struct type_pf_telem), |
723 | GFP_ATOMIC); | 756 | GFP_ATOMIC); |
724 | if (!tmp) | 757 | if (!tmp) |
725 | /* Still try to delete expired elements */ | 758 | /* Still try to delete expired elements */ |
726 | continue; | 759 | continue; |
727 | n->size -= AHASH_INIT_SIZE; | 760 | n->size -= AHASH_INIT_SIZE; |
728 | memcpy(tmp, n->value, | 761 | memcpy(tmp, n->value, |
729 | n->size * sizeof(struct type_pf_telem)); | 762 | n->size * sizeof(struct type_pf_telem)); |
730 | kfree(n->value); | 763 | kfree(n->value); |
731 | n->value = tmp; | 764 | n->value = tmp; |
732 | } | 765 | } |
733 | } | 766 | } |
734 | } | 767 | } |
735 | 768 | ||
736 | static int | 769 | static int |
737 | type_pf_tresize(struct ip_set *set, bool retried) | 770 | type_pf_tresize(struct ip_set *set, bool retried) |
738 | { | 771 | { |
739 | struct ip_set_hash *h = set->data; | 772 | struct ip_set_hash *h = set->data; |
740 | struct htable *t, *orig = h->table; | 773 | struct htable *t, *orig = h->table; |
741 | u8 htable_bits = orig->htable_bits; | 774 | u8 htable_bits = orig->htable_bits; |
742 | const struct type_pf_elem *data; | 775 | const struct type_pf_elem *data; |
743 | struct hbucket *n, *m; | 776 | struct hbucket *n, *m; |
744 | u32 i, j; | 777 | u32 i, j; |
745 | int ret; | 778 | int ret; |
746 | 779 | ||
747 | /* Try to cleanup once */ | 780 | /* Try to cleanup once */ |
748 | if (!retried) { | 781 | if (!retried) { |
749 | i = h->elements; | 782 | i = h->elements; |
750 | write_lock_bh(&set->lock); | 783 | write_lock_bh(&set->lock); |
751 | type_pf_expire(set->data); | 784 | type_pf_expire(set->data); |
752 | write_unlock_bh(&set->lock); | 785 | write_unlock_bh(&set->lock); |
753 | if (h->elements < i) | 786 | if (h->elements < i) |
754 | return 0; | 787 | return 0; |
755 | } | 788 | } |
756 | 789 | ||
757 | retry: | 790 | retry: |
758 | ret = 0; | 791 | ret = 0; |
759 | htable_bits++; | 792 | htable_bits++; |
760 | if (!htable_bits) | 793 | if (!htable_bits) |
761 | /* In case we have plenty of memory :-) */ | 794 | /* In case we have plenty of memory :-) */ |
762 | return -IPSET_ERR_HASH_FULL; | 795 | return -IPSET_ERR_HASH_FULL; |
763 | t = ip_set_alloc(sizeof(*t) | 796 | t = ip_set_alloc(sizeof(*t) |
764 | + jhash_size(htable_bits) * sizeof(struct hbucket)); | 797 | + jhash_size(htable_bits) * sizeof(struct hbucket)); |
765 | if (!t) | 798 | if (!t) |
766 | return -ENOMEM; | 799 | return -ENOMEM; |
767 | t->htable_bits = htable_bits; | 800 | t->htable_bits = htable_bits; |
768 | 801 | ||
769 | read_lock_bh(&set->lock); | 802 | read_lock_bh(&set->lock); |
770 | for (i = 0; i < jhash_size(orig->htable_bits); i++) { | 803 | for (i = 0; i < jhash_size(orig->htable_bits); i++) { |
771 | n = hbucket(orig, i); | 804 | n = hbucket(orig, i); |
772 | for (j = 0; j < n->pos; j++) { | 805 | for (j = 0; j < n->pos; j++) { |
773 | data = ahash_tdata(n, j); | 806 | data = ahash_tdata(n, j); |
774 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); | 807 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); |
775 | ret = type_pf_elem_tadd(m, data, | 808 | ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), |
776 | type_pf_data_timeout(data)); | 809 | type_pf_data_timeout(data)); |
777 | if (ret < 0) { | 810 | if (ret < 0) { |
778 | read_unlock_bh(&set->lock); | 811 | read_unlock_bh(&set->lock); |
779 | ahash_destroy(t); | 812 | ahash_destroy(t); |
780 | if (ret == -EAGAIN) | 813 | if (ret == -EAGAIN) |
781 | goto retry; | 814 | goto retry; |
782 | return ret; | 815 | return ret; |
783 | } | 816 | } |
784 | } | 817 | } |
785 | } | 818 | } |
786 | 819 | ||
787 | rcu_assign_pointer(h->table, t); | 820 | rcu_assign_pointer(h->table, t); |
788 | read_unlock_bh(&set->lock); | 821 | read_unlock_bh(&set->lock); |
789 | 822 | ||
790 | /* Give time to other readers of the set */ | 823 | /* Give time to other readers of the set */ |
791 | synchronize_rcu_bh(); | 824 | synchronize_rcu_bh(); |
792 | 825 | ||
793 | ahash_destroy(orig); | 826 | ahash_destroy(orig); |
794 | 827 | ||
795 | return 0; | 828 | return 0; |
796 | } | 829 | } |
797 | 830 | ||
798 | static int | 831 | static int |
799 | type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags) | 832 | type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags) |
800 | { | 833 | { |
801 | struct ip_set_hash *h = set->data; | 834 | struct ip_set_hash *h = set->data; |
802 | struct htable *t = h->table; | 835 | struct htable *t = h->table; |
803 | const struct type_pf_elem *d = value; | 836 | const struct type_pf_elem *d = value; |
804 | struct hbucket *n; | 837 | struct hbucket *n; |
805 | struct type_pf_elem *data; | 838 | struct type_pf_elem *data; |
806 | int ret = 0, i, j = AHASH_MAX_SIZE + 1; | 839 | int ret = 0, i, j = AHASH_MAX(h) + 1; |
807 | bool flag_exist = flags & IPSET_FLAG_EXIST; | 840 | bool flag_exist = flags & IPSET_FLAG_EXIST; |
808 | u32 key; | 841 | u32 key, multi = 0; |
809 | 842 | ||
810 | if (h->elements >= h->maxelem) | 843 | if (h->elements >= h->maxelem) |
811 | /* FIXME: when set is full, we slow down here */ | 844 | /* FIXME: when set is full, we slow down here */ |
812 | type_pf_expire(h); | 845 | type_pf_expire(h); |
813 | if (h->elements >= h->maxelem) | 846 | if (h->elements >= h->maxelem) |
814 | return -IPSET_ERR_HASH_FULL; | 847 | return -IPSET_ERR_HASH_FULL; |
815 | 848 | ||
816 | rcu_read_lock_bh(); | 849 | rcu_read_lock_bh(); |
817 | t = rcu_dereference_bh(h->table); | 850 | t = rcu_dereference_bh(h->table); |
818 | key = HKEY(d, h->initval, t->htable_bits); | 851 | key = HKEY(d, h->initval, t->htable_bits); |
819 | n = hbucket(t, key); | 852 | n = hbucket(t, key); |
820 | for (i = 0; i < n->pos; i++) { | 853 | for (i = 0; i < n->pos; i++) { |
821 | data = ahash_tdata(n, i); | 854 | data = ahash_tdata(n, i); |
822 | if (type_pf_data_equal(data, d)) { | 855 | if (type_pf_data_equal(data, d, &multi)) { |
823 | if (type_pf_data_expired(data) || flag_exist) | 856 | if (type_pf_data_expired(data) || flag_exist) |
824 | j = i; | 857 | j = i; |
825 | else { | 858 | else { |
826 | ret = -IPSET_ERR_EXIST; | 859 | ret = -IPSET_ERR_EXIST; |
827 | goto out; | 860 | goto out; |
828 | } | 861 | } |
829 | } else if (j == AHASH_MAX_SIZE + 1 && | 862 | } else if (j == AHASH_MAX(h) + 1 && |
830 | type_pf_data_expired(data)) | 863 | type_pf_data_expired(data)) |
831 | j = i; | 864 | j = i; |
832 | } | 865 | } |
833 | if (j != AHASH_MAX_SIZE + 1) { | 866 | if (j != AHASH_MAX(h) + 1) { |
834 | data = ahash_tdata(n, j); | 867 | data = ahash_tdata(n, j); |
835 | #ifdef IP_SET_HASH_WITH_NETS | 868 | #ifdef IP_SET_HASH_WITH_NETS |
836 | del_cidr(h, data->cidr, HOST_MASK); | 869 | del_cidr(h, data->cidr, HOST_MASK); |
837 | add_cidr(h, d->cidr, HOST_MASK); | 870 | add_cidr(h, d->cidr, HOST_MASK); |
838 | #endif | 871 | #endif |
839 | type_pf_data_copy(data, d); | 872 | type_pf_data_copy(data, d); |
840 | type_pf_data_timeout_set(data, timeout); | 873 | type_pf_data_timeout_set(data, timeout); |
841 | goto out; | 874 | goto out; |
842 | } | 875 | } |
843 | ret = type_pf_elem_tadd(n, d, timeout); | 876 | TUNE_AHASH_MAX(h, multi); |
877 | ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), timeout); | ||
844 | if (ret != 0) { | 878 | if (ret != 0) { |
845 | if (ret == -EAGAIN) | 879 | if (ret == -EAGAIN) |
846 | type_pf_data_next(h, d); | 880 | type_pf_data_next(h, d); |
847 | goto out; | 881 | goto out; |
848 | } | 882 | } |
849 | 883 | ||
850 | #ifdef IP_SET_HASH_WITH_NETS | 884 | #ifdef IP_SET_HASH_WITH_NETS |
851 | add_cidr(h, d->cidr, HOST_MASK); | 885 | add_cidr(h, d->cidr, HOST_MASK); |
852 | #endif | 886 | #endif |
853 | h->elements++; | 887 | h->elements++; |
854 | out: | 888 | out: |
855 | rcu_read_unlock_bh(); | 889 | rcu_read_unlock_bh(); |
856 | return ret; | 890 | return ret; |
857 | } | 891 | } |
858 | 892 | ||
859 | static int | 893 | static int |
860 | type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags) | 894 | type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags) |
861 | { | 895 | { |
862 | struct ip_set_hash *h = set->data; | 896 | struct ip_set_hash *h = set->data; |
863 | struct htable *t = h->table; | 897 | struct htable *t = h->table; |
864 | const struct type_pf_elem *d = value; | 898 | const struct type_pf_elem *d = value; |
865 | struct hbucket *n; | 899 | struct hbucket *n; |
866 | int i; | 900 | int i; |
867 | struct type_pf_elem *data; | 901 | struct type_pf_elem *data; |
868 | u32 key; | 902 | u32 key, multi = 0; |
869 | 903 | ||
870 | key = HKEY(value, h->initval, t->htable_bits); | 904 | key = HKEY(value, h->initval, t->htable_bits); |
871 | n = hbucket(t, key); | 905 | n = hbucket(t, key); |
872 | for (i = 0; i < n->pos; i++) { | 906 | for (i = 0; i < n->pos; i++) { |
873 | data = ahash_tdata(n, i); | 907 | data = ahash_tdata(n, i); |
874 | if (!type_pf_data_equal(data, d)) | 908 | if (!type_pf_data_equal(data, d, &multi)) |
875 | continue; | 909 | continue; |
876 | if (type_pf_data_expired(data)) | 910 | if (type_pf_data_expired(data)) |
877 | return -IPSET_ERR_EXIST; | 911 | return -IPSET_ERR_EXIST; |
878 | if (i != n->pos - 1) | 912 | if (i != n->pos - 1) |
879 | /* Not last one */ | 913 | /* Not last one */ |
880 | type_pf_data_copy(data, ahash_tdata(n, n->pos - 1)); | 914 | type_pf_data_copy(data, ahash_tdata(n, n->pos - 1)); |
881 | 915 | ||
882 | n->pos--; | 916 | n->pos--; |
883 | h->elements--; | 917 | h->elements--; |
884 | #ifdef IP_SET_HASH_WITH_NETS | 918 | #ifdef IP_SET_HASH_WITH_NETS |
885 | del_cidr(h, d->cidr, HOST_MASK); | 919 | del_cidr(h, d->cidr, HOST_MASK); |
886 | #endif | 920 | #endif |
887 | if (n->pos + AHASH_INIT_SIZE < n->size) { | 921 | if (n->pos + AHASH_INIT_SIZE < n->size) { |
888 | void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) | 922 | void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) |
889 | * sizeof(struct type_pf_telem), | 923 | * sizeof(struct type_pf_telem), |
890 | GFP_ATOMIC); | 924 | GFP_ATOMIC); |
891 | if (!tmp) | 925 | if (!tmp) |
892 | return 0; | 926 | return 0; |
893 | n->size -= AHASH_INIT_SIZE; | 927 | n->size -= AHASH_INIT_SIZE; |
894 | memcpy(tmp, n->value, | 928 | memcpy(tmp, n->value, |
895 | n->size * sizeof(struct type_pf_telem)); | 929 | n->size * sizeof(struct type_pf_telem)); |
896 | kfree(n->value); | 930 | kfree(n->value); |
897 | n->value = tmp; | 931 | n->value = tmp; |
898 | } | 932 | } |
899 | return 0; | 933 | return 0; |
900 | } | 934 | } |
901 | 935 | ||
902 | return -IPSET_ERR_EXIST; | 936 | return -IPSET_ERR_EXIST; |
903 | } | 937 | } |
904 | 938 | ||
905 | #ifdef IP_SET_HASH_WITH_NETS | 939 | #ifdef IP_SET_HASH_WITH_NETS |
906 | static int | 940 | static int |
907 | type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) | 941 | type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) |
908 | { | 942 | { |
909 | struct ip_set_hash *h = set->data; | 943 | struct ip_set_hash *h = set->data; |
910 | struct htable *t = h->table; | 944 | struct htable *t = h->table; |
911 | struct type_pf_elem *data; | 945 | struct type_pf_elem *data; |
912 | struct hbucket *n; | 946 | struct hbucket *n; |
913 | int i, j = 0; | 947 | int i, j = 0; |
914 | u32 key; | 948 | u32 key, multi = 0; |
915 | u8 host_mask = SET_HOST_MASK(set->family); | 949 | u8 host_mask = SET_HOST_MASK(set->family); |
916 | 950 | ||
917 | for (; j < host_mask && h->nets[j].cidr; j++) { | 951 | for (; j < host_mask && h->nets[j].cidr && !multi; j++) { |
918 | type_pf_data_netmask(d, h->nets[j].cidr); | 952 | type_pf_data_netmask(d, h->nets[j].cidr); |
919 | key = HKEY(d, h->initval, t->htable_bits); | 953 | key = HKEY(d, h->initval, t->htable_bits); |
920 | n = hbucket(t, key); | 954 | n = hbucket(t, key); |
921 | for (i = 0; i < n->pos; i++) { | 955 | for (i = 0; i < n->pos; i++) { |
922 | data = ahash_tdata(n, i); | 956 | data = ahash_tdata(n, i); |
923 | if (type_pf_data_equal(data, d)) | 957 | if (type_pf_data_equal(data, d, &multi)) |
924 | return !type_pf_data_expired(data); | 958 | return !type_pf_data_expired(data); |
925 | } | 959 | } |
926 | } | 960 | } |
927 | return 0; | 961 | return 0; |
928 | } | 962 | } |
929 | #endif | 963 | #endif |
930 | 964 | ||
931 | static int | 965 | static int |
932 | type_pf_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags) | 966 | type_pf_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags) |
933 | { | 967 | { |
934 | struct ip_set_hash *h = set->data; | 968 | struct ip_set_hash *h = set->data; |
935 | struct htable *t = h->table; | 969 | struct htable *t = h->table; |
936 | struct type_pf_elem *data, *d = value; | 970 | struct type_pf_elem *data, *d = value; |
937 | struct hbucket *n; | 971 | struct hbucket *n; |
938 | int i; | 972 | int i; |
939 | u32 key; | 973 | u32 key, multi = 0; |
940 | 974 | ||
941 | #ifdef IP_SET_HASH_WITH_NETS | 975 | #ifdef IP_SET_HASH_WITH_NETS |
942 | if (d->cidr == SET_HOST_MASK(set->family)) | 976 | if (d->cidr == SET_HOST_MASK(set->family)) |
943 | return type_pf_ttest_cidrs(set, d, timeout); | 977 | return type_pf_ttest_cidrs(set, d, timeout); |
944 | #endif | 978 | #endif |
945 | key = HKEY(d, h->initval, t->htable_bits); | 979 | key = HKEY(d, h->initval, t->htable_bits); |
946 | n = hbucket(t, key); | 980 | n = hbucket(t, key); |
947 | for (i = 0; i < n->pos; i++) { | 981 | for (i = 0; i < n->pos; i++) { |
948 | data = ahash_tdata(n, i); | 982 | data = ahash_tdata(n, i); |
949 | if (type_pf_data_equal(data, d)) | 983 | if (type_pf_data_equal(data, d, &multi)) |
950 | return !type_pf_data_expired(data); | 984 | return !type_pf_data_expired(data); |
951 | } | 985 | } |
952 | return 0; | 986 | return 0; |
953 | } | 987 | } |
954 | 988 | ||
955 | static int | 989 | static int |
956 | type_pf_tlist(const struct ip_set *set, | 990 | type_pf_tlist(const struct ip_set *set, |
957 | struct sk_buff *skb, struct netlink_callback *cb) | 991 | struct sk_buff *skb, struct netlink_callback *cb) |
958 | { | 992 | { |
959 | const struct ip_set_hash *h = set->data; | 993 | const struct ip_set_hash *h = set->data; |
960 | const struct htable *t = h->table; | 994 | const struct htable *t = h->table; |
961 | struct nlattr *atd, *nested; | 995 | struct nlattr *atd, *nested; |
962 | const struct hbucket *n; | 996 | const struct hbucket *n; |
963 | const struct type_pf_elem *data; | 997 | const struct type_pf_elem *data; |
964 | u32 first = cb->args[2]; | 998 | u32 first = cb->args[2]; |
965 | /* We assume that one hash bucket fills into one page */ | 999 | /* We assume that one hash bucket fills into one page */ |
966 | void *incomplete; | 1000 | void *incomplete; |
967 | int i; | 1001 | int i; |
968 | 1002 | ||
969 | atd = ipset_nest_start(skb, IPSET_ATTR_ADT); | 1003 | atd = ipset_nest_start(skb, IPSET_ATTR_ADT); |
970 | if (!atd) | 1004 | if (!atd) |
971 | return -EMSGSIZE; | 1005 | return -EMSGSIZE; |
972 | for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { | 1006 | for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { |
973 | incomplete = skb_tail_pointer(skb); | 1007 | incomplete = skb_tail_pointer(skb); |
974 | n = hbucket(t, cb->args[2]); | 1008 | n = hbucket(t, cb->args[2]); |
975 | for (i = 0; i < n->pos; i++) { | 1009 | for (i = 0; i < n->pos; i++) { |
976 | data = ahash_tdata(n, i); | 1010 | data = ahash_tdata(n, i); |
977 | pr_debug("list %p %u\n", n, i); | 1011 | pr_debug("list %p %u\n", n, i); |
978 | if (type_pf_data_expired(data)) | 1012 | if (type_pf_data_expired(data)) |
979 | continue; | 1013 | continue; |
980 | pr_debug("do list %p %u\n", n, i); | 1014 | pr_debug("do list %p %u\n", n, i); |
981 | nested = ipset_nest_start(skb, IPSET_ATTR_DATA); | 1015 | nested = ipset_nest_start(skb, IPSET_ATTR_DATA); |
982 | if (!nested) { | 1016 | if (!nested) { |
983 | if (cb->args[2] == first) { | 1017 | if (cb->args[2] == first) { |
984 | nla_nest_cancel(skb, atd); | 1018 | nla_nest_cancel(skb, atd); |
985 | return -EMSGSIZE; | 1019 | return -EMSGSIZE; |
986 | } else | 1020 | } else |
987 | goto nla_put_failure; | 1021 | goto nla_put_failure; |
988 | } | 1022 | } |
989 | if (type_pf_data_tlist(skb, data)) | 1023 | if (type_pf_data_tlist(skb, data)) |
990 | goto nla_put_failure; | 1024 | goto nla_put_failure; |
991 | ipset_nest_end(skb, nested); | 1025 | ipset_nest_end(skb, nested); |
992 | } | 1026 | } |
993 | } | 1027 | } |
994 | ipset_nest_end(skb, atd); | 1028 | ipset_nest_end(skb, atd); |
995 | /* Set listing finished */ | 1029 | /* Set listing finished */ |
996 | cb->args[2] = 0; | 1030 | cb->args[2] = 0; |
997 | 1031 | ||
998 | return 0; | 1032 | return 0; |
999 | 1033 | ||
1000 | nla_put_failure: | 1034 | nla_put_failure: |
1001 | nlmsg_trim(skb, incomplete); | 1035 | nlmsg_trim(skb, incomplete); |
1002 | ipset_nest_end(skb, atd); | 1036 | ipset_nest_end(skb, atd); |
1003 | if (unlikely(first == cb->args[2])) { | 1037 | if (unlikely(first == cb->args[2])) { |
1004 | pr_warning("Can't list set %s: one bucket does not fit into " | 1038 | pr_warning("Can't list set %s: one bucket does not fit into " |
1005 | "a message. Please report it!\n", set->name); | 1039 | "a message. Please report it!\n", set->name); |
1006 | cb->args[2] = 0; | 1040 | cb->args[2] = 0; |
1007 | return -EMSGSIZE; | 1041 | return -EMSGSIZE; |
1008 | } | 1042 | } |
1009 | return 0; | 1043 | return 0; |
1010 | } | 1044 | } |
1011 | 1045 | ||
1012 | static const struct ip_set_type_variant type_pf_tvariant = { | 1046 | static const struct ip_set_type_variant type_pf_tvariant = { |
1013 | .kadt = type_pf_kadt, | 1047 | .kadt = type_pf_kadt, |
1014 | .uadt = type_pf_uadt, | 1048 | .uadt = type_pf_uadt, |
1015 | .adt = { | 1049 | .adt = { |
1016 | [IPSET_ADD] = type_pf_tadd, | 1050 | [IPSET_ADD] = type_pf_tadd, |
1017 | [IPSET_DEL] = type_pf_tdel, | 1051 | [IPSET_DEL] = type_pf_tdel, |
1018 | [IPSET_TEST] = type_pf_ttest, | 1052 | [IPSET_TEST] = type_pf_ttest, |
1019 | }, | 1053 | }, |
1020 | .destroy = type_pf_destroy, | 1054 | .destroy = type_pf_destroy, |
1021 | .flush = type_pf_flush, | 1055 | .flush = type_pf_flush, |
1022 | .head = type_pf_head, | 1056 | .head = type_pf_head, |
1023 | .list = type_pf_tlist, | 1057 | .list = type_pf_tlist, |
1024 | .resize = type_pf_tresize, | 1058 | .resize = type_pf_tresize, |
1025 | .same_set = type_pf_same_set, | 1059 | .same_set = type_pf_same_set, |
1026 | }; | 1060 | }; |
1027 | 1061 | ||
1028 | static void | 1062 | static void |
1029 | type_pf_gc(unsigned long ul_set) | 1063 | type_pf_gc(unsigned long ul_set) |
1030 | { | 1064 | { |
1031 | struct ip_set *set = (struct ip_set *) ul_set; | 1065 | struct ip_set *set = (struct ip_set *) ul_set; |
1032 | struct ip_set_hash *h = set->data; | 1066 | struct ip_set_hash *h = set->data; |
1033 | 1067 | ||
1034 | pr_debug("called\n"); | 1068 | pr_debug("called\n"); |
1035 | write_lock_bh(&set->lock); | 1069 | write_lock_bh(&set->lock); |
1036 | type_pf_expire(h); | 1070 | type_pf_expire(h); |
1037 | write_unlock_bh(&set->lock); | 1071 | write_unlock_bh(&set->lock); |
1038 | 1072 | ||
1039 | h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; | 1073 | h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; |
1040 | add_timer(&h->gc); | 1074 | add_timer(&h->gc); |
1041 | } | 1075 | } |
1042 | 1076 | ||
1043 | static void | 1077 | static void |
1044 | type_pf_gc_init(struct ip_set *set) | 1078 | type_pf_gc_init(struct ip_set *set) |
1045 | { | 1079 | { |
1046 | struct ip_set_hash *h = set->data; | 1080 | struct ip_set_hash *h = set->data; |
1047 | 1081 | ||
1048 | init_timer(&h->gc); | 1082 | init_timer(&h->gc); |
1049 | h->gc.data = (unsigned long) set; | 1083 | h->gc.data = (unsigned long) set; |
1050 | h->gc.function = type_pf_gc; | 1084 | h->gc.function = type_pf_gc; |
1051 | h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; | 1085 | h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; |
1052 | add_timer(&h->gc); | 1086 | add_timer(&h->gc); |
1053 | pr_debug("gc initialized, run in every %u\n", | 1087 | pr_debug("gc initialized, run in every %u\n", |
1054 | IPSET_GC_PERIOD(h->timeout)); | 1088 | IPSET_GC_PERIOD(h->timeout)); |
1055 | } | 1089 | } |
1056 | 1090 | ||
1091 | #undef HKEY_DATALEN | ||
1092 | #undef HKEY | ||
1057 | #undef type_pf_data_equal | 1093 | #undef type_pf_data_equal |
1058 | #undef type_pf_data_isnull | 1094 | #undef type_pf_data_isnull |
1059 | #undef type_pf_data_copy | 1095 | #undef type_pf_data_copy |
1060 | #undef type_pf_data_zero_out | 1096 | #undef type_pf_data_zero_out |
1061 | #undef type_pf_data_list | 1097 | #undef type_pf_data_list |
1062 | #undef type_pf_data_tlist | 1098 | #undef type_pf_data_tlist |
1063 | 1099 | ||
1064 | #undef type_pf_elem | 1100 | #undef type_pf_elem |
1065 | #undef type_pf_telem | 1101 | #undef type_pf_telem |
1066 | #undef type_pf_data_timeout | 1102 | #undef type_pf_data_timeout |
1067 | #undef type_pf_data_expired | 1103 | #undef type_pf_data_expired |
1068 | #undef type_pf_data_netmask | 1104 | #undef type_pf_data_netmask |
1069 | #undef type_pf_data_timeout_set | 1105 | #undef type_pf_data_timeout_set |
1070 | 1106 | ||
1071 | #undef type_pf_elem_add | 1107 | #undef type_pf_elem_add |
1072 | #undef type_pf_add | 1108 | #undef type_pf_add |
1073 | #undef type_pf_del | 1109 | #undef type_pf_del |
1074 | #undef type_pf_test_cidrs | 1110 | #undef type_pf_test_cidrs |
1075 | #undef type_pf_test | 1111 | #undef type_pf_test |
1076 | 1112 | ||
1077 | #undef type_pf_elem_tadd | 1113 | #undef type_pf_elem_tadd |
1078 | #undef type_pf_expire | 1114 | #undef type_pf_expire |
1079 | #undef type_pf_tadd | 1115 | #undef type_pf_tadd |
1080 | #undef type_pf_tdel | 1116 | #undef type_pf_tdel |
1081 | #undef type_pf_ttest_cidrs | 1117 | #undef type_pf_ttest_cidrs |
1082 | #undef type_pf_ttest | 1118 | #undef type_pf_ttest |
1083 | 1119 | ||
1084 | #undef type_pf_resize | 1120 | #undef type_pf_resize |
1085 | #undef type_pf_tresize | 1121 | #undef type_pf_tresize |
1086 | #undef type_pf_flush | 1122 | #undef type_pf_flush |
1087 | #undef type_pf_destroy | 1123 | #undef type_pf_destroy |
1088 | #undef type_pf_head | 1124 | #undef type_pf_head |
1089 | #undef type_pf_list | 1125 | #undef type_pf_list |
1090 | #undef type_pf_tlist | 1126 | #undef type_pf_tlist |
1091 | #undef type_pf_same_set | 1127 | #undef type_pf_same_set |
1092 | #undef type_pf_kadt | 1128 | #undef type_pf_kadt |
1093 | #undef type_pf_uadt | 1129 | #undef type_pf_uadt |
1094 | #undef type_pf_gc | 1130 | #undef type_pf_gc |
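
What the ip_set_ahash.h hunks above do: the fixed per-bucket limit AHASH_MAX_SIZE is replaced by a per-set limit AHASH_MAX(h), the element comparators gain a multi out-parameter through which a set type can signal that several elements may legitimately share a hash slot (as in hash:net,iface), and TUNE_AHASH_MAX(h, multi) lets the add path enlarge the per-set limit when such a match is seen. The sketch below shows one way the tunable macros can be wired up, assuming a set type that defines IP_SET_HASH_WITH_MULTI and keeps the limit in h->ahash_max; the helper name tune_ahash_max() and the 64-element ceiling are illustrative assumptions, not a quote of this header.

/* Sketch only: per-set tunable bucket size. */
#ifdef IP_SET_HASH_WITH_MULTI
#define AHASH_MAX(h)			((h)->ahash_max)

static inline u8
tune_ahash_max(u8 curr, u32 multi)
{
	u32 n;

	if (multi < curr)
		return curr;		/* the colliding slot already fits */

	n = curr + AHASH_INIT_SIZE;
	/* When listing, one bucket must still fit into one netlink
	 * message, so cap the growth (64 is an assumed ceiling). */
	return n > curr && n <= 64 ? n : curr;
}

#define TUNE_AHASH_MAX(h, multi)	\
	((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi))
#else
#define AHASH_MAX(h)			AHASH_MAX_SIZE
#define TUNE_AHASH_MAX(h, multi)
#endif
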
include/linux/netfilter/nfnetlink.h
1 | #ifndef _NFNETLINK_H | 1 | #ifndef _NFNETLINK_H |
2 | #define _NFNETLINK_H | 2 | #define _NFNETLINK_H |
3 | #include <linux/types.h> | 3 | #include <linux/types.h> |
4 | #include <linux/netfilter/nfnetlink_compat.h> | 4 | #include <linux/netfilter/nfnetlink_compat.h> |
5 | 5 | ||
6 | enum nfnetlink_groups { | 6 | enum nfnetlink_groups { |
7 | NFNLGRP_NONE, | 7 | NFNLGRP_NONE, |
8 | #define NFNLGRP_NONE NFNLGRP_NONE | 8 | #define NFNLGRP_NONE NFNLGRP_NONE |
9 | NFNLGRP_CONNTRACK_NEW, | 9 | NFNLGRP_CONNTRACK_NEW, |
10 | #define NFNLGRP_CONNTRACK_NEW NFNLGRP_CONNTRACK_NEW | 10 | #define NFNLGRP_CONNTRACK_NEW NFNLGRP_CONNTRACK_NEW |
11 | NFNLGRP_CONNTRACK_UPDATE, | 11 | NFNLGRP_CONNTRACK_UPDATE, |
12 | #define NFNLGRP_CONNTRACK_UPDATE NFNLGRP_CONNTRACK_UPDATE | 12 | #define NFNLGRP_CONNTRACK_UPDATE NFNLGRP_CONNTRACK_UPDATE |
13 | NFNLGRP_CONNTRACK_DESTROY, | 13 | NFNLGRP_CONNTRACK_DESTROY, |
14 | #define NFNLGRP_CONNTRACK_DESTROY NFNLGRP_CONNTRACK_DESTROY | 14 | #define NFNLGRP_CONNTRACK_DESTROY NFNLGRP_CONNTRACK_DESTROY |
15 | NFNLGRP_CONNTRACK_EXP_NEW, | 15 | NFNLGRP_CONNTRACK_EXP_NEW, |
16 | #define NFNLGRP_CONNTRACK_EXP_NEW NFNLGRP_CONNTRACK_EXP_NEW | 16 | #define NFNLGRP_CONNTRACK_EXP_NEW NFNLGRP_CONNTRACK_EXP_NEW |
17 | NFNLGRP_CONNTRACK_EXP_UPDATE, | 17 | NFNLGRP_CONNTRACK_EXP_UPDATE, |
18 | #define NFNLGRP_CONNTRACK_EXP_UPDATE NFNLGRP_CONNTRACK_EXP_UPDATE | 18 | #define NFNLGRP_CONNTRACK_EXP_UPDATE NFNLGRP_CONNTRACK_EXP_UPDATE |
19 | NFNLGRP_CONNTRACK_EXP_DESTROY, | 19 | NFNLGRP_CONNTRACK_EXP_DESTROY, |
20 | #define NFNLGRP_CONNTRACK_EXP_DESTROY NFNLGRP_CONNTRACK_EXP_DESTROY | 20 | #define NFNLGRP_CONNTRACK_EXP_DESTROY NFNLGRP_CONNTRACK_EXP_DESTROY |
21 | __NFNLGRP_MAX, | 21 | __NFNLGRP_MAX, |
22 | }; | 22 | }; |
23 | #define NFNLGRP_MAX (__NFNLGRP_MAX - 1) | 23 | #define NFNLGRP_MAX (__NFNLGRP_MAX - 1) |
24 | 24 | ||
25 | /* General form of address family dependent message. | 25 | /* General form of address family dependent message. |
26 | */ | 26 | */ |
27 | struct nfgenmsg { | 27 | struct nfgenmsg { |
28 | __u8 nfgen_family; /* AF_xxx */ | 28 | __u8 nfgen_family; /* AF_xxx */ |
29 | __u8 version; /* nfnetlink version */ | 29 | __u8 version; /* nfnetlink version */ |
30 | __be16 res_id; /* resource id */ | 30 | __be16 res_id; /* resource id */ |
31 | }; | 31 | }; |
32 | 32 | ||
33 | #define NFNETLINK_V0 0 | 33 | #define NFNETLINK_V0 0 |
34 | 34 | ||
35 | /* netfilter netlink message types are split in two pieces: | 35 | /* netfilter netlink message types are split in two pieces: |
36 | * 8 bit subsystem, 8bit operation. | 36 | * 8 bit subsystem, 8bit operation. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | #define NFNL_SUBSYS_ID(x) ((x & 0xff00) >> 8) | 39 | #define NFNL_SUBSYS_ID(x) ((x & 0xff00) >> 8) |
40 | #define NFNL_MSG_TYPE(x) (x & 0x00ff) | 40 | #define NFNL_MSG_TYPE(x) (x & 0x00ff) |
41 | 41 | ||
42 | /* No enum here, otherwise __stringify() trick of MODULE_ALIAS_NFNL_SUBSYS() | 42 | /* No enum here, otherwise __stringify() trick of MODULE_ALIAS_NFNL_SUBSYS() |
43 | * won't work anymore */ | 43 | * won't work anymore */ |
44 | #define NFNL_SUBSYS_NONE 0 | 44 | #define NFNL_SUBSYS_NONE 0 |
45 | #define NFNL_SUBSYS_CTNETLINK 1 | 45 | #define NFNL_SUBSYS_CTNETLINK 1 |
46 | #define NFNL_SUBSYS_CTNETLINK_EXP 2 | 46 | #define NFNL_SUBSYS_CTNETLINK_EXP 2 |
47 | #define NFNL_SUBSYS_QUEUE 3 | 47 | #define NFNL_SUBSYS_QUEUE 3 |
48 | #define NFNL_SUBSYS_ULOG 4 | 48 | #define NFNL_SUBSYS_ULOG 4 |
49 | #define NFNL_SUBSYS_OSF 5 | 49 | #define NFNL_SUBSYS_OSF 5 |
50 | #define NFNL_SUBSYS_IPSET 6 | 50 | #define NFNL_SUBSYS_IPSET 6 |
51 | #define NFNL_SUBSYS_COUNT 7 | 51 | #define NFNL_SUBSYS_COUNT 7 |
52 | 52 | ||
53 | #ifdef __KERNEL__ | 53 | #ifdef __KERNEL__ |
54 | 54 | ||
55 | #include <linux/netlink.h> | 55 | #include <linux/netlink.h> |
56 | #include <linux/capability.h> | 56 | #include <linux/capability.h> |
57 | #include <net/netlink.h> | 57 | #include <net/netlink.h> |
58 | 58 | ||
59 | struct nfnl_callback { | 59 | struct nfnl_callback { |
60 | int (*call)(struct sock *nl, struct sk_buff *skb, | 60 | int (*call)(struct sock *nl, struct sk_buff *skb, |
61 | const struct nlmsghdr *nlh, | 61 | const struct nlmsghdr *nlh, |
62 | const struct nlattr * const cda[]); | 62 | const struct nlattr * const cda[]); |
63 | int (*call_rcu)(struct sock *nl, struct sk_buff *skb, | ||
64 | const struct nlmsghdr *nlh, | ||
65 | const struct nlattr * const cda[]); | ||
63 | const struct nla_policy *policy; /* netlink attribute policy */ | 66 | const struct nla_policy *policy; /* netlink attribute policy */ |
64 | const u_int16_t attr_count; /* number of nlattr's */ | 67 | const u_int16_t attr_count; /* number of nlattr's */ |
65 | }; | 68 | }; |
66 | 69 | ||
67 | struct nfnetlink_subsystem { | 70 | struct nfnetlink_subsystem { |
68 | const char *name; | 71 | const char *name; |
69 | __u8 subsys_id; /* nfnetlink subsystem ID */ | 72 | __u8 subsys_id; /* nfnetlink subsystem ID */ |
70 | __u8 cb_count; /* number of callbacks */ | 73 | __u8 cb_count; /* number of callbacks */ |
71 | const struct nfnl_callback *cb; /* callback for individual types */ | 74 | const struct nfnl_callback *cb; /* callback for individual types */ |
72 | }; | 75 | }; |
73 | 76 | ||
74 | extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); | 77 | extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); |
75 | extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); | 78 | extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); |
76 | 79 | ||
77 | extern int nfnetlink_has_listeners(struct net *net, unsigned int group); | 80 | extern int nfnetlink_has_listeners(struct net *net, unsigned int group); |
78 | extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, | 81 | extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, |
79 | int echo, gfp_t flags); | 82 | int echo, gfp_t flags); |
80 | extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error); | 83 | extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error); |
81 | extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags); | 84 | extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags); |
82 | 85 | ||
83 | extern void nfnl_lock(void); | 86 | extern void nfnl_lock(void); |
84 | extern void nfnl_unlock(void); | 87 | extern void nfnl_unlock(void); |
85 | 88 | ||
86 | #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ | 89 | #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ |
87 | MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) | 90 | MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) |
88 | 91 | ||
89 | #endif /* __KERNEL__ */ | 92 | #endif /* __KERNEL__ */ |
90 | #endif /* _NFNETLINK_H */ | 93 | #endif /* _NFNETLINK_H */ |
91 | 94 |
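
What the nfnetlink.h hunk above does: struct nfnl_callback grows an optional call_rcu handler, so a subsystem can ask nfnetlink to dispatch a message type under rcu_read_lock() instead of under the global nfnl mutex (nfnetlink_queue uses this for its verdict fast path). A hedged sketch of a callback table filling in the new member follows; the handler and policy names mirror the queue subsystem, but their definitions are not shown here, so treat them as placeholders.

/* Sketch: one mutex-protected and one RCU-protected handler.
 * nfqnl_recv_config()/nfqnl_recv_verdict() and the policies are
 * placeholders standing in for the subsystem's real implementations. */
static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_CONFIG] = {
		.call		= nfqnl_recv_config,	/* runs under nfnl_lock() */
		.policy		= nfqa_cfg_policy,
		.attr_count	= NFQA_CFG_MAX,
	},
	[NFQNL_MSG_VERDICT] = {
		.call_rcu	= nfqnl_recv_verdict,	/* runs under rcu_read_lock() */
		.policy		= nfqa_verdict_policy,
		.attr_count	= NFQA_MAX,
	},
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};
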
include/linux/netfilter/nfnetlink_queue.h
1 | #ifndef _NFNETLINK_QUEUE_H | 1 | #ifndef _NFNETLINK_QUEUE_H |
2 | #define _NFNETLINK_QUEUE_H | 2 | #define _NFNETLINK_QUEUE_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/netfilter/nfnetlink.h> | 5 | #include <linux/netfilter/nfnetlink.h> |
6 | 6 | ||
7 | enum nfqnl_msg_types { | 7 | enum nfqnl_msg_types { |
8 | NFQNL_MSG_PACKET, /* packet from kernel to userspace */ | 8 | NFQNL_MSG_PACKET, /* packet from kernel to userspace */ |
9 | NFQNL_MSG_VERDICT, /* verdict from userspace to kernel */ | 9 | NFQNL_MSG_VERDICT, /* verdict from userspace to kernel */ |
10 | NFQNL_MSG_CONFIG, /* connect to a particular queue */ | 10 | NFQNL_MSG_CONFIG, /* connect to a particular queue */ |
11 | NFQNL_MSG_VERDICT_BATCH, /* batch verdict from userspace to kernel */ ||
11 | 12 | ||
12 | NFQNL_MSG_MAX | 13 | NFQNL_MSG_MAX |
13 | }; | 14 | }; |
14 | 15 | ||
15 | struct nfqnl_msg_packet_hdr { | 16 | struct nfqnl_msg_packet_hdr { |
16 | __be32 packet_id; /* unique ID of packet in queue */ | 17 | __be32 packet_id; /* unique ID of packet in queue */ |
17 | __be16 hw_protocol; /* hw protocol (network order) */ | 18 | __be16 hw_protocol; /* hw protocol (network order) */ |
18 | __u8 hook; /* netfilter hook */ | 19 | __u8 hook; /* netfilter hook */ |
19 | } __attribute__ ((packed)); | 20 | } __attribute__ ((packed)); |
20 | 21 | ||
21 | struct nfqnl_msg_packet_hw { | 22 | struct nfqnl_msg_packet_hw { |
22 | __be16 hw_addrlen; | 23 | __be16 hw_addrlen; |
23 | __u16 _pad; | 24 | __u16 _pad; |
24 | __u8 hw_addr[8]; | 25 | __u8 hw_addr[8]; |
25 | }; | 26 | }; |
26 | 27 | ||
27 | struct nfqnl_msg_packet_timestamp { | 28 | struct nfqnl_msg_packet_timestamp { |
28 | __aligned_be64 sec; | 29 | __aligned_be64 sec; |
29 | __aligned_be64 usec; | 30 | __aligned_be64 usec; |
30 | }; | 31 | }; |
31 | 32 | ||
32 | enum nfqnl_attr_type { | 33 | enum nfqnl_attr_type { |
33 | NFQA_UNSPEC, | 34 | NFQA_UNSPEC, |
34 | NFQA_PACKET_HDR, | 35 | NFQA_PACKET_HDR, |
35 | NFQA_VERDICT_HDR, /* nfqnl_msg_verdict_hdr */ | 36 | NFQA_VERDICT_HDR, /* nfqnl_msg_verdict_hdr */ |
36 | NFQA_MARK, /* __u32 nfmark */ | 37 | NFQA_MARK, /* __u32 nfmark */ |
37 | NFQA_TIMESTAMP, /* nfqnl_msg_packet_timestamp */ | 38 | NFQA_TIMESTAMP, /* nfqnl_msg_packet_timestamp */ |
38 | NFQA_IFINDEX_INDEV, /* __u32 ifindex */ | 39 | NFQA_IFINDEX_INDEV, /* __u32 ifindex */ |
39 | NFQA_IFINDEX_OUTDEV, /* __u32 ifindex */ | 40 | NFQA_IFINDEX_OUTDEV, /* __u32 ifindex */ |
40 | NFQA_IFINDEX_PHYSINDEV, /* __u32 ifindex */ | 41 | NFQA_IFINDEX_PHYSINDEV, /* __u32 ifindex */ |
41 | NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ | 42 | NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ |
42 | NFQA_HWADDR, /* nfqnl_msg_packet_hw */ | 43 | NFQA_HWADDR, /* nfqnl_msg_packet_hw */ |
43 | NFQA_PAYLOAD, /* opaque data payload */ | 44 | NFQA_PAYLOAD, /* opaque data payload */ |
44 | 45 | ||
45 | __NFQA_MAX | 46 | __NFQA_MAX |
46 | }; | 47 | }; |
47 | #define NFQA_MAX (__NFQA_MAX - 1) | 48 | #define NFQA_MAX (__NFQA_MAX - 1) |
48 | 49 | ||
49 | struct nfqnl_msg_verdict_hdr { | 50 | struct nfqnl_msg_verdict_hdr { |
50 | __be32 verdict; | 51 | __be32 verdict; |
51 | __be32 id; | 52 | __be32 id; |
52 | }; | 53 | }; |
53 | 54 | ||
54 | 55 | ||
55 | enum nfqnl_msg_config_cmds { | 56 | enum nfqnl_msg_config_cmds { |
56 | NFQNL_CFG_CMD_NONE, | 57 | NFQNL_CFG_CMD_NONE, |
57 | NFQNL_CFG_CMD_BIND, | 58 | NFQNL_CFG_CMD_BIND, |
58 | NFQNL_CFG_CMD_UNBIND, | 59 | NFQNL_CFG_CMD_UNBIND, |
59 | NFQNL_CFG_CMD_PF_BIND, | 60 | NFQNL_CFG_CMD_PF_BIND, |
60 | NFQNL_CFG_CMD_PF_UNBIND, | 61 | NFQNL_CFG_CMD_PF_UNBIND, |
61 | }; | 62 | }; |
62 | 63 | ||
63 | struct nfqnl_msg_config_cmd { | 64 | struct nfqnl_msg_config_cmd { |
64 | __u8 command; /* nfqnl_msg_config_cmds */ | 65 | __u8 command; /* nfqnl_msg_config_cmds */ |
65 | __u8 _pad; | 66 | __u8 _pad; |
66 | __be16 pf; /* AF_xxx for PF_[UN]BIND */ | 67 | __be16 pf; /* AF_xxx for PF_[UN]BIND */ |
67 | }; | 68 | }; |
68 | 69 | ||
69 | enum nfqnl_config_mode { | 70 | enum nfqnl_config_mode { |
70 | NFQNL_COPY_NONE, | 71 | NFQNL_COPY_NONE, |
71 | NFQNL_COPY_META, | 72 | NFQNL_COPY_META, |
72 | NFQNL_COPY_PACKET, | 73 | NFQNL_COPY_PACKET, |
73 | }; | 74 | }; |
74 | 75 | ||
75 | struct nfqnl_msg_config_params { | 76 | struct nfqnl_msg_config_params { |
76 | __be32 copy_range; | 77 | __be32 copy_range; |
77 | __u8 copy_mode; /* enum nfqnl_config_mode */ | 78 | __u8 copy_mode; /* enum nfqnl_config_mode */ |
78 | } __attribute__ ((packed)); | 79 | } __attribute__ ((packed)); |
79 | 80 | ||
80 | 81 | ||
81 | enum nfqnl_attr_config { | 82 | enum nfqnl_attr_config { |
82 | NFQA_CFG_UNSPEC, | 83 | NFQA_CFG_UNSPEC, |
83 | NFQA_CFG_CMD, /* nfqnl_msg_config_cmd */ | 84 | NFQA_CFG_CMD, /* nfqnl_msg_config_cmd */ |
84 | NFQA_CFG_PARAMS, /* nfqnl_msg_config_params */ | 85 | NFQA_CFG_PARAMS, /* nfqnl_msg_config_params */ |
85 | NFQA_CFG_QUEUE_MAXLEN, /* __u32 */ | 86 | NFQA_CFG_QUEUE_MAXLEN, /* __u32 */ |
86 | __NFQA_CFG_MAX | 87 | __NFQA_CFG_MAX |
87 | }; | 88 | }; |
88 | #define NFQA_CFG_MAX (__NFQA_CFG_MAX-1) | 89 | #define NFQA_CFG_MAX (__NFQA_CFG_MAX-1) |
89 | 90 | ||
90 | #endif /* _NFNETLINK_QUEUE_H */ | 91 | #endif /* _NFNETLINK_QUEUE_H */ |
91 | 92 |
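
What the nfnetlink_queue.h hunk above adds: a new message type, NFQNL_MSG_VERDICT_BATCH, so userspace can acknowledge many queued packets with a single netlink message instead of one NFQNL_MSG_VERDICT per packet. The sketch below only shows the payload; the batch variant is generally understood to apply the verdict to every pending packet whose id is at or below the given id, and the surrounding netlink plumbing (nlmsghdr, nfgenmsg with res_id set to the queue number, an NFQA_VERDICT_HDR attribute) is omitted and would normally come from libmnl or libnetfilter_queue.

/* Sketch: payload of a batch verdict, reusing nfqnl_msg_verdict_hdr.
 * Here id names the highest queued packet the verdict should cover
 * (an assumption about the batch semantics, not quoted from the header). */
#include <stdint.h>
#include <arpa/inet.h>				/* htonl */
#include <linux/netfilter.h>			/* NF_ACCEPT */
#include <linux/netfilter/nfnetlink_queue.h>

static struct nfqnl_msg_verdict_hdr batch_accept_up_to(uint32_t max_id)
{
	struct nfqnl_msg_verdict_hdr vh = {
		.verdict = htonl(NF_ACCEPT),	/* verdict for the whole batch */
		.id	 = htonl(max_id),	/* highest packet id covered */
	};
	return vh;
}
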
kernel/audit.c
1 | /* audit.c -- Auditing support | 1 | /* audit.c -- Auditing support |
2 | * Gateway between the kernel (e.g., selinux) and the user-space audit daemon. | 2 | * Gateway between the kernel (e.g., selinux) and the user-space audit daemon. |
3 | * System-call specific features have moved to auditsc.c | 3 | * System-call specific features have moved to auditsc.c |
4 | * | 4 | * |
5 | * Copyright 2003-2007 Red Hat Inc., Durham, North Carolina. | 5 | * Copyright 2003-2007 Red Hat Inc., Durham, North Carolina. |
6 | * All Rights Reserved. | 6 | * All Rights Reserved. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or | 10 | * the Free Software Foundation; either version 2 of the License, or |
11 | * (at your option) any later version. | 11 | * (at your option) any later version. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
21 | * | 21 | * |
22 | * Written by Rickard E. (Rik) Faith <faith@redhat.com> | 22 | * Written by Rickard E. (Rik) Faith <faith@redhat.com> |
23 | * | 23 | * |
24 | * Goals: 1) Integrate fully with Security Modules. | 24 | * Goals: 1) Integrate fully with Security Modules. |
25 | * 2) Minimal run-time overhead: | 25 | * 2) Minimal run-time overhead: |
26 | * a) Minimal when syscall auditing is disabled (audit_enable=0). | 26 | * a) Minimal when syscall auditing is disabled (audit_enable=0). |
27 | * b) Small when syscall auditing is enabled and no audit record | 27 | * b) Small when syscall auditing is enabled and no audit record |
28 | * is generated (defer as much work as possible to record | 28 | * is generated (defer as much work as possible to record |
29 | * generation time): | 29 | * generation time): |
30 | * i) context is allocated, | 30 | * i) context is allocated, |
31 | * ii) names from getname are stored without a copy, and | 31 | * ii) names from getname are stored without a copy, and |
32 | * iii) inode information stored from path_lookup. | 32 | * iii) inode information stored from path_lookup. |
33 | * 3) Ability to disable syscall auditing at boot time (audit=0). | 33 | * 3) Ability to disable syscall auditing at boot time (audit=0). |
34 | * 4) Usable by other parts of the kernel (if audit_log* is called, | 34 | * 4) Usable by other parts of the kernel (if audit_log* is called, |
35 | * then a syscall record will be generated automatically for the | 35 | * then a syscall record will be generated automatically for the |
36 | * current syscall). | 36 | * current syscall). |
37 | * 5) Netlink interface to user-space. | 37 | * 5) Netlink interface to user-space. |
38 | * 6) Support low-overhead kernel-based filtering to minimize the | 38 | * 6) Support low-overhead kernel-based filtering to minimize the |
39 | * information that must be passed to user-space. | 39 | * information that must be passed to user-space. |
40 | * | 40 | * |
41 | * Example user-space utilities: http://people.redhat.com/sgrubb/audit/ | 41 | * Example user-space utilities: http://people.redhat.com/sgrubb/audit/ |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/init.h> | 44 | #include <linux/init.h> |
45 | #include <asm/types.h> | 45 | #include <asm/types.h> |
46 | #include <asm/atomic.h> | 46 | #include <asm/atomic.h> |
47 | #include <linux/mm.h> | 47 | #include <linux/mm.h> |
48 | #include <linux/module.h> | 48 | #include <linux/module.h> |
49 | #include <linux/slab.h> | 49 | #include <linux/slab.h> |
50 | #include <linux/err.h> | 50 | #include <linux/err.h> |
51 | #include <linux/kthread.h> | 51 | #include <linux/kthread.h> |
52 | 52 | ||
53 | #include <linux/audit.h> | 53 | #include <linux/audit.h> |
54 | 54 | ||
55 | #include <net/sock.h> | 55 | #include <net/sock.h> |
56 | #include <net/netlink.h> | 56 | #include <net/netlink.h> |
57 | #include <linux/skbuff.h> | 57 | #include <linux/skbuff.h> |
58 | #ifdef CONFIG_SECURITY | ||
59 | #include <linux/security.h> | ||
60 | #endif | ||
58 | #include <linux/netlink.h> | 61 | #include <linux/netlink.h> |
59 | #include <linux/freezer.h> | 62 | #include <linux/freezer.h> |
60 | #include <linux/tty.h> | 63 | #include <linux/tty.h> |
61 | 64 | ||
62 | #include "audit.h" | 65 | #include "audit.h" |
63 | 66 | ||
64 | /* No auditing will take place until audit_initialized == AUDIT_INITIALIZED. | 67 | /* No auditing will take place until audit_initialized == AUDIT_INITIALIZED. |
65 | * (Initialization happens after skb_init is called.) */ | 68 | * (Initialization happens after skb_init is called.) */ |
66 | #define AUDIT_DISABLED -1 | 69 | #define AUDIT_DISABLED -1 |
67 | #define AUDIT_UNINITIALIZED 0 | 70 | #define AUDIT_UNINITIALIZED 0 |
68 | #define AUDIT_INITIALIZED 1 | 71 | #define AUDIT_INITIALIZED 1 |
69 | static int audit_initialized; | 72 | static int audit_initialized; |
70 | 73 | ||
71 | #define AUDIT_OFF 0 | 74 | #define AUDIT_OFF 0 |
72 | #define AUDIT_ON 1 | 75 | #define AUDIT_ON 1 |
73 | #define AUDIT_LOCKED 2 | 76 | #define AUDIT_LOCKED 2 |
74 | int audit_enabled; | 77 | int audit_enabled; |
75 | int audit_ever_enabled; | 78 | int audit_ever_enabled; |
76 | 79 | ||
77 | EXPORT_SYMBOL_GPL(audit_enabled); | 80 | EXPORT_SYMBOL_GPL(audit_enabled); |
78 | 81 | ||
79 | /* Default state when kernel boots without any parameters. */ | 82 | /* Default state when kernel boots without any parameters. */ |
80 | static int audit_default; | 83 | static int audit_default; |
81 | 84 | ||
82 | /* If auditing cannot proceed, audit_failure selects what happens. */ | 85 | /* If auditing cannot proceed, audit_failure selects what happens. */ |
83 | static int audit_failure = AUDIT_FAIL_PRINTK; | 86 | static int audit_failure = AUDIT_FAIL_PRINTK; |
84 | 87 | ||
85 | /* | 88 | /* |
86 | * If audit records are to be written to the netlink socket, audit_pid | 89 | * If audit records are to be written to the netlink socket, audit_pid |
87 | * contains the pid of the auditd process and audit_nlk_pid contains | 90 | * contains the pid of the auditd process and audit_nlk_pid contains |
88 | * the pid to use to send netlink messages to that process. | 91 | * the pid to use to send netlink messages to that process. |
89 | */ | 92 | */ |
90 | int audit_pid; | 93 | int audit_pid; |
91 | static int audit_nlk_pid; | 94 | static int audit_nlk_pid; |
92 | 95 | ||
93 | /* If audit_rate_limit is non-zero, limit the rate of sending audit records | 96 | /* If audit_rate_limit is non-zero, limit the rate of sending audit records |
94 | * to that number per second. This prevents DoS attacks, but results in | 97 | * to that number per second. This prevents DoS attacks, but results in |
95 | * audit records being dropped. */ | 98 | * audit records being dropped. */ |
96 | static int audit_rate_limit; | 99 | static int audit_rate_limit; |
97 | 100 | ||
98 | /* Number of outstanding audit_buffers allowed. */ | 101 | /* Number of outstanding audit_buffers allowed. */ |
99 | static int audit_backlog_limit = 64; | 102 | static int audit_backlog_limit = 64; |
100 | static int audit_backlog_wait_time = 60 * HZ; | 103 | static int audit_backlog_wait_time = 60 * HZ; |
101 | static int audit_backlog_wait_overflow = 0; | 104 | static int audit_backlog_wait_overflow = 0; |
102 | 105 | ||
103 | /* The identity of the user shutting down the audit system. */ | 106 | /* The identity of the user shutting down the audit system. */ |
104 | uid_t audit_sig_uid = -1; | 107 | uid_t audit_sig_uid = -1; |
105 | pid_t audit_sig_pid = -1; | 108 | pid_t audit_sig_pid = -1; |
106 | u32 audit_sig_sid = 0; | 109 | u32 audit_sig_sid = 0; |
107 | 110 | ||
108 | /* Records can be lost in several ways: | 111 | /* Records can be lost in several ways: |
109 | 0) [suppressed in audit_alloc] | 112 | 0) [suppressed in audit_alloc] |
110 | 1) out of memory in audit_log_start [kmalloc of struct audit_buffer] | 113 | 1) out of memory in audit_log_start [kmalloc of struct audit_buffer] |
111 | 2) out of memory in audit_log_move [alloc_skb] | 114 | 2) out of memory in audit_log_move [alloc_skb] |
112 | 3) suppressed due to audit_rate_limit | 115 | 3) suppressed due to audit_rate_limit |
113 | 4) suppressed due to audit_backlog_limit | 116 | 4) suppressed due to audit_backlog_limit |
114 | */ | 117 | */ |
115 | static atomic_t audit_lost = ATOMIC_INIT(0); | 118 | static atomic_t audit_lost = ATOMIC_INIT(0); |
116 | 119 | ||
117 | /* The netlink socket. */ | 120 | /* The netlink socket. */ |
118 | static struct sock *audit_sock; | 121 | static struct sock *audit_sock; |
119 | 122 | ||
120 | /* Hash for inode-based rules */ | 123 | /* Hash for inode-based rules */ |
121 | struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; | 124 | struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; |
122 | 125 | ||
123 | /* The audit_freelist is a list of pre-allocated audit buffers (if more | 126 | /* The audit_freelist is a list of pre-allocated audit buffers (if more |
124 | * than AUDIT_MAXFREE are in use, the audit buffer is freed instead of | 127 | * than AUDIT_MAXFREE are in use, the audit buffer is freed instead of |
125 | * being placed on the freelist). */ | 128 | * being placed on the freelist). */ |
126 | static DEFINE_SPINLOCK(audit_freelist_lock); | 129 | static DEFINE_SPINLOCK(audit_freelist_lock); |
127 | static int audit_freelist_count; | 130 | static int audit_freelist_count; |
128 | static LIST_HEAD(audit_freelist); | 131 | static LIST_HEAD(audit_freelist); |
129 | 132 | ||
130 | static struct sk_buff_head audit_skb_queue; | 133 | static struct sk_buff_head audit_skb_queue; |
131 | /* queue of skbs to send to auditd when/if it comes back */ | 134 | /* queue of skbs to send to auditd when/if it comes back */ |
132 | static struct sk_buff_head audit_skb_hold_queue; | 135 | static struct sk_buff_head audit_skb_hold_queue; |
133 | static struct task_struct *kauditd_task; | 136 | static struct task_struct *kauditd_task; |
134 | static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); | 137 | static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); |
135 | static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); | 138 | static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); |
136 | 139 | ||
137 | /* Serialize requests from userspace. */ | 140 | /* Serialize requests from userspace. */ |
138 | DEFINE_MUTEX(audit_cmd_mutex); | 141 | DEFINE_MUTEX(audit_cmd_mutex); |
139 | 142 | ||
140 | /* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting | 143 | /* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting |
141 | * audit records. Since printk uses a 1024 byte buffer, this buffer | 144 | * audit records. Since printk uses a 1024 byte buffer, this buffer |
142 | * should be at least that large. */ | 145 | * should be at least that large. */ |
143 | #define AUDIT_BUFSIZ 1024 | 146 | #define AUDIT_BUFSIZ 1024 |
144 | 147 | ||
145 | /* AUDIT_MAXFREE is the number of empty audit_buffers we keep on the | 148 | /* AUDIT_MAXFREE is the number of empty audit_buffers we keep on the |
146 | * audit_freelist. Doing so eliminates many kmalloc/kfree calls. */ | 149 | * audit_freelist. Doing so eliminates many kmalloc/kfree calls. */ |
147 | #define AUDIT_MAXFREE (2*NR_CPUS) | 150 | #define AUDIT_MAXFREE (2*NR_CPUS) |
148 | 151 | ||
149 | /* The audit_buffer is used when formatting an audit record. The caller | 152 | /* The audit_buffer is used when formatting an audit record. The caller |
150 | * locks briefly to get the record off the freelist or to allocate the | 153 | * locks briefly to get the record off the freelist or to allocate the |
151 | * buffer, and locks briefly to send the buffer to the netlink layer or | 154 | * buffer, and locks briefly to send the buffer to the netlink layer or |
152 | * to place it on a transmit queue. Multiple audit_buffers can be in | 155 | * to place it on a transmit queue. Multiple audit_buffers can be in |
153 | * use simultaneously. */ | 156 | * use simultaneously. */ |
154 | struct audit_buffer { | 157 | struct audit_buffer { |
155 | struct list_head list; | 158 | struct list_head list; |
156 | struct sk_buff *skb; /* formatted skb ready to send */ | 159 | struct sk_buff *skb; /* formatted skb ready to send */ |
157 | struct audit_context *ctx; /* NULL or associated context */ | 160 | struct audit_context *ctx; /* NULL or associated context */ |
158 | gfp_t gfp_mask; | 161 | gfp_t gfp_mask; |
159 | }; | 162 | }; |
160 | 163 | ||
161 | struct audit_reply { | 164 | struct audit_reply { |
162 | int pid; | 165 | int pid; |
163 | struct sk_buff *skb; | 166 | struct sk_buff *skb; |
164 | }; | 167 | }; |
165 | 168 | ||
166 | static void audit_set_pid(struct audit_buffer *ab, pid_t pid) | 169 | static void audit_set_pid(struct audit_buffer *ab, pid_t pid) |
167 | { | 170 | { |
168 | if (ab) { | 171 | if (ab) { |
169 | struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); | 172 | struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); |
170 | nlh->nlmsg_pid = pid; | 173 | nlh->nlmsg_pid = pid; |
171 | } | 174 | } |
172 | } | 175 | } |
173 | 176 | ||
174 | void audit_panic(const char *message) | 177 | void audit_panic(const char *message) |
175 | { | 178 | { |
176 | switch (audit_failure) | 179 | switch (audit_failure) |
177 | { | 180 | { |
178 | case AUDIT_FAIL_SILENT: | 181 | case AUDIT_FAIL_SILENT: |
179 | break; | 182 | break; |
180 | case AUDIT_FAIL_PRINTK: | 183 | case AUDIT_FAIL_PRINTK: |
181 | if (printk_ratelimit()) | 184 | if (printk_ratelimit()) |
182 | printk(KERN_ERR "audit: %s\n", message); | 185 | printk(KERN_ERR "audit: %s\n", message); |
183 | break; | 186 | break; |
184 | case AUDIT_FAIL_PANIC: | 187 | case AUDIT_FAIL_PANIC: |
185 | /* test audit_pid since printk is always lossy, why bother? */ | 188 | /* test audit_pid since printk is always lossy, why bother? */ |
186 | if (audit_pid) | 189 | if (audit_pid) |
187 | panic("audit: %s\n", message); | 190 | panic("audit: %s\n", message); |
188 | break; | 191 | break; |
189 | } | 192 | } |
190 | } | 193 | } |
191 | 194 | ||
192 | static inline int audit_rate_check(void) | 195 | static inline int audit_rate_check(void) |
193 | { | 196 | { |
194 | static unsigned long last_check = 0; | 197 | static unsigned long last_check = 0; |
195 | static int messages = 0; | 198 | static int messages = 0; |
196 | static DEFINE_SPINLOCK(lock); | 199 | static DEFINE_SPINLOCK(lock); |
197 | unsigned long flags; | 200 | unsigned long flags; |
198 | unsigned long now; | 201 | unsigned long now; |
199 | unsigned long elapsed; | 202 | unsigned long elapsed; |
200 | int retval = 0; | 203 | int retval = 0; |
201 | 204 | ||
202 | if (!audit_rate_limit) return 1; | 205 | if (!audit_rate_limit) return 1; |
203 | 206 | ||
204 | spin_lock_irqsave(&lock, flags); | 207 | spin_lock_irqsave(&lock, flags); |
205 | if (++messages < audit_rate_limit) { | 208 | if (++messages < audit_rate_limit) { |
206 | retval = 1; | 209 | retval = 1; |
207 | } else { | 210 | } else { |
208 | now = jiffies; | 211 | now = jiffies; |
209 | elapsed = now - last_check; | 212 | elapsed = now - last_check; |
210 | if (elapsed > HZ) { | 213 | if (elapsed > HZ) { |
211 | last_check = now; | 214 | last_check = now; |
212 | messages = 0; | 215 | messages = 0; |
213 | retval = 1; | 216 | retval = 1; |
214 | } | 217 | } |
215 | } | 218 | } |
216 | spin_unlock_irqrestore(&lock, flags); | 219 | spin_unlock_irqrestore(&lock, flags); |
217 | 220 | ||
218 | return retval; | 221 | return retval; |
219 | } | 222 | } |
220 | 223 | ||
221 | /** | 224 | /** |
222 | * audit_log_lost - conditionally log lost audit message event | 225 | * audit_log_lost - conditionally log lost audit message event |
223 | * @message: the message stating reason for lost audit message | 226 | * @message: the message stating reason for lost audit message |
224 | * | 227 | * |
225 | * Emit at least 1 message per second, even if audit_rate_check is | 228 | * Emit at least 1 message per second, even if audit_rate_check is |
226 | * throttling. | 229 | * throttling. |
227 | * Always increment the lost messages counter. | 230 | * Always increment the lost messages counter. |
228 | */ | 231 | */ |
229 | void audit_log_lost(const char *message) | 232 | void audit_log_lost(const char *message) |
230 | { | 233 | { |
231 | static unsigned long last_msg = 0; | 234 | static unsigned long last_msg = 0; |
232 | static DEFINE_SPINLOCK(lock); | 235 | static DEFINE_SPINLOCK(lock); |
233 | unsigned long flags; | 236 | unsigned long flags; |
234 | unsigned long now; | 237 | unsigned long now; |
235 | int print; | 238 | int print; |
236 | 239 | ||
237 | atomic_inc(&audit_lost); | 240 | atomic_inc(&audit_lost); |
238 | 241 | ||
239 | print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit); | 242 | print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit); |
240 | 243 | ||
241 | if (!print) { | 244 | if (!print) { |
242 | spin_lock_irqsave(&lock, flags); | 245 | spin_lock_irqsave(&lock, flags); |
243 | now = jiffies; | 246 | now = jiffies; |
244 | if (now - last_msg > HZ) { | 247 | if (now - last_msg > HZ) { |
245 | print = 1; | 248 | print = 1; |
246 | last_msg = now; | 249 | last_msg = now; |
247 | } | 250 | } |
248 | spin_unlock_irqrestore(&lock, flags); | 251 | spin_unlock_irqrestore(&lock, flags); |
249 | } | 252 | } |
250 | 253 | ||
251 | if (print) { | 254 | if (print) { |
252 | if (printk_ratelimit()) | 255 | if (printk_ratelimit()) |
253 | printk(KERN_WARNING | 256 | printk(KERN_WARNING |
254 | "audit: audit_lost=%d audit_rate_limit=%d " | 257 | "audit: audit_lost=%d audit_rate_limit=%d " |
255 | "audit_backlog_limit=%d\n", | 258 | "audit_backlog_limit=%d\n", |
256 | atomic_read(&audit_lost), | 259 | atomic_read(&audit_lost), |
257 | audit_rate_limit, | 260 | audit_rate_limit, |
258 | audit_backlog_limit); | 261 | audit_backlog_limit); |
259 | audit_panic(message); | 262 | audit_panic(message); |
260 | } | 263 | } |
261 | } | 264 | } |
262 | 265 | ||
263 | static int audit_log_config_change(char *function_name, int new, int old, | 266 | static int audit_log_config_change(char *function_name, int new, int old, |
264 | uid_t loginuid, u32 sessionid, u32 sid, | 267 | uid_t loginuid, u32 sessionid, u32 sid, |
265 | int allow_changes) | 268 | int allow_changes) |
266 | { | 269 | { |
267 | struct audit_buffer *ab; | 270 | struct audit_buffer *ab; |
268 | int rc = 0; | 271 | int rc = 0; |
269 | 272 | ||
270 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); | 273 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); |
271 | audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new, | 274 | audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new, |
272 | old, loginuid, sessionid); | 275 | old, loginuid, sessionid); |
273 | if (sid) { | 276 | if (sid) { |
274 | char *ctx = NULL; | 277 | char *ctx = NULL; |
275 | u32 len; | 278 | u32 len; |
276 | 279 | ||
277 | rc = security_secid_to_secctx(sid, &ctx, &len); | 280 | rc = security_secid_to_secctx(sid, &ctx, &len); |
278 | if (rc) { | 281 | if (rc) { |
279 | audit_log_format(ab, " sid=%u", sid); | 282 | audit_log_format(ab, " sid=%u", sid); |
280 | allow_changes = 0; /* Something weird, deny request */ | 283 | allow_changes = 0; /* Something weird, deny request */ |
281 | } else { | 284 | } else { |
282 | audit_log_format(ab, " subj=%s", ctx); | 285 | audit_log_format(ab, " subj=%s", ctx); |
283 | security_release_secctx(ctx, len); | 286 | security_release_secctx(ctx, len); |
284 | } | 287 | } |
285 | } | 288 | } |
286 | audit_log_format(ab, " res=%d", allow_changes); | 289 | audit_log_format(ab, " res=%d", allow_changes); |
287 | audit_log_end(ab); | 290 | audit_log_end(ab); |
288 | return rc; | 291 | return rc; |
289 | } | 292 | } |
290 | 293 | ||
291 | static int audit_do_config_change(char *function_name, int *to_change, | 294 | static int audit_do_config_change(char *function_name, int *to_change, |
292 | int new, uid_t loginuid, u32 sessionid, | 295 | int new, uid_t loginuid, u32 sessionid, |
293 | u32 sid) | 296 | u32 sid) |
294 | { | 297 | { |
295 | int allow_changes, rc = 0, old = *to_change; | 298 | int allow_changes, rc = 0, old = *to_change; |
296 | 299 | ||
297 | /* check if we are locked */ | 300 | /* check if we are locked */ |
298 | if (audit_enabled == AUDIT_LOCKED) | 301 | if (audit_enabled == AUDIT_LOCKED) |
299 | allow_changes = 0; | 302 | allow_changes = 0; |
300 | else | 303 | else |
301 | allow_changes = 1; | 304 | allow_changes = 1; |
302 | 305 | ||
303 | if (audit_enabled != AUDIT_OFF) { | 306 | if (audit_enabled != AUDIT_OFF) { |
304 | rc = audit_log_config_change(function_name, new, old, loginuid, | 307 | rc = audit_log_config_change(function_name, new, old, loginuid, |
305 | sessionid, sid, allow_changes); | 308 | sessionid, sid, allow_changes); |
306 | if (rc) | 309 | if (rc) |
307 | allow_changes = 0; | 310 | allow_changes = 0; |
308 | } | 311 | } |
309 | 312 | ||
310 | /* If we are allowed, make the change */ | 313 | /* If we are allowed, make the change */ |
311 | if (allow_changes == 1) | 314 | if (allow_changes == 1) |
312 | *to_change = new; | 315 | *to_change = new; |
313 | /* Not allowed, update reason */ | 316 | /* Not allowed, update reason */ |
314 | else if (rc == 0) | 317 | else if (rc == 0) |
315 | rc = -EPERM; | 318 | rc = -EPERM; |
316 | return rc; | 319 | return rc; |
317 | } | 320 | } |
318 | 321 | ||
319 | static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sessionid, | 322 | static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sessionid, |
320 | u32 sid) | 323 | u32 sid) |
321 | { | 324 | { |
322 | return audit_do_config_change("audit_rate_limit", &audit_rate_limit, | 325 | return audit_do_config_change("audit_rate_limit", &audit_rate_limit, |
323 | limit, loginuid, sessionid, sid); | 326 | limit, loginuid, sessionid, sid); |
324 | } | 327 | } |
325 | 328 | ||
326 | static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sessionid, | 329 | static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sessionid, |
327 | u32 sid) | 330 | u32 sid) |
328 | { | 331 | { |
329 | return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, | 332 | return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, |
330 | limit, loginuid, sessionid, sid); | 333 | limit, loginuid, sessionid, sid); |
331 | } | 334 | } |
332 | 335 | ||
333 | static int audit_set_enabled(int state, uid_t loginuid, u32 sessionid, u32 sid) | 336 | static int audit_set_enabled(int state, uid_t loginuid, u32 sessionid, u32 sid) |
334 | { | 337 | { |
335 | int rc; | 338 | int rc; |
336 | if (state < AUDIT_OFF || state > AUDIT_LOCKED) | 339 | if (state < AUDIT_OFF || state > AUDIT_LOCKED) |
337 | return -EINVAL; | 340 | return -EINVAL; |
338 | 341 | ||
339 | rc = audit_do_config_change("audit_enabled", &audit_enabled, state, | 342 | rc = audit_do_config_change("audit_enabled", &audit_enabled, state, |
340 | loginuid, sessionid, sid); | 343 | loginuid, sessionid, sid); |
341 | 344 | ||
342 | if (!rc) | 345 | if (!rc) |
343 | audit_ever_enabled |= !!state; | 346 | audit_ever_enabled |= !!state; |
344 | 347 | ||
345 | return rc; | 348 | return rc; |
346 | } | 349 | } |
347 | 350 | ||
348 | static int audit_set_failure(int state, uid_t loginuid, u32 sessionid, u32 sid) | 351 | static int audit_set_failure(int state, uid_t loginuid, u32 sessionid, u32 sid) |
349 | { | 352 | { |
350 | if (state != AUDIT_FAIL_SILENT | 353 | if (state != AUDIT_FAIL_SILENT |
351 | && state != AUDIT_FAIL_PRINTK | 354 | && state != AUDIT_FAIL_PRINTK |
352 | && state != AUDIT_FAIL_PANIC) | 355 | && state != AUDIT_FAIL_PANIC) |
353 | return -EINVAL; | 356 | return -EINVAL; |
354 | 357 | ||
355 | return audit_do_config_change("audit_failure", &audit_failure, state, | 358 | return audit_do_config_change("audit_failure", &audit_failure, state, |
356 | loginuid, sessionid, sid); | 359 | loginuid, sessionid, sid); |
357 | } | 360 | } |
358 | 361 | ||
359 | /* | 362 | /* |
360 | * Queue skbs to be sent to auditd when/if it comes back. These skbs should | 363 | * Queue skbs to be sent to auditd when/if it comes back. These skbs should |
361 | * already have been sent via printk/syslog and so if these messages are dropped | 364 | * already have been sent via printk/syslog and so if these messages are dropped |
362 | * it is not a huge concern since we already passed the audit_log_lost() | 365 | * it is not a huge concern since we already passed the audit_log_lost() |
363 | * notification and stuff. This is just nice to get audit messages during | 366 | * notification and stuff. This is just nice to get audit messages during |
364 | * boot before auditd is running or messages generated while auditd is stopped. | 367 | * boot before auditd is running or messages generated while auditd is stopped. |
365 | * This only holds messages if audit_default is set, aka booting with audit=1 | 368 | * This only holds messages if audit_default is set, aka booting with audit=1 |
366 | * or building your kernel that way. | 369 | * or building your kernel that way. |
367 | */ | 370 | */ |
368 | static void audit_hold_skb(struct sk_buff *skb) | 371 | static void audit_hold_skb(struct sk_buff *skb) |
369 | { | 372 | { |
370 | if (audit_default && | 373 | if (audit_default && |
371 | skb_queue_len(&audit_skb_hold_queue) < audit_backlog_limit) | 374 | skb_queue_len(&audit_skb_hold_queue) < audit_backlog_limit) |
372 | skb_queue_tail(&audit_skb_hold_queue, skb); | 375 | skb_queue_tail(&audit_skb_hold_queue, skb); |
373 | else | 376 | else |
374 | kfree_skb(skb); | 377 | kfree_skb(skb); |
375 | } | 378 | } |
376 | 379 | ||
377 | /* | 380 | /* |
378 | * For one reason or another this nlh isn't getting delivered to the userspace | 381 | * For one reason or another this nlh isn't getting delivered to the userspace |
379 | * audit daemon, just send it to printk. | 382 | * audit daemon, just send it to printk. |
380 | */ | 383 | */ |
381 | static void audit_printk_skb(struct sk_buff *skb) | 384 | static void audit_printk_skb(struct sk_buff *skb) |
382 | { | 385 | { |
383 | struct nlmsghdr *nlh = nlmsg_hdr(skb); | 386 | struct nlmsghdr *nlh = nlmsg_hdr(skb); |
384 | char *data = NLMSG_DATA(nlh); | 387 | char *data = NLMSG_DATA(nlh); |
385 | 388 | ||
386 | if (nlh->nlmsg_type != AUDIT_EOE) { | 389 | if (nlh->nlmsg_type != AUDIT_EOE) { |
387 | if (printk_ratelimit()) | 390 | if (printk_ratelimit()) |
388 | printk(KERN_NOTICE "type=%d %s\n", nlh->nlmsg_type, data); | 391 | printk(KERN_NOTICE "type=%d %s\n", nlh->nlmsg_type, data); |
389 | else | 392 | else |
390 | audit_log_lost("printk limit exceeded\n"); | 393 | audit_log_lost("printk limit exceeded\n"); |
391 | } | 394 | } |
392 | 395 | ||
393 | audit_hold_skb(skb); | 396 | audit_hold_skb(skb); |
394 | } | 397 | } |
395 | 398 | ||
396 | static void kauditd_send_skb(struct sk_buff *skb) | 399 | static void kauditd_send_skb(struct sk_buff *skb) |
397 | { | 400 | { |
398 | int err; | 401 | int err; |
399 | /* take a reference in case we can't send it and we want to hold it */ | 402 | /* take a reference in case we can't send it and we want to hold it */ |
400 | skb_get(skb); | 403 | skb_get(skb); |
401 | err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0); | 404 | err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0); |
402 | if (err < 0) { | 405 | if (err < 0) { |
403 | BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ | 406 | BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ |
404 | printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid); | 407 | printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid); |
405 | audit_log_lost("auditd disappeared\n"); | 408 | audit_log_lost("auditd disappeared\n"); |
406 | audit_pid = 0; | 409 | audit_pid = 0; |
407 | /* we might get lucky and get this in the next auditd */ | 410 | /* we might get lucky and get this in the next auditd */ |
408 | audit_hold_skb(skb); | 411 | audit_hold_skb(skb); |
409 | } else | 412 | } else |
410 | /* drop the extra reference if sent ok */ | 413 | /* drop the extra reference if sent ok */ |
411 | consume_skb(skb); | 414 | consume_skb(skb); |
412 | } | 415 | } |
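
The skb_get()/consume_skb() pairing above is the usual hold-on-failure idiom: take an extra reference before a send that may consume the buffer, keep that reference (and requeue the buffer) if the send fails, and drop it once the send succeeds. A minimal sketch of the same idiom, where try_send() and the queue argument are hypothetical stand-ins rather than real kernel APIs:

	/* Sketch only: try_send() is a hypothetical transport, not a kernel API. */
	static void send_or_hold(struct sk_buff *skb, struct sk_buff_head *hold)
	{
		skb_get(skb);				/* extra ref in case the send fails */
		if (try_send(skb) < 0)
			skb_queue_tail(hold, skb);	/* failed: keep the ref, requeue */
		else
			consume_skb(skb);		/* sent: drop the extra ref */
	}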
413 | 416 | ||
414 | static int kauditd_thread(void *dummy) | 417 | static int kauditd_thread(void *dummy) |
415 | { | 418 | { |
416 | struct sk_buff *skb; | 419 | struct sk_buff *skb; |
417 | 420 | ||
418 | set_freezable(); | 421 | set_freezable(); |
419 | while (!kthread_should_stop()) { | 422 | while (!kthread_should_stop()) { |
420 | /* | 423 | /* |
421 | * if auditd just started, drain the queue of messages already | 424 | * if auditd just started, drain the queue of messages already |
422 | * sent to syslog/printk. remember loss here is ok. we already | 425 | * sent to syslog/printk. remember loss here is ok. we already |
423 | * called audit_log_lost() if it didn't go out normally. so the | 426 | * called audit_log_lost() if it didn't go out normally. so the |
424 | * race between the skb_dequeue and the next check for audit_pid | 427 | * race between the skb_dequeue and the next check for audit_pid |
425 | * doesn't matter. | 428 | * doesn't matter. |
426 | * | 429 | * |
427 | * if you ever find kauditd to be too slow we can get a perf win | 430 | * if you ever find kauditd to be too slow we can get a perf win |
428 | * by doing our own locking and keeping better track if there | 431 | * by doing our own locking and keeping better track if there |
429 | * are messages in this queue. I don't see the need now, but | 432 | * are messages in this queue. I don't see the need now, but |
430 | * in 5 years when I want to play with this again I'll see this | 433 | * in 5 years when I want to play with this again I'll see this |
431 | * note and still have no friggin idea what i'm thinking today. | 434 | * note and still have no friggin idea what i'm thinking today. |
432 | */ | 435 | */ |
433 | if (audit_default && audit_pid) { | 436 | if (audit_default && audit_pid) { |
434 | skb = skb_dequeue(&audit_skb_hold_queue); | 437 | skb = skb_dequeue(&audit_skb_hold_queue); |
435 | if (unlikely(skb)) { | 438 | if (unlikely(skb)) { |
436 | while (skb && audit_pid) { | 439 | while (skb && audit_pid) { |
437 | kauditd_send_skb(skb); | 440 | kauditd_send_skb(skb); |
438 | skb = skb_dequeue(&audit_skb_hold_queue); | 441 | skb = skb_dequeue(&audit_skb_hold_queue); |
439 | } | 442 | } |
440 | } | 443 | } |
441 | } | 444 | } |
442 | 445 | ||
443 | skb = skb_dequeue(&audit_skb_queue); | 446 | skb = skb_dequeue(&audit_skb_queue); |
444 | wake_up(&audit_backlog_wait); | 447 | wake_up(&audit_backlog_wait); |
445 | if (skb) { | 448 | if (skb) { |
446 | if (audit_pid) | 449 | if (audit_pid) |
447 | kauditd_send_skb(skb); | 450 | kauditd_send_skb(skb); |
448 | else | 451 | else |
449 | audit_printk_skb(skb); | 452 | audit_printk_skb(skb); |
450 | } else { | 453 | } else { |
451 | DECLARE_WAITQUEUE(wait, current); | 454 | DECLARE_WAITQUEUE(wait, current); |
452 | set_current_state(TASK_INTERRUPTIBLE); | 455 | set_current_state(TASK_INTERRUPTIBLE); |
453 | add_wait_queue(&kauditd_wait, &wait); | 456 | add_wait_queue(&kauditd_wait, &wait); |
454 | 457 | ||
455 | if (!skb_queue_len(&audit_skb_queue)) { | 458 | if (!skb_queue_len(&audit_skb_queue)) { |
456 | try_to_freeze(); | 459 | try_to_freeze(); |
457 | schedule(); | 460 | schedule(); |
458 | } | 461 | } |
459 | 462 | ||
460 | __set_current_state(TASK_RUNNING); | 463 | __set_current_state(TASK_RUNNING); |
461 | remove_wait_queue(&kauditd_wait, &wait); | 464 | remove_wait_queue(&kauditd_wait, &wait); |
462 | } | 465 | } |
463 | } | 466 | } |
464 | return 0; | 467 | return 0; |
465 | } | 468 | } |
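
The open-coded sleep in the else branch (DECLARE_WAITQUEUE, TASK_INTERRUPTIBLE, re-check, schedule) is the manual form of the wait-queue idiom. Assuming the only wake-up conditions are new queued messages and a stop request, roughly the same sleep could be written with the freezable wait helper; this is a sketch of the equivalent, not what the commit actually does:

	/* Compact equivalent (sketch): sleep until work arrives or we are asked to stop. */
	wait_event_freezable(kauditd_wait,
			     skb_queue_len(&audit_skb_queue) || kthread_should_stop());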
466 | 469 | ||
467 | static int audit_prepare_user_tty(pid_t pid, uid_t loginuid, u32 sessionid) | 470 | static int audit_prepare_user_tty(pid_t pid, uid_t loginuid, u32 sessionid) |
468 | { | 471 | { |
469 | struct task_struct *tsk; | 472 | struct task_struct *tsk; |
470 | int err; | 473 | int err; |
471 | 474 | ||
472 | rcu_read_lock(); | 475 | rcu_read_lock(); |
473 | tsk = find_task_by_vpid(pid); | 476 | tsk = find_task_by_vpid(pid); |
474 | if (!tsk) { | 477 | if (!tsk) { |
475 | rcu_read_unlock(); | 478 | rcu_read_unlock(); |
476 | return -ESRCH; | 479 | return -ESRCH; |
477 | } | 480 | } |
478 | get_task_struct(tsk); | 481 | get_task_struct(tsk); |
479 | rcu_read_unlock(); | 482 | rcu_read_unlock(); |
480 | err = tty_audit_push_task(tsk, loginuid, sessionid); | 483 | err = tty_audit_push_task(tsk, loginuid, sessionid); |
481 | put_task_struct(tsk); | 484 | put_task_struct(tsk); |
482 | return err; | 485 | return err; |
483 | } | 486 | } |
484 | 487 | ||
485 | int audit_send_list(void *_dest) | 488 | int audit_send_list(void *_dest) |
486 | { | 489 | { |
487 | struct audit_netlink_list *dest = _dest; | 490 | struct audit_netlink_list *dest = _dest; |
488 | int pid = dest->pid; | 491 | int pid = dest->pid; |
489 | struct sk_buff *skb; | 492 | struct sk_buff *skb; |
490 | 493 | ||
491 | /* wait for parent to finish and send an ACK */ | 494 | /* wait for parent to finish and send an ACK */ |
492 | mutex_lock(&audit_cmd_mutex); | 495 | mutex_lock(&audit_cmd_mutex); |
493 | mutex_unlock(&audit_cmd_mutex); | 496 | mutex_unlock(&audit_cmd_mutex); |
494 | 497 | ||
495 | while ((skb = __skb_dequeue(&dest->q)) != NULL) | 498 | while ((skb = __skb_dequeue(&dest->q)) != NULL) |
496 | netlink_unicast(audit_sock, skb, pid, 0); | 499 | netlink_unicast(audit_sock, skb, pid, 0); |
497 | 500 | ||
498 | kfree(dest); | 501 | kfree(dest); |
499 | 502 | ||
500 | return 0; | 503 | return 0; |
501 | } | 504 | } |
502 | 505 | ||
503 | struct sk_buff *audit_make_reply(int pid, int seq, int type, int done, | 506 | struct sk_buff *audit_make_reply(int pid, int seq, int type, int done, |
504 | int multi, const void *payload, int size) | 507 | int multi, const void *payload, int size) |
505 | { | 508 | { |
506 | struct sk_buff *skb; | 509 | struct sk_buff *skb; |
507 | struct nlmsghdr *nlh; | 510 | struct nlmsghdr *nlh; |
508 | void *data; | 511 | void *data; |
509 | int flags = multi ? NLM_F_MULTI : 0; | 512 | int flags = multi ? NLM_F_MULTI : 0; |
510 | int t = done ? NLMSG_DONE : type; | 513 | int t = done ? NLMSG_DONE : type; |
511 | 514 | ||
512 | skb = nlmsg_new(size, GFP_KERNEL); | 515 | skb = nlmsg_new(size, GFP_KERNEL); |
513 | if (!skb) | 516 | if (!skb) |
514 | return NULL; | 517 | return NULL; |
515 | 518 | ||
516 | nlh = NLMSG_NEW(skb, pid, seq, t, size, flags); | 519 | nlh = NLMSG_NEW(skb, pid, seq, t, size, flags); |
517 | data = NLMSG_DATA(nlh); | 520 | data = NLMSG_DATA(nlh); |
518 | memcpy(data, payload, size); | 521 | memcpy(data, payload, size); |
519 | return skb; | 522 | return skb; |
520 | 523 | ||
521 | nlmsg_failure: /* Used by NLMSG_NEW */ | 524 | nlmsg_failure: /* Used by NLMSG_NEW */ |
522 | if (skb) | 525 | if (skb) |
523 | kfree_skb(skb); | 526 | kfree_skb(skb); |
524 | return NULL; | 527 | return NULL; |
525 | } | 528 | } |
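
audit_make_reply() only builds the reply skb; delivery is left to the caller, as audit_send_reply_thread() below shows. A hedged usage sketch, with peer_pid, seq and the one-word payload made up for illustration:

	/* Illustrative only: build a single non-multipart AUDIT_GET reply and unicast it. */
	u32 payload = 1;			/* hypothetical payload */
	struct sk_buff *skb;

	skb = audit_make_reply(peer_pid, seq, AUDIT_GET, 0 /* done */, 0 /* multi */,
			       &payload, sizeof(payload));
	if (skb)
		netlink_unicast(audit_sock, skb, peer_pid, 0);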
526 | 529 | ||
527 | static int audit_send_reply_thread(void *arg) | 530 | static int audit_send_reply_thread(void *arg) |
528 | { | 531 | { |
529 | struct audit_reply *reply = (struct audit_reply *)arg; | 532 | struct audit_reply *reply = (struct audit_reply *)arg; |
530 | 533 | ||
531 | mutex_lock(&audit_cmd_mutex); | 534 | mutex_lock(&audit_cmd_mutex); |
532 | mutex_unlock(&audit_cmd_mutex); | 535 | mutex_unlock(&audit_cmd_mutex); |
533 | 536 | ||
534 | /* Ignore failure. It'll only happen if the sender goes away, | 537 | /* Ignore failure. It'll only happen if the sender goes away, |
535 | because our timeout is set to infinite. */ | 538 | because our timeout is set to infinite. */ |
536 | netlink_unicast(audit_sock, reply->skb, reply->pid, 0); | 539 | netlink_unicast(audit_sock, reply->skb, reply->pid, 0); |
537 | kfree(reply); | 540 | kfree(reply); |
538 | return 0; | 541 | return 0; |
539 | } | 542 | } |
540 | /** | 543 | /** |
541 | * audit_send_reply - send an audit reply message via netlink | 544 | * audit_send_reply - send an audit reply message via netlink |
542 | * @pid: process id to send reply to | 545 | * @pid: process id to send reply to |
543 | * @seq: sequence number | 546 | * @seq: sequence number |
544 | * @type: audit message type | 547 | * @type: audit message type |
545 | * @done: done (last) flag | 548 | * @done: done (last) flag |
546 | * @multi: multi-part message flag | 549 | * @multi: multi-part message flag |
547 | * @payload: payload data | 550 | * @payload: payload data |
548 | * @size: payload size | 551 | * @size: payload size |
549 | * | 552 | * |
550 | * Allocates an skb, builds the netlink message, and sends it to the pid. | 553 | * Allocates an skb, builds the netlink message, and sends it to the pid. |
551 | * No failure notifications. | 554 | * No failure notifications. |
552 | */ | 555 | */ |
553 | static void audit_send_reply(int pid, int seq, int type, int done, int multi, | 556 | static void audit_send_reply(int pid, int seq, int type, int done, int multi, |
554 | const void *payload, int size) | 557 | const void *payload, int size) |
555 | { | 558 | { |
556 | struct sk_buff *skb; | 559 | struct sk_buff *skb; |
557 | struct task_struct *tsk; | 560 | struct task_struct *tsk; |
558 | struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), | 561 | struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), |
559 | GFP_KERNEL); | 562 | GFP_KERNEL); |
560 | 563 | ||
561 | if (!reply) | 564 | if (!reply) |
562 | return; | 565 | return; |
563 | 566 | ||
564 | skb = audit_make_reply(pid, seq, type, done, multi, payload, size); | 567 | skb = audit_make_reply(pid, seq, type, done, multi, payload, size); |
565 | if (!skb) | 568 | if (!skb) |
566 | goto out; | 569 | goto out; |
567 | 570 | ||
568 | reply->pid = pid; | 571 | reply->pid = pid; |
569 | reply->skb = skb; | 572 | reply->skb = skb; |
570 | 573 | ||
571 | tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); | 574 | tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); |
572 | if (!IS_ERR(tsk)) | 575 | if (!IS_ERR(tsk)) |
573 | return; | 576 | return; |
574 | kfree_skb(skb); | 577 | kfree_skb(skb); |
575 | out: | 578 | out: |
576 | kfree(reply); | 579 | kfree(reply); |
577 | } | 580 | } |
578 | 581 | ||
579 | /* | 582 | /* |
580 | * Check for appropriate CAP_AUDIT_ capabilities on incoming audit | 583 | * Check for appropriate CAP_AUDIT_ capabilities on incoming audit |
581 | * control messages. | 584 | * control messages. |
582 | */ | 585 | */ |
583 | static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) | 586 | static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) |
584 | { | 587 | { |
585 | int err = 0; | 588 | int err = 0; |
586 | 589 | ||
587 | switch (msg_type) { | 590 | switch (msg_type) { |
588 | case AUDIT_GET: | 591 | case AUDIT_GET: |
589 | case AUDIT_LIST: | 592 | case AUDIT_LIST: |
590 | case AUDIT_LIST_RULES: | 593 | case AUDIT_LIST_RULES: |
591 | case AUDIT_SET: | 594 | case AUDIT_SET: |
592 | case AUDIT_ADD: | 595 | case AUDIT_ADD: |
593 | case AUDIT_ADD_RULE: | 596 | case AUDIT_ADD_RULE: |
594 | case AUDIT_DEL: | 597 | case AUDIT_DEL: |
595 | case AUDIT_DEL_RULE: | 598 | case AUDIT_DEL_RULE: |
596 | case AUDIT_SIGNAL_INFO: | 599 | case AUDIT_SIGNAL_INFO: |
597 | case AUDIT_TTY_GET: | 600 | case AUDIT_TTY_GET: |
598 | case AUDIT_TTY_SET: | 601 | case AUDIT_TTY_SET: |
599 | case AUDIT_TRIM: | 602 | case AUDIT_TRIM: |
600 | case AUDIT_MAKE_EQUIV: | 603 | case AUDIT_MAKE_EQUIV: |
601 | if (security_netlink_recv(skb, CAP_AUDIT_CONTROL)) | 604 | if (security_netlink_recv(skb, CAP_AUDIT_CONTROL)) |
602 | err = -EPERM; | 605 | err = -EPERM; |
603 | break; | 606 | break; |
604 | case AUDIT_USER: | 607 | case AUDIT_USER: |
605 | case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: | 608 | case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: |
606 | case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: | 609 | case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: |
607 | if (security_netlink_recv(skb, CAP_AUDIT_WRITE)) | 610 | if (security_netlink_recv(skb, CAP_AUDIT_WRITE)) |
608 | err = -EPERM; | 611 | err = -EPERM; |
609 | break; | 612 | break; |
610 | default: /* bad msg */ | 613 | default: /* bad msg */ |
611 | err = -EINVAL; | 614 | err = -EINVAL; |
612 | } | 615 | } |
613 | 616 | ||
614 | return err; | 617 | return err; |
615 | } | 618 | } |
616 | 619 | ||
617 | static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type, | 620 | static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type, |
618 | u32 pid, u32 uid, uid_t auid, u32 ses, | 621 | u32 pid, u32 uid, uid_t auid, u32 ses, |
619 | u32 sid) | 622 | u32 sid) |
620 | { | 623 | { |
621 | int rc = 0; | 624 | int rc = 0; |
622 | char *ctx = NULL; | 625 | char *ctx = NULL; |
623 | u32 len; | 626 | u32 len; |
624 | 627 | ||
625 | if (!audit_enabled) { | 628 | if (!audit_enabled) { |
626 | *ab = NULL; | 629 | *ab = NULL; |
627 | return rc; | 630 | return rc; |
628 | } | 631 | } |
629 | 632 | ||
630 | *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); | 633 | *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); |
631 | audit_log_format(*ab, "user pid=%d uid=%u auid=%u ses=%u", | 634 | audit_log_format(*ab, "user pid=%d uid=%u auid=%u ses=%u", |
632 | pid, uid, auid, ses); | 635 | pid, uid, auid, ses); |
633 | if (sid) { | 636 | if (sid) { |
634 | rc = security_secid_to_secctx(sid, &ctx, &len); | 637 | rc = security_secid_to_secctx(sid, &ctx, &len); |
635 | if (rc) | 638 | if (rc) |
636 | audit_log_format(*ab, " ssid=%u", sid); | 639 | audit_log_format(*ab, " ssid=%u", sid); |
637 | else { | 640 | else { |
638 | audit_log_format(*ab, " subj=%s", ctx); | 641 | audit_log_format(*ab, " subj=%s", ctx); |
639 | security_release_secctx(ctx, len); | 642 | security_release_secctx(ctx, len); |
640 | } | 643 | } |
641 | } | 644 | } |
642 | 645 | ||
643 | return rc; | 646 | return rc; |
644 | } | 647 | } |
645 | 648 | ||
646 | static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | 649 | static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
647 | { | 650 | { |
648 | u32 uid, pid, seq, sid; | 651 | u32 uid, pid, seq, sid; |
649 | void *data; | 652 | void *data; |
650 | struct audit_status *status_get, status_set; | 653 | struct audit_status *status_get, status_set; |
651 | int err; | 654 | int err; |
652 | struct audit_buffer *ab; | 655 | struct audit_buffer *ab; |
653 | u16 msg_type = nlh->nlmsg_type; | 656 | u16 msg_type = nlh->nlmsg_type; |
654 | uid_t loginuid; /* loginuid of sender */ | 657 | uid_t loginuid; /* loginuid of sender */ |
655 | u32 sessionid; | 658 | u32 sessionid; |
656 | struct audit_sig_info *sig_data; | 659 | struct audit_sig_info *sig_data; |
657 | char *ctx = NULL; | 660 | char *ctx = NULL; |
658 | u32 len; | 661 | u32 len; |
659 | 662 | ||
660 | err = audit_netlink_ok(skb, msg_type); | 663 | err = audit_netlink_ok(skb, msg_type); |
661 | if (err) | 664 | if (err) |
662 | return err; | 665 | return err; |
663 | 666 | ||
664 | /* As soon as there's any sign of userspace auditd, | 667 | /* As soon as there's any sign of userspace auditd, |
665 | * start kauditd to talk to it */ | 668 | * start kauditd to talk to it */ |
666 | if (!kauditd_task) | 669 | if (!kauditd_task) |
667 | kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); | 670 | kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); |
668 | if (IS_ERR(kauditd_task)) { | 671 | if (IS_ERR(kauditd_task)) { |
669 | err = PTR_ERR(kauditd_task); | 672 | err = PTR_ERR(kauditd_task); |
670 | kauditd_task = NULL; | 673 | kauditd_task = NULL; |
671 | return err; | 674 | return err; |
672 | } | 675 | } |
673 | 676 | ||
674 | pid = NETLINK_CREDS(skb)->pid; | 677 | pid = NETLINK_CREDS(skb)->pid; |
675 | uid = NETLINK_CREDS(skb)->uid; | 678 | uid = NETLINK_CREDS(skb)->uid; |
676 | loginuid = audit_get_loginuid(current); | 679 | loginuid = audit_get_loginuid(current); |
677 | sessionid = audit_get_sessionid(current); | 680 | sessionid = audit_get_sessionid(current); |
678 | security_task_getsecid(current, &sid); | 681 | security_task_getsecid(current, &sid); |
679 | seq = nlh->nlmsg_seq; | 682 | seq = nlh->nlmsg_seq; |
680 | data = NLMSG_DATA(nlh); | 683 | data = NLMSG_DATA(nlh); |
681 | 684 | ||
682 | switch (msg_type) { | 685 | switch (msg_type) { |
683 | case AUDIT_GET: | 686 | case AUDIT_GET: |
684 | status_set.enabled = audit_enabled; | 687 | status_set.enabled = audit_enabled; |
685 | status_set.failure = audit_failure; | 688 | status_set.failure = audit_failure; |
686 | status_set.pid = audit_pid; | 689 | status_set.pid = audit_pid; |
687 | status_set.rate_limit = audit_rate_limit; | 690 | status_set.rate_limit = audit_rate_limit; |
688 | status_set.backlog_limit = audit_backlog_limit; | 691 | status_set.backlog_limit = audit_backlog_limit; |
689 | status_set.lost = atomic_read(&audit_lost); | 692 | status_set.lost = atomic_read(&audit_lost); |
690 | status_set.backlog = skb_queue_len(&audit_skb_queue); | 693 | status_set.backlog = skb_queue_len(&audit_skb_queue); |
691 | audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0, | 694 | audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0, |
692 | &status_set, sizeof(status_set)); | 695 | &status_set, sizeof(status_set)); |
693 | break; | 696 | break; |
694 | case AUDIT_SET: | 697 | case AUDIT_SET: |
695 | if (nlh->nlmsg_len < sizeof(struct audit_status)) | 698 | if (nlh->nlmsg_len < sizeof(struct audit_status)) |
696 | return -EINVAL; | 699 | return -EINVAL; |
697 | status_get = (struct audit_status *)data; | 700 | status_get = (struct audit_status *)data; |
698 | if (status_get->mask & AUDIT_STATUS_ENABLED) { | 701 | if (status_get->mask & AUDIT_STATUS_ENABLED) { |
699 | err = audit_set_enabled(status_get->enabled, | 702 | err = audit_set_enabled(status_get->enabled, |
700 | loginuid, sessionid, sid); | 703 | loginuid, sessionid, sid); |
701 | if (err < 0) | 704 | if (err < 0) |
702 | return err; | 705 | return err; |
703 | } | 706 | } |
704 | if (status_get->mask & AUDIT_STATUS_FAILURE) { | 707 | if (status_get->mask & AUDIT_STATUS_FAILURE) { |
705 | err = audit_set_failure(status_get->failure, | 708 | err = audit_set_failure(status_get->failure, |
706 | loginuid, sessionid, sid); | 709 | loginuid, sessionid, sid); |
707 | if (err < 0) | 710 | if (err < 0) |
708 | return err; | 711 | return err; |
709 | } | 712 | } |
710 | if (status_get->mask & AUDIT_STATUS_PID) { | 713 | if (status_get->mask & AUDIT_STATUS_PID) { |
711 | int new_pid = status_get->pid; | 714 | int new_pid = status_get->pid; |
712 | 715 | ||
713 | if (audit_enabled != AUDIT_OFF) | 716 | if (audit_enabled != AUDIT_OFF) |
714 | audit_log_config_change("audit_pid", new_pid, | 717 | audit_log_config_change("audit_pid", new_pid, |
715 | audit_pid, loginuid, | 718 | audit_pid, loginuid, |
716 | sessionid, sid, 1); | 719 | sessionid, sid, 1); |
717 | 720 | ||
718 | audit_pid = new_pid; | 721 | audit_pid = new_pid; |
719 | audit_nlk_pid = NETLINK_CB(skb).pid; | 722 | audit_nlk_pid = NETLINK_CB(skb).pid; |
720 | } | 723 | } |
721 | if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) { | 724 | if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) { |
722 | err = audit_set_rate_limit(status_get->rate_limit, | 725 | err = audit_set_rate_limit(status_get->rate_limit, |
723 | loginuid, sessionid, sid); | 726 | loginuid, sessionid, sid); |
724 | if (err < 0) | 727 | if (err < 0) |
725 | return err; | 728 | return err; |
726 | } | 729 | } |
727 | if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) | 730 | if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) |
728 | err = audit_set_backlog_limit(status_get->backlog_limit, | 731 | err = audit_set_backlog_limit(status_get->backlog_limit, |
729 | loginuid, sessionid, sid); | 732 | loginuid, sessionid, sid); |
730 | break; | 733 | break; |
731 | case AUDIT_USER: | 734 | case AUDIT_USER: |
732 | case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: | 735 | case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: |
733 | case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: | 736 | case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: |
734 | if (!audit_enabled && msg_type != AUDIT_USER_AVC) | 737 | if (!audit_enabled && msg_type != AUDIT_USER_AVC) |
735 | return 0; | 738 | return 0; |
736 | 739 | ||
737 | err = audit_filter_user(&NETLINK_CB(skb)); | 740 | err = audit_filter_user(&NETLINK_CB(skb)); |
738 | if (err == 1) { | 741 | if (err == 1) { |
739 | err = 0; | 742 | err = 0; |
740 | if (msg_type == AUDIT_USER_TTY) { | 743 | if (msg_type == AUDIT_USER_TTY) { |
741 | err = audit_prepare_user_tty(pid, loginuid, | 744 | err = audit_prepare_user_tty(pid, loginuid, |
742 | sessionid); | 745 | sessionid); |
743 | if (err) | 746 | if (err) |
744 | break; | 747 | break; |
745 | } | 748 | } |
746 | audit_log_common_recv_msg(&ab, msg_type, pid, uid, | 749 | audit_log_common_recv_msg(&ab, msg_type, pid, uid, |
747 | loginuid, sessionid, sid); | 750 | loginuid, sessionid, sid); |
748 | 751 | ||
749 | if (msg_type != AUDIT_USER_TTY) | 752 | if (msg_type != AUDIT_USER_TTY) |
750 | audit_log_format(ab, " msg='%.1024s'", | 753 | audit_log_format(ab, " msg='%.1024s'", |
751 | (char *)data); | 754 | (char *)data); |
752 | else { | 755 | else { |
753 | int size; | 756 | int size; |
754 | 757 | ||
755 | audit_log_format(ab, " msg="); | 758 | audit_log_format(ab, " msg="); |
756 | size = nlmsg_len(nlh); | 759 | size = nlmsg_len(nlh); |
757 | if (size > 0 && | 760 | if (size > 0 && |
758 | ((unsigned char *)data)[size - 1] == '\0') | 761 | ((unsigned char *)data)[size - 1] == '\0') |
759 | size--; | 762 | size--; |
760 | audit_log_n_untrustedstring(ab, data, size); | 763 | audit_log_n_untrustedstring(ab, data, size); |
761 | } | 764 | } |
762 | audit_set_pid(ab, pid); | 765 | audit_set_pid(ab, pid); |
763 | audit_log_end(ab); | 766 | audit_log_end(ab); |
764 | } | 767 | } |
765 | break; | 768 | break; |
766 | case AUDIT_ADD: | 769 | case AUDIT_ADD: |
767 | case AUDIT_DEL: | 770 | case AUDIT_DEL: |
768 | if (nlmsg_len(nlh) < sizeof(struct audit_rule)) | 771 | if (nlmsg_len(nlh) < sizeof(struct audit_rule)) |
769 | return -EINVAL; | 772 | return -EINVAL; |
770 | if (audit_enabled == AUDIT_LOCKED) { | 773 | if (audit_enabled == AUDIT_LOCKED) { |
771 | audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, | 774 | audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, |
772 | uid, loginuid, sessionid, sid); | 775 | uid, loginuid, sessionid, sid); |
773 | 776 | ||
774 | audit_log_format(ab, " audit_enabled=%d res=0", | 777 | audit_log_format(ab, " audit_enabled=%d res=0", |
775 | audit_enabled); | 778 | audit_enabled); |
776 | audit_log_end(ab); | 779 | audit_log_end(ab); |
777 | return -EPERM; | 780 | return -EPERM; |
778 | } | 781 | } |
779 | /* fallthrough */ | 782 | /* fallthrough */ |
780 | case AUDIT_LIST: | 783 | case AUDIT_LIST: |
781 | err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, | 784 | err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, |
782 | uid, seq, data, nlmsg_len(nlh), | 785 | uid, seq, data, nlmsg_len(nlh), |
783 | loginuid, sessionid, sid); | 786 | loginuid, sessionid, sid); |
784 | break; | 787 | break; |
785 | case AUDIT_ADD_RULE: | 788 | case AUDIT_ADD_RULE: |
786 | case AUDIT_DEL_RULE: | 789 | case AUDIT_DEL_RULE: |
787 | if (nlmsg_len(nlh) < sizeof(struct audit_rule_data)) | 790 | if (nlmsg_len(nlh) < sizeof(struct audit_rule_data)) |
788 | return -EINVAL; | 791 | return -EINVAL; |
789 | if (audit_enabled == AUDIT_LOCKED) { | 792 | if (audit_enabled == AUDIT_LOCKED) { |
790 | audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, | 793 | audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, |
791 | uid, loginuid, sessionid, sid); | 794 | uid, loginuid, sessionid, sid); |
792 | 795 | ||
793 | audit_log_format(ab, " audit_enabled=%d res=0", | 796 | audit_log_format(ab, " audit_enabled=%d res=0", |
794 | audit_enabled); | 797 | audit_enabled); |
795 | audit_log_end(ab); | 798 | audit_log_end(ab); |
796 | return -EPERM; | 799 | return -EPERM; |
797 | } | 800 | } |
798 | /* fallthrough */ | 801 | /* fallthrough */ |
799 | case AUDIT_LIST_RULES: | 802 | case AUDIT_LIST_RULES: |
800 | err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, | 803 | err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, |
801 | uid, seq, data, nlmsg_len(nlh), | 804 | uid, seq, data, nlmsg_len(nlh), |
802 | loginuid, sessionid, sid); | 805 | loginuid, sessionid, sid); |
803 | break; | 806 | break; |
804 | case AUDIT_TRIM: | 807 | case AUDIT_TRIM: |
805 | audit_trim_trees(); | 808 | audit_trim_trees(); |
806 | 809 | ||
807 | audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, | 810 | audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, |
808 | uid, loginuid, sessionid, sid); | 811 | uid, loginuid, sessionid, sid); |
809 | 812 | ||
810 | audit_log_format(ab, " op=trim res=1"); | 813 | audit_log_format(ab, " op=trim res=1"); |
811 | audit_log_end(ab); | 814 | audit_log_end(ab); |
812 | break; | 815 | break; |
813 | case AUDIT_MAKE_EQUIV: { | 816 | case AUDIT_MAKE_EQUIV: { |
814 | void *bufp = data; | 817 | void *bufp = data; |
815 | u32 sizes[2]; | 818 | u32 sizes[2]; |
816 | size_t msglen = nlmsg_len(nlh); | 819 | size_t msglen = nlmsg_len(nlh); |
817 | char *old, *new; | 820 | char *old, *new; |
818 | 821 | ||
819 | err = -EINVAL; | 822 | err = -EINVAL; |
820 | if (msglen < 2 * sizeof(u32)) | 823 | if (msglen < 2 * sizeof(u32)) |
821 | break; | 824 | break; |
822 | memcpy(sizes, bufp, 2 * sizeof(u32)); | 825 | memcpy(sizes, bufp, 2 * sizeof(u32)); |
823 | bufp += 2 * sizeof(u32); | 826 | bufp += 2 * sizeof(u32); |
824 | msglen -= 2 * sizeof(u32); | 827 | msglen -= 2 * sizeof(u32); |
825 | old = audit_unpack_string(&bufp, &msglen, sizes[0]); | 828 | old = audit_unpack_string(&bufp, &msglen, sizes[0]); |
826 | if (IS_ERR(old)) { | 829 | if (IS_ERR(old)) { |
827 | err = PTR_ERR(old); | 830 | err = PTR_ERR(old); |
828 | break; | 831 | break; |
829 | } | 832 | } |
830 | new = audit_unpack_string(&bufp, &msglen, sizes[1]); | 833 | new = audit_unpack_string(&bufp, &msglen, sizes[1]); |
831 | if (IS_ERR(new)) { | 834 | if (IS_ERR(new)) { |
832 | err = PTR_ERR(new); | 835 | err = PTR_ERR(new); |
833 | kfree(old); | 836 | kfree(old); |
834 | break; | 837 | break; |
835 | } | 838 | } |
836 | /* OK, here comes... */ | 839 | /* OK, here comes... */ |
837 | err = audit_tag_tree(old, new); | 840 | err = audit_tag_tree(old, new); |
838 | 841 | ||
839 | audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, | 842 | audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, |
840 | uid, loginuid, sessionid, sid); | 843 | uid, loginuid, sessionid, sid); |
841 | 844 | ||
842 | audit_log_format(ab, " op=make_equiv old="); | 845 | audit_log_format(ab, " op=make_equiv old="); |
843 | audit_log_untrustedstring(ab, old); | 846 | audit_log_untrustedstring(ab, old); |
844 | audit_log_format(ab, " new="); | 847 | audit_log_format(ab, " new="); |
845 | audit_log_untrustedstring(ab, new); | 848 | audit_log_untrustedstring(ab, new); |
846 | audit_log_format(ab, " res=%d", !err); | 849 | audit_log_format(ab, " res=%d", !err); |
847 | audit_log_end(ab); | 850 | audit_log_end(ab); |
848 | kfree(old); | 851 | kfree(old); |
849 | kfree(new); | 852 | kfree(new); |
850 | break; | 853 | break; |
851 | } | 854 | } |
852 | case AUDIT_SIGNAL_INFO: | 855 | case AUDIT_SIGNAL_INFO: |
853 | len = 0; | 856 | len = 0; |
854 | if (audit_sig_sid) { | 857 | if (audit_sig_sid) { |
855 | err = security_secid_to_secctx(audit_sig_sid, &ctx, &len); | 858 | err = security_secid_to_secctx(audit_sig_sid, &ctx, &len); |
856 | if (err) | 859 | if (err) |
857 | return err; | 860 | return err; |
858 | } | 861 | } |
859 | sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL); | 862 | sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL); |
860 | if (!sig_data) { | 863 | if (!sig_data) { |
861 | if (audit_sig_sid) | 864 | if (audit_sig_sid) |
862 | security_release_secctx(ctx, len); | 865 | security_release_secctx(ctx, len); |
863 | return -ENOMEM; | 866 | return -ENOMEM; |
864 | } | 867 | } |
865 | sig_data->uid = audit_sig_uid; | 868 | sig_data->uid = audit_sig_uid; |
866 | sig_data->pid = audit_sig_pid; | 869 | sig_data->pid = audit_sig_pid; |
867 | if (audit_sig_sid) { | 870 | if (audit_sig_sid) { |
868 | memcpy(sig_data->ctx, ctx, len); | 871 | memcpy(sig_data->ctx, ctx, len); |
869 | security_release_secctx(ctx, len); | 872 | security_release_secctx(ctx, len); |
870 | } | 873 | } |
871 | audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO, | 874 | audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO, |
872 | 0, 0, sig_data, sizeof(*sig_data) + len); | 875 | 0, 0, sig_data, sizeof(*sig_data) + len); |
873 | kfree(sig_data); | 876 | kfree(sig_data); |
874 | break; | 877 | break; |
875 | case AUDIT_TTY_GET: { | 878 | case AUDIT_TTY_GET: { |
876 | struct audit_tty_status s; | 879 | struct audit_tty_status s; |
877 | struct task_struct *tsk; | 880 | struct task_struct *tsk; |
878 | unsigned long flags; | 881 | unsigned long flags; |
879 | 882 | ||
880 | rcu_read_lock(); | 883 | rcu_read_lock(); |
881 | tsk = find_task_by_vpid(pid); | 884 | tsk = find_task_by_vpid(pid); |
882 | if (tsk && lock_task_sighand(tsk, &flags)) { | 885 | if (tsk && lock_task_sighand(tsk, &flags)) { |
883 | s.enabled = tsk->signal->audit_tty != 0; | 886 | s.enabled = tsk->signal->audit_tty != 0; |
884 | unlock_task_sighand(tsk, &flags); | 887 | unlock_task_sighand(tsk, &flags); |
885 | } else | 888 | } else |
886 | err = -ESRCH; | 889 | err = -ESRCH; |
887 | rcu_read_unlock(); | 890 | rcu_read_unlock(); |
888 | 891 | ||
889 | if (!err) | 892 | if (!err) |
890 | audit_send_reply(NETLINK_CB(skb).pid, seq, | 893 | audit_send_reply(NETLINK_CB(skb).pid, seq, |
891 | AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); | 894 | AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); |
892 | break; | 895 | break; |
893 | } | 896 | } |
894 | case AUDIT_TTY_SET: { | 897 | case AUDIT_TTY_SET: { |
895 | struct audit_tty_status *s; | 898 | struct audit_tty_status *s; |
896 | struct task_struct *tsk; | 899 | struct task_struct *tsk; |
897 | unsigned long flags; | 900 | unsigned long flags; |
898 | 901 | ||
899 | if (nlh->nlmsg_len < sizeof(struct audit_tty_status)) | 902 | if (nlh->nlmsg_len < sizeof(struct audit_tty_status)) |
900 | return -EINVAL; | 903 | return -EINVAL; |
901 | s = data; | 904 | s = data; |
902 | if (s->enabled != 0 && s->enabled != 1) | 905 | if (s->enabled != 0 && s->enabled != 1) |
903 | return -EINVAL; | 906 | return -EINVAL; |
904 | rcu_read_lock(); | 907 | rcu_read_lock(); |
905 | tsk = find_task_by_vpid(pid); | 908 | tsk = find_task_by_vpid(pid); |
906 | if (tsk && lock_task_sighand(tsk, &flags)) { | 909 | if (tsk && lock_task_sighand(tsk, &flags)) { |
907 | tsk->signal->audit_tty = s->enabled != 0; | 910 | tsk->signal->audit_tty = s->enabled != 0; |
908 | unlock_task_sighand(tsk, &flags); | 911 | unlock_task_sighand(tsk, &flags); |
909 | } else | 912 | } else |
910 | err = -ESRCH; | 913 | err = -ESRCH; |
911 | rcu_read_unlock(); | 914 | rcu_read_unlock(); |
912 | break; | 915 | break; |
913 | } | 916 | } |
914 | default: | 917 | default: |
915 | err = -EINVAL; | 918 | err = -EINVAL; |
916 | break; | 919 | break; |
917 | } | 920 | } |
918 | 921 | ||
919 | return err < 0 ? err : 0; | 922 | return err < 0 ? err : 0; |
920 | } | 923 | } |
921 | 924 | ||
922 | /* | 925 | /* |
923 | * Get message from skb. Each message is processed by audit_receive_msg. | 926 | * Get message from skb. Each message is processed by audit_receive_msg. |
924 | * Malformed skbs with wrong length are discarded silently. | 927 | * Malformed skbs with wrong length are discarded silently. |
925 | */ | 928 | */ |
926 | static void audit_receive_skb(struct sk_buff *skb) | 929 | static void audit_receive_skb(struct sk_buff *skb) |
927 | { | 930 | { |
928 | struct nlmsghdr *nlh; | 931 | struct nlmsghdr *nlh; |
929 | /* | 932 | /* |
930 | * len MUST be signed for NLMSG_NEXT to be able to dec it below 0 | 933 | * len MUST be signed for NLMSG_NEXT to be able to dec it below 0 |
931 | * if the nlmsg_len was not aligned | 934 | * if the nlmsg_len was not aligned |
932 | */ | 935 | */ |
933 | int len; | 936 | int len; |
934 | int err; | 937 | int err; |
935 | 938 | ||
936 | nlh = nlmsg_hdr(skb); | 939 | nlh = nlmsg_hdr(skb); |
937 | len = skb->len; | 940 | len = skb->len; |
938 | 941 | ||
939 | while (NLMSG_OK(nlh, len)) { | 942 | while (NLMSG_OK(nlh, len)) { |
940 | err = audit_receive_msg(skb, nlh); | 943 | err = audit_receive_msg(skb, nlh); |
941 | /* if err or if this message says it wants a response */ | 944 | /* if err or if this message says it wants a response */ |
942 | if (err || (nlh->nlmsg_flags & NLM_F_ACK)) | 945 | if (err || (nlh->nlmsg_flags & NLM_F_ACK)) |
943 | netlink_ack(skb, nlh, err); | 946 | netlink_ack(skb, nlh, err); |
944 | 947 | ||
945 | nlh = NLMSG_NEXT(nlh, len); | 948 | nlh = NLMSG_NEXT(nlh, len); |
946 | } | 949 | } |
947 | } | 950 | } |
948 | 951 | ||
949 | /* Receive messages from netlink socket. */ | 952 | /* Receive messages from netlink socket. */ |
950 | static void audit_receive(struct sk_buff *skb) | 953 | static void audit_receive(struct sk_buff *skb) |
951 | { | 954 | { |
952 | mutex_lock(&audit_cmd_mutex); | 955 | mutex_lock(&audit_cmd_mutex); |
953 | audit_receive_skb(skb); | 956 | audit_receive_skb(skb); |
954 | mutex_unlock(&audit_cmd_mutex); | 957 | mutex_unlock(&audit_cmd_mutex); |
955 | } | 958 | } |
956 | 959 | ||
957 | /* Initialize audit support at boot time. */ | 960 | /* Initialize audit support at boot time. */ |
958 | static int __init audit_init(void) | 961 | static int __init audit_init(void) |
959 | { | 962 | { |
960 | int i; | 963 | int i; |
961 | 964 | ||
962 | if (audit_initialized == AUDIT_DISABLED) | 965 | if (audit_initialized == AUDIT_DISABLED) |
963 | return 0; | 966 | return 0; |
964 | 967 | ||
965 | printk(KERN_INFO "audit: initializing netlink socket (%s)\n", | 968 | printk(KERN_INFO "audit: initializing netlink socket (%s)\n", |
966 | audit_default ? "enabled" : "disabled"); | 969 | audit_default ? "enabled" : "disabled"); |
967 | audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0, | 970 | audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0, |
968 | audit_receive, NULL, THIS_MODULE); | 971 | audit_receive, NULL, THIS_MODULE); |
969 | if (!audit_sock) | 972 | if (!audit_sock) |
970 | audit_panic("cannot initialize netlink socket"); | 973 | audit_panic("cannot initialize netlink socket"); |
971 | else | 974 | else |
972 | audit_sock->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; | 975 | audit_sock->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; |
973 | 976 | ||
974 | skb_queue_head_init(&audit_skb_queue); | 977 | skb_queue_head_init(&audit_skb_queue); |
975 | skb_queue_head_init(&audit_skb_hold_queue); | 978 | skb_queue_head_init(&audit_skb_hold_queue); |
976 | audit_initialized = AUDIT_INITIALIZED; | 979 | audit_initialized = AUDIT_INITIALIZED; |
977 | audit_enabled = audit_default; | 980 | audit_enabled = audit_default; |
978 | audit_ever_enabled |= !!audit_default; | 981 | audit_ever_enabled |= !!audit_default; |
979 | 982 | ||
980 | audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized"); | 983 | audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized"); |
981 | 984 | ||
982 | for (i = 0; i < AUDIT_INODE_BUCKETS; i++) | 985 | for (i = 0; i < AUDIT_INODE_BUCKETS; i++) |
983 | INIT_LIST_HEAD(&audit_inode_hash[i]); | 986 | INIT_LIST_HEAD(&audit_inode_hash[i]); |
984 | 987 | ||
985 | return 0; | 988 | return 0; |
986 | } | 989 | } |
987 | __initcall(audit_init); | 990 | __initcall(audit_init); |
988 | 991 | ||
989 | /* Process kernel command-line parameter at boot time. audit=0 or audit=1. */ | 992 | /* Process kernel command-line parameter at boot time. audit=0 or audit=1. */ |
990 | static int __init audit_enable(char *str) | 993 | static int __init audit_enable(char *str) |
991 | { | 994 | { |
992 | audit_default = !!simple_strtol(str, NULL, 0); | 995 | audit_default = !!simple_strtol(str, NULL, 0); |
993 | if (!audit_default) | 996 | if (!audit_default) |
994 | audit_initialized = AUDIT_DISABLED; | 997 | audit_initialized = AUDIT_DISABLED; |
995 | 998 | ||
996 | printk(KERN_INFO "audit: %s", audit_default ? "enabled" : "disabled"); | 999 | printk(KERN_INFO "audit: %s", audit_default ? "enabled" : "disabled"); |
997 | 1000 | ||
998 | if (audit_initialized == AUDIT_INITIALIZED) { | 1001 | if (audit_initialized == AUDIT_INITIALIZED) { |
999 | audit_enabled = audit_default; | 1002 | audit_enabled = audit_default; |
1000 | audit_ever_enabled |= !!audit_default; | 1003 | audit_ever_enabled |= !!audit_default; |
1001 | } else if (audit_initialized == AUDIT_UNINITIALIZED) { | 1004 | } else if (audit_initialized == AUDIT_UNINITIALIZED) { |
1002 | printk(" (after initialization)"); | 1005 | printk(" (after initialization)"); |
1003 | } else { | 1006 | } else { |
1004 | printk(" (until reboot)"); | 1007 | printk(" (until reboot)"); |
1005 | } | 1008 | } |
1006 | printk("\n"); | 1009 | printk("\n"); |
1007 | 1010 | ||
1008 | return 1; | 1011 | return 1; |
1009 | } | 1012 | } |
1010 | 1013 | ||
1011 | __setup("audit=", audit_enable); | 1014 | __setup("audit=", audit_enable); |
1012 | 1015 | ||
1013 | static void audit_buffer_free(struct audit_buffer *ab) | 1016 | static void audit_buffer_free(struct audit_buffer *ab) |
1014 | { | 1017 | { |
1015 | unsigned long flags; | 1018 | unsigned long flags; |
1016 | 1019 | ||
1017 | if (!ab) | 1020 | if (!ab) |
1018 | return; | 1021 | return; |
1019 | 1022 | ||
1020 | if (ab->skb) | 1023 | if (ab->skb) |
1021 | kfree_skb(ab->skb); | 1024 | kfree_skb(ab->skb); |
1022 | 1025 | ||
1023 | spin_lock_irqsave(&audit_freelist_lock, flags); | 1026 | spin_lock_irqsave(&audit_freelist_lock, flags); |
1024 | if (audit_freelist_count > AUDIT_MAXFREE) | 1027 | if (audit_freelist_count > AUDIT_MAXFREE) |
1025 | kfree(ab); | 1028 | kfree(ab); |
1026 | else { | 1029 | else { |
1027 | audit_freelist_count++; | 1030 | audit_freelist_count++; |
1028 | list_add(&ab->list, &audit_freelist); | 1031 | list_add(&ab->list, &audit_freelist); |
1029 | } | 1032 | } |
1030 | spin_unlock_irqrestore(&audit_freelist_lock, flags); | 1033 | spin_unlock_irqrestore(&audit_freelist_lock, flags); |
1031 | } | 1034 | } |
1032 | 1035 | ||
1033 | static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx, | 1036 | static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx, |
1034 | gfp_t gfp_mask, int type) | 1037 | gfp_t gfp_mask, int type) |
1035 | { | 1038 | { |
1036 | unsigned long flags; | 1039 | unsigned long flags; |
1037 | struct audit_buffer *ab = NULL; | 1040 | struct audit_buffer *ab = NULL; |
1038 | struct nlmsghdr *nlh; | 1041 | struct nlmsghdr *nlh; |
1039 | 1042 | ||
1040 | spin_lock_irqsave(&audit_freelist_lock, flags); | 1043 | spin_lock_irqsave(&audit_freelist_lock, flags); |
1041 | if (!list_empty(&audit_freelist)) { | 1044 | if (!list_empty(&audit_freelist)) { |
1042 | ab = list_entry(audit_freelist.next, | 1045 | ab = list_entry(audit_freelist.next, |
1043 | struct audit_buffer, list); | 1046 | struct audit_buffer, list); |
1044 | list_del(&ab->list); | 1047 | list_del(&ab->list); |
1045 | --audit_freelist_count; | 1048 | --audit_freelist_count; |
1046 | } | 1049 | } |
1047 | spin_unlock_irqrestore(&audit_freelist_lock, flags); | 1050 | spin_unlock_irqrestore(&audit_freelist_lock, flags); |
1048 | 1051 | ||
1049 | if (!ab) { | 1052 | if (!ab) { |
1050 | ab = kmalloc(sizeof(*ab), gfp_mask); | 1053 | ab = kmalloc(sizeof(*ab), gfp_mask); |
1051 | if (!ab) | 1054 | if (!ab) |
1052 | goto err; | 1055 | goto err; |
1053 | } | 1056 | } |
1054 | 1057 | ||
1055 | ab->ctx = ctx; | 1058 | ab->ctx = ctx; |
1056 | ab->gfp_mask = gfp_mask; | 1059 | ab->gfp_mask = gfp_mask; |
1057 | 1060 | ||
1058 | ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask); | 1061 | ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask); |
1059 | if (!ab->skb) | 1062 | if (!ab->skb) |
1060 | goto nlmsg_failure; | 1063 | goto nlmsg_failure; |
1061 | 1064 | ||
1062 | nlh = NLMSG_NEW(ab->skb, 0, 0, type, 0, 0); | 1065 | nlh = NLMSG_NEW(ab->skb, 0, 0, type, 0, 0); |
1063 | 1066 | ||
1064 | return ab; | 1067 | return ab; |
1065 | 1068 | ||
1066 | nlmsg_failure: /* Used by NLMSG_NEW */ | 1069 | nlmsg_failure: /* Used by NLMSG_NEW */ |
1067 | kfree_skb(ab->skb); | 1070 | kfree_skb(ab->skb); |
1068 | ab->skb = NULL; | 1071 | ab->skb = NULL; |
1069 | err: | 1072 | err: |
1070 | audit_buffer_free(ab); | 1073 | audit_buffer_free(ab); |
1071 | return NULL; | 1074 | return NULL; |
1072 | } | 1075 | } |
1073 | 1076 | ||
1074 | /** | 1077 | /** |
1075 | * audit_serial - compute a serial number for the audit record | 1078 | * audit_serial - compute a serial number for the audit record |
1076 | * | 1079 | * |
1077 | * Compute a serial number for the audit record. Audit records are | 1080 | * Compute a serial number for the audit record. Audit records are |
1078 | * written to user-space as soon as they are generated, so a complete | 1081 | * written to user-space as soon as they are generated, so a complete |
1079 | * audit record may be written in several pieces. The timestamp of the | 1082 | * audit record may be written in several pieces. The timestamp of the |
1080 | * record and this serial number are used by the user-space tools to | 1083 | * record and this serial number are used by the user-space tools to |
1081 | * determine which pieces belong to the same audit record. The | 1084 | * determine which pieces belong to the same audit record. The |
1082 | * (timestamp,serial) tuple is unique for each syscall and is live from | 1085 | * (timestamp,serial) tuple is unique for each syscall and is live from |
1083 | * syscall entry to syscall exit. | 1086 | * syscall entry to syscall exit. |
1084 | * | 1087 | * |
1085 | * NOTE: Another possibility is to store the formatted records off the | 1088 | * NOTE: Another possibility is to store the formatted records off the |
1086 | * audit context (for those records that have a context), and emit them | 1089 | * audit context (for those records that have a context), and emit them |
1087 | * all at syscall exit. However, this could delay the reporting of | 1090 | * all at syscall exit. However, this could delay the reporting of |
1088 | * significant errors until syscall exit (or never, if the system | 1091 | * significant errors until syscall exit (or never, if the system |
1089 | * halts). | 1092 | * halts). |
1090 | */ | 1093 | */ |
1091 | unsigned int audit_serial(void) | 1094 | unsigned int audit_serial(void) |
1092 | { | 1095 | { |
1093 | static DEFINE_SPINLOCK(serial_lock); | 1096 | static DEFINE_SPINLOCK(serial_lock); |
1094 | static unsigned int serial = 0; | 1097 | static unsigned int serial = 0; |
1095 | 1098 | ||
1096 | unsigned long flags; | 1099 | unsigned long flags; |
1097 | unsigned int ret; | 1100 | unsigned int ret; |
1098 | 1101 | ||
1099 | spin_lock_irqsave(&serial_lock, flags); | 1102 | spin_lock_irqsave(&serial_lock, flags); |
1100 | do { | 1103 | do { |
1101 | ret = ++serial; | 1104 | ret = ++serial; |
1102 | } while (unlikely(!ret)); | 1105 | } while (unlikely(!ret)); |
1103 | spin_unlock_irqrestore(&serial_lock, flags); | 1106 | spin_unlock_irqrestore(&serial_lock, flags); |
1104 | 1107 | ||
1105 | return ret; | 1108 | return ret; |
1106 | } | 1109 | } |
1107 | 1110 | ||
1108 | static inline void audit_get_stamp(struct audit_context *ctx, | 1111 | static inline void audit_get_stamp(struct audit_context *ctx, |
1109 | struct timespec *t, unsigned int *serial) | 1112 | struct timespec *t, unsigned int *serial) |
1110 | { | 1113 | { |
1111 | if (!ctx || !auditsc_get_stamp(ctx, t, serial)) { | 1114 | if (!ctx || !auditsc_get_stamp(ctx, t, serial)) { |
1112 | *t = CURRENT_TIME; | 1115 | *t = CURRENT_TIME; |
1113 | *serial = audit_serial(); | 1116 | *serial = audit_serial(); |
1114 | } | 1117 | } |
1115 | } | 1118 | } |
1116 | 1119 | ||
1117 | /* Obtain an audit buffer. This routine does locking to obtain the | 1120 | /* Obtain an audit buffer. This routine does locking to obtain the |
1118 | * audit buffer, but then no locking is required for calls to | 1121 | * audit buffer, but then no locking is required for calls to |
1119 | * audit_log_*format. If the tsk is a task that is currently in a | 1122 | * audit_log_*format. If the tsk is a task that is currently in a |
1120 | * syscall, then the syscall is marked as auditable and an audit record | 1123 | * syscall, then the syscall is marked as auditable and an audit record |
1121 | * will be written at syscall exit. If there is no associated task, tsk | 1124 | * will be written at syscall exit. If there is no associated task, tsk |
1122 | * should be NULL. */ | 1125 | * should be NULL. */ |
1123 | 1126 | ||
1124 | /** | 1127 | /** |
1125 | * audit_log_start - obtain an audit buffer | 1128 | * audit_log_start - obtain an audit buffer |
1126 | * @ctx: audit_context (may be NULL) | 1129 | * @ctx: audit_context (may be NULL) |
1127 | * @gfp_mask: type of allocation | 1130 | * @gfp_mask: type of allocation |
1128 | * @type: audit message type | 1131 | * @type: audit message type |
1129 | * | 1132 | * |
1130 | * Returns audit_buffer pointer on success or NULL on error. | 1133 | * Returns audit_buffer pointer on success or NULL on error. |
1131 | * | 1134 | * |
1132 | * Obtain an audit buffer. This routine does locking to obtain the | 1135 | * Obtain an audit buffer. This routine does locking to obtain the |
1133 | * audit buffer, but then no locking is required for calls to | 1136 | * audit buffer, but then no locking is required for calls to |
1134 | * audit_log_*format. If the task (ctx) is a task that is currently in a | 1137 | * audit_log_*format. If the task (ctx) is a task that is currently in a |
1135 | * syscall, then the syscall is marked as auditable and an audit record | 1138 | * syscall, then the syscall is marked as auditable and an audit record |
1136 | * will be written at syscall exit. If there is no associated task, then | 1139 | * will be written at syscall exit. If there is no associated task, then |
1137 | * task context (ctx) should be NULL. | 1140 | * task context (ctx) should be NULL. |
1138 | */ | 1141 | */ |
1139 | struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, | 1142 | struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, |
1140 | int type) | 1143 | int type) |
1141 | { | 1144 | { |
1142 | struct audit_buffer *ab = NULL; | 1145 | struct audit_buffer *ab = NULL; |
1143 | struct timespec t; | 1146 | struct timespec t; |
1144 | unsigned int uninitialized_var(serial); | 1147 | unsigned int uninitialized_var(serial); |
1145 | int reserve; | 1148 | int reserve; |
1146 | unsigned long timeout_start = jiffies; | 1149 | unsigned long timeout_start = jiffies; |
1147 | 1150 | ||
1148 | if (audit_initialized != AUDIT_INITIALIZED) | 1151 | if (audit_initialized != AUDIT_INITIALIZED) |
1149 | return NULL; | 1152 | return NULL; |
1150 | 1153 | ||
1151 | if (unlikely(audit_filter_type(type))) | 1154 | if (unlikely(audit_filter_type(type))) |
1152 | return NULL; | 1155 | return NULL; |
1153 | 1156 | ||
1154 | if (gfp_mask & __GFP_WAIT) | 1157 | if (gfp_mask & __GFP_WAIT) |
1155 | reserve = 0; | 1158 | reserve = 0; |
1156 | else | 1159 | else |
1157 | reserve = 5; /* Allow atomic callers to go up to five | 1160 | reserve = 5; /* Allow atomic callers to go up to five |
1158 | entries over the normal backlog limit */ | 1161 | entries over the normal backlog limit */ |
1159 | 1162 | ||
1160 | while (audit_backlog_limit | 1163 | while (audit_backlog_limit |
1161 | && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) { | 1164 | && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) { |
1162 | if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time | 1165 | if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time |
1163 | && time_before(jiffies, timeout_start + audit_backlog_wait_time)) { | 1166 | && time_before(jiffies, timeout_start + audit_backlog_wait_time)) { |
1164 | 1167 | ||
1165 | /* Wait for auditd to drain the queue a little */ | 1168 | /* Wait for auditd to drain the queue a little */ |
1166 | DECLARE_WAITQUEUE(wait, current); | 1169 | DECLARE_WAITQUEUE(wait, current); |
1167 | set_current_state(TASK_INTERRUPTIBLE); | 1170 | set_current_state(TASK_INTERRUPTIBLE); |
1168 | add_wait_queue(&audit_backlog_wait, &wait); | 1171 | add_wait_queue(&audit_backlog_wait, &wait); |
1169 | 1172 | ||
1170 | if (audit_backlog_limit && | 1173 | if (audit_backlog_limit && |
1171 | skb_queue_len(&audit_skb_queue) > audit_backlog_limit) | 1174 | skb_queue_len(&audit_skb_queue) > audit_backlog_limit) |
1172 | schedule_timeout(timeout_start + audit_backlog_wait_time - jiffies); | 1175 | schedule_timeout(timeout_start + audit_backlog_wait_time - jiffies); |
1173 | 1176 | ||
1174 | __set_current_state(TASK_RUNNING); | 1177 | __set_current_state(TASK_RUNNING); |
1175 | remove_wait_queue(&audit_backlog_wait, &wait); | 1178 | remove_wait_queue(&audit_backlog_wait, &wait); |
1176 | continue; | 1179 | continue; |
1177 | } | 1180 | } |
1178 | if (audit_rate_check() && printk_ratelimit()) | 1181 | if (audit_rate_check() && printk_ratelimit()) |
1179 | printk(KERN_WARNING | 1182 | printk(KERN_WARNING |
1180 | "audit: audit_backlog=%d > " | 1183 | "audit: audit_backlog=%d > " |
1181 | "audit_backlog_limit=%d\n", | 1184 | "audit_backlog_limit=%d\n", |
1182 | skb_queue_len(&audit_skb_queue), | 1185 | skb_queue_len(&audit_skb_queue), |
1183 | audit_backlog_limit); | 1186 | audit_backlog_limit); |
1184 | audit_log_lost("backlog limit exceeded"); | 1187 | audit_log_lost("backlog limit exceeded"); |
1185 | audit_backlog_wait_time = audit_backlog_wait_overflow; | 1188 | audit_backlog_wait_time = audit_backlog_wait_overflow; |
1186 | wake_up(&audit_backlog_wait); | 1189 | wake_up(&audit_backlog_wait); |
1187 | return NULL; | 1190 | return NULL; |
1188 | } | 1191 | } |
1189 | 1192 | ||
1190 | ab = audit_buffer_alloc(ctx, gfp_mask, type); | 1193 | ab = audit_buffer_alloc(ctx, gfp_mask, type); |
1191 | if (!ab) { | 1194 | if (!ab) { |
1192 | audit_log_lost("out of memory in audit_log_start"); | 1195 | audit_log_lost("out of memory in audit_log_start"); |
1193 | return NULL; | 1196 | return NULL; |
1194 | } | 1197 | } |
1195 | 1198 | ||
1196 | audit_get_stamp(ab->ctx, &t, &serial); | 1199 | audit_get_stamp(ab->ctx, &t, &serial); |
1197 | 1200 | ||
1198 | audit_log_format(ab, "audit(%lu.%03lu:%u): ", | 1201 | audit_log_format(ab, "audit(%lu.%03lu:%u): ", |
1199 | t.tv_sec, t.tv_nsec/1000000, serial); | 1202 | t.tv_sec, t.tv_nsec/1000000, serial); |
1200 | return ab; | 1203 | return ab; |
1201 | } | 1204 | } |
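
The kernel-doc above describes the usual three-step calling sequence: start a buffer, format fields into it, then close it with audit_log_end() (defined elsewhere in this file). A minimal sketch of a caller; the message type and field are illustrative, not taken from this commit:

	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_KERNEL);
	if (!ab)
		return;			/* auditing disabled, type filtered, or out of memory */
	audit_log_format(ab, "example_key=%d", 42);
	audit_log_end(ab);

Note that audit_log_start() has already written the "audit(sec.msec:serial): " prefix (see the audit_log_format() call just above), so callers only append their own key=value fields.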
1202 | 1205 | ||
1203 | /** | 1206 | /** |
1204 | * audit_expand - expand skb in the audit buffer | 1207 | * audit_expand - expand skb in the audit buffer |
1205 | * @ab: audit_buffer | 1208 | * @ab: audit_buffer |
1206 | * @extra: space to add at tail of the skb | 1209 | * @extra: space to add at tail of the skb |
1207 | * | 1210 | * |
1208 | * Returns 0 (no space) on failed expansion, or available space if | 1211 | * Returns 0 (no space) on failed expansion, or available space if |
1209 | * successful. | 1212 | * successful. |
1210 | */ | 1213 | */ |
1211 | static inline int audit_expand(struct audit_buffer *ab, int extra) | 1214 | static inline int audit_expand(struct audit_buffer *ab, int extra) |
1212 | { | 1215 | { |
1213 | struct sk_buff *skb = ab->skb; | 1216 | struct sk_buff *skb = ab->skb; |
1214 | int oldtail = skb_tailroom(skb); | 1217 | int oldtail = skb_tailroom(skb); |
1215 | int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask); | 1218 | int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask); |
1216 | int newtail = skb_tailroom(skb); | 1219 | int newtail = skb_tailroom(skb); |
1217 | 1220 | ||
1218 | if (ret < 0) { | 1221 | if (ret < 0) { |
1219 | audit_log_lost("out of memory in audit_expand"); | 1222 | audit_log_lost("out of memory in audit_expand"); |
1220 | return 0; | 1223 | return 0; |
1221 | } | 1224 | } |
1222 | 1225 | ||
1223 | skb->truesize += newtail - oldtail; | 1226 | skb->truesize += newtail - oldtail; |
1224 | return newtail; | 1227 | return newtail; |
1225 | } | 1228 | } |
1226 | 1229 | ||
1227 | /* | 1230 | /* |
1228 | * Format an audit message into the audit buffer. If there isn't enough | 1231 | * Format an audit message into the audit buffer. If there isn't enough |
1229 | * room in the audit buffer, more room will be allocated and vsnprint | 1232 | * room in the audit buffer, more room will be allocated and vsnprint |
1230 | * will be called a second time. Currently, we assume that a printk | 1233 | * will be called a second time. Currently, we assume that a printk |
1231 | * can't format message larger than 1024 bytes, so we don't either. | 1234 | * can't format message larger than 1024 bytes, so we don't either. |
1232 | */ | 1235 | */ |
1233 | static void audit_log_vformat(struct audit_buffer *ab, const char *fmt, | 1236 | static void audit_log_vformat(struct audit_buffer *ab, const char *fmt, |
1234 | va_list args) | 1237 | va_list args) |
1235 | { | 1238 | { |
1236 | int len, avail; | 1239 | int len, avail; |
1237 | struct sk_buff *skb; | 1240 | struct sk_buff *skb; |
1238 | va_list args2; | 1241 | va_list args2; |
1239 | 1242 | ||
1240 | if (!ab) | 1243 | if (!ab) |
1241 | return; | 1244 | return; |
1242 | 1245 | ||
1243 | BUG_ON(!ab->skb); | 1246 | BUG_ON(!ab->skb); |
1244 | skb = ab->skb; | 1247 | skb = ab->skb; |
1245 | avail = skb_tailroom(skb); | 1248 | avail = skb_tailroom(skb); |
1246 | if (avail == 0) { | 1249 | if (avail == 0) { |
1247 | avail = audit_expand(ab, AUDIT_BUFSIZ); | 1250 | avail = audit_expand(ab, AUDIT_BUFSIZ); |
1248 | if (!avail) | 1251 | if (!avail) |
1249 | goto out; | 1252 | goto out; |
1250 | } | 1253 | } |
1251 | va_copy(args2, args); | 1254 | va_copy(args2, args); |
1252 | len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args); | 1255 | len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args); |
1253 | if (len >= avail) { | 1256 | if (len >= avail) { |
1254 | /* The printk buffer is 1024 bytes long, so if we get | 1257 | /* The printk buffer is 1024 bytes long, so if we get |
1255 | * here and AUDIT_BUFSIZ is at least 1024, then we can | 1258 | * here and AUDIT_BUFSIZ is at least 1024, then we can |
1256 | * log everything that printk could have logged. */ | 1259 | * log everything that printk could have logged. */ |
1257 | avail = audit_expand(ab, | 1260 | avail = audit_expand(ab, |
1258 | max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail)); | 1261 | max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail)); |
1259 | if (!avail) | 1262 | if (!avail) |
1260 | goto out; | 1263 | goto out; |
1261 | len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2); | 1264 | len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2); |
1262 | } | 1265 | } |
1263 | va_end(args2); | 1266 | va_end(args2); |
1264 | if (len > 0) | 1267 | if (len > 0) |
1265 | skb_put(skb, len); | 1268 | skb_put(skb, len); |
1266 | out: | 1269 | out: |
1267 | return; | 1270 | return; |
1268 | } | 1271 | } |
1269 | 1272 | ||
1270 | /** | 1273 | /** |
1271 | * audit_log_format - format a message into the audit buffer. | 1274 | * audit_log_format - format a message into the audit buffer. |
1272 | * @ab: audit_buffer | 1275 | * @ab: audit_buffer |
1273 | * @fmt: format string | 1276 | * @fmt: format string |
1274 | * @...: optional parameters matching @fmt string | 1277 | * @...: optional parameters matching @fmt string |
1275 | * | 1278 | * |
1276 | * All the work is done in audit_log_vformat. | 1279 | * All the work is done in audit_log_vformat. |
1277 | */ | 1280 | */ |
1278 | void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) | 1281 | void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) |
1279 | { | 1282 | { |
1280 | va_list args; | 1283 | va_list args; |
1281 | 1284 | ||
1282 | if (!ab) | 1285 | if (!ab) |
1283 | return; | 1286 | return; |
1284 | va_start(args, fmt); | 1287 | va_start(args, fmt); |
1285 | audit_log_vformat(ab, fmt, args); | 1288 | audit_log_vformat(ab, fmt, args); |
1286 | va_end(args); | 1289 | va_end(args); |
1287 | } | 1290 | } |
1288 | 1291 | ||
1289 | /** | 1292 | /** |
1290 | * audit_log_hex - convert a buffer to hex and append it to the audit skb | 1293 | * audit_log_hex - convert a buffer to hex and append it to the audit skb |
1291 | * @ab: the audit_buffer | 1294 | * @ab: the audit_buffer |
1292 | * @buf: buffer to convert to hex | 1295 | * @buf: buffer to convert to hex |
1293 | * @len: length of @buf to be converted | 1296 | * @len: length of @buf to be converted |
1294 | * | 1297 | * |
1295 | * No return value; failure to expand is silently ignored. | 1298 | * No return value; failure to expand is silently ignored. |
1296 | * | 1299 | * |
1297 | * This function converts the passed buf into a string of ASCII hex | 1300 | * This function converts the passed buf into a string of ASCII hex |
1298 | * digits and appends the new string to the skb. | 1301 | * digits and appends the new string to the skb. |
1299 | */ | 1302 | */ |
1300 | void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, | 1303 | void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, |
1301 | size_t len) | 1304 | size_t len) |
1302 | { | 1305 | { |
1303 | int i, avail, new_len; | 1306 | int i, avail, new_len; |
1304 | unsigned char *ptr; | 1307 | unsigned char *ptr; |
1305 | struct sk_buff *skb; | 1308 | struct sk_buff *skb; |
1306 | static const unsigned char *hex = "0123456789ABCDEF"; | 1309 | static const unsigned char *hex = "0123456789ABCDEF"; |
1307 | 1310 | ||
1308 | if (!ab) | 1311 | if (!ab) |
1309 | return; | 1312 | return; |
1310 | 1313 | ||
1311 | BUG_ON(!ab->skb); | 1314 | BUG_ON(!ab->skb); |
1312 | skb = ab->skb; | 1315 | skb = ab->skb; |
1313 | avail = skb_tailroom(skb); | 1316 | avail = skb_tailroom(skb); |
1314 | new_len = len<<1; | 1317 | new_len = len<<1; |
1315 | if (new_len >= avail) { | 1318 | if (new_len >= avail) { |
1316 | /* Round the buffer request up to the next multiple of AUDIT_BUFSIZ */ | 1319 | /* Round the buffer request up to the next multiple of AUDIT_BUFSIZ */ |
1317 | new_len = AUDIT_BUFSIZ*(((new_len-avail)/AUDIT_BUFSIZ) + 1); | 1320 | new_len = AUDIT_BUFSIZ*(((new_len-avail)/AUDIT_BUFSIZ) + 1); |
1318 | avail = audit_expand(ab, new_len); | 1321 | avail = audit_expand(ab, new_len); |
1319 | if (!avail) | 1322 | if (!avail) |
1320 | return; | 1323 | return; |
1321 | } | 1324 | } |
1322 | 1325 | ||
1323 | ptr = skb_tail_pointer(skb); | 1326 | ptr = skb_tail_pointer(skb); |
1324 | for (i=0; i<len; i++) { | 1327 | for (i=0; i<len; i++) { |
1325 | *ptr++ = hex[(buf[i] & 0xF0)>>4]; /* Upper nibble */ | 1328 | *ptr++ = hex[(buf[i] & 0xF0)>>4]; /* Upper nibble */ |
1326 | *ptr++ = hex[buf[i] & 0x0F]; /* Lower nibble */ | 1329 | *ptr++ = hex[buf[i] & 0x0F]; /* Lower nibble */ |
1327 | } | 1330 | } |
1328 | *ptr = 0; | 1331 | *ptr = 0; |
1329 | skb_put(skb, len << 1); /* new string is twice the old string */ | 1332 | skb_put(skb, len << 1); /* new string is twice the old string */ |
1330 | } | 1333 | } |
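
The nibble-to-hex expansion above doubles the data length, which is why the caller reserves len<<1 bytes. A self-contained userspace version of the same loop, handy for checking the expected output (the helper name is made up for illustration):

    #include <stddef.h>
    #include <stdio.h>

    /* Encode len bytes of buf as uppercase hex into out (needs 2*len + 1 bytes). */
    static void hex_encode(const unsigned char *buf, size_t len, char *out)
    {
            static const char hex[] = "0123456789ABCDEF";
            size_t i;

            for (i = 0; i < len; i++) {
                    *out++ = hex[(buf[i] & 0xF0) >> 4]; /* upper nibble */
                    *out++ = hex[buf[i] & 0x0F];        /* lower nibble */
            }
            *out = '\0';
    }

    int main(void)
    {
            char out[16];

            hex_encode((const unsigned char *)"AB ", 3, out);
            printf("%s\n", out);    /* prints "414220" */
            return 0;
    }
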
1331 | 1334 | ||
1332 | /* | 1335 | /* |
1333 | * Format a string of no more than slen characters into the audit buffer, | 1336 | * Format a string of no more than slen characters into the audit buffer, |
1334 | * enclosed in quote marks. | 1337 | * enclosed in quote marks. |
1335 | */ | 1338 | */ |
1336 | void audit_log_n_string(struct audit_buffer *ab, const char *string, | 1339 | void audit_log_n_string(struct audit_buffer *ab, const char *string, |
1337 | size_t slen) | 1340 | size_t slen) |
1338 | { | 1341 | { |
1339 | int avail, new_len; | 1342 | int avail, new_len; |
1340 | unsigned char *ptr; | 1343 | unsigned char *ptr; |
1341 | struct sk_buff *skb; | 1344 | struct sk_buff *skb; |
1342 | 1345 | ||
1343 | if (!ab) | 1346 | if (!ab) |
1344 | return; | 1347 | return; |
1345 | 1348 | ||
1346 | BUG_ON(!ab->skb); | 1349 | BUG_ON(!ab->skb); |
1347 | skb = ab->skb; | 1350 | skb = ab->skb; |
1348 | avail = skb_tailroom(skb); | 1351 | avail = skb_tailroom(skb); |
1349 | new_len = slen + 3; /* enclosing quotes + null terminator */ | 1352 | new_len = slen + 3; /* enclosing quotes + null terminator */ |
1350 | if (new_len > avail) { | 1353 | if (new_len > avail) { |
1351 | avail = audit_expand(ab, new_len); | 1354 | avail = audit_expand(ab, new_len); |
1352 | if (!avail) | 1355 | if (!avail) |
1353 | return; | 1356 | return; |
1354 | } | 1357 | } |
1355 | ptr = skb_tail_pointer(skb); | 1358 | ptr = skb_tail_pointer(skb); |
1356 | *ptr++ = '"'; | 1359 | *ptr++ = '"'; |
1357 | memcpy(ptr, string, slen); | 1360 | memcpy(ptr, string, slen); |
1358 | ptr += slen; | 1361 | ptr += slen; |
1359 | *ptr++ = '"'; | 1362 | *ptr++ = '"'; |
1360 | *ptr = 0; | 1363 | *ptr = 0; |
1361 | skb_put(skb, slen + 2); /* don't include null terminator */ | 1364 | skb_put(skb, slen + 2); /* don't include null terminator */ |
1362 | } | 1365 | } |
1363 | 1366 | ||
1364 | /** | 1367 | /** |
1365 | * audit_string_contains_control - does a string need to be logged in hex | 1368 | * audit_string_contains_control - does a string need to be logged in hex |
1366 | * @string: string to be checked | 1369 | * @string: string to be checked |
1367 | * @len: max length of the string to check | 1370 | * @len: max length of the string to check |
1368 | */ | 1371 | */ |
1369 | int audit_string_contains_control(const char *string, size_t len) | 1372 | int audit_string_contains_control(const char *string, size_t len) |
1370 | { | 1373 | { |
1371 | const unsigned char *p; | 1374 | const unsigned char *p; |
1372 | for (p = string; p < (const unsigned char *)string + len; p++) { | 1375 | for (p = string; p < (const unsigned char *)string + len; p++) { |
1373 | if (*p == '"' || *p < 0x21 || *p > 0x7e) | 1376 | if (*p == '"' || *p < 0x21 || *p > 0x7e) |
1374 | return 1; | 1377 | return 1; |
1375 | } | 1378 | } |
1376 | return 0; | 1379 | return 0; |
1377 | } | 1380 | } |
1378 | 1381 | ||
1379 | /** | 1382 | /** |
1380 | * audit_log_n_untrustedstring - log a string that may contain random characters | 1383 | * audit_log_n_untrustedstring - log a string that may contain random characters |
1381 | * @ab: audit_buffer | 1384 | * @ab: audit_buffer |
1382 | * @len: length of string (not including trailing null) | 1385 | * @len: length of string (not including trailing null) |
1383 | * @string: string to be logged | 1386 | * @string: string to be logged |
1384 | * | 1387 | * |
1385 | * This code will escape a string that is passed to it if the string | 1388 | * This code will escape a string that is passed to it if the string |
1386 | * contains a control character, unprintable character, double quote mark, | 1389 | * contains a control character, unprintable character, double quote mark, |
1387 | * or a space. Unescaped strings will start and end with a double quote mark. | 1390 | * or a space. Unescaped strings will start and end with a double quote mark. |
1388 | * Strings that are escaped are printed in hex (2 digits per char). | 1391 | * Strings that are escaped are printed in hex (2 digits per char). |
1389 | * | 1392 | * |
1390 | * The caller specifies the number of characters in the string to log, which may | 1393 | * The caller specifies the number of characters in the string to log, which may |
1391 | * or may not be the entire string. | 1394 | * or may not be the entire string. |
1392 | */ | 1395 | */ |
1393 | void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string, | 1396 | void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string, |
1394 | size_t len) | 1397 | size_t len) |
1395 | { | 1398 | { |
1396 | if (audit_string_contains_control(string, len)) | 1399 | if (audit_string_contains_control(string, len)) |
1397 | audit_log_n_hex(ab, string, len); | 1400 | audit_log_n_hex(ab, string, len); |
1398 | else | 1401 | else |
1399 | audit_log_n_string(ab, string, len); | 1402 | audit_log_n_string(ab, string, len); |
1400 | } | 1403 | } |
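
Putting the two helpers together: any byte outside 0x21-0x7e or a double quote forces hex output, otherwise the string is emitted in quotes. A small userspace sketch of the same decision in isolation (illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    /* Mirror of the kernel's test: quote, space, control or non-ASCII byte => hex. */
    static int needs_hex(const unsigned char *s, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    if (s[i] == '"' || s[i] < 0x21 || s[i] > 0x7e)
                            return 1;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", needs_hex((const unsigned char *)"comm", 4)); /* 0: logged as "comm" */
            printf("%d\n", needs_hex((const unsigned char *)"a b", 3));  /* 1: logged as hex 612062 */
            return 0;
    }
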
1401 | 1404 | ||
1402 | /** | 1405 | /** |
1403 | * audit_log_untrustedstring - log a string that may contain random characters | 1406 | * audit_log_untrustedstring - log a string that may contain random characters |
1404 | * @ab: audit_buffer | 1407 | * @ab: audit_buffer |
1405 | * @string: string to be logged | 1408 | * @string: string to be logged |
1406 | * | 1409 | * |
1407 | * Same as audit_log_n_untrustedstring(), except that strlen is used to | 1410 | * Same as audit_log_n_untrustedstring(), except that strlen is used to |
1408 | * determine string length. | 1411 | * determine string length. |
1409 | */ | 1412 | */ |
1410 | void audit_log_untrustedstring(struct audit_buffer *ab, const char *string) | 1413 | void audit_log_untrustedstring(struct audit_buffer *ab, const char *string) |
1411 | { | 1414 | { |
1412 | audit_log_n_untrustedstring(ab, string, strlen(string)); | 1415 | audit_log_n_untrustedstring(ab, string, strlen(string)); |
1413 | } | 1416 | } |
1414 | 1417 | ||
1415 | /* This is a helper-function to print the escaped d_path */ | 1418 | /* This is a helper-function to print the escaped d_path */ |
1416 | void audit_log_d_path(struct audit_buffer *ab, const char *prefix, | 1419 | void audit_log_d_path(struct audit_buffer *ab, const char *prefix, |
1417 | struct path *path) | 1420 | struct path *path) |
1418 | { | 1421 | { |
1419 | char *p, *pathname; | 1422 | char *p, *pathname; |
1420 | 1423 | ||
1421 | if (prefix) | 1424 | if (prefix) |
1422 | audit_log_format(ab, " %s", prefix); | 1425 | audit_log_format(ab, " %s", prefix); |
1423 | 1426 | ||
1424 | /* We will allow 11 bytes for ' (deleted)' to be appended */ | 1427 | /* We will allow 11 bytes for ' (deleted)' to be appended */ |
1425 | pathname = kmalloc(PATH_MAX+11, ab->gfp_mask); | 1428 | pathname = kmalloc(PATH_MAX+11, ab->gfp_mask); |
1426 | if (!pathname) { | 1429 | if (!pathname) { |
1427 | audit_log_string(ab, "<no_memory>"); | 1430 | audit_log_string(ab, "<no_memory>"); |
1428 | return; | 1431 | return; |
1429 | } | 1432 | } |
1430 | p = d_path(path, pathname, PATH_MAX+11); | 1433 | p = d_path(path, pathname, PATH_MAX+11); |
1431 | if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */ | 1434 | if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */ |
1432 | /* FIXME: can we save some information here? */ | 1435 | /* FIXME: can we save some information here? */ |
1433 | audit_log_string(ab, "<too_long>"); | 1436 | audit_log_string(ab, "<too_long>"); |
1434 | } else | 1437 | } else |
1435 | audit_log_untrustedstring(ab, p); | 1438 | audit_log_untrustedstring(ab, p); |
1436 | kfree(pathname); | 1439 | kfree(pathname); |
1437 | } | 1440 | } |
1438 | 1441 | ||
1439 | void audit_log_key(struct audit_buffer *ab, char *key) | 1442 | void audit_log_key(struct audit_buffer *ab, char *key) |
1440 | { | 1443 | { |
1441 | audit_log_format(ab, " key="); | 1444 | audit_log_format(ab, " key="); |
1442 | if (key) | 1445 | if (key) |
1443 | audit_log_untrustedstring(ab, key); | 1446 | audit_log_untrustedstring(ab, key); |
1444 | else | 1447 | else |
1445 | audit_log_format(ab, "(null)"); | 1448 | audit_log_format(ab, "(null)"); |
1446 | } | 1449 | } |
1447 | 1450 | ||
1448 | /** | 1451 | /** |
1449 | * audit_log_end - end one audit record | 1452 | * audit_log_end - end one audit record |
1450 | * @ab: the audit_buffer | 1453 | * @ab: the audit_buffer |
1451 | * | 1454 | * |
1452 | * The netlink_* functions cannot be called inside an irq context, so | 1455 | * The netlink_* functions cannot be called inside an irq context, so |
1453 | * the audit buffer is placed on a queue and a tasklet is scheduled to | 1456 | * the audit buffer is placed on a queue and a tasklet is scheduled to |
1454 | * remove them from the queue outside the irq context. May be called in | 1457 | * remove them from the queue outside the irq context. May be called in |
1455 | * any context. | 1458 | * any context. |
1456 | */ | 1459 | */ |
1457 | void audit_log_end(struct audit_buffer *ab) | 1460 | void audit_log_end(struct audit_buffer *ab) |
1458 | { | 1461 | { |
1459 | if (!ab) | 1462 | if (!ab) |
1460 | return; | 1463 | return; |
1461 | if (!audit_rate_check()) { | 1464 | if (!audit_rate_check()) { |
1462 | audit_log_lost("rate limit exceeded"); | 1465 | audit_log_lost("rate limit exceeded"); |
1463 | } else { | 1466 | } else { |
1464 | struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); | 1467 | struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); |
1465 | nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0); | 1468 | nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0); |
1466 | 1469 | ||
1467 | if (audit_pid) { | 1470 | if (audit_pid) { |
1468 | skb_queue_tail(&audit_skb_queue, ab->skb); | 1471 | skb_queue_tail(&audit_skb_queue, ab->skb); |
1469 | wake_up_interruptible(&kauditd_wait); | 1472 | wake_up_interruptible(&kauditd_wait); |
1470 | } else { | 1473 | } else { |
1471 | audit_printk_skb(ab->skb); | 1474 | audit_printk_skb(ab->skb); |
1472 | } | 1475 | } |
1473 | ab->skb = NULL; | 1476 | ab->skb = NULL; |
1474 | } | 1477 | } |
1475 | audit_buffer_free(ab); | 1478 | audit_buffer_free(ab); |
1476 | } | 1479 | } |
1477 | 1480 | ||
1478 | /** | 1481 | /** |
1479 | * audit_log - Log an audit record | 1482 | * audit_log - Log an audit record |
1480 | * @ctx: audit context | 1483 | * @ctx: audit context |
1481 | * @gfp_mask: type of allocation | 1484 | * @gfp_mask: type of allocation |
1482 | * @type: audit message type | 1485 | * @type: audit message type |
1483 | * @fmt: format string to use | 1486 | * @fmt: format string to use |
1484 | * @...: variable parameters matching the format string | 1487 | * @...: variable parameters matching the format string |
1485 | * | 1488 | * |
1486 | * This is a convenience function that calls audit_log_start, | 1489 | * This is a convenience function that calls audit_log_start, |
1487 | * audit_log_vformat, and audit_log_end. It may be called | 1490 | * audit_log_vformat, and audit_log_end. It may be called |
1488 | * in any context. | 1491 | * in any context. |
1489 | */ | 1492 | */ |
1490 | void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, | 1493 | void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, |
1491 | const char *fmt, ...) | 1494 | const char *fmt, ...) |
1492 | { | 1495 | { |
1493 | struct audit_buffer *ab; | 1496 | struct audit_buffer *ab; |
1494 | va_list args; | 1497 | va_list args; |
1495 | 1498 | ||
1496 | ab = audit_log_start(ctx, gfp_mask, type); | 1499 | ab = audit_log_start(ctx, gfp_mask, type); |
1497 | if (ab) { | 1500 | if (ab) { |
1498 | va_start(args, fmt); | 1501 | va_start(args, fmt); |
1499 | audit_log_vformat(ab, fmt, args); | 1502 | audit_log_vformat(ab, fmt, args); |
1500 | va_end(args); | 1503 | va_end(args); |
1501 | audit_log_end(ab); | 1504 | audit_log_end(ab); |
1502 | } | 1505 | } |
1503 | } | 1506 | } |
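
For a single-shot record the convenience wrapper can be used directly; a hedged usage sketch, with placeholder type and message text that are not taken from this commit:

    /* Fragment: one-shot record, equivalent to start + vformat + end above. */
    audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL,
              "example subsystem enabled=%d", 1);
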
1507 | |||
1508 | #ifdef CONFIG_SECURITY | ||
1509 | /** | ||
1510 | * audit_log_secctx - Converts and logs SELinux context | ||
1511 | * @ab: audit_buffer | ||
1512 | * @secid: security identifier (secid) to be converted and logged | ||
1513 | * | ||
1514 | * This is a helper function that calls security_secid_to_secctx to convert | ||
1515 | * secid to secctx and then adds the (converted) SELinux context to the audit | ||
1516 | * log by calling audit_log_format, thus preventing the internal secid from | ||
1517 | * leaking to userspace. If the secid cannot be converted, audit_panic is called. | ||
1518 | */ | ||
1519 | void audit_log_secctx(struct audit_buffer *ab, u32 secid) | ||
1520 | { | ||
1521 | u32 len; | ||
1522 | char *secctx; | ||
1523 | |||
1524 | if (security_secid_to_secctx(secid, &secctx, &len)) { | ||
1525 | audit_panic("Cannot convert secid to context"); | ||
1526 | } else { | ||
1527 | audit_log_format(ab, " obj=%s", secctx); | ||
1528 | security_release_secctx(secctx, len); | ||
1529 | } | ||
1530 | } | ||
1531 | EXPORT_SYMBOL(audit_log_secctx); | ||
1532 | #endif | ||
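
A hedged sketch of how the new helper might be called from a record builder, assuming the caller already holds an open audit_buffer and obtains a secid elsewhere (security_task_getsecid() is used here only as an example source); the call is only available when CONFIG_SECURITY is set, as guarded above:

    /* Fragment: append the current task's security context to a record. */
    u32 secid;

    security_task_getsecid(current, &secid);   /* example secid source, <linux/security.h> */
    audit_log_secctx(ab, secid);               /* emits " obj=<context>" or calls audit_panic */
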
1504 | 1533 | ||
1505 | EXPORT_SYMBOL(audit_log_start); | 1534 | EXPORT_SYMBOL(audit_log_start); |
1506 | EXPORT_SYMBOL(audit_log_end); | 1535 | EXPORT_SYMBOL(audit_log_end); |
1507 | EXPORT_SYMBOL(audit_log_format); | 1536 | EXPORT_SYMBOL(audit_log_format); |
1508 | EXPORT_SYMBOL(audit_log); | 1537 | EXPORT_SYMBOL(audit_log); |
1509 | 1538 |
net/netfilter/ipset/ip_set_hash_ip.c
1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 as | 4 | * it under the terms of the GNU General Public License version 2 as |
5 | * published by the Free Software Foundation. | 5 | * published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | /* Kernel module implementing an IP set type: the hash:ip type */ | 8 | /* Kernel module implementing an IP set type: the hash:ip type */ |
9 | 9 | ||
10 | #include <linux/jhash.h> | 10 | #include <linux/jhash.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <net/ip.h> | 16 | #include <net/ip.h> |
17 | #include <net/ipv6.h> | 17 | #include <net/ipv6.h> |
18 | #include <net/netlink.h> | 18 | #include <net/netlink.h> |
19 | #include <net/tcp.h> | 19 | #include <net/tcp.h> |
20 | 20 | ||
21 | #include <linux/netfilter.h> | 21 | #include <linux/netfilter.h> |
22 | #include <linux/netfilter/ipset/pfxlen.h> | 22 | #include <linux/netfilter/ipset/pfxlen.h> |
23 | #include <linux/netfilter/ipset/ip_set.h> | 23 | #include <linux/netfilter/ipset/ip_set.h> |
24 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 24 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
25 | #include <linux/netfilter/ipset/ip_set_hash.h> | 25 | #include <linux/netfilter/ipset/ip_set_hash.h> |
26 | 26 | ||
27 | MODULE_LICENSE("GPL"); | 27 | MODULE_LICENSE("GPL"); |
28 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); | 28 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); |
29 | MODULE_DESCRIPTION("hash:ip type of IP sets"); | 29 | MODULE_DESCRIPTION("hash:ip type of IP sets"); |
30 | MODULE_ALIAS("ip_set_hash:ip"); | 30 | MODULE_ALIAS("ip_set_hash:ip"); |
31 | 31 | ||
32 | /* Type specific function prefix */ | 32 | /* Type specific function prefix */ |
33 | #define TYPE hash_ip | 33 | #define TYPE hash_ip |
34 | 34 | ||
35 | static bool | 35 | static bool |
36 | hash_ip_same_set(const struct ip_set *a, const struct ip_set *b); | 36 | hash_ip_same_set(const struct ip_set *a, const struct ip_set *b); |
37 | 37 | ||
38 | #define hash_ip4_same_set hash_ip_same_set | 38 | #define hash_ip4_same_set hash_ip_same_set |
39 | #define hash_ip6_same_set hash_ip_same_set | 39 | #define hash_ip6_same_set hash_ip_same_set |
40 | 40 | ||
41 | /* The type variant functions: IPv4 */ | 41 | /* The type variant functions: IPv4 */ |
42 | 42 | ||
43 | /* Member elements without timeout */ | 43 | /* Member elements without timeout */ |
44 | struct hash_ip4_elem { | 44 | struct hash_ip4_elem { |
45 | __be32 ip; | 45 | __be32 ip; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | /* Member elements with timeout support */ | 48 | /* Member elements with timeout support */ |
49 | struct hash_ip4_telem { | 49 | struct hash_ip4_telem { |
50 | __be32 ip; | 50 | __be32 ip; |
51 | unsigned long timeout; | 51 | unsigned long timeout; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static inline bool | 54 | static inline bool |
55 | hash_ip4_data_equal(const struct hash_ip4_elem *ip1, | 55 | hash_ip4_data_equal(const struct hash_ip4_elem *ip1, |
56 | const struct hash_ip4_elem *ip2) | 56 | const struct hash_ip4_elem *ip2, |
57 | u32 *multi) | ||
57 | { | 58 | { |
58 | return ip1->ip == ip2->ip; | 59 | return ip1->ip == ip2->ip; |
59 | } | 60 | } |
60 | 61 | ||
61 | static inline bool | 62 | static inline bool |
62 | hash_ip4_data_isnull(const struct hash_ip4_elem *elem) | 63 | hash_ip4_data_isnull(const struct hash_ip4_elem *elem) |
63 | { | 64 | { |
64 | return elem->ip == 0; | 65 | return elem->ip == 0; |
65 | } | 66 | } |
66 | 67 | ||
67 | static inline void | 68 | static inline void |
68 | hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src) | 69 | hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src) |
69 | { | 70 | { |
70 | dst->ip = src->ip; | 71 | dst->ip = src->ip; |
71 | } | 72 | } |
72 | 73 | ||
73 | /* Zero valued IP addresses cannot be stored */ | 74 | /* Zero valued IP addresses cannot be stored */ |
74 | static inline void | 75 | static inline void |
75 | hash_ip4_data_zero_out(struct hash_ip4_elem *elem) | 76 | hash_ip4_data_zero_out(struct hash_ip4_elem *elem) |
76 | { | 77 | { |
77 | elem->ip = 0; | 78 | elem->ip = 0; |
78 | } | 79 | } |
79 | 80 | ||
80 | static inline bool | 81 | static inline bool |
81 | hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data) | 82 | hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data) |
82 | { | 83 | { |
83 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); | 84 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); |
84 | return 0; | 85 | return 0; |
85 | 86 | ||
86 | nla_put_failure: | 87 | nla_put_failure: |
87 | return 1; | 88 | return 1; |
88 | } | 89 | } |
89 | 90 | ||
90 | static bool | 91 | static bool |
91 | hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data) | 92 | hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data) |
92 | { | 93 | { |
93 | const struct hash_ip4_telem *tdata = | 94 | const struct hash_ip4_telem *tdata = |
94 | (const struct hash_ip4_telem *)data; | 95 | (const struct hash_ip4_telem *)data; |
95 | 96 | ||
96 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); | 97 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); |
97 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 98 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
98 | htonl(ip_set_timeout_get(tdata->timeout))); | 99 | htonl(ip_set_timeout_get(tdata->timeout))); |
99 | 100 | ||
100 | return 0; | 101 | return 0; |
101 | 102 | ||
102 | nla_put_failure: | 103 | nla_put_failure: |
103 | return 1; | 104 | return 1; |
104 | } | 105 | } |
105 | 106 | ||
106 | #define IP_SET_HASH_WITH_NETMASK | 107 | #define IP_SET_HASH_WITH_NETMASK |
107 | #define PF 4 | 108 | #define PF 4 |
108 | #define HOST_MASK 32 | 109 | #define HOST_MASK 32 |
109 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 110 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
110 | 111 | ||
111 | static inline void | 112 | static inline void |
112 | hash_ip4_data_next(struct ip_set_hash *h, const struct hash_ip4_elem *d) | 113 | hash_ip4_data_next(struct ip_set_hash *h, const struct hash_ip4_elem *d) |
113 | { | 114 | { |
114 | h->next.ip = ntohl(d->ip); | 115 | h->next.ip = ntohl(d->ip); |
115 | } | 116 | } |
116 | 117 | ||
117 | static int | 118 | static int |
118 | hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb, | 119 | hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb, |
119 | const struct xt_action_param *par, | 120 | const struct xt_action_param *par, |
120 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 121 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
121 | { | 122 | { |
122 | const struct ip_set_hash *h = set->data; | 123 | const struct ip_set_hash *h = set->data; |
123 | ipset_adtfn adtfn = set->variant->adt[adt]; | 124 | ipset_adtfn adtfn = set->variant->adt[adt]; |
124 | __be32 ip; | 125 | __be32 ip; |
125 | 126 | ||
126 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip); | 127 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip); |
127 | ip &= ip_set_netmask(h->netmask); | 128 | ip &= ip_set_netmask(h->netmask); |
128 | if (ip == 0) | 129 | if (ip == 0) |
129 | return -EINVAL; | 130 | return -EINVAL; |
130 | 131 | ||
131 | return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags); | 132 | return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags); |
132 | } | 133 | } |
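
Because the packet address is masked with the set's netmask before lookup, a hash:ip set created with netmask 24 effectively stores one entry per /24 network. A tiny standalone illustration of that masking step, outside the ipset framework:

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int ip = ntohl(inet_addr("192.168.10.77"));
            unsigned int mask = 0xffffffffU << (32 - 24);   /* /24 network mask */

            /* prints 192.168.10.0: all hosts in the /24 collapse to one entry */
            printf("%u.%u.%u.%u\n",
                   (ip & mask) >> 24 & 0xff, (ip & mask) >> 16 & 0xff,
                   (ip & mask) >> 8 & 0xff, (ip & mask) & 0xff);
            return 0;
    }
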
133 | 134 | ||
134 | static int | 135 | static int |
135 | hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], | 136 | hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], |
136 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 137 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
137 | { | 138 | { |
138 | const struct ip_set_hash *h = set->data; | 139 | const struct ip_set_hash *h = set->data; |
139 | ipset_adtfn adtfn = set->variant->adt[adt]; | 140 | ipset_adtfn adtfn = set->variant->adt[adt]; |
140 | u32 ip, ip_to, hosts, timeout = h->timeout; | 141 | u32 ip, ip_to, hosts, timeout = h->timeout; |
141 | __be32 nip; | 142 | __be32 nip; |
142 | int ret = 0; | 143 | int ret = 0; |
143 | 144 | ||
144 | if (unlikely(!tb[IPSET_ATTR_IP] || | 145 | if (unlikely(!tb[IPSET_ATTR_IP] || |
145 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 146 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
146 | return -IPSET_ERR_PROTOCOL; | 147 | return -IPSET_ERR_PROTOCOL; |
147 | 148 | ||
148 | if (tb[IPSET_ATTR_LINENO]) | 149 | if (tb[IPSET_ATTR_LINENO]) |
149 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 150 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
150 | 151 | ||
151 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); | 152 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); |
152 | if (ret) | 153 | if (ret) |
153 | return ret; | 154 | return ret; |
154 | 155 | ||
155 | ip &= ip_set_hostmask(h->netmask); | 156 | ip &= ip_set_hostmask(h->netmask); |
156 | 157 | ||
157 | if (tb[IPSET_ATTR_TIMEOUT]) { | 158 | if (tb[IPSET_ATTR_TIMEOUT]) { |
158 | if (!with_timeout(h->timeout)) | 159 | if (!with_timeout(h->timeout)) |
159 | return -IPSET_ERR_TIMEOUT; | 160 | return -IPSET_ERR_TIMEOUT; |
160 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 161 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
161 | } | 162 | } |
162 | 163 | ||
163 | if (adt == IPSET_TEST) { | 164 | if (adt == IPSET_TEST) { |
164 | nip = htonl(ip); | 165 | nip = htonl(ip); |
165 | if (nip == 0) | 166 | if (nip == 0) |
166 | return -IPSET_ERR_HASH_ELEM; | 167 | return -IPSET_ERR_HASH_ELEM; |
167 | return adtfn(set, &nip, timeout, flags); | 168 | return adtfn(set, &nip, timeout, flags); |
168 | } | 169 | } |
169 | 170 | ||
170 | if (tb[IPSET_ATTR_IP_TO]) { | 171 | if (tb[IPSET_ATTR_IP_TO]) { |
171 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 172 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
172 | if (ret) | 173 | if (ret) |
173 | return ret; | 174 | return ret; |
174 | if (ip > ip_to) | 175 | if (ip > ip_to) |
175 | swap(ip, ip_to); | 176 | swap(ip, ip_to); |
176 | } else if (tb[IPSET_ATTR_CIDR]) { | 177 | } else if (tb[IPSET_ATTR_CIDR]) { |
177 | u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 178 | u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
178 | 179 | ||
179 | if (cidr > 32) | 180 | if (cidr > 32) |
180 | return -IPSET_ERR_INVALID_CIDR; | 181 | return -IPSET_ERR_INVALID_CIDR; |
181 | ip_set_mask_from_to(ip, ip_to, cidr); | 182 | ip_set_mask_from_to(ip, ip_to, cidr); |
182 | } else | 183 | } else |
183 | ip_to = ip; | 184 | ip_to = ip; |
184 | 185 | ||
185 | hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); | 186 | hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); |
186 | 187 | ||
187 | if (retried) | 188 | if (retried) |
188 | ip = h->next.ip; | 189 | ip = h->next.ip; |
189 | for (; !before(ip_to, ip); ip += hosts) { | 190 | for (; !before(ip_to, ip); ip += hosts) { |
190 | nip = htonl(ip); | 191 | nip = htonl(ip); |
191 | if (nip == 0) | 192 | if (nip == 0) |
192 | return -IPSET_ERR_HASH_ELEM; | 193 | return -IPSET_ERR_HASH_ELEM; |
193 | ret = adtfn(set, &nip, timeout, flags); | 194 | ret = adtfn(set, &nip, timeout, flags); |
194 | 195 | ||
195 | if (ret && !ip_set_eexist(ret, flags)) | 196 | if (ret && !ip_set_eexist(ret, flags)) |
196 | return ret; | 197 | return ret; |
197 | else | 198 | else |
198 | ret = 0; | 199 | ret = 0; |
199 | } | 200 | } |
200 | return ret; | 201 | return ret; |
201 | } | 202 | } |
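
In the range loop above the step size depends on the set's netmask: with the default /32 every address is added individually, while a smaller netmask skips ahead one whole network at a time. A quick standalone check of the step expression, assuming only that the netmask is between 1 and 32:

    #include <stdio.h>

    int main(void)
    {
            unsigned int netmask;

            for (netmask = 32; netmask >= 24; netmask -= 8) {
                    unsigned int hosts = netmask == 32 ? 1 : 2 << (32 - netmask - 1);

                    printf("netmask %u -> step %u addresses\n", netmask, hosts);
            }
            /* prints: netmask 32 -> step 1, netmask 24 -> step 256 */
            return 0;
    }
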
202 | 203 | ||
203 | static bool | 204 | static bool |
204 | hash_ip_same_set(const struct ip_set *a, const struct ip_set *b) | 205 | hash_ip_same_set(const struct ip_set *a, const struct ip_set *b) |
205 | { | 206 | { |
206 | const struct ip_set_hash *x = a->data; | 207 | const struct ip_set_hash *x = a->data; |
207 | const struct ip_set_hash *y = b->data; | 208 | const struct ip_set_hash *y = b->data; |
208 | 209 | ||
209 | /* Resizing changes htable_bits, so we ignore it */ | 210 | /* Resizing changes htable_bits, so we ignore it */ |
210 | return x->maxelem == y->maxelem && | 211 | return x->maxelem == y->maxelem && |
211 | x->timeout == y->timeout && | 212 | x->timeout == y->timeout && |
212 | x->netmask == y->netmask; | 213 | x->netmask == y->netmask; |
213 | } | 214 | } |
214 | 215 | ||
215 | /* The type variant functions: IPv6 */ | 216 | /* The type variant functions: IPv6 */ |
216 | 217 | ||
217 | struct hash_ip6_elem { | 218 | struct hash_ip6_elem { |
218 | union nf_inet_addr ip; | 219 | union nf_inet_addr ip; |
219 | }; | 220 | }; |
220 | 221 | ||
221 | struct hash_ip6_telem { | 222 | struct hash_ip6_telem { |
222 | union nf_inet_addr ip; | 223 | union nf_inet_addr ip; |
223 | unsigned long timeout; | 224 | unsigned long timeout; |
224 | }; | 225 | }; |
225 | 226 | ||
226 | static inline bool | 227 | static inline bool |
227 | hash_ip6_data_equal(const struct hash_ip6_elem *ip1, | 228 | hash_ip6_data_equal(const struct hash_ip6_elem *ip1, |
228 | const struct hash_ip6_elem *ip2) | 229 | const struct hash_ip6_elem *ip2, |
230 | u32 *multi) | ||
229 | { | 231 | { |
230 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0; | 232 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0; |
231 | } | 233 | } |
232 | 234 | ||
233 | static inline bool | 235 | static inline bool |
234 | hash_ip6_data_isnull(const struct hash_ip6_elem *elem) | 236 | hash_ip6_data_isnull(const struct hash_ip6_elem *elem) |
235 | { | 237 | { |
236 | return ipv6_addr_any(&elem->ip.in6); | 238 | return ipv6_addr_any(&elem->ip.in6); |
237 | } | 239 | } |
238 | 240 | ||
239 | static inline void | 241 | static inline void |
240 | hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src) | 242 | hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src) |
241 | { | 243 | { |
242 | ipv6_addr_copy(&dst->ip.in6, &src->ip.in6); | 244 | ipv6_addr_copy(&dst->ip.in6, &src->ip.in6); |
243 | } | 245 | } |
244 | 246 | ||
245 | static inline void | 247 | static inline void |
246 | hash_ip6_data_zero_out(struct hash_ip6_elem *elem) | 248 | hash_ip6_data_zero_out(struct hash_ip6_elem *elem) |
247 | { | 249 | { |
248 | ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0); | 250 | ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0); |
249 | } | 251 | } |
250 | 252 | ||
251 | static inline void | 253 | static inline void |
252 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) | 254 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) |
253 | { | 255 | { |
254 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; | 256 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; |
255 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; | 257 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; |
256 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; | 258 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; |
257 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; | 259 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; |
258 | } | 260 | } |
259 | 261 | ||
260 | static bool | 262 | static bool |
261 | hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data) | 263 | hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data) |
262 | { | 264 | { |
263 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); | 265 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); |
264 | return 0; | 266 | return 0; |
265 | 267 | ||
266 | nla_put_failure: | 268 | nla_put_failure: |
267 | return 1; | 269 | return 1; |
268 | } | 270 | } |
269 | 271 | ||
270 | static bool | 272 | static bool |
271 | hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data) | 273 | hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data) |
272 | { | 274 | { |
273 | const struct hash_ip6_telem *e = | 275 | const struct hash_ip6_telem *e = |
274 | (const struct hash_ip6_telem *)data; | 276 | (const struct hash_ip6_telem *)data; |
275 | 277 | ||
276 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); | 278 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); |
277 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 279 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
278 | htonl(ip_set_timeout_get(e->timeout))); | 280 | htonl(ip_set_timeout_get(e->timeout))); |
279 | return 0; | 281 | return 0; |
280 | 282 | ||
281 | nla_put_failure: | 283 | nla_put_failure: |
282 | return 1; | 284 | return 1; |
283 | } | 285 | } |
284 | 286 | ||
285 | #undef PF | 287 | #undef PF |
286 | #undef HOST_MASK | 288 | #undef HOST_MASK |
287 | 289 | ||
288 | #define PF 6 | 290 | #define PF 6 |
289 | #define HOST_MASK 128 | 291 | #define HOST_MASK 128 |
290 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 292 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
291 | 293 | ||
292 | static inline void | 294 | static inline void |
293 | hash_ip6_data_next(struct ip_set_hash *h, const struct hash_ip6_elem *d) | 295 | hash_ip6_data_next(struct ip_set_hash *h, const struct hash_ip6_elem *d) |
294 | { | 296 | { |
295 | } | 297 | } |
296 | 298 | ||
297 | static int | 299 | static int |
298 | hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb, | 300 | hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb, |
299 | const struct xt_action_param *par, | 301 | const struct xt_action_param *par, |
300 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 302 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
301 | { | 303 | { |
302 | const struct ip_set_hash *h = set->data; | 304 | const struct ip_set_hash *h = set->data; |
303 | ipset_adtfn adtfn = set->variant->adt[adt]; | 305 | ipset_adtfn adtfn = set->variant->adt[adt]; |
304 | union nf_inet_addr ip; | 306 | union nf_inet_addr ip; |
305 | 307 | ||
306 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip.in6); | 308 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip.in6); |
307 | ip6_netmask(&ip, h->netmask); | 309 | ip6_netmask(&ip, h->netmask); |
308 | if (ipv6_addr_any(&ip.in6)) | 310 | if (ipv6_addr_any(&ip.in6)) |
309 | return -EINVAL; | 311 | return -EINVAL; |
310 | 312 | ||
311 | return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags); | 313 | return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags); |
312 | } | 314 | } |
313 | 315 | ||
314 | static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = { | 316 | static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = { |
315 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, | 317 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, |
316 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 318 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
317 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, | 319 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, |
318 | }; | 320 | }; |
319 | 321 | ||
320 | static int | 322 | static int |
321 | hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], | 323 | hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], |
322 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 324 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
323 | { | 325 | { |
324 | const struct ip_set_hash *h = set->data; | 326 | const struct ip_set_hash *h = set->data; |
325 | ipset_adtfn adtfn = set->variant->adt[adt]; | 327 | ipset_adtfn adtfn = set->variant->adt[adt]; |
326 | union nf_inet_addr ip; | 328 | union nf_inet_addr ip; |
327 | u32 timeout = h->timeout; | 329 | u32 timeout = h->timeout; |
328 | int ret; | 330 | int ret; |
329 | 331 | ||
330 | if (unlikely(!tb[IPSET_ATTR_IP] || | 332 | if (unlikely(!tb[IPSET_ATTR_IP] || |
331 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || | 333 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || |
332 | tb[IPSET_ATTR_IP_TO] || | 334 | tb[IPSET_ATTR_IP_TO] || |
333 | tb[IPSET_ATTR_CIDR])) | 335 | tb[IPSET_ATTR_CIDR])) |
334 | return -IPSET_ERR_PROTOCOL; | 336 | return -IPSET_ERR_PROTOCOL; |
335 | 337 | ||
336 | if (tb[IPSET_ATTR_LINENO]) | 338 | if (tb[IPSET_ATTR_LINENO]) |
337 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 339 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
338 | 340 | ||
339 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip); | 341 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip); |
340 | if (ret) | 342 | if (ret) |
341 | return ret; | 343 | return ret; |
342 | 344 | ||
343 | ip6_netmask(&ip, h->netmask); | 345 | ip6_netmask(&ip, h->netmask); |
344 | if (ipv6_addr_any(&ip.in6)) | 346 | if (ipv6_addr_any(&ip.in6)) |
345 | return -IPSET_ERR_HASH_ELEM; | 347 | return -IPSET_ERR_HASH_ELEM; |
346 | 348 | ||
347 | if (tb[IPSET_ATTR_TIMEOUT]) { | 349 | if (tb[IPSET_ATTR_TIMEOUT]) { |
348 | if (!with_timeout(h->timeout)) | 350 | if (!with_timeout(h->timeout)) |
349 | return -IPSET_ERR_TIMEOUT; | 351 | return -IPSET_ERR_TIMEOUT; |
350 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 352 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
351 | } | 353 | } |
352 | 354 | ||
353 | ret = adtfn(set, &ip, timeout, flags); | 355 | ret = adtfn(set, &ip, timeout, flags); |
354 | 356 | ||
355 | return ip_set_eexist(ret, flags) ? 0 : ret; | 357 | return ip_set_eexist(ret, flags) ? 0 : ret; |
356 | } | 358 | } |
357 | 359 | ||
358 | /* Create hash:ip type of sets */ | 360 | /* Create hash:ip type of sets */ |
359 | 361 | ||
360 | static int | 362 | static int |
361 | hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) | 363 | hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) |
362 | { | 364 | { |
363 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; | 365 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; |
364 | u8 netmask, hbits; | 366 | u8 netmask, hbits; |
365 | struct ip_set_hash *h; | 367 | struct ip_set_hash *h; |
366 | 368 | ||
367 | if (!(set->family == AF_INET || set->family == AF_INET6)) | 369 | if (!(set->family == AF_INET || set->family == AF_INET6)) |
368 | return -IPSET_ERR_INVALID_FAMILY; | 370 | return -IPSET_ERR_INVALID_FAMILY; |
369 | netmask = set->family == AF_INET ? 32 : 128; | 371 | netmask = set->family == AF_INET ? 32 : 128; |
370 | pr_debug("Create set %s with family %s\n", | 372 | pr_debug("Create set %s with family %s\n", |
371 | set->name, set->family == AF_INET ? "inet" : "inet6"); | 373 | set->name, set->family == AF_INET ? "inet" : "inet6"); |
372 | 374 | ||
373 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || | 375 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || |
374 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || | 376 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || |
375 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 377 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
376 | return -IPSET_ERR_PROTOCOL; | 378 | return -IPSET_ERR_PROTOCOL; |
377 | 379 | ||
378 | if (tb[IPSET_ATTR_HASHSIZE]) { | 380 | if (tb[IPSET_ATTR_HASHSIZE]) { |
379 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); | 381 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); |
380 | if (hashsize < IPSET_MIMINAL_HASHSIZE) | 382 | if (hashsize < IPSET_MIMINAL_HASHSIZE) |
381 | hashsize = IPSET_MIMINAL_HASHSIZE; | 383 | hashsize = IPSET_MIMINAL_HASHSIZE; |
382 | } | 384 | } |
383 | 385 | ||
384 | if (tb[IPSET_ATTR_MAXELEM]) | 386 | if (tb[IPSET_ATTR_MAXELEM]) |
385 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); | 387 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); |
386 | 388 | ||
387 | if (tb[IPSET_ATTR_NETMASK]) { | 389 | if (tb[IPSET_ATTR_NETMASK]) { |
388 | netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]); | 390 | netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]); |
389 | 391 | ||
390 | if ((set->family == AF_INET && netmask > 32) || | 392 | if ((set->family == AF_INET && netmask > 32) || |
391 | (set->family == AF_INET6 && netmask > 128) || | 393 | (set->family == AF_INET6 && netmask > 128) || |
392 | netmask == 0) | 394 | netmask == 0) |
393 | return -IPSET_ERR_INVALID_NETMASK; | 395 | return -IPSET_ERR_INVALID_NETMASK; |
394 | } | 396 | } |
395 | 397 | ||
396 | h = kzalloc(sizeof(*h), GFP_KERNEL); | 398 | h = kzalloc(sizeof(*h), GFP_KERNEL); |
397 | if (!h) | 399 | if (!h) |
398 | return -ENOMEM; | 400 | return -ENOMEM; |
399 | 401 | ||
400 | h->maxelem = maxelem; | 402 | h->maxelem = maxelem; |
401 | h->netmask = netmask; | 403 | h->netmask = netmask; |
402 | get_random_bytes(&h->initval, sizeof(h->initval)); | 404 | get_random_bytes(&h->initval, sizeof(h->initval)); |
403 | h->timeout = IPSET_NO_TIMEOUT; | 405 | h->timeout = IPSET_NO_TIMEOUT; |
404 | 406 | ||
405 | hbits = htable_bits(hashsize); | 407 | hbits = htable_bits(hashsize); |
406 | h->table = ip_set_alloc( | 408 | h->table = ip_set_alloc( |
407 | sizeof(struct htable) | 409 | sizeof(struct htable) |
408 | + jhash_size(hbits) * sizeof(struct hbucket)); | 410 | + jhash_size(hbits) * sizeof(struct hbucket)); |
409 | if (!h->table) { | 411 | if (!h->table) { |
410 | kfree(h); | 412 | kfree(h); |
411 | return -ENOMEM; | 413 | return -ENOMEM; |
412 | } | 414 | } |
413 | h->table->htable_bits = hbits; | 415 | h->table->htable_bits = hbits; |
414 | 416 | ||
415 | set->data = h; | 417 | set->data = h; |
416 | 418 | ||
417 | if (tb[IPSET_ATTR_TIMEOUT]) { | 419 | if (tb[IPSET_ATTR_TIMEOUT]) { |
418 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 420 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
419 | 421 | ||
420 | set->variant = set->family == AF_INET | 422 | set->variant = set->family == AF_INET |
421 | ? &hash_ip4_tvariant : &hash_ip6_tvariant; | 423 | ? &hash_ip4_tvariant : &hash_ip6_tvariant; |
422 | 424 | ||
423 | if (set->family == AF_INET) | 425 | if (set->family == AF_INET) |
424 | hash_ip4_gc_init(set); | 426 | hash_ip4_gc_init(set); |
425 | else | 427 | else |
426 | hash_ip6_gc_init(set); | 428 | hash_ip6_gc_init(set); |
427 | } else { | 429 | } else { |
428 | set->variant = set->family == AF_INET | 430 | set->variant = set->family == AF_INET |
429 | ? &hash_ip4_variant : &hash_ip6_variant; | 431 | ? &hash_ip4_variant : &hash_ip6_variant; |
430 | } | 432 | } |
431 | 433 | ||
432 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", | 434 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", |
433 | set->name, jhash_size(h->table->htable_bits), | 435 | set->name, jhash_size(h->table->htable_bits), |
434 | h->table->htable_bits, h->maxelem, set->data, h->table); | 436 | h->table->htable_bits, h->maxelem, set->data, h->table); |
435 | 437 | ||
436 | return 0; | 438 | return 0; |
437 | } | 439 | } |
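
The table is sized from the requested hashsize via htable_bits()/jhash_size(), both defined in ip_set_ahash.h rather than here. Assuming htable_bits() picks the smallest number of bits whose power of two covers hashsize, the allocation works out as in this sketch (the stand-in helper and the example value are illustrative only):

    #include <stdio.h>

    /* Illustrative stand-in: smallest bits so that 2^bits >= hashsize. */
    static unsigned int table_bits(unsigned int hashsize)
    {
            unsigned int bits = 0;

            while ((1U << bits) < hashsize)
                    bits++;
            return bits;
    }

    int main(void)
    {
            unsigned int hashsize = 1024;   /* example; IPSET_DEFAULT_HASHSIZE lives in ip_set_ahash.h */
            unsigned int bits = table_bits(hashsize);

            printf("hashsize %u -> %u bits -> %u buckets\n",
                   hashsize, bits, 1U << bits);   /* 1024 -> 10 bits -> 1024 buckets */
            return 0;
    }
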
438 | 440 | ||
439 | static struct ip_set_type hash_ip_type __read_mostly = { | 441 | static struct ip_set_type hash_ip_type __read_mostly = { |
440 | .name = "hash:ip", | 442 | .name = "hash:ip", |
441 | .protocol = IPSET_PROTOCOL, | 443 | .protocol = IPSET_PROTOCOL, |
442 | .features = IPSET_TYPE_IP, | 444 | .features = IPSET_TYPE_IP, |
443 | .dimension = IPSET_DIM_ONE, | 445 | .dimension = IPSET_DIM_ONE, |
444 | .family = AF_UNSPEC, | 446 | .family = AF_UNSPEC, |
445 | .revision_min = 0, | 447 | .revision_min = 0, |
446 | .revision_max = 0, | 448 | .revision_max = 0, |
447 | .create = hash_ip_create, | 449 | .create = hash_ip_create, |
448 | .create_policy = { | 450 | .create_policy = { |
449 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, | 451 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, |
450 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, | 452 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, |
451 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, | 453 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, |
452 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, | 454 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, |
453 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 455 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
454 | [IPSET_ATTR_NETMASK] = { .type = NLA_U8 }, | 456 | [IPSET_ATTR_NETMASK] = { .type = NLA_U8 }, |
455 | }, | 457 | }, |
456 | .adt_policy = { | 458 | .adt_policy = { |
457 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, | 459 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, |
458 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, | 460 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, |
459 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, | 461 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, |
460 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 462 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
461 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, | 463 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, |
462 | }, | 464 | }, |
463 | .me = THIS_MODULE, | 465 | .me = THIS_MODULE, |
464 | }; | 466 | }; |
465 | 467 | ||
466 | static int __init | 468 | static int __init |
467 | hash_ip_init(void) | 469 | hash_ip_init(void) |
468 | { | 470 | { |
469 | return ip_set_type_register(&hash_ip_type); | 471 | return ip_set_type_register(&hash_ip_type); |
470 | } | 472 | } |
471 | 473 | ||
472 | static void __exit | 474 | static void __exit |
473 | hash_ip_fini(void) | 475 | hash_ip_fini(void) |
474 | { | 476 | { |
475 | ip_set_type_unregister(&hash_ip_type); | 477 | ip_set_type_unregister(&hash_ip_type); |
476 | } | 478 | } |
477 | 479 | ||
478 | module_init(hash_ip_init); | 480 | module_init(hash_ip_init); |
479 | module_exit(hash_ip_fini); | 481 | module_exit(hash_ip_fini); |
480 | 482 |
net/netfilter/ipset/ip_set_hash_ipport.c
1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 as | 4 | * it under the terms of the GNU General Public License version 2 as |
5 | * published by the Free Software Foundation. | 5 | * published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | /* Kernel module implementing an IP set type: the hash:ip,port type */ | 8 | /* Kernel module implementing an IP set type: the hash:ip,port type */ |
9 | 9 | ||
10 | #include <linux/jhash.h> | 10 | #include <linux/jhash.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <net/ip.h> | 16 | #include <net/ip.h> |
17 | #include <net/ipv6.h> | 17 | #include <net/ipv6.h> |
18 | #include <net/netlink.h> | 18 | #include <net/netlink.h> |
19 | #include <net/tcp.h> | 19 | #include <net/tcp.h> |
20 | 20 | ||
21 | #include <linux/netfilter.h> | 21 | #include <linux/netfilter.h> |
22 | #include <linux/netfilter/ipset/pfxlen.h> | 22 | #include <linux/netfilter/ipset/pfxlen.h> |
23 | #include <linux/netfilter/ipset/ip_set.h> | 23 | #include <linux/netfilter/ipset/ip_set.h> |
24 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 24 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
25 | #include <linux/netfilter/ipset/ip_set_getport.h> | 25 | #include <linux/netfilter/ipset/ip_set_getport.h> |
26 | #include <linux/netfilter/ipset/ip_set_hash.h> | 26 | #include <linux/netfilter/ipset/ip_set_hash.h> |
27 | 27 | ||
28 | MODULE_LICENSE("GPL"); | 28 | MODULE_LICENSE("GPL"); |
29 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); | 29 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); |
30 | MODULE_DESCRIPTION("hash:ip,port type of IP sets"); | 30 | MODULE_DESCRIPTION("hash:ip,port type of IP sets"); |
31 | MODULE_ALIAS("ip_set_hash:ip,port"); | 31 | MODULE_ALIAS("ip_set_hash:ip,port"); |
32 | 32 | ||
33 | /* Type specific function prefix */ | 33 | /* Type specific function prefix */ |
34 | #define TYPE hash_ipport | 34 | #define TYPE hash_ipport |
35 | 35 | ||
36 | static bool | 36 | static bool |
37 | hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b); | 37 | hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b); |
38 | 38 | ||
39 | #define hash_ipport4_same_set hash_ipport_same_set | 39 | #define hash_ipport4_same_set hash_ipport_same_set |
40 | #define hash_ipport6_same_set hash_ipport_same_set | 40 | #define hash_ipport6_same_set hash_ipport_same_set |
41 | 41 | ||
42 | /* The type variant functions: IPv4 */ | 42 | /* The type variant functions: IPv4 */ |
43 | 43 | ||
44 | /* Member elements without timeout */ | 44 | /* Member elements without timeout */ |
45 | struct hash_ipport4_elem { | 45 | struct hash_ipport4_elem { |
46 | __be32 ip; | 46 | __be32 ip; |
47 | __be16 port; | 47 | __be16 port; |
48 | u8 proto; | 48 | u8 proto; |
49 | u8 padding; | 49 | u8 padding; |
50 | }; | 50 | }; |
51 | 51 | ||
52 | /* Member elements with timeout support */ | 52 | /* Member elements with timeout support */ |
53 | struct hash_ipport4_telem { | 53 | struct hash_ipport4_telem { |
54 | __be32 ip; | 54 | __be32 ip; |
55 | __be16 port; | 55 | __be16 port; |
56 | u8 proto; | 56 | u8 proto; |
57 | u8 padding; | 57 | u8 padding; |
58 | unsigned long timeout; | 58 | unsigned long timeout; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static inline bool | 61 | static inline bool |
62 | hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1, | 62 | hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1, |
63 | const struct hash_ipport4_elem *ip2) | 63 | const struct hash_ipport4_elem *ip2, |
64 | u32 *multi) | ||
64 | { | 65 | { |
65 | return ip1->ip == ip2->ip && | 66 | return ip1->ip == ip2->ip && |
66 | ip1->port == ip2->port && | 67 | ip1->port == ip2->port && |
67 | ip1->proto == ip2->proto; | 68 | ip1->proto == ip2->proto; |
68 | } | 69 | } |
69 | 70 | ||
70 | static inline bool | 71 | static inline bool |
71 | hash_ipport4_data_isnull(const struct hash_ipport4_elem *elem) | 72 | hash_ipport4_data_isnull(const struct hash_ipport4_elem *elem) |
72 | { | 73 | { |
73 | return elem->proto == 0; | 74 | return elem->proto == 0; |
74 | } | 75 | } |
75 | 76 | ||
76 | static inline void | 77 | static inline void |
77 | hash_ipport4_data_copy(struct hash_ipport4_elem *dst, | 78 | hash_ipport4_data_copy(struct hash_ipport4_elem *dst, |
78 | const struct hash_ipport4_elem *src) | 79 | const struct hash_ipport4_elem *src) |
79 | { | 80 | { |
80 | dst->ip = src->ip; | 81 | dst->ip = src->ip; |
81 | dst->port = src->port; | 82 | dst->port = src->port; |
82 | dst->proto = src->proto; | 83 | dst->proto = src->proto; |
83 | } | 84 | } |
84 | 85 | ||
85 | static inline void | 86 | static inline void |
86 | hash_ipport4_data_zero_out(struct hash_ipport4_elem *elem) | 87 | hash_ipport4_data_zero_out(struct hash_ipport4_elem *elem) |
87 | { | 88 | { |
88 | elem->proto = 0; | 89 | elem->proto = 0; |
89 | } | 90 | } |
90 | 91 | ||
91 | static bool | 92 | static bool |
92 | hash_ipport4_data_list(struct sk_buff *skb, | 93 | hash_ipport4_data_list(struct sk_buff *skb, |
93 | const struct hash_ipport4_elem *data) | 94 | const struct hash_ipport4_elem *data) |
94 | { | 95 | { |
95 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); | 96 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); |
96 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 97 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
97 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 98 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
98 | return 0; | 99 | return 0; |
99 | 100 | ||
100 | nla_put_failure: | 101 | nla_put_failure: |
101 | return 1; | 102 | return 1; |
102 | } | 103 | } |
103 | 104 | ||
104 | static bool | 105 | static bool |
105 | hash_ipport4_data_tlist(struct sk_buff *skb, | 106 | hash_ipport4_data_tlist(struct sk_buff *skb, |
106 | const struct hash_ipport4_elem *data) | 107 | const struct hash_ipport4_elem *data) |
107 | { | 108 | { |
108 | const struct hash_ipport4_telem *tdata = | 109 | const struct hash_ipport4_telem *tdata = |
109 | (const struct hash_ipport4_telem *)data; | 110 | (const struct hash_ipport4_telem *)data; |
110 | 111 | ||
111 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); | 112 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); |
112 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); | 113 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); |
113 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 114 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
114 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 115 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
115 | htonl(ip_set_timeout_get(tdata->timeout))); | 116 | htonl(ip_set_timeout_get(tdata->timeout))); |
116 | 117 | ||
117 | return 0; | 118 | return 0; |
118 | 119 | ||
119 | nla_put_failure: | 120 | nla_put_failure: |
120 | return 1; | 121 | return 1; |
121 | } | 122 | } |
122 | 123 | ||
123 | #define PF 4 | 124 | #define PF 4 |
124 | #define HOST_MASK 32 | 125 | #define HOST_MASK 32 |
125 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 126 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
126 | 127 | ||
127 | static inline void | 128 | static inline void |
128 | hash_ipport4_data_next(struct ip_set_hash *h, | 129 | hash_ipport4_data_next(struct ip_set_hash *h, |
129 | const struct hash_ipport4_elem *d) | 130 | const struct hash_ipport4_elem *d) |
130 | { | 131 | { |
131 | h->next.ip = ntohl(d->ip); | 132 | h->next.ip = ntohl(d->ip); |
132 | h->next.port = ntohs(d->port); | 133 | h->next.port = ntohs(d->port); |
133 | } | 134 | } |
134 | 135 | ||
135 | static int | 136 | static int |
136 | hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb, | 137 | hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb, |
137 | const struct xt_action_param *par, | 138 | const struct xt_action_param *par, |
138 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 139 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
139 | { | 140 | { |
140 | const struct ip_set_hash *h = set->data; | 141 | const struct ip_set_hash *h = set->data; |
141 | ipset_adtfn adtfn = set->variant->adt[adt]; | 142 | ipset_adtfn adtfn = set->variant->adt[adt]; |
142 | struct hash_ipport4_elem data = { }; | 143 | struct hash_ipport4_elem data = { }; |
143 | 144 | ||
144 | if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, | 145 | if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, |
145 | &data.port, &data.proto)) | 146 | &data.port, &data.proto)) |
146 | return -EINVAL; | 147 | return -EINVAL; |
147 | 148 | ||
148 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); | 149 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); |
149 | 150 | ||
150 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 151 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
151 | } | 152 | } |
152 | 153 | ||
153 | static int | 154 | static int |
154 | hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], | 155 | hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], |
155 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 156 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
156 | { | 157 | { |
157 | const struct ip_set_hash *h = set->data; | 158 | const struct ip_set_hash *h = set->data; |
158 | ipset_adtfn adtfn = set->variant->adt[adt]; | 159 | ipset_adtfn adtfn = set->variant->adt[adt]; |
159 | struct hash_ipport4_elem data = { }; | 160 | struct hash_ipport4_elem data = { }; |
160 | u32 ip, ip_to, p = 0, port, port_to; | 161 | u32 ip, ip_to, p = 0, port, port_to; |
161 | u32 timeout = h->timeout; | 162 | u32 timeout = h->timeout; |
162 | bool with_ports = false; | 163 | bool with_ports = false; |
163 | int ret; | 164 | int ret; |
164 | 165 | ||
165 | if (unlikely(!tb[IPSET_ATTR_IP] || | 166 | if (unlikely(!tb[IPSET_ATTR_IP] || |
166 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || | 167 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || |
167 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || | 168 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || |
168 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 169 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
169 | return -IPSET_ERR_PROTOCOL; | 170 | return -IPSET_ERR_PROTOCOL; |
170 | 171 | ||
171 | if (tb[IPSET_ATTR_LINENO]) | 172 | if (tb[IPSET_ATTR_LINENO]) |
172 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 173 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
173 | 174 | ||
174 | ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip); | 175 | ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip); |
175 | if (ret) | 176 | if (ret) |
176 | return ret; | 177 | return ret; |
177 | 178 | ||
178 | if (tb[IPSET_ATTR_PORT]) | 179 | if (tb[IPSET_ATTR_PORT]) |
179 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); | 180 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); |
180 | else | 181 | else |
181 | return -IPSET_ERR_PROTOCOL; | 182 | return -IPSET_ERR_PROTOCOL; |
182 | 183 | ||
183 | if (tb[IPSET_ATTR_PROTO]) { | 184 | if (tb[IPSET_ATTR_PROTO]) { |
184 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); | 185 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); |
185 | with_ports = ip_set_proto_with_ports(data.proto); | 186 | with_ports = ip_set_proto_with_ports(data.proto); |
186 | 187 | ||
187 | if (data.proto == 0) | 188 | if (data.proto == 0) |
188 | return -IPSET_ERR_INVALID_PROTO; | 189 | return -IPSET_ERR_INVALID_PROTO; |
189 | } else | 190 | } else |
190 | return -IPSET_ERR_MISSING_PROTO; | 191 | return -IPSET_ERR_MISSING_PROTO; |
191 | 192 | ||
192 | if (!(with_ports || data.proto == IPPROTO_ICMP)) | 193 | if (!(with_ports || data.proto == IPPROTO_ICMP)) |
193 | data.port = 0; | 194 | data.port = 0; |
194 | 195 | ||
195 | if (tb[IPSET_ATTR_TIMEOUT]) { | 196 | if (tb[IPSET_ATTR_TIMEOUT]) { |
196 | if (!with_timeout(h->timeout)) | 197 | if (!with_timeout(h->timeout)) |
197 | return -IPSET_ERR_TIMEOUT; | 198 | return -IPSET_ERR_TIMEOUT; |
198 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 199 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
199 | } | 200 | } |
200 | 201 | ||
201 | if (adt == IPSET_TEST || | 202 | if (adt == IPSET_TEST || |
202 | !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || | 203 | !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || |
203 | tb[IPSET_ATTR_PORT_TO])) { | 204 | tb[IPSET_ATTR_PORT_TO])) { |
204 | ret = adtfn(set, &data, timeout, flags); | 205 | ret = adtfn(set, &data, timeout, flags); |
205 | return ip_set_eexist(ret, flags) ? 0 : ret; | 206 | return ip_set_eexist(ret, flags) ? 0 : ret; |
206 | } | 207 | } |
207 | 208 | ||
208 | ip = ntohl(data.ip); | 209 | ip = ntohl(data.ip); |
209 | if (tb[IPSET_ATTR_IP_TO]) { | 210 | if (tb[IPSET_ATTR_IP_TO]) { |
210 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 211 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
211 | if (ret) | 212 | if (ret) |
212 | return ret; | 213 | return ret; |
213 | if (ip > ip_to) | 214 | if (ip > ip_to) |
214 | swap(ip, ip_to); | 215 | swap(ip, ip_to); |
215 | } else if (tb[IPSET_ATTR_CIDR]) { | 216 | } else if (tb[IPSET_ATTR_CIDR]) { |
216 | u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 217 | u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
217 | 218 | ||
218 | if (cidr > 32) | 219 | if (cidr > 32) |
219 | return -IPSET_ERR_INVALID_CIDR; | 220 | return -IPSET_ERR_INVALID_CIDR; |
220 | ip_set_mask_from_to(ip, ip_to, cidr); | 221 | ip_set_mask_from_to(ip, ip_to, cidr); |
221 | } else | 222 | } else |
222 | ip_to = ip; | 223 | ip_to = ip; |
223 | 224 | ||
224 | port_to = port = ntohs(data.port); | 225 | port_to = port = ntohs(data.port); |
225 | if (with_ports && tb[IPSET_ATTR_PORT_TO]) { | 226 | if (with_ports && tb[IPSET_ATTR_PORT_TO]) { |
226 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); | 227 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); |
227 | if (port > port_to) | 228 | if (port > port_to) |
228 | swap(port, port_to); | 229 | swap(port, port_to); |
229 | } | 230 | } |
230 | 231 | ||
231 | if (retried) | 232 | if (retried) |
232 | ip = h->next.ip; | 233 | ip = h->next.ip; |
233 | for (; !before(ip_to, ip); ip++) { | 234 | for (; !before(ip_to, ip); ip++) { |
234 | p = retried && ip == h->next.ip ? h->next.port : port; | 235 | p = retried && ip == h->next.ip ? h->next.port : port; |
235 | for (; p <= port_to; p++) { | 236 | for (; p <= port_to; p++) { |
236 | data.ip = htonl(ip); | 237 | data.ip = htonl(ip); |
237 | data.port = htons(p); | 238 | data.port = htons(p); |
238 | ret = adtfn(set, &data, timeout, flags); | 239 | ret = adtfn(set, &data, timeout, flags); |
239 | 240 | ||
240 | if (ret && !ip_set_eexist(ret, flags)) | 241 | if (ret && !ip_set_eexist(ret, flags)) |
241 | return ret; | 242 | return ret; |
242 | else | 243 | else |
243 | ret = 0; | 244 | ret = 0; |
244 | } | 245 | } |
245 | } | 246 | } |
246 | return ret; | 247 | return ret; |
247 | } | 248 | } |
248 | 249 | ||
249 | static bool | 250 | static bool |
250 | hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b) | 251 | hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b) |
251 | { | 252 | { |
252 | const struct ip_set_hash *x = a->data; | 253 | const struct ip_set_hash *x = a->data; |
253 | const struct ip_set_hash *y = b->data; | 254 | const struct ip_set_hash *y = b->data; |
254 | 255 | ||
255 | /* Resizing changes htable_bits, so we ignore it */ | 256 | /* Resizing changes htable_bits, so we ignore it */ |
256 | return x->maxelem == y->maxelem && | 257 | return x->maxelem == y->maxelem && |
257 | x->timeout == y->timeout; | 258 | x->timeout == y->timeout; |
258 | } | 259 | } |
259 | 260 | ||
260 | /* The type variant functions: IPv6 */ | 261 | /* The type variant functions: IPv6 */ |
261 | 262 | ||
262 | struct hash_ipport6_elem { | 263 | struct hash_ipport6_elem { |
263 | union nf_inet_addr ip; | 264 | union nf_inet_addr ip; |
264 | __be16 port; | 265 | __be16 port; |
265 | u8 proto; | 266 | u8 proto; |
266 | u8 padding; | 267 | u8 padding; |
267 | }; | 268 | }; |
268 | 269 | ||
269 | struct hash_ipport6_telem { | 270 | struct hash_ipport6_telem { |
270 | union nf_inet_addr ip; | 271 | union nf_inet_addr ip; |
271 | __be16 port; | 272 | __be16 port; |
272 | u8 proto; | 273 | u8 proto; |
273 | u8 padding; | 274 | u8 padding; |
274 | unsigned long timeout; | 275 | unsigned long timeout; |
275 | }; | 276 | }; |
276 | 277 | ||
277 | static inline bool | 278 | static inline bool |
278 | hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1, | 279 | hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1, |
279 | const struct hash_ipport6_elem *ip2) | 280 | const struct hash_ipport6_elem *ip2, |
281 | u32 *multi) | ||
280 | { | 282 | { |
281 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && | 283 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
282 | ip1->port == ip2->port && | 284 | ip1->port == ip2->port && |
283 | ip1->proto == ip2->proto; | 285 | ip1->proto == ip2->proto; |
284 | } | 286 | } |
285 | 287 | ||
286 | static inline bool | 288 | static inline bool |
287 | hash_ipport6_data_isnull(const struct hash_ipport6_elem *elem) | 289 | hash_ipport6_data_isnull(const struct hash_ipport6_elem *elem) |
288 | { | 290 | { |
289 | return elem->proto == 0; | 291 | return elem->proto == 0; |
290 | } | 292 | } |
291 | 293 | ||
292 | static inline void | 294 | static inline void |
293 | hash_ipport6_data_copy(struct hash_ipport6_elem *dst, | 295 | hash_ipport6_data_copy(struct hash_ipport6_elem *dst, |
294 | const struct hash_ipport6_elem *src) | 296 | const struct hash_ipport6_elem *src) |
295 | { | 297 | { |
296 | memcpy(dst, src, sizeof(*dst)); | 298 | memcpy(dst, src, sizeof(*dst)); |
297 | } | 299 | } |
298 | 300 | ||
299 | static inline void | 301 | static inline void |
300 | hash_ipport6_data_zero_out(struct hash_ipport6_elem *elem) | 302 | hash_ipport6_data_zero_out(struct hash_ipport6_elem *elem) |
301 | { | 303 | { |
302 | elem->proto = 0; | 304 | elem->proto = 0; |
303 | } | 305 | } |
304 | 306 | ||
305 | static bool | 307 | static bool |
306 | hash_ipport6_data_list(struct sk_buff *skb, | 308 | hash_ipport6_data_list(struct sk_buff *skb, |
307 | const struct hash_ipport6_elem *data) | 309 | const struct hash_ipport6_elem *data) |
308 | { | 310 | { |
309 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); | 311 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); |
310 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 312 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
311 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 313 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
312 | return 0; | 314 | return 0; |
313 | 315 | ||
314 | nla_put_failure: | 316 | nla_put_failure: |
315 | return 1; | 317 | return 1; |
316 | } | 318 | } |
317 | 319 | ||
318 | static bool | 320 | static bool |
319 | hash_ipport6_data_tlist(struct sk_buff *skb, | 321 | hash_ipport6_data_tlist(struct sk_buff *skb, |
320 | const struct hash_ipport6_elem *data) | 322 | const struct hash_ipport6_elem *data) |
321 | { | 323 | { |
322 | const struct hash_ipport6_telem *e = | 324 | const struct hash_ipport6_telem *e = |
323 | (const struct hash_ipport6_telem *)data; | 325 | (const struct hash_ipport6_telem *)data; |
324 | 326 | ||
325 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); | 327 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); |
326 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 328 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
327 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 329 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
328 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 330 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
329 | htonl(ip_set_timeout_get(e->timeout))); | 331 | htonl(ip_set_timeout_get(e->timeout))); |
330 | return 0; | 332 | return 0; |
331 | 333 | ||
332 | nla_put_failure: | 334 | nla_put_failure: |
333 | return 1; | 335 | return 1; |
334 | } | 336 | } |
335 | 337 | ||
336 | #undef PF | 338 | #undef PF |
337 | #undef HOST_MASK | 339 | #undef HOST_MASK |
338 | 340 | ||
339 | #define PF 6 | 341 | #define PF 6 |
340 | #define HOST_MASK 128 | 342 | #define HOST_MASK 128 |
341 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 343 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
342 | 344 | ||
343 | static inline void | 345 | static inline void |
344 | hash_ipport6_data_next(struct ip_set_hash *h, | 346 | hash_ipport6_data_next(struct ip_set_hash *h, |
345 | const struct hash_ipport6_elem *d) | 347 | const struct hash_ipport6_elem *d) |
346 | { | 348 | { |
347 | h->next.port = ntohs(d->port); | 349 | h->next.port = ntohs(d->port); |
348 | } | 350 | } |
349 | 351 | ||
350 | static int | 352 | static int |
351 | hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb, | 353 | hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb, |
352 | const struct xt_action_param *par, | 354 | const struct xt_action_param *par, |
353 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 355 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
354 | { | 356 | { |
355 | const struct ip_set_hash *h = set->data; | 357 | const struct ip_set_hash *h = set->data; |
356 | ipset_adtfn adtfn = set->variant->adt[adt]; | 358 | ipset_adtfn adtfn = set->variant->adt[adt]; |
357 | struct hash_ipport6_elem data = { }; | 359 | struct hash_ipport6_elem data = { }; |
358 | 360 | ||
359 | if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, | 361 | if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, |
360 | &data.port, &data.proto)) | 362 | &data.port, &data.proto)) |
361 | return -EINVAL; | 363 | return -EINVAL; |
362 | 364 | ||
363 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); | 365 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); |
364 | 366 | ||
365 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 367 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
366 | } | 368 | } |
367 | 369 | ||
368 | static int | 370 | static int |
369 | hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], | 371 | hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], |
370 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 372 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
371 | { | 373 | { |
372 | const struct ip_set_hash *h = set->data; | 374 | const struct ip_set_hash *h = set->data; |
373 | ipset_adtfn adtfn = set->variant->adt[adt]; | 375 | ipset_adtfn adtfn = set->variant->adt[adt]; |
374 | struct hash_ipport6_elem data = { }; | 376 | struct hash_ipport6_elem data = { }; |
375 | u32 port, port_to; | 377 | u32 port, port_to; |
376 | u32 timeout = h->timeout; | 378 | u32 timeout = h->timeout; |
377 | bool with_ports = false; | 379 | bool with_ports = false; |
378 | int ret; | 380 | int ret; |
379 | 381 | ||
380 | if (unlikely(!tb[IPSET_ATTR_IP] || | 382 | if (unlikely(!tb[IPSET_ATTR_IP] || |
381 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || | 383 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || |
382 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || | 384 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || |
383 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || | 385 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || |
384 | tb[IPSET_ATTR_IP_TO] || | 386 | tb[IPSET_ATTR_IP_TO] || |
385 | tb[IPSET_ATTR_CIDR])) | 387 | tb[IPSET_ATTR_CIDR])) |
386 | return -IPSET_ERR_PROTOCOL; | 388 | return -IPSET_ERR_PROTOCOL; |
387 | 389 | ||
388 | if (tb[IPSET_ATTR_LINENO]) | 390 | if (tb[IPSET_ATTR_LINENO]) |
389 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 391 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
390 | 392 | ||
391 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); | 393 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); |
392 | if (ret) | 394 | if (ret) |
393 | return ret; | 395 | return ret; |
394 | 396 | ||
395 | if (tb[IPSET_ATTR_PORT]) | 397 | if (tb[IPSET_ATTR_PORT]) |
396 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); | 398 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); |
397 | else | 399 | else |
398 | return -IPSET_ERR_PROTOCOL; | 400 | return -IPSET_ERR_PROTOCOL; |
399 | 401 | ||
400 | if (tb[IPSET_ATTR_PROTO]) { | 402 | if (tb[IPSET_ATTR_PROTO]) { |
401 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); | 403 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); |
402 | with_ports = ip_set_proto_with_ports(data.proto); | 404 | with_ports = ip_set_proto_with_ports(data.proto); |
403 | 405 | ||
404 | if (data.proto == 0) | 406 | if (data.proto == 0) |
405 | return -IPSET_ERR_INVALID_PROTO; | 407 | return -IPSET_ERR_INVALID_PROTO; |
406 | } else | 408 | } else |
407 | return -IPSET_ERR_MISSING_PROTO; | 409 | return -IPSET_ERR_MISSING_PROTO; |
408 | 410 | ||
409 | if (!(with_ports || data.proto == IPPROTO_ICMPV6)) | 411 | if (!(with_ports || data.proto == IPPROTO_ICMPV6)) |
410 | data.port = 0; | 412 | data.port = 0; |
411 | 413 | ||
412 | if (tb[IPSET_ATTR_TIMEOUT]) { | 414 | if (tb[IPSET_ATTR_TIMEOUT]) { |
413 | if (!with_timeout(h->timeout)) | 415 | if (!with_timeout(h->timeout)) |
414 | return -IPSET_ERR_TIMEOUT; | 416 | return -IPSET_ERR_TIMEOUT; |
415 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 417 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
416 | } | 418 | } |
417 | 419 | ||
418 | if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { | 420 | if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { |
419 | ret = adtfn(set, &data, timeout, flags); | 421 | ret = adtfn(set, &data, timeout, flags); |
420 | return ip_set_eexist(ret, flags) ? 0 : ret; | 422 | return ip_set_eexist(ret, flags) ? 0 : ret; |
421 | } | 423 | } |
422 | 424 | ||
423 | port = ntohs(data.port); | 425 | port = ntohs(data.port); |
424 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); | 426 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); |
425 | if (port > port_to) | 427 | if (port > port_to) |
426 | swap(port, port_to); | 428 | swap(port, port_to); |
427 | 429 | ||
428 | if (retried) | 430 | if (retried) |
429 | port = h->next.port; | 431 | port = h->next.port; |
430 | for (; port <= port_to; port++) { | 432 | for (; port <= port_to; port++) { |
431 | data.port = htons(port); | 433 | data.port = htons(port); |
432 | ret = adtfn(set, &data, timeout, flags); | 434 | ret = adtfn(set, &data, timeout, flags); |
433 | 435 | ||
434 | if (ret && !ip_set_eexist(ret, flags)) | 436 | if (ret && !ip_set_eexist(ret, flags)) |
435 | return ret; | 437 | return ret; |
436 | else | 438 | else |
437 | ret = 0; | 439 | ret = 0; |
438 | } | 440 | } |
439 | return ret; | 441 | return ret; |
440 | } | 442 | } |
441 | 443 | ||
442 | /* Create hash:ip,port type of sets */ | 444 | /* Create hash:ip,port type of sets */ |
443 | 445 | ||
444 | static int | 446 | static int |
445 | hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags) | 447 | hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags) |
446 | { | 448 | { |
447 | struct ip_set_hash *h; | 449 | struct ip_set_hash *h; |
448 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; | 450 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; |
449 | u8 hbits; | 451 | u8 hbits; |
450 | 452 | ||
451 | if (!(set->family == AF_INET || set->family == AF_INET6)) | 453 | if (!(set->family == AF_INET || set->family == AF_INET6)) |
452 | return -IPSET_ERR_INVALID_FAMILY; | 454 | return -IPSET_ERR_INVALID_FAMILY; |
453 | 455 | ||
454 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || | 456 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || |
455 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || | 457 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || |
456 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 458 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
457 | return -IPSET_ERR_PROTOCOL; | 459 | return -IPSET_ERR_PROTOCOL; |
458 | 460 | ||
459 | if (tb[IPSET_ATTR_HASHSIZE]) { | 461 | if (tb[IPSET_ATTR_HASHSIZE]) { |
460 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); | 462 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); |
461 | if (hashsize < IPSET_MIMINAL_HASHSIZE) | 463 | if (hashsize < IPSET_MIMINAL_HASHSIZE) |
462 | hashsize = IPSET_MIMINAL_HASHSIZE; | 464 | hashsize = IPSET_MIMINAL_HASHSIZE; |
463 | } | 465 | } |
464 | 466 | ||
465 | if (tb[IPSET_ATTR_MAXELEM]) | 467 | if (tb[IPSET_ATTR_MAXELEM]) |
466 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); | 468 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); |
467 | 469 | ||
468 | h = kzalloc(sizeof(*h), GFP_KERNEL); | 470 | h = kzalloc(sizeof(*h), GFP_KERNEL); |
469 | if (!h) | 471 | if (!h) |
470 | return -ENOMEM; | 472 | return -ENOMEM; |
471 | 473 | ||
472 | h->maxelem = maxelem; | 474 | h->maxelem = maxelem; |
473 | get_random_bytes(&h->initval, sizeof(h->initval)); | 475 | get_random_bytes(&h->initval, sizeof(h->initval)); |
474 | h->timeout = IPSET_NO_TIMEOUT; | 476 | h->timeout = IPSET_NO_TIMEOUT; |
475 | 477 | ||
476 | hbits = htable_bits(hashsize); | 478 | hbits = htable_bits(hashsize); |
477 | h->table = ip_set_alloc( | 479 | h->table = ip_set_alloc( |
478 | sizeof(struct htable) | 480 | sizeof(struct htable) |
479 | + jhash_size(hbits) * sizeof(struct hbucket)); | 481 | + jhash_size(hbits) * sizeof(struct hbucket)); |
480 | if (!h->table) { | 482 | if (!h->table) { |
481 | kfree(h); | 483 | kfree(h); |
482 | return -ENOMEM; | 484 | return -ENOMEM; |
483 | } | 485 | } |
484 | h->table->htable_bits = hbits; | 486 | h->table->htable_bits = hbits; |
485 | 487 | ||
486 | set->data = h; | 488 | set->data = h; |
487 | 489 | ||
488 | if (tb[IPSET_ATTR_TIMEOUT]) { | 490 | if (tb[IPSET_ATTR_TIMEOUT]) { |
489 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 491 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
490 | 492 | ||
491 | set->variant = set->family == AF_INET | 493 | set->variant = set->family == AF_INET |
492 | ? &hash_ipport4_tvariant : &hash_ipport6_tvariant; | 494 | ? &hash_ipport4_tvariant : &hash_ipport6_tvariant; |
493 | 495 | ||
494 | if (set->family == AF_INET) | 496 | if (set->family == AF_INET) |
495 | hash_ipport4_gc_init(set); | 497 | hash_ipport4_gc_init(set); |
496 | else | 498 | else |
497 | hash_ipport6_gc_init(set); | 499 | hash_ipport6_gc_init(set); |
498 | } else { | 500 | } else { |
499 | set->variant = set->family == AF_INET | 501 | set->variant = set->family == AF_INET |
500 | ? &hash_ipport4_variant : &hash_ipport6_variant; | 502 | ? &hash_ipport4_variant : &hash_ipport6_variant; |
501 | } | 503 | } |
502 | 504 | ||
503 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", | 505 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", |
504 | set->name, jhash_size(h->table->htable_bits), | 506 | set->name, jhash_size(h->table->htable_bits), |
505 | h->table->htable_bits, h->maxelem, set->data, h->table); | 507 | h->table->htable_bits, h->maxelem, set->data, h->table); |
506 | 508 | ||
507 | return 0; | 509 | return 0; |
508 | } | 510 | } |
509 | 511 | ||
510 | static struct ip_set_type hash_ipport_type __read_mostly = { | 512 | static struct ip_set_type hash_ipport_type __read_mostly = { |
511 | .name = "hash:ip,port", | 513 | .name = "hash:ip,port", |
512 | .protocol = IPSET_PROTOCOL, | 514 | .protocol = IPSET_PROTOCOL, |
513 | .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, | 515 | .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, |
514 | .dimension = IPSET_DIM_TWO, | 516 | .dimension = IPSET_DIM_TWO, |
515 | .family = AF_UNSPEC, | 517 | .family = AF_UNSPEC, |
516 | .revision_min = 0, | 518 | .revision_min = 0, |
517 | .revision_max = 1, /* SCTP and UDPLITE support added */ | 519 | .revision_max = 1, /* SCTP and UDPLITE support added */ |
518 | .create = hash_ipport_create, | 520 | .create = hash_ipport_create, |
519 | .create_policy = { | 521 | .create_policy = { |
520 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, | 522 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, |
521 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, | 523 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, |
522 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, | 524 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, |
523 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, | 525 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, |
524 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, | 526 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, |
525 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 527 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
526 | }, | 528 | }, |
527 | .adt_policy = { | 529 | .adt_policy = { |
528 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, | 530 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, |
529 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, | 531 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, |
530 | [IPSET_ATTR_PORT] = { .type = NLA_U16 }, | 532 | [IPSET_ATTR_PORT] = { .type = NLA_U16 }, |
531 | [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, | 533 | [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, |
532 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, | 534 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, |
533 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, | 535 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, |
534 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 536 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
535 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, | 537 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, |
536 | }, | 538 | }, |
537 | .me = THIS_MODULE, | 539 | .me = THIS_MODULE, |
538 | }; | 540 | }; |
539 | 541 | ||
540 | static int __init | 542 | static int __init |
541 | hash_ipport_init(void) | 543 | hash_ipport_init(void) |
542 | { | 544 | { |
543 | return ip_set_type_register(&hash_ipport_type); | 545 | return ip_set_type_register(&hash_ipport_type); |
544 | } | 546 | } |
545 | 547 | ||
546 | static void __exit | 548 | static void __exit |
547 | hash_ipport_fini(void) | 549 | hash_ipport_fini(void) |
548 | { | 550 | { |
549 | ip_set_type_unregister(&hash_ipport_type); | 551 | ip_set_type_unregister(&hash_ipport_type); |
550 | } | 552 | } |
551 | 553 | ||
552 | module_init(hash_ipport_init); | 554 | module_init(hash_ipport_init); |
553 | module_exit(hash_ipport_fini); | 555 | module_exit(hash_ipport_fini); |
554 | 556 |
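
The add/del path above (hash_ipport4_uadt) expands an IP range plus a port range into individual (ip, port) elements, and when the operation is retried after a table resize it resumes from the cursor that hash_ipport4_data_next saved in h->next. The following is a minimal userspace sketch of that nested loop only, assuming a hypothetical add_elem() in place of the real adtfn() and a plain <= comparison in place of the kernel's before() helper; struct cursor is an invented stand-in for h->next, not a kernel API.

/* Sketch only: mimics the range-expansion loop of hash_ipport4_uadt. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cursor {			/* plays the role of h->next */
	uint32_t ip;
	uint16_t port;
};

static void add_elem(uint32_t ip, uint16_t port)
{
	printf("add %u.%u.%u.%u,%u\n",
	       (unsigned)(ip >> 24), (unsigned)((ip >> 16) & 0xff),
	       (unsigned)((ip >> 8) & 0xff), (unsigned)(ip & 0xff),
	       (unsigned)port);
}

static void add_range(uint32_t ip, uint32_t ip_to,
		      uint16_t port, uint16_t port_to,
		      bool retried, const struct cursor *next)
{
	if (retried)		/* resume where the previous pass stopped */
		ip = next->ip;

	for (; ip <= ip_to; ip++) {
		/* Only the interrupted address restarts at the saved port;
		 * every later address begins at the start of the port range. */
		uint32_t p = (retried && ip == next->ip) ? next->port : port;

		for (; p <= port_to; p++)
			add_elem(ip, (uint16_t)p);
	}
}

int main(void)
{
	struct cursor next = { .ip = 0x0a000001, .port = 80 };

	/* 10.0.0.0-10.0.0.2, ports 79-81, retried at 10.0.0.1,80 */
	add_range(0x0a000000, 0x0a000002, 79, 81, true, &next);
	return 0;
}
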
net/netfilter/ipset/ip_set_hash_ipportip.c
1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 as | 4 | * it under the terms of the GNU General Public License version 2 as |
5 | * published by the Free Software Foundation. | 5 | * published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | /* Kernel module implementing an IP set type: the hash:ip,port,ip type */ | 8 | /* Kernel module implementing an IP set type: the hash:ip,port,ip type */ |
9 | 9 | ||
10 | #include <linux/jhash.h> | 10 | #include <linux/jhash.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <net/ip.h> | 16 | #include <net/ip.h> |
17 | #include <net/ipv6.h> | 17 | #include <net/ipv6.h> |
18 | #include <net/netlink.h> | 18 | #include <net/netlink.h> |
19 | #include <net/tcp.h> | 19 | #include <net/tcp.h> |
20 | 20 | ||
21 | #include <linux/netfilter.h> | 21 | #include <linux/netfilter.h> |
22 | #include <linux/netfilter/ipset/pfxlen.h> | 22 | #include <linux/netfilter/ipset/pfxlen.h> |
23 | #include <linux/netfilter/ipset/ip_set.h> | 23 | #include <linux/netfilter/ipset/ip_set.h> |
24 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 24 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
25 | #include <linux/netfilter/ipset/ip_set_getport.h> | 25 | #include <linux/netfilter/ipset/ip_set_getport.h> |
26 | #include <linux/netfilter/ipset/ip_set_hash.h> | 26 | #include <linux/netfilter/ipset/ip_set_hash.h> |
27 | 27 | ||
28 | MODULE_LICENSE("GPL"); | 28 | MODULE_LICENSE("GPL"); |
29 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); | 29 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); |
30 | MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets"); | 30 | MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets"); |
31 | MODULE_ALIAS("ip_set_hash:ip,port,ip"); | 31 | MODULE_ALIAS("ip_set_hash:ip,port,ip"); |
32 | 32 | ||
33 | /* Type specific function prefix */ | 33 | /* Type specific function prefix */ |
34 | #define TYPE hash_ipportip | 34 | #define TYPE hash_ipportip |
35 | 35 | ||
36 | static bool | 36 | static bool |
37 | hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b); | 37 | hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b); |
38 | 38 | ||
39 | #define hash_ipportip4_same_set hash_ipportip_same_set | 39 | #define hash_ipportip4_same_set hash_ipportip_same_set |
40 | #define hash_ipportip6_same_set hash_ipportip_same_set | 40 | #define hash_ipportip6_same_set hash_ipportip_same_set |
41 | 41 | ||
42 | /* The type variant functions: IPv4 */ | 42 | /* The type variant functions: IPv4 */ |
43 | 43 | ||
44 | /* Member elements without timeout */ | 44 | /* Member elements without timeout */ |
45 | struct hash_ipportip4_elem { | 45 | struct hash_ipportip4_elem { |
46 | __be32 ip; | 46 | __be32 ip; |
47 | __be32 ip2; | 47 | __be32 ip2; |
48 | __be16 port; | 48 | __be16 port; |
49 | u8 proto; | 49 | u8 proto; |
50 | u8 padding; | 50 | u8 padding; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | /* Member elements with timeout support */ | 53 | /* Member elements with timeout support */ |
54 | struct hash_ipportip4_telem { | 54 | struct hash_ipportip4_telem { |
55 | __be32 ip; | 55 | __be32 ip; |
56 | __be32 ip2; | 56 | __be32 ip2; |
57 | __be16 port; | 57 | __be16 port; |
58 | u8 proto; | 58 | u8 proto; |
59 | u8 padding; | 59 | u8 padding; |
60 | unsigned long timeout; | 60 | unsigned long timeout; |
61 | }; | 61 | }; |
62 | 62 | ||
63 | static inline bool | 63 | static inline bool |
64 | hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1, | 64 | hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1, |
65 | const struct hash_ipportip4_elem *ip2) | 65 | const struct hash_ipportip4_elem *ip2, |
66 | u32 *multi) | ||
66 | { | 67 | { |
67 | return ip1->ip == ip2->ip && | 68 | return ip1->ip == ip2->ip && |
68 | ip1->ip2 == ip2->ip2 && | 69 | ip1->ip2 == ip2->ip2 && |
69 | ip1->port == ip2->port && | 70 | ip1->port == ip2->port && |
70 | ip1->proto == ip2->proto; | 71 | ip1->proto == ip2->proto; |
71 | } | 72 | } |
72 | 73 | ||
73 | static inline bool | 74 | static inline bool |
74 | hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem) | 75 | hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem) |
75 | { | 76 | { |
76 | return elem->proto == 0; | 77 | return elem->proto == 0; |
77 | } | 78 | } |
78 | 79 | ||
79 | static inline void | 80 | static inline void |
80 | hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst, | 81 | hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst, |
81 | const struct hash_ipportip4_elem *src) | 82 | const struct hash_ipportip4_elem *src) |
82 | { | 83 | { |
83 | memcpy(dst, src, sizeof(*dst)); | 84 | memcpy(dst, src, sizeof(*dst)); |
84 | } | 85 | } |
85 | 86 | ||
86 | static inline void | 87 | static inline void |
87 | hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem) | 88 | hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem) |
88 | { | 89 | { |
89 | elem->proto = 0; | 90 | elem->proto = 0; |
90 | } | 91 | } |
91 | 92 | ||
92 | static bool | 93 | static bool |
93 | hash_ipportip4_data_list(struct sk_buff *skb, | 94 | hash_ipportip4_data_list(struct sk_buff *skb, |
94 | const struct hash_ipportip4_elem *data) | 95 | const struct hash_ipportip4_elem *data) |
95 | { | 96 | { |
96 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); | 97 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); |
97 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); | 98 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); |
98 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 99 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
99 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 100 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
100 | return 0; | 101 | return 0; |
101 | 102 | ||
102 | nla_put_failure: | 103 | nla_put_failure: |
103 | return 1; | 104 | return 1; |
104 | } | 105 | } |
105 | 106 | ||
106 | static bool | 107 | static bool |
107 | hash_ipportip4_data_tlist(struct sk_buff *skb, | 108 | hash_ipportip4_data_tlist(struct sk_buff *skb, |
108 | const struct hash_ipportip4_elem *data) | 109 | const struct hash_ipportip4_elem *data) |
109 | { | 110 | { |
110 | const struct hash_ipportip4_telem *tdata = | 111 | const struct hash_ipportip4_telem *tdata = |
111 | (const struct hash_ipportip4_telem *)data; | 112 | (const struct hash_ipportip4_telem *)data; |
112 | 113 | ||
113 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); | 114 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); |
114 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); | 115 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); |
115 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); | 116 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); |
116 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 117 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
117 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 118 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
118 | htonl(ip_set_timeout_get(tdata->timeout))); | 119 | htonl(ip_set_timeout_get(tdata->timeout))); |
119 | 120 | ||
120 | return 0; | 121 | return 0; |
121 | 122 | ||
122 | nla_put_failure: | 123 | nla_put_failure: |
123 | return 1; | 124 | return 1; |
124 | } | 125 | } |
125 | 126 | ||
126 | #define PF 4 | 127 | #define PF 4 |
127 | #define HOST_MASK 32 | 128 | #define HOST_MASK 32 |
128 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 129 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
129 | 130 | ||
130 | static inline void | 131 | static inline void |
131 | hash_ipportip4_data_next(struct ip_set_hash *h, | 132 | hash_ipportip4_data_next(struct ip_set_hash *h, |
132 | const struct hash_ipportip4_elem *d) | 133 | const struct hash_ipportip4_elem *d) |
133 | { | 134 | { |
134 | h->next.ip = ntohl(d->ip); | 135 | h->next.ip = ntohl(d->ip); |
135 | h->next.port = ntohs(d->port); | 136 | h->next.port = ntohs(d->port); |
136 | } | 137 | } |
137 | 138 | ||
138 | static int | 139 | static int |
139 | hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb, | 140 | hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb, |
140 | const struct xt_action_param *par, | 141 | const struct xt_action_param *par, |
141 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 142 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
142 | { | 143 | { |
143 | const struct ip_set_hash *h = set->data; | 144 | const struct ip_set_hash *h = set->data; |
144 | ipset_adtfn adtfn = set->variant->adt[adt]; | 145 | ipset_adtfn adtfn = set->variant->adt[adt]; |
145 | struct hash_ipportip4_elem data = { }; | 146 | struct hash_ipportip4_elem data = { }; |
146 | 147 | ||
147 | if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, | 148 | if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, |
148 | &data.port, &data.proto)) | 149 | &data.port, &data.proto)) |
149 | return -EINVAL; | 150 | return -EINVAL; |
150 | 151 | ||
151 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); | 152 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); |
152 | ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2); | 153 | ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2); |
153 | 154 | ||
154 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 155 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
155 | } | 156 | } |
156 | 157 | ||
157 | static int | 158 | static int |
158 | hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], | 159 | hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], |
159 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 160 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
160 | { | 161 | { |
161 | const struct ip_set_hash *h = set->data; | 162 | const struct ip_set_hash *h = set->data; |
162 | ipset_adtfn adtfn = set->variant->adt[adt]; | 163 | ipset_adtfn adtfn = set->variant->adt[adt]; |
163 | struct hash_ipportip4_elem data = { }; | 164 | struct hash_ipportip4_elem data = { }; |
164 | u32 ip, ip_to, p = 0, port, port_to; | 165 | u32 ip, ip_to, p = 0, port, port_to; |
165 | u32 timeout = h->timeout; | 166 | u32 timeout = h->timeout; |
166 | bool with_ports = false; | 167 | bool with_ports = false; |
167 | int ret; | 168 | int ret; |
168 | 169 | ||
169 | if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || | 170 | if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || |
170 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || | 171 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || |
171 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || | 172 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || |
172 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 173 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
173 | return -IPSET_ERR_PROTOCOL; | 174 | return -IPSET_ERR_PROTOCOL; |
174 | 175 | ||
175 | if (tb[IPSET_ATTR_LINENO]) | 176 | if (tb[IPSET_ATTR_LINENO]) |
176 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 177 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
177 | 178 | ||
178 | ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip); | 179 | ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip); |
179 | if (ret) | 180 | if (ret) |
180 | return ret; | 181 | return ret; |
181 | 182 | ||
182 | ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2); | 183 | ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2); |
183 | if (ret) | 184 | if (ret) |
184 | return ret; | 185 | return ret; |
185 | 186 | ||
186 | if (tb[IPSET_ATTR_PORT]) | 187 | if (tb[IPSET_ATTR_PORT]) |
187 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); | 188 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); |
188 | else | 189 | else |
189 | return -IPSET_ERR_PROTOCOL; | 190 | return -IPSET_ERR_PROTOCOL; |
190 | 191 | ||
191 | if (tb[IPSET_ATTR_PROTO]) { | 192 | if (tb[IPSET_ATTR_PROTO]) { |
192 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); | 193 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); |
193 | with_ports = ip_set_proto_with_ports(data.proto); | 194 | with_ports = ip_set_proto_with_ports(data.proto); |
194 | 195 | ||
195 | if (data.proto == 0) | 196 | if (data.proto == 0) |
196 | return -IPSET_ERR_INVALID_PROTO; | 197 | return -IPSET_ERR_INVALID_PROTO; |
197 | } else | 198 | } else |
198 | return -IPSET_ERR_MISSING_PROTO; | 199 | return -IPSET_ERR_MISSING_PROTO; |
199 | 200 | ||
200 | if (!(with_ports || data.proto == IPPROTO_ICMP)) | 201 | if (!(with_ports || data.proto == IPPROTO_ICMP)) |
201 | data.port = 0; | 202 | data.port = 0; |
202 | 203 | ||
203 | if (tb[IPSET_ATTR_TIMEOUT]) { | 204 | if (tb[IPSET_ATTR_TIMEOUT]) { |
204 | if (!with_timeout(h->timeout)) | 205 | if (!with_timeout(h->timeout)) |
205 | return -IPSET_ERR_TIMEOUT; | 206 | return -IPSET_ERR_TIMEOUT; |
206 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 207 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
207 | } | 208 | } |
208 | 209 | ||
209 | if (adt == IPSET_TEST || | 210 | if (adt == IPSET_TEST || |
210 | !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || | 211 | !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || |
211 | tb[IPSET_ATTR_PORT_TO])) { | 212 | tb[IPSET_ATTR_PORT_TO])) { |
212 | ret = adtfn(set, &data, timeout, flags); | 213 | ret = adtfn(set, &data, timeout, flags); |
213 | return ip_set_eexist(ret, flags) ? 0 : ret; | 214 | return ip_set_eexist(ret, flags) ? 0 : ret; |
214 | } | 215 | } |
215 | 216 | ||
216 | ip = ntohl(data.ip); | 217 | ip = ntohl(data.ip); |
217 | if (tb[IPSET_ATTR_IP_TO]) { | 218 | if (tb[IPSET_ATTR_IP_TO]) { |
218 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 219 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
219 | if (ret) | 220 | if (ret) |
220 | return ret; | 221 | return ret; |
221 | if (ip > ip_to) | 222 | if (ip > ip_to) |
222 | swap(ip, ip_to); | 223 | swap(ip, ip_to); |
223 | } else if (tb[IPSET_ATTR_CIDR]) { | 224 | } else if (tb[IPSET_ATTR_CIDR]) { |
224 | u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 225 | u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
225 | 226 | ||
226 | if (cidr > 32) | 227 | if (cidr > 32) |
227 | return -IPSET_ERR_INVALID_CIDR; | 228 | return -IPSET_ERR_INVALID_CIDR; |
228 | ip_set_mask_from_to(ip, ip_to, cidr); | 229 | ip_set_mask_from_to(ip, ip_to, cidr); |
229 | } else | 230 | } else |
230 | ip_to = ip; | 231 | ip_to = ip; |
231 | 232 | ||
232 | port_to = port = ntohs(data.port); | 233 | port_to = port = ntohs(data.port); |
233 | if (with_ports && tb[IPSET_ATTR_PORT_TO]) { | 234 | if (with_ports && tb[IPSET_ATTR_PORT_TO]) { |
234 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); | 235 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); |
235 | if (port > port_to) | 236 | if (port > port_to) |
236 | swap(port, port_to); | 237 | swap(port, port_to); |
237 | } | 238 | } |
238 | 239 | ||
239 | if (retried) | 240 | if (retried) |
240 | ip = h->next.ip; | 241 | ip = h->next.ip; |
241 | for (; !before(ip_to, ip); ip++) { | 242 | for (; !before(ip_to, ip); ip++) { |
242 | p = retried && ip == h->next.ip ? h->next.port : port; | 243 | p = retried && ip == h->next.ip ? h->next.port : port; |
243 | for (; p <= port_to; p++) { | 244 | for (; p <= port_to; p++) { |
244 | data.ip = htonl(ip); | 245 | data.ip = htonl(ip); |
245 | data.port = htons(p); | 246 | data.port = htons(p); |
246 | ret = adtfn(set, &data, timeout, flags); | 247 | ret = adtfn(set, &data, timeout, flags); |
247 | 248 | ||
248 | if (ret && !ip_set_eexist(ret, flags)) | 249 | if (ret && !ip_set_eexist(ret, flags)) |
249 | return ret; | 250 | return ret; |
250 | else | 251 | else |
251 | ret = 0; | 252 | ret = 0; |
252 | } | 253 | } |
253 | } | 254 | } |
254 | return ret; | 255 | return ret; |
255 | } | 256 | } |
256 | 257 | ||
257 | static bool | 258 | static bool |
258 | hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b) | 259 | hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b) |
259 | { | 260 | { |
260 | const struct ip_set_hash *x = a->data; | 261 | const struct ip_set_hash *x = a->data; |
261 | const struct ip_set_hash *y = b->data; | 262 | const struct ip_set_hash *y = b->data; |
262 | 263 | ||
263 | /* Resizing changes htable_bits, so we ignore it */ | 264 | /* Resizing changes htable_bits, so we ignore it */ |
264 | return x->maxelem == y->maxelem && | 265 | return x->maxelem == y->maxelem && |
265 | x->timeout == y->timeout; | 266 | x->timeout == y->timeout; |
266 | } | 267 | } |
267 | 268 | ||
268 | /* The type variant functions: IPv6 */ | 269 | /* The type variant functions: IPv6 */ |
269 | 270 | ||
270 | struct hash_ipportip6_elem { | 271 | struct hash_ipportip6_elem { |
271 | union nf_inet_addr ip; | 272 | union nf_inet_addr ip; |
272 | union nf_inet_addr ip2; | 273 | union nf_inet_addr ip2; |
273 | __be16 port; | 274 | __be16 port; |
274 | u8 proto; | 275 | u8 proto; |
275 | u8 padding; | 276 | u8 padding; |
276 | }; | 277 | }; |
277 | 278 | ||
278 | struct hash_ipportip6_telem { | 279 | struct hash_ipportip6_telem { |
279 | union nf_inet_addr ip; | 280 | union nf_inet_addr ip; |
280 | union nf_inet_addr ip2; | 281 | union nf_inet_addr ip2; |
281 | __be16 port; | 282 | __be16 port; |
282 | u8 proto; | 283 | u8 proto; |
283 | u8 padding; | 284 | u8 padding; |
284 | unsigned long timeout; | 285 | unsigned long timeout; |
285 | }; | 286 | }; |
286 | 287 | ||
287 | static inline bool | 288 | static inline bool |
288 | hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1, | 289 | hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1, |
289 | const struct hash_ipportip6_elem *ip2) | 290 | const struct hash_ipportip6_elem *ip2, |
291 | u32 *multi) | ||
290 | { | 292 | { |
291 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && | 293 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
292 | ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 && | 294 | ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 && |
293 | ip1->port == ip2->port && | 295 | ip1->port == ip2->port && |
294 | ip1->proto == ip2->proto; | 296 | ip1->proto == ip2->proto; |
295 | } | 297 | } |
296 | 298 | ||
297 | static inline bool | 299 | static inline bool |
298 | hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem) | 300 | hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem) |
299 | { | 301 | { |
300 | return elem->proto == 0; | 302 | return elem->proto == 0; |
301 | } | 303 | } |
302 | 304 | ||
303 | static inline void | 305 | static inline void |
304 | hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst, | 306 | hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst, |
305 | const struct hash_ipportip6_elem *src) | 307 | const struct hash_ipportip6_elem *src) |
306 | { | 308 | { |
307 | memcpy(dst, src, sizeof(*dst)); | 309 | memcpy(dst, src, sizeof(*dst)); |
308 | } | 310 | } |
309 | 311 | ||
310 | static inline void | 312 | static inline void |
311 | hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem) | 313 | hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem) |
312 | { | 314 | { |
313 | elem->proto = 0; | 315 | elem->proto = 0; |
314 | } | 316 | } |
315 | 317 | ||
316 | static bool | 318 | static bool |
317 | hash_ipportip6_data_list(struct sk_buff *skb, | 319 | hash_ipportip6_data_list(struct sk_buff *skb, |
318 | const struct hash_ipportip6_elem *data) | 320 | const struct hash_ipportip6_elem *data) |
319 | { | 321 | { |
320 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); | 322 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); |
321 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); | 323 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); |
322 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 324 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
323 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 325 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
324 | return 0; | 326 | return 0; |
325 | 327 | ||
326 | nla_put_failure: | 328 | nla_put_failure: |
327 | return 1; | 329 | return 1; |
328 | } | 330 | } |
329 | 331 | ||
330 | static bool | 332 | static bool |
331 | hash_ipportip6_data_tlist(struct sk_buff *skb, | 333 | hash_ipportip6_data_tlist(struct sk_buff *skb, |
332 | const struct hash_ipportip6_elem *data) | 334 | const struct hash_ipportip6_elem *data) |
333 | { | 335 | { |
334 | const struct hash_ipportip6_telem *e = | 336 | const struct hash_ipportip6_telem *e = |
335 | (const struct hash_ipportip6_telem *)data; | 337 | (const struct hash_ipportip6_telem *)data; |
336 | 338 | ||
337 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); | 339 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); |
338 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); | 340 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); |
339 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 341 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
340 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 342 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
341 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 343 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
342 | htonl(ip_set_timeout_get(e->timeout))); | 344 | htonl(ip_set_timeout_get(e->timeout))); |
343 | return 0; | 345 | return 0; |
344 | 346 | ||
345 | nla_put_failure: | 347 | nla_put_failure: |
346 | return 1; | 348 | return 1; |
347 | } | 349 | } |
348 | 350 | ||
349 | #undef PF | 351 | #undef PF |
350 | #undef HOST_MASK | 352 | #undef HOST_MASK |
351 | 353 | ||
352 | #define PF 6 | 354 | #define PF 6 |
353 | #define HOST_MASK 128 | 355 | #define HOST_MASK 128 |
354 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 356 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
355 | 357 | ||
356 | static inline void | 358 | static inline void |
357 | hash_ipportip6_data_next(struct ip_set_hash *h, | 359 | hash_ipportip6_data_next(struct ip_set_hash *h, |
358 | const struct hash_ipportip6_elem *d) | 360 | const struct hash_ipportip6_elem *d) |
359 | { | 361 | { |
360 | h->next.port = ntohs(d->port); | 362 | h->next.port = ntohs(d->port); |
361 | } | 363 | } |
362 | 364 | ||
363 | static int | 365 | static int |
364 | hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb, | 366 | hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb, |
365 | const struct xt_action_param *par, | 367 | const struct xt_action_param *par, |
366 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 368 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
367 | { | 369 | { |
368 | const struct ip_set_hash *h = set->data; | 370 | const struct ip_set_hash *h = set->data; |
369 | ipset_adtfn adtfn = set->variant->adt[adt]; | 371 | ipset_adtfn adtfn = set->variant->adt[adt]; |
370 | struct hash_ipportip6_elem data = { }; | 372 | struct hash_ipportip6_elem data = { }; |
371 | 373 | ||
372 | if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, | 374 | if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, |
373 | &data.port, &data.proto)) | 375 | &data.port, &data.proto)) |
374 | return -EINVAL; | 376 | return -EINVAL; |
375 | 377 | ||
376 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); | 378 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); |
377 | ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6); | 379 | ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6); |
378 | 380 | ||
379 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 381 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
380 | } | 382 | } |
381 | 383 | ||
382 | static int | 384 | static int |
383 | hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], | 385 | hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], |
384 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 386 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
385 | { | 387 | { |
386 | const struct ip_set_hash *h = set->data; | 388 | const struct ip_set_hash *h = set->data; |
387 | ipset_adtfn adtfn = set->variant->adt[adt]; | 389 | ipset_adtfn adtfn = set->variant->adt[adt]; |
388 | struct hash_ipportip6_elem data = { }; | 390 | struct hash_ipportip6_elem data = { }; |
389 | u32 port, port_to; | 391 | u32 port, port_to; |
390 | u32 timeout = h->timeout; | 392 | u32 timeout = h->timeout; |
391 | bool with_ports = false; | 393 | bool with_ports = false; |
392 | int ret; | 394 | int ret; |
393 | 395 | ||
394 | if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || | 396 | if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || |
395 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || | 397 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || |
396 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || | 398 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || |
397 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || | 399 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || |
398 | tb[IPSET_ATTR_IP_TO] || | 400 | tb[IPSET_ATTR_IP_TO] || |
399 | tb[IPSET_ATTR_CIDR])) | 401 | tb[IPSET_ATTR_CIDR])) |
400 | return -IPSET_ERR_PROTOCOL; | 402 | return -IPSET_ERR_PROTOCOL; |
401 | 403 | ||
402 | if (tb[IPSET_ATTR_LINENO]) | 404 | if (tb[IPSET_ATTR_LINENO]) |
403 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 405 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
404 | 406 | ||
405 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); | 407 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); |
406 | if (ret) | 408 | if (ret) |
407 | return ret; | 409 | return ret; |
408 | 410 | ||
409 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2); | 411 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2); |
410 | if (ret) | 412 | if (ret) |
411 | return ret; | 413 | return ret; |
412 | 414 | ||
413 | if (tb[IPSET_ATTR_PORT]) | 415 | if (tb[IPSET_ATTR_PORT]) |
414 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); | 416 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); |
415 | else | 417 | else |
416 | return -IPSET_ERR_PROTOCOL; | 418 | return -IPSET_ERR_PROTOCOL; |
417 | 419 | ||
418 | if (tb[IPSET_ATTR_PROTO]) { | 420 | if (tb[IPSET_ATTR_PROTO]) { |
419 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); | 421 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); |
420 | with_ports = ip_set_proto_with_ports(data.proto); | 422 | with_ports = ip_set_proto_with_ports(data.proto); |
421 | 423 | ||
422 | if (data.proto == 0) | 424 | if (data.proto == 0) |
423 | return -IPSET_ERR_INVALID_PROTO; | 425 | return -IPSET_ERR_INVALID_PROTO; |
424 | } else | 426 | } else |
425 | return -IPSET_ERR_MISSING_PROTO; | 427 | return -IPSET_ERR_MISSING_PROTO; |
426 | 428 | ||
427 | if (!(with_ports || data.proto == IPPROTO_ICMPV6)) | 429 | if (!(with_ports || data.proto == IPPROTO_ICMPV6)) |
428 | data.port = 0; | 430 | data.port = 0; |
429 | 431 | ||
430 | if (tb[IPSET_ATTR_TIMEOUT]) { | 432 | if (tb[IPSET_ATTR_TIMEOUT]) { |
431 | if (!with_timeout(h->timeout)) | 433 | if (!with_timeout(h->timeout)) |
432 | return -IPSET_ERR_TIMEOUT; | 434 | return -IPSET_ERR_TIMEOUT; |
433 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 435 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
434 | } | 436 | } |
435 | 437 | ||
436 | if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { | 438 | if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { |
437 | ret = adtfn(set, &data, timeout, flags); | 439 | ret = adtfn(set, &data, timeout, flags); |
438 | return ip_set_eexist(ret, flags) ? 0 : ret; | 440 | return ip_set_eexist(ret, flags) ? 0 : ret; |
439 | } | 441 | } |
440 | 442 | ||
441 | port = ntohs(data.port); | 443 | port = ntohs(data.port); |
442 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); | 444 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); |
443 | if (port > port_to) | 445 | if (port > port_to) |
444 | swap(port, port_to); | 446 | swap(port, port_to); |
445 | 447 | ||
446 | if (retried) | 448 | if (retried) |
447 | port = h->next.port; | 449 | port = h->next.port; |
448 | for (; port <= port_to; port++) { | 450 | for (; port <= port_to; port++) { |
449 | data.port = htons(port); | 451 | data.port = htons(port); |
450 | ret = adtfn(set, &data, timeout, flags); | 452 | ret = adtfn(set, &data, timeout, flags); |
451 | 453 | ||
452 | if (ret && !ip_set_eexist(ret, flags)) | 454 | if (ret && !ip_set_eexist(ret, flags)) |
453 | return ret; | 455 | return ret; |
454 | else | 456 | else |
455 | ret = 0; | 457 | ret = 0; |
456 | } | 458 | } |
457 | return ret; | 459 | return ret; |
458 | } | 460 | } |
459 | 461 | ||
460 | /* Create hash:ip,port,ip type of sets */ | 462 | /* Create hash:ip,port,ip type of sets */ |
461 | 463 | ||
462 | static int | 464 | static int |
463 | hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) | 465 | hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) |
464 | { | 466 | { |
465 | struct ip_set_hash *h; | 467 | struct ip_set_hash *h; |
466 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; | 468 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; |
467 | u8 hbits; | 469 | u8 hbits; |
468 | 470 | ||
469 | if (!(set->family == AF_INET || set->family == AF_INET6)) | 471 | if (!(set->family == AF_INET || set->family == AF_INET6)) |
470 | return -IPSET_ERR_INVALID_FAMILY; | 472 | return -IPSET_ERR_INVALID_FAMILY; |
471 | 473 | ||
472 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || | 474 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || |
473 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || | 475 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || |
474 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 476 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
475 | return -IPSET_ERR_PROTOCOL; | 477 | return -IPSET_ERR_PROTOCOL; |
476 | 478 | ||
477 | if (tb[IPSET_ATTR_HASHSIZE]) { | 479 | if (tb[IPSET_ATTR_HASHSIZE]) { |
478 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); | 480 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); |
479 | if (hashsize < IPSET_MIMINAL_HASHSIZE) | 481 | if (hashsize < IPSET_MIMINAL_HASHSIZE) |
480 | hashsize = IPSET_MIMINAL_HASHSIZE; | 482 | hashsize = IPSET_MIMINAL_HASHSIZE; |
481 | } | 483 | } |
482 | 484 | ||
483 | if (tb[IPSET_ATTR_MAXELEM]) | 485 | if (tb[IPSET_ATTR_MAXELEM]) |
484 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); | 486 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); |
485 | 487 | ||
486 | h = kzalloc(sizeof(*h), GFP_KERNEL); | 488 | h = kzalloc(sizeof(*h), GFP_KERNEL); |
487 | if (!h) | 489 | if (!h) |
488 | return -ENOMEM; | 490 | return -ENOMEM; |
489 | 491 | ||
490 | h->maxelem = maxelem; | 492 | h->maxelem = maxelem; |
491 | get_random_bytes(&h->initval, sizeof(h->initval)); | 493 | get_random_bytes(&h->initval, sizeof(h->initval)); |
492 | h->timeout = IPSET_NO_TIMEOUT; | 494 | h->timeout = IPSET_NO_TIMEOUT; |
493 | 495 | ||
494 | hbits = htable_bits(hashsize); | 496 | hbits = htable_bits(hashsize); |
495 | h->table = ip_set_alloc( | 497 | h->table = ip_set_alloc( |
496 | sizeof(struct htable) | 498 | sizeof(struct htable) |
497 | + jhash_size(hbits) * sizeof(struct hbucket)); | 499 | + jhash_size(hbits) * sizeof(struct hbucket)); |
498 | if (!h->table) { | 500 | if (!h->table) { |
499 | kfree(h); | 501 | kfree(h); |
500 | return -ENOMEM; | 502 | return -ENOMEM; |
501 | } | 503 | } |
502 | h->table->htable_bits = hbits; | 504 | h->table->htable_bits = hbits; |
503 | 505 | ||
504 | set->data = h; | 506 | set->data = h; |
505 | 507 | ||
506 | if (tb[IPSET_ATTR_TIMEOUT]) { | 508 | if (tb[IPSET_ATTR_TIMEOUT]) { |
507 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 509 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
508 | 510 | ||
509 | set->variant = set->family == AF_INET | 511 | set->variant = set->family == AF_INET |
510 | ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant; | 512 | ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant; |
511 | 513 | ||
512 | if (set->family == AF_INET) | 514 | if (set->family == AF_INET) |
513 | hash_ipportip4_gc_init(set); | 515 | hash_ipportip4_gc_init(set); |
514 | else | 516 | else |
515 | hash_ipportip6_gc_init(set); | 517 | hash_ipportip6_gc_init(set); |
516 | } else { | 518 | } else { |
517 | set->variant = set->family == AF_INET | 519 | set->variant = set->family == AF_INET |
518 | ? &hash_ipportip4_variant : &hash_ipportip6_variant; | 520 | ? &hash_ipportip4_variant : &hash_ipportip6_variant; |
519 | } | 521 | } |
520 | 522 | ||
521 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", | 523 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", |
522 | set->name, jhash_size(h->table->htable_bits), | 524 | set->name, jhash_size(h->table->htable_bits), |
523 | h->table->htable_bits, h->maxelem, set->data, h->table); | 525 | h->table->htable_bits, h->maxelem, set->data, h->table); |
524 | 526 | ||
525 | return 0; | 527 | return 0; |
526 | } | 528 | } |
527 | 529 | ||
528 | static struct ip_set_type hash_ipportip_type __read_mostly = { | 530 | static struct ip_set_type hash_ipportip_type __read_mostly = { |
529 | .name = "hash:ip,port,ip", | 531 | .name = "hash:ip,port,ip", |
530 | .protocol = IPSET_PROTOCOL, | 532 | .protocol = IPSET_PROTOCOL, |
531 | .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, | 533 | .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, |
532 | .dimension = IPSET_DIM_THREE, | 534 | .dimension = IPSET_DIM_THREE, |
533 | .family = AF_UNSPEC, | 535 | .family = AF_UNSPEC, |
534 | .revision_min = 0, | 536 | .revision_min = 0, |
535 | .revision_max = 1, /* SCTP and UDPLITE support added */ | 537 | .revision_max = 1, /* SCTP and UDPLITE support added */ |
536 | .create = hash_ipportip_create, | 538 | .create = hash_ipportip_create, |
537 | .create_policy = { | 539 | .create_policy = { |
538 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, | 540 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, |
539 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, | 541 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, |
540 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, | 542 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, |
541 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, | 543 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, |
542 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 544 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
543 | }, | 545 | }, |
544 | .adt_policy = { | 546 | .adt_policy = { |
545 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, | 547 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, |
546 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, | 548 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, |
547 | [IPSET_ATTR_IP2] = { .type = NLA_NESTED }, | 549 | [IPSET_ATTR_IP2] = { .type = NLA_NESTED }, |
548 | [IPSET_ATTR_PORT] = { .type = NLA_U16 }, | 550 | [IPSET_ATTR_PORT] = { .type = NLA_U16 }, |
549 | [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, | 551 | [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, |
550 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, | 552 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, |
551 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, | 553 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, |
552 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 554 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
553 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, | 555 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, |
554 | }, | 556 | }, |
555 | .me = THIS_MODULE, | 557 | .me = THIS_MODULE, |
556 | }; | 558 | }; |
557 | 559 | ||
558 | static int __init | 560 | static int __init |
559 | hash_ipportip_init(void) | 561 | hash_ipportip_init(void) |
560 | { | 562 | { |
561 | return ip_set_type_register(&hash_ipportip_type); | 563 | return ip_set_type_register(&hash_ipportip_type); |
562 | } | 564 | } |
563 | 565 | ||
564 | static void __exit | 566 | static void __exit |
565 | hash_ipportip_fini(void) | 567 | hash_ipportip_fini(void) |
566 | { | 568 | { |
567 | ip_set_type_unregister(&hash_ipportip_type); | 569 | ip_set_type_unregister(&hash_ipportip_type); |
568 | } | 570 | } |
569 | 571 | ||
570 | module_init(hash_ipportip_init); | 572 | module_init(hash_ipportip_init); |
571 | module_exit(hash_ipportip_fini); | 573 | module_exit(hash_ipportip_fini); |
572 | 574 |
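The create() routine above sizes the bucket array from the user-supplied IPSET_ATTR_HASHSIZE: htable_bits() turns the requested hashsize into a bit count and jhash_size() gives the number of hbucket slots allocated for h->table. The standalone C sketch below only illustrates that power-of-two rounding idea; demo_htable_bits() is a hypothetical stand-in for demonstration and is not the kernel's htable_bits() implementation.

/*
 * Userspace illustration of hashsize -> table bits -> bucket count sizing.
 * demo_htable_bits() is an assumption for demonstration purposes only.
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t demo_htable_bits(uint32_t hashsize)
{
	uint8_t bits = 0;

	/* Smallest bit count so that (1 << bits) covers the request. */
	while (bits < 31 && (1u << bits) < hashsize)
		bits++;
	return bits;
}

int main(void)
{
	uint32_t hashsize = 1000;	/* as if taken from IPSET_ATTR_HASHSIZE */
	uint8_t bits = demo_htable_bits(hashsize);

	/* The create() path then allocates jhash_size(bits) == 1 << bits buckets. */
	printf("hashsize %u -> %u bits -> %u buckets\n",
	       hashsize, bits, 1u << bits);
	return 0;
}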
net/netfilter/ipset/ip_set_hash_ipportnet.c
1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 as | 4 | * it under the terms of the GNU General Public License version 2 as |
5 | * published by the Free Software Foundation. | 5 | * published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | /* Kernel module implementing an IP set type: the hash:ip,port,net type */ | 8 | /* Kernel module implementing an IP set type: the hash:ip,port,net type */ |
9 | 9 | ||
10 | #include <linux/jhash.h> | 10 | #include <linux/jhash.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <net/ip.h> | 16 | #include <net/ip.h> |
17 | #include <net/ipv6.h> | 17 | #include <net/ipv6.h> |
18 | #include <net/netlink.h> | 18 | #include <net/netlink.h> |
19 | #include <net/tcp.h> | 19 | #include <net/tcp.h> |
20 | 20 | ||
21 | #include <linux/netfilter.h> | 21 | #include <linux/netfilter.h> |
22 | #include <linux/netfilter/ipset/pfxlen.h> | 22 | #include <linux/netfilter/ipset/pfxlen.h> |
23 | #include <linux/netfilter/ipset/ip_set.h> | 23 | #include <linux/netfilter/ipset/ip_set.h> |
24 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 24 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
25 | #include <linux/netfilter/ipset/ip_set_getport.h> | 25 | #include <linux/netfilter/ipset/ip_set_getport.h> |
26 | #include <linux/netfilter/ipset/ip_set_hash.h> | 26 | #include <linux/netfilter/ipset/ip_set_hash.h> |
27 | 27 | ||
28 | MODULE_LICENSE("GPL"); | 28 | MODULE_LICENSE("GPL"); |
29 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); | 29 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); |
30 | MODULE_DESCRIPTION("hash:ip,port,net type of IP sets"); | 30 | MODULE_DESCRIPTION("hash:ip,port,net type of IP sets"); |
31 | MODULE_ALIAS("ip_set_hash:ip,port,net"); | 31 | MODULE_ALIAS("ip_set_hash:ip,port,net"); |
32 | 32 | ||
33 | /* Type specific function prefix */ | 33 | /* Type specific function prefix */ |
34 | #define TYPE hash_ipportnet | 34 | #define TYPE hash_ipportnet |
35 | 35 | ||
36 | static bool | 36 | static bool |
37 | hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b); | 37 | hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b); |
38 | 38 | ||
39 | #define hash_ipportnet4_same_set hash_ipportnet_same_set | 39 | #define hash_ipportnet4_same_set hash_ipportnet_same_set |
40 | #define hash_ipportnet6_same_set hash_ipportnet_same_set | 40 | #define hash_ipportnet6_same_set hash_ipportnet_same_set |
41 | 41 | ||
42 | /* The type variant functions: IPv4 */ | 42 | /* The type variant functions: IPv4 */ |
43 | 43 | ||
44 | /* Member elements without timeout */ | 44 | /* Member elements without timeout */ |
45 | struct hash_ipportnet4_elem { | 45 | struct hash_ipportnet4_elem { |
46 | __be32 ip; | 46 | __be32 ip; |
47 | __be32 ip2; | 47 | __be32 ip2; |
48 | __be16 port; | 48 | __be16 port; |
49 | u8 cidr; | 49 | u8 cidr; |
50 | u8 proto; | 50 | u8 proto; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | /* Member elements with timeout support */ | 53 | /* Member elements with timeout support */ |
54 | struct hash_ipportnet4_telem { | 54 | struct hash_ipportnet4_telem { |
55 | __be32 ip; | 55 | __be32 ip; |
56 | __be32 ip2; | 56 | __be32 ip2; |
57 | __be16 port; | 57 | __be16 port; |
58 | u8 cidr; | 58 | u8 cidr; |
59 | u8 proto; | 59 | u8 proto; |
60 | unsigned long timeout; | 60 | unsigned long timeout; |
61 | }; | 61 | }; |
62 | 62 | ||
63 | static inline bool | 63 | static inline bool |
64 | hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1, | 64 | hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1, |
65 | const struct hash_ipportnet4_elem *ip2) | 65 | const struct hash_ipportnet4_elem *ip2, |
| | 66 | u32 *multi) |
66 | { | 67 | { |
67 | return ip1->ip == ip2->ip && | 68 | return ip1->ip == ip2->ip && |
68 | ip1->ip2 == ip2->ip2 && | 69 | ip1->ip2 == ip2->ip2 && |
69 | ip1->cidr == ip2->cidr && | 70 | ip1->cidr == ip2->cidr && |
70 | ip1->port == ip2->port && | 71 | ip1->port == ip2->port && |
71 | ip1->proto == ip2->proto; | 72 | ip1->proto == ip2->proto; |
72 | } | 73 | } |
73 | 74 | ||
74 | static inline bool | 75 | static inline bool |
75 | hash_ipportnet4_data_isnull(const struct hash_ipportnet4_elem *elem) | 76 | hash_ipportnet4_data_isnull(const struct hash_ipportnet4_elem *elem) |
76 | { | 77 | { |
77 | return elem->proto == 0; | 78 | return elem->proto == 0; |
78 | } | 79 | } |
79 | 80 | ||
80 | static inline void | 81 | static inline void |
81 | hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst, | 82 | hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst, |
82 | const struct hash_ipportnet4_elem *src) | 83 | const struct hash_ipportnet4_elem *src) |
83 | { | 84 | { |
84 | memcpy(dst, src, sizeof(*dst)); | 85 | memcpy(dst, src, sizeof(*dst)); |
85 | } | 86 | } |
86 | 87 | ||
87 | static inline void | 88 | static inline void |
88 | hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr) | 89 | hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr) |
89 | { | 90 | { |
90 | elem->ip2 &= ip_set_netmask(cidr); | 91 | elem->ip2 &= ip_set_netmask(cidr); |
91 | elem->cidr = cidr; | 92 | elem->cidr = cidr; |
92 | } | 93 | } |
93 | 94 | ||
94 | static inline void | 95 | static inline void |
95 | hash_ipportnet4_data_zero_out(struct hash_ipportnet4_elem *elem) | 96 | hash_ipportnet4_data_zero_out(struct hash_ipportnet4_elem *elem) |
96 | { | 97 | { |
97 | elem->proto = 0; | 98 | elem->proto = 0; |
98 | } | 99 | } |
99 | 100 | ||
100 | static bool | 101 | static bool |
101 | hash_ipportnet4_data_list(struct sk_buff *skb, | 102 | hash_ipportnet4_data_list(struct sk_buff *skb, |
102 | const struct hash_ipportnet4_elem *data) | 103 | const struct hash_ipportnet4_elem *data) |
103 | { | 104 | { |
104 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); | 105 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); |
105 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); | 106 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); |
106 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 107 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
107 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); | 108 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); |
108 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 109 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
109 | return 0; | 110 | return 0; |
110 | 111 | ||
111 | nla_put_failure: | 112 | nla_put_failure: |
112 | return 1; | 113 | return 1; |
113 | } | 114 | } |
114 | 115 | ||
115 | static bool | 116 | static bool |
116 | hash_ipportnet4_data_tlist(struct sk_buff *skb, | 117 | hash_ipportnet4_data_tlist(struct sk_buff *skb, |
117 | const struct hash_ipportnet4_elem *data) | 118 | const struct hash_ipportnet4_elem *data) |
118 | { | 119 | { |
119 | const struct hash_ipportnet4_telem *tdata = | 120 | const struct hash_ipportnet4_telem *tdata = |
120 | (const struct hash_ipportnet4_telem *)data; | 121 | (const struct hash_ipportnet4_telem *)data; |
121 | 122 | ||
122 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); | 123 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); |
123 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); | 124 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); |
124 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); | 125 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); |
125 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); | 126 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); |
126 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 127 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
127 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 128 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
128 | htonl(ip_set_timeout_get(tdata->timeout))); | 129 | htonl(ip_set_timeout_get(tdata->timeout))); |
129 | 130 | ||
130 | return 0; | 131 | return 0; |
131 | 132 | ||
132 | nla_put_failure: | 133 | nla_put_failure: |
133 | return 1; | 134 | return 1; |
134 | } | 135 | } |
135 | 136 | ||
136 | #define IP_SET_HASH_WITH_PROTO | 137 | #define IP_SET_HASH_WITH_PROTO |
137 | #define IP_SET_HASH_WITH_NETS | 138 | #define IP_SET_HASH_WITH_NETS |
138 | 139 | ||
139 | #define PF 4 | 140 | #define PF 4 |
140 | #define HOST_MASK 32 | 141 | #define HOST_MASK 32 |
141 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 142 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
142 | 143 | ||
143 | static inline void | 144 | static inline void |
144 | hash_ipportnet4_data_next(struct ip_set_hash *h, | 145 | hash_ipportnet4_data_next(struct ip_set_hash *h, |
145 | const struct hash_ipportnet4_elem *d) | 146 | const struct hash_ipportnet4_elem *d) |
146 | { | 147 | { |
147 | h->next.ip = ntohl(d->ip); | 148 | h->next.ip = ntohl(d->ip); |
148 | h->next.port = ntohs(d->port); | 149 | h->next.port = ntohs(d->port); |
149 | h->next.ip2 = ntohl(d->ip2); | 150 | h->next.ip2 = ntohl(d->ip2); |
150 | } | 151 | } |
151 | 152 | ||
152 | static int | 153 | static int |
153 | hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb, | 154 | hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb, |
154 | const struct xt_action_param *par, | 155 | const struct xt_action_param *par, |
155 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 156 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
156 | { | 157 | { |
157 | const struct ip_set_hash *h = set->data; | 158 | const struct ip_set_hash *h = set->data; |
158 | ipset_adtfn adtfn = set->variant->adt[adt]; | 159 | ipset_adtfn adtfn = set->variant->adt[adt]; |
159 | struct hash_ipportnet4_elem data = { | 160 | struct hash_ipportnet4_elem data = { |
160 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK | 161 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK |
161 | }; | 162 | }; |
162 | 163 | ||
163 | if (data.cidr == 0) | 164 | if (data.cidr == 0) |
164 | return -EINVAL; | 165 | return -EINVAL; |
165 | if (adt == IPSET_TEST) | 166 | if (adt == IPSET_TEST) |
166 | data.cidr = HOST_MASK; | 167 | data.cidr = HOST_MASK; |
167 | 168 | ||
168 | if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, | 169 | if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, |
169 | &data.port, &data.proto)) | 170 | &data.port, &data.proto)) |
170 | return -EINVAL; | 171 | return -EINVAL; |
171 | 172 | ||
172 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); | 173 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); |
173 | ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2); | 174 | ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2); |
174 | data.ip2 &= ip_set_netmask(data.cidr); | 175 | data.ip2 &= ip_set_netmask(data.cidr); |
175 | 176 | ||
176 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 177 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
177 | } | 178 | } |
178 | 179 | ||
179 | static int | 180 | static int |
180 | hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | 181 | hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], |
181 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 182 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
182 | { | 183 | { |
183 | const struct ip_set_hash *h = set->data; | 184 | const struct ip_set_hash *h = set->data; |
184 | ipset_adtfn adtfn = set->variant->adt[adt]; | 185 | ipset_adtfn adtfn = set->variant->adt[adt]; |
185 | struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; | 186 | struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; |
186 | u32 ip, ip_to, p = 0, port, port_to; | 187 | u32 ip, ip_to, p = 0, port, port_to; |
187 | u32 ip2_from = 0, ip2_to, ip2_last, ip2; | 188 | u32 ip2_from = 0, ip2_to, ip2_last, ip2; |
188 | u32 timeout = h->timeout; | 189 | u32 timeout = h->timeout; |
189 | bool with_ports = false; | 190 | bool with_ports = false; |
190 | int ret; | 191 | int ret; |
191 | 192 | ||
192 | if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || | 193 | if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || |
193 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || | 194 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || |
194 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || | 195 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || |
195 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 196 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
196 | return -IPSET_ERR_PROTOCOL; | 197 | return -IPSET_ERR_PROTOCOL; |
197 | 198 | ||
198 | if (tb[IPSET_ATTR_LINENO]) | 199 | if (tb[IPSET_ATTR_LINENO]) |
199 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 200 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
200 | 201 | ||
201 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); | 202 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); |
202 | if (ret) | 203 | if (ret) |
203 | return ret; | 204 | return ret; |
204 | 205 | ||
205 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from); | 206 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from); |
206 | if (ret) | 207 | if (ret) |
207 | return ret; | 208 | return ret; |
208 | 209 | ||
209 | if (tb[IPSET_ATTR_CIDR2]) { | 210 | if (tb[IPSET_ATTR_CIDR2]) { |
210 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); | 211 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); |
211 | if (!data.cidr) | 212 | if (!data.cidr) |
212 | return -IPSET_ERR_INVALID_CIDR; | 213 | return -IPSET_ERR_INVALID_CIDR; |
213 | } | 214 | } |
214 | 215 | ||
215 | if (tb[IPSET_ATTR_PORT]) | 216 | if (tb[IPSET_ATTR_PORT]) |
216 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); | 217 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); |
217 | else | 218 | else |
218 | return -IPSET_ERR_PROTOCOL; | 219 | return -IPSET_ERR_PROTOCOL; |
219 | 220 | ||
220 | if (tb[IPSET_ATTR_PROTO]) { | 221 | if (tb[IPSET_ATTR_PROTO]) { |
221 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); | 222 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); |
222 | with_ports = ip_set_proto_with_ports(data.proto); | 223 | with_ports = ip_set_proto_with_ports(data.proto); |
223 | 224 | ||
224 | if (data.proto == 0) | 225 | if (data.proto == 0) |
225 | return -IPSET_ERR_INVALID_PROTO; | 226 | return -IPSET_ERR_INVALID_PROTO; |
226 | } else | 227 | } else |
227 | return -IPSET_ERR_MISSING_PROTO; | 228 | return -IPSET_ERR_MISSING_PROTO; |
228 | 229 | ||
229 | if (!(with_ports || data.proto == IPPROTO_ICMP)) | 230 | if (!(with_ports || data.proto == IPPROTO_ICMP)) |
230 | data.port = 0; | 231 | data.port = 0; |
231 | 232 | ||
232 | if (tb[IPSET_ATTR_TIMEOUT]) { | 233 | if (tb[IPSET_ATTR_TIMEOUT]) { |
233 | if (!with_timeout(h->timeout)) | 234 | if (!with_timeout(h->timeout)) |
234 | return -IPSET_ERR_TIMEOUT; | 235 | return -IPSET_ERR_TIMEOUT; |
235 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 236 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
236 | } | 237 | } |
237 | 238 | ||
238 | with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; | 239 | with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; |
239 | if (adt == IPSET_TEST || | 240 | if (adt == IPSET_TEST || |
240 | !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports || | 241 | !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports || |
241 | tb[IPSET_ATTR_IP2_TO])) { | 242 | tb[IPSET_ATTR_IP2_TO])) { |
242 | data.ip = htonl(ip); | 243 | data.ip = htonl(ip); |
243 | data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr)); | 244 | data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr)); |
244 | ret = adtfn(set, &data, timeout, flags); | 245 | ret = adtfn(set, &data, timeout, flags); |
245 | return ip_set_eexist(ret, flags) ? 0 : ret; | 246 | return ip_set_eexist(ret, flags) ? 0 : ret; |
246 | } | 247 | } |
247 | 248 | ||
248 | if (tb[IPSET_ATTR_IP_TO]) { | 249 | if (tb[IPSET_ATTR_IP_TO]) { |
249 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 250 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
250 | if (ret) | 251 | if (ret) |
251 | return ret; | 252 | return ret; |
252 | if (ip > ip_to) | 253 | if (ip > ip_to) |
253 | swap(ip, ip_to); | 254 | swap(ip, ip_to); |
254 | } else if (tb[IPSET_ATTR_CIDR]) { | 255 | } else if (tb[IPSET_ATTR_CIDR]) { |
255 | u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 256 | u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
256 | 257 | ||
257 | if (cidr > 32) | 258 | if (cidr > 32) |
258 | return -IPSET_ERR_INVALID_CIDR; | 259 | return -IPSET_ERR_INVALID_CIDR; |
259 | ip_set_mask_from_to(ip, ip_to, cidr); | 260 | ip_set_mask_from_to(ip, ip_to, cidr); |
260 | } | 261 | } |
261 | 262 | ||
262 | port_to = port = ntohs(data.port); | 263 | port_to = port = ntohs(data.port); |
263 | if (tb[IPSET_ATTR_PORT_TO]) { | 264 | if (tb[IPSET_ATTR_PORT_TO]) { |
264 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); | 265 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); |
265 | if (port > port_to) | 266 | if (port > port_to) |
266 | swap(port, port_to); | 267 | swap(port, port_to); |
267 | } | 268 | } |
268 | if (tb[IPSET_ATTR_IP2_TO]) { | 269 | if (tb[IPSET_ATTR_IP2_TO]) { |
269 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); | 270 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); |
270 | if (ret) | 271 | if (ret) |
271 | return ret; | 272 | return ret; |
272 | if (ip2_from > ip2_to) | 273 | if (ip2_from > ip2_to) |
273 | swap(ip2_from, ip2_to); | 274 | swap(ip2_from, ip2_to); |
274 | if (ip2_from + UINT_MAX == ip2_to) | 275 | if (ip2_from + UINT_MAX == ip2_to) |
275 | return -IPSET_ERR_HASH_RANGE; | 276 | return -IPSET_ERR_HASH_RANGE; |
276 | } else { | 277 | } else { |
277 | ip_set_mask_from_to(ip2_from, ip2_to, data.cidr); | 278 | ip_set_mask_from_to(ip2_from, ip2_to, data.cidr); |
278 | } | 279 | } |
279 | 280 | ||
280 | if (retried) | 281 | if (retried) |
281 | ip = h->next.ip; | 282 | ip = h->next.ip; |
282 | for (; !before(ip_to, ip); ip++) { | 283 | for (; !before(ip_to, ip); ip++) { |
283 | data.ip = htonl(ip); | 284 | data.ip = htonl(ip); |
284 | p = retried && ip == h->next.ip ? h->next.port : port; | 285 | p = retried && ip == h->next.ip ? h->next.port : port; |
285 | for (; p <= port_to; p++) { | 286 | for (; p <= port_to; p++) { |
286 | data.port = htons(p); | 287 | data.port = htons(p); |
287 | ip2 = retried && ip == h->next.ip && p == h->next.port | 288 | ip2 = retried && ip == h->next.ip && p == h->next.port |
288 | ? h->next.ip2 : ip2_from; | 289 | ? h->next.ip2 : ip2_from; |
289 | while (!after(ip2, ip2_to)) { | 290 | while (!after(ip2, ip2_to)) { |
290 | data.ip2 = htonl(ip2); | 291 | data.ip2 = htonl(ip2); |
291 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, | 292 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, |
292 | &data.cidr); | 293 | &data.cidr); |
293 | ret = adtfn(set, &data, timeout, flags); | 294 | ret = adtfn(set, &data, timeout, flags); |
294 | 295 | ||
295 | if (ret && !ip_set_eexist(ret, flags)) | 296 | if (ret && !ip_set_eexist(ret, flags)) |
296 | return ret; | 297 | return ret; |
297 | else | 298 | else |
298 | ret = 0; | 299 | ret = 0; |
299 | ip2 = ip2_last + 1; | 300 | ip2 = ip2_last + 1; |
300 | } | 301 | } |
301 | } | 302 | } |
302 | } | 303 | } |
303 | return ret; | 304 | return ret; |
304 | } | 305 | } |
305 | 306 | ||
306 | static bool | 307 | static bool |
307 | hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b) | 308 | hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b) |
308 | { | 309 | { |
309 | const struct ip_set_hash *x = a->data; | 310 | const struct ip_set_hash *x = a->data; |
310 | const struct ip_set_hash *y = b->data; | 311 | const struct ip_set_hash *y = b->data; |
311 | 312 | ||
312 | /* Resizing changes htable_bits, so we ignore it */ | 313 | /* Resizing changes htable_bits, so we ignore it */ |
313 | return x->maxelem == y->maxelem && | 314 | return x->maxelem == y->maxelem && |
314 | x->timeout == y->timeout; | 315 | x->timeout == y->timeout; |
315 | } | 316 | } |
316 | 317 | ||
317 | /* The type variant functions: IPv6 */ | 318 | /* The type variant functions: IPv6 */ |
318 | 319 | ||
319 | struct hash_ipportnet6_elem { | 320 | struct hash_ipportnet6_elem { |
320 | union nf_inet_addr ip; | 321 | union nf_inet_addr ip; |
321 | union nf_inet_addr ip2; | 322 | union nf_inet_addr ip2; |
322 | __be16 port; | 323 | __be16 port; |
323 | u8 cidr; | 324 | u8 cidr; |
324 | u8 proto; | 325 | u8 proto; |
325 | }; | 326 | }; |
326 | 327 | ||
327 | struct hash_ipportnet6_telem { | 328 | struct hash_ipportnet6_telem { |
328 | union nf_inet_addr ip; | 329 | union nf_inet_addr ip; |
329 | union nf_inet_addr ip2; | 330 | union nf_inet_addr ip2; |
330 | __be16 port; | 331 | __be16 port; |
331 | u8 cidr; | 332 | u8 cidr; |
332 | u8 proto; | 333 | u8 proto; |
333 | unsigned long timeout; | 334 | unsigned long timeout; |
334 | }; | 335 | }; |
335 | 336 | ||
336 | static inline bool | 337 | static inline bool |
337 | hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1, | 338 | hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1, |
338 | const struct hash_ipportnet6_elem *ip2) | 339 | const struct hash_ipportnet6_elem *ip2, |
| | 340 | u32 *multi) |
339 | { | 341 | { |
340 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && | 342 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
341 | ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 && | 343 | ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 && |
342 | ip1->cidr == ip2->cidr && | 344 | ip1->cidr == ip2->cidr && |
343 | ip1->port == ip2->port && | 345 | ip1->port == ip2->port && |
344 | ip1->proto == ip2->proto; | 346 | ip1->proto == ip2->proto; |
345 | } | 347 | } |
346 | 348 | ||
347 | static inline bool | 349 | static inline bool |
348 | hash_ipportnet6_data_isnull(const struct hash_ipportnet6_elem *elem) | 350 | hash_ipportnet6_data_isnull(const struct hash_ipportnet6_elem *elem) |
349 | { | 351 | { |
350 | return elem->proto == 0; | 352 | return elem->proto == 0; |
351 | } | 353 | } |
352 | 354 | ||
353 | static inline void | 355 | static inline void |
354 | hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst, | 356 | hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst, |
355 | const struct hash_ipportnet6_elem *src) | 357 | const struct hash_ipportnet6_elem *src) |
356 | { | 358 | { |
357 | memcpy(dst, src, sizeof(*dst)); | 359 | memcpy(dst, src, sizeof(*dst)); |
358 | } | 360 | } |
359 | 361 | ||
360 | static inline void | 362 | static inline void |
361 | hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem) | 363 | hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem) |
362 | { | 364 | { |
363 | elem->proto = 0; | 365 | elem->proto = 0; |
364 | } | 366 | } |
365 | 367 | ||
366 | static inline void | 368 | static inline void |
367 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) | 369 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) |
368 | { | 370 | { |
369 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; | 371 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; |
370 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; | 372 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; |
371 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; | 373 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; |
372 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; | 374 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; |
373 | } | 375 | } |
374 | 376 | ||
375 | static inline void | 377 | static inline void |
376 | hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr) | 378 | hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr) |
377 | { | 379 | { |
378 | ip6_netmask(&elem->ip2, cidr); | 380 | ip6_netmask(&elem->ip2, cidr); |
379 | elem->cidr = cidr; | 381 | elem->cidr = cidr; |
380 | } | 382 | } |
381 | 383 | ||
382 | static bool | 384 | static bool |
383 | hash_ipportnet6_data_list(struct sk_buff *skb, | 385 | hash_ipportnet6_data_list(struct sk_buff *skb, |
384 | const struct hash_ipportnet6_elem *data) | 386 | const struct hash_ipportnet6_elem *data) |
385 | { | 387 | { |
386 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); | 388 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); |
387 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); | 389 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); |
388 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 390 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
389 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); | 391 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); |
390 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 392 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
391 | return 0; | 393 | return 0; |
392 | 394 | ||
393 | nla_put_failure: | 395 | nla_put_failure: |
394 | return 1; | 396 | return 1; |
395 | } | 397 | } |
396 | 398 | ||
397 | static bool | 399 | static bool |
398 | hash_ipportnet6_data_tlist(struct sk_buff *skb, | 400 | hash_ipportnet6_data_tlist(struct sk_buff *skb, |
399 | const struct hash_ipportnet6_elem *data) | 401 | const struct hash_ipportnet6_elem *data) |
400 | { | 402 | { |
401 | const struct hash_ipportnet6_telem *e = | 403 | const struct hash_ipportnet6_telem *e = |
402 | (const struct hash_ipportnet6_telem *)data; | 404 | (const struct hash_ipportnet6_telem *)data; |
403 | 405 | ||
404 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); | 406 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); |
405 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); | 407 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); |
406 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 408 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
407 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); | 409 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); |
408 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 410 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
409 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 411 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
410 | htonl(ip_set_timeout_get(e->timeout))); | 412 | htonl(ip_set_timeout_get(e->timeout))); |
411 | return 0; | 413 | return 0; |
412 | 414 | ||
413 | nla_put_failure: | 415 | nla_put_failure: |
414 | return 1; | 416 | return 1; |
415 | } | 417 | } |
416 | 418 | ||
417 | #undef PF | 419 | #undef PF |
418 | #undef HOST_MASK | 420 | #undef HOST_MASK |
419 | 421 | ||
420 | #define PF 6 | 422 | #define PF 6 |
421 | #define HOST_MASK 128 | 423 | #define HOST_MASK 128 |
422 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 424 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
423 | 425 | ||
424 | static inline void | 426 | static inline void |
425 | hash_ipportnet6_data_next(struct ip_set_hash *h, | 427 | hash_ipportnet6_data_next(struct ip_set_hash *h, |
426 | const struct hash_ipportnet6_elem *d) | 428 | const struct hash_ipportnet6_elem *d) |
427 | { | 429 | { |
428 | h->next.port = ntohs(d->port); | 430 | h->next.port = ntohs(d->port); |
429 | } | 431 | } |
430 | 432 | ||
431 | static int | 433 | static int |
432 | hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb, | 434 | hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb, |
433 | const struct xt_action_param *par, | 435 | const struct xt_action_param *par, |
434 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 436 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
435 | { | 437 | { |
436 | const struct ip_set_hash *h = set->data; | 438 | const struct ip_set_hash *h = set->data; |
437 | ipset_adtfn adtfn = set->variant->adt[adt]; | 439 | ipset_adtfn adtfn = set->variant->adt[adt]; |
438 | struct hash_ipportnet6_elem data = { | 440 | struct hash_ipportnet6_elem data = { |
439 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK | 441 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK |
440 | }; | 442 | }; |
441 | 443 | ||
442 | if (data.cidr == 0) | 444 | if (data.cidr == 0) |
443 | return -EINVAL; | 445 | return -EINVAL; |
444 | if (adt == IPSET_TEST) | 446 | if (adt == IPSET_TEST) |
445 | data.cidr = HOST_MASK; | 447 | data.cidr = HOST_MASK; |
446 | 448 | ||
447 | if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, | 449 | if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, |
448 | &data.port, &data.proto)) | 450 | &data.port, &data.proto)) |
449 | return -EINVAL; | 451 | return -EINVAL; |
450 | 452 | ||
451 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); | 453 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); |
452 | ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6); | 454 | ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6); |
453 | ip6_netmask(&data.ip2, data.cidr); | 455 | ip6_netmask(&data.ip2, data.cidr); |
454 | 456 | ||
455 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 457 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
456 | } | 458 | } |
457 | 459 | ||
458 | static int | 460 | static int |
459 | hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], | 461 | hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], |
460 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 462 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
461 | { | 463 | { |
462 | const struct ip_set_hash *h = set->data; | 464 | const struct ip_set_hash *h = set->data; |
463 | ipset_adtfn adtfn = set->variant->adt[adt]; | 465 | ipset_adtfn adtfn = set->variant->adt[adt]; |
464 | struct hash_ipportnet6_elem data = { .cidr = HOST_MASK }; | 466 | struct hash_ipportnet6_elem data = { .cidr = HOST_MASK }; |
465 | u32 port, port_to; | 467 | u32 port, port_to; |
466 | u32 timeout = h->timeout; | 468 | u32 timeout = h->timeout; |
467 | bool with_ports = false; | 469 | bool with_ports = false; |
468 | int ret; | 470 | int ret; |
469 | 471 | ||
470 | if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || | 472 | if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || |
471 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || | 473 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || |
472 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || | 474 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || |
473 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || | 475 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || |
474 | tb[IPSET_ATTR_IP_TO] || | 476 | tb[IPSET_ATTR_IP_TO] || |
475 | tb[IPSET_ATTR_CIDR])) | 477 | tb[IPSET_ATTR_CIDR])) |
476 | return -IPSET_ERR_PROTOCOL; | 478 | return -IPSET_ERR_PROTOCOL; |
477 | if (unlikely(tb[IPSET_ATTR_IP_TO])) | 479 | if (unlikely(tb[IPSET_ATTR_IP_TO])) |
478 | return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; | 480 | return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; |
479 | 481 | ||
480 | if (tb[IPSET_ATTR_LINENO]) | 482 | if (tb[IPSET_ATTR_LINENO]) |
481 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 483 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
482 | 484 | ||
483 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); | 485 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); |
484 | if (ret) | 486 | if (ret) |
485 | return ret; | 487 | return ret; |
486 | 488 | ||
487 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2); | 489 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2); |
488 | if (ret) | 490 | if (ret) |
489 | return ret; | 491 | return ret; |
490 | 492 | ||
491 | if (tb[IPSET_ATTR_CIDR2]) | 493 | if (tb[IPSET_ATTR_CIDR2]) |
492 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); | 494 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); |
493 | 495 | ||
494 | if (!data.cidr) | 496 | if (!data.cidr) |
495 | return -IPSET_ERR_INVALID_CIDR; | 497 | return -IPSET_ERR_INVALID_CIDR; |
496 | 498 | ||
497 | ip6_netmask(&data.ip2, data.cidr); | 499 | ip6_netmask(&data.ip2, data.cidr); |
498 | 500 | ||
499 | if (tb[IPSET_ATTR_PORT]) | 501 | if (tb[IPSET_ATTR_PORT]) |
500 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); | 502 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); |
501 | else | 503 | else |
502 | return -IPSET_ERR_PROTOCOL; | 504 | return -IPSET_ERR_PROTOCOL; |
503 | 505 | ||
504 | if (tb[IPSET_ATTR_PROTO]) { | 506 | if (tb[IPSET_ATTR_PROTO]) { |
505 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); | 507 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); |
506 | with_ports = ip_set_proto_with_ports(data.proto); | 508 | with_ports = ip_set_proto_with_ports(data.proto); |
507 | 509 | ||
508 | if (data.proto == 0) | 510 | if (data.proto == 0) |
509 | return -IPSET_ERR_INVALID_PROTO; | 511 | return -IPSET_ERR_INVALID_PROTO; |
510 | } else | 512 | } else |
511 | return -IPSET_ERR_MISSING_PROTO; | 513 | return -IPSET_ERR_MISSING_PROTO; |
512 | 514 | ||
513 | if (!(with_ports || data.proto == IPPROTO_ICMPV6)) | 515 | if (!(with_ports || data.proto == IPPROTO_ICMPV6)) |
514 | data.port = 0; | 516 | data.port = 0; |
515 | 517 | ||
516 | if (tb[IPSET_ATTR_TIMEOUT]) { | 518 | if (tb[IPSET_ATTR_TIMEOUT]) { |
517 | if (!with_timeout(h->timeout)) | 519 | if (!with_timeout(h->timeout)) |
518 | return -IPSET_ERR_TIMEOUT; | 520 | return -IPSET_ERR_TIMEOUT; |
519 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 521 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
520 | } | 522 | } |
521 | 523 | ||
522 | if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { | 524 | if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { |
523 | ret = adtfn(set, &data, timeout, flags); | 525 | ret = adtfn(set, &data, timeout, flags); |
524 | return ip_set_eexist(ret, flags) ? 0 : ret; | 526 | return ip_set_eexist(ret, flags) ? 0 : ret; |
525 | } | 527 | } |
526 | 528 | ||
527 | port = ntohs(data.port); | 529 | port = ntohs(data.port); |
528 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); | 530 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); |
529 | if (port > port_to) | 531 | if (port > port_to) |
530 | swap(port, port_to); | 532 | swap(port, port_to); |
531 | 533 | ||
532 | if (retried) | 534 | if (retried) |
533 | port = h->next.port; | 535 | port = h->next.port; |
534 | for (; port <= port_to; port++) { | 536 | for (; port <= port_to; port++) { |
535 | data.port = htons(port); | 537 | data.port = htons(port); |
536 | ret = adtfn(set, &data, timeout, flags); | 538 | ret = adtfn(set, &data, timeout, flags); |
537 | 539 | ||
538 | if (ret && !ip_set_eexist(ret, flags)) | 540 | if (ret && !ip_set_eexist(ret, flags)) |
539 | return ret; | 541 | return ret; |
540 | else | 542 | else |
541 | ret = 0; | 543 | ret = 0; |
542 | } | 544 | } |
543 | return ret; | 545 | return ret; |
544 | } | 546 | } |
545 | 547 | ||
546 | /* Create hash:ip,port,net type of sets */ | 548 | /* Create hash:ip,port,net type of sets */ |
547 | 549 | ||
548 | static int | 550 | static int |
549 | hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags) | 551 | hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags) |
550 | { | 552 | { |
551 | struct ip_set_hash *h; | 553 | struct ip_set_hash *h; |
552 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; | 554 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; |
553 | u8 hbits; | 555 | u8 hbits; |
554 | 556 | ||
555 | if (!(set->family == AF_INET || set->family == AF_INET6)) | 557 | if (!(set->family == AF_INET || set->family == AF_INET6)) |
556 | return -IPSET_ERR_INVALID_FAMILY; | 558 | return -IPSET_ERR_INVALID_FAMILY; |
557 | 559 | ||
558 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || | 560 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || |
559 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || | 561 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || |
560 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 562 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
561 | return -IPSET_ERR_PROTOCOL; | 563 | return -IPSET_ERR_PROTOCOL; |
562 | 564 | ||
563 | if (tb[IPSET_ATTR_HASHSIZE]) { | 565 | if (tb[IPSET_ATTR_HASHSIZE]) { |
564 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); | 566 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); |
565 | if (hashsize < IPSET_MIMINAL_HASHSIZE) | 567 | if (hashsize < IPSET_MIMINAL_HASHSIZE) |
566 | hashsize = IPSET_MIMINAL_HASHSIZE; | 568 | hashsize = IPSET_MIMINAL_HASHSIZE; |
567 | } | 569 | } |
568 | 570 | ||
569 | if (tb[IPSET_ATTR_MAXELEM]) | 571 | if (tb[IPSET_ATTR_MAXELEM]) |
570 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); | 572 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); |
571 | 573 | ||
572 | h = kzalloc(sizeof(*h) | 574 | h = kzalloc(sizeof(*h) |
573 | + sizeof(struct ip_set_hash_nets) | 575 | + sizeof(struct ip_set_hash_nets) |
574 | * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); | 576 | * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); |
575 | if (!h) | 577 | if (!h) |
576 | return -ENOMEM; | 578 | return -ENOMEM; |
577 | 579 | ||
578 | h->maxelem = maxelem; | 580 | h->maxelem = maxelem; |
579 | get_random_bytes(&h->initval, sizeof(h->initval)); | 581 | get_random_bytes(&h->initval, sizeof(h->initval)); |
580 | h->timeout = IPSET_NO_TIMEOUT; | 582 | h->timeout = IPSET_NO_TIMEOUT; |
581 | 583 | ||
582 | hbits = htable_bits(hashsize); | 584 | hbits = htable_bits(hashsize); |
583 | h->table = ip_set_alloc( | 585 | h->table = ip_set_alloc( |
584 | sizeof(struct htable) | 586 | sizeof(struct htable) |
585 | + jhash_size(hbits) * sizeof(struct hbucket)); | 587 | + jhash_size(hbits) * sizeof(struct hbucket)); |
586 | if (!h->table) { | 588 | if (!h->table) { |
587 | kfree(h); | 589 | kfree(h); |
588 | return -ENOMEM; | 590 | return -ENOMEM; |
589 | } | 591 | } |
590 | h->table->htable_bits = hbits; | 592 | h->table->htable_bits = hbits; |
591 | 593 | ||
592 | set->data = h; | 594 | set->data = h; |
593 | 595 | ||
594 | if (tb[IPSET_ATTR_TIMEOUT]) { | 596 | if (tb[IPSET_ATTR_TIMEOUT]) { |
595 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 597 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
596 | 598 | ||
597 | set->variant = set->family == AF_INET | 599 | set->variant = set->family == AF_INET |
598 | ? &hash_ipportnet4_tvariant | 600 | ? &hash_ipportnet4_tvariant |
599 | : &hash_ipportnet6_tvariant; | 601 | : &hash_ipportnet6_tvariant; |
600 | 602 | ||
601 | if (set->family == AF_INET) | 603 | if (set->family == AF_INET) |
602 | hash_ipportnet4_gc_init(set); | 604 | hash_ipportnet4_gc_init(set); |
603 | else | 605 | else |
604 | hash_ipportnet6_gc_init(set); | 606 | hash_ipportnet6_gc_init(set); |
605 | } else { | 607 | } else { |
606 | set->variant = set->family == AF_INET | 608 | set->variant = set->family == AF_INET |
607 | ? &hash_ipportnet4_variant : &hash_ipportnet6_variant; | 609 | ? &hash_ipportnet4_variant : &hash_ipportnet6_variant; |
608 | } | 610 | } |
609 | 611 | ||
610 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", | 612 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", |
611 | set->name, jhash_size(h->table->htable_bits), | 613 | set->name, jhash_size(h->table->htable_bits), |
612 | h->table->htable_bits, h->maxelem, set->data, h->table); | 614 | h->table->htable_bits, h->maxelem, set->data, h->table); |
613 | 615 | ||
614 | return 0; | 616 | return 0; |
615 | } | 617 | } |
616 | 618 | ||
617 | static struct ip_set_type hash_ipportnet_type __read_mostly = { | 619 | static struct ip_set_type hash_ipportnet_type __read_mostly = { |
618 | .name = "hash:ip,port,net", | 620 | .name = "hash:ip,port,net", |
619 | .protocol = IPSET_PROTOCOL, | 621 | .protocol = IPSET_PROTOCOL, |
620 | .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, | 622 | .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, |
621 | .dimension = IPSET_DIM_THREE, | 623 | .dimension = IPSET_DIM_THREE, |
622 | .family = AF_UNSPEC, | 624 | .family = AF_UNSPEC, |
623 | .revision_min = 0, | 625 | .revision_min = 0, |
624 | /* 1 SCTP and UDPLITE support added */ | 626 | /* 1 SCTP and UDPLITE support added */ |
625 | .revision_max = 2, /* Range as input support for IPv4 added */ | 627 | .revision_max = 2, /* Range as input support for IPv4 added */ |
626 | .create = hash_ipportnet_create, | 628 | .create = hash_ipportnet_create, |
627 | .create_policy = { | 629 | .create_policy = { |
628 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, | 630 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, |
629 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, | 631 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, |
630 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, | 632 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, |
631 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, | 633 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, |
632 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 634 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
633 | }, | 635 | }, |
634 | .adt_policy = { | 636 | .adt_policy = { |
635 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, | 637 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, |
636 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, | 638 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, |
637 | [IPSET_ATTR_IP2] = { .type = NLA_NESTED }, | 639 | [IPSET_ATTR_IP2] = { .type = NLA_NESTED }, |
638 | [IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED }, | 640 | [IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED }, |
639 | [IPSET_ATTR_PORT] = { .type = NLA_U16 }, | 641 | [IPSET_ATTR_PORT] = { .type = NLA_U16 }, |
640 | [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, | 642 | [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, |
641 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, | 643 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, |
642 | [IPSET_ATTR_CIDR2] = { .type = NLA_U8 }, | 644 | [IPSET_ATTR_CIDR2] = { .type = NLA_U8 }, |
643 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, | 645 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, |
644 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 646 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
645 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, | 647 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, |
646 | }, | 648 | }, |
647 | .me = THIS_MODULE, | 649 | .me = THIS_MODULE, |
648 | }; | 650 | }; |
649 | 651 | ||
650 | static int __init | 652 | static int __init |
651 | hash_ipportnet_init(void) | 653 | hash_ipportnet_init(void) |
652 | { | 654 | { |
653 | return ip_set_type_register(&hash_ipportnet_type); | 655 | return ip_set_type_register(&hash_ipportnet_type); |
654 | } | 656 | } |
655 | 657 | ||
656 | static void __exit | 658 | static void __exit |
657 | hash_ipportnet_fini(void) | 659 | hash_ipportnet_fini(void) |
658 | { | 660 | { |
659 | ip_set_type_unregister(&hash_ipportnet_type); | 661 | ip_set_type_unregister(&hash_ipportnet_type); |
660 | } | 662 | } |
661 | 663 | ||
662 | module_init(hash_ipportnet_init); | 664 | module_init(hash_ipportnet_init); |
663 | module_exit(hash_ipportnet_fini); | 665 | module_exit(hash_ipportnet_fini); |
664 | 666 |
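The IPv4 uadt handler above accepts ranges (IPSET_ATTR_IP_TO, IPSET_ATTR_IP2_TO) and expands them into a minimal sequence of CIDR blocks by repeatedly calling ip_set_range_to_cidr() and restarting from the returned last address plus one. The userspace sketch below reimplements that splitting logic for illustration only; demo_range_to_cidr() is an assumed stand-in, not the kernel helper.

/*
 * Userspace illustration of splitting an IPv4 range into CIDR blocks,
 * mirroring the "while (!after(ip, ip_to)) ... ip = last + 1" loops above.
 * demo_range_to_cidr() is an assumption, not the kernel's helper.
 */
#include <stdio.h>
#include <stdint.h>

/* Return the last address of the emitted block and store its prefix length. */
static uint32_t demo_range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	uint8_t prefix = 32;

	/* Grow the block while it stays aligned on 'from' and within 'to'. */
	while (prefix > 0) {
		uint64_t size = 1ull << (32 - (prefix - 1));

		if ((from & (uint32_t)(size - 1)) != 0 ||
		    (uint64_t)from + size - 1 > to)
			break;
		prefix--;
	}
	*cidr = prefix;
	return from + (uint32_t)((1ull << (32 - prefix)) - 1);
}

int main(void)
{
	/* Hypothetical range, e.g. 10.0.0.3-10.0.0.40 given to an add command. */
	uint32_t from = 0x0a000003, to = 0x0a000028;
	uint8_t cidr;

	for (;;) {
		uint32_t last = demo_range_to_cidr(from, to, &cidr);

		printf("%u.%u.%u.%u/%u\n", from >> 24, (from >> 16) & 0xff,
		       (from >> 8) & 0xff, from & 0xff, (unsigned)cidr);
		if (last >= to)
			break;
		from = last + 1;
	}
	return 0;
}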
net/netfilter/ipset/ip_set_hash_net.c
1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 as | 4 | * it under the terms of the GNU General Public License version 2 as |
5 | * published by the Free Software Foundation. | 5 | * published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | /* Kernel module implementing an IP set type: the hash:net type */ | 8 | /* Kernel module implementing an IP set type: the hash:net type */ |
9 | 9 | ||
10 | #include <linux/jhash.h> | 10 | #include <linux/jhash.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <net/ip.h> | 16 | #include <net/ip.h> |
17 | #include <net/ipv6.h> | 17 | #include <net/ipv6.h> |
18 | #include <net/netlink.h> | 18 | #include <net/netlink.h> |
19 | 19 | ||
20 | #include <linux/netfilter.h> | 20 | #include <linux/netfilter.h> |
21 | #include <linux/netfilter/ipset/pfxlen.h> | 21 | #include <linux/netfilter/ipset/pfxlen.h> |
22 | #include <linux/netfilter/ipset/ip_set.h> | 22 | #include <linux/netfilter/ipset/ip_set.h> |
23 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 23 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
24 | #include <linux/netfilter/ipset/ip_set_hash.h> | 24 | #include <linux/netfilter/ipset/ip_set_hash.h> |
25 | 25 | ||
26 | MODULE_LICENSE("GPL"); | 26 | MODULE_LICENSE("GPL"); |
27 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); | 27 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); |
28 | MODULE_DESCRIPTION("hash:net type of IP sets"); | 28 | MODULE_DESCRIPTION("hash:net type of IP sets"); |
29 | MODULE_ALIAS("ip_set_hash:net"); | 29 | MODULE_ALIAS("ip_set_hash:net"); |
30 | 30 | ||
31 | /* Type specific function prefix */ | 31 | /* Type specific function prefix */ |
32 | #define TYPE hash_net | 32 | #define TYPE hash_net |
33 | 33 | ||
34 | static bool | 34 | static bool |
35 | hash_net_same_set(const struct ip_set *a, const struct ip_set *b); | 35 | hash_net_same_set(const struct ip_set *a, const struct ip_set *b); |
36 | 36 | ||
37 | #define hash_net4_same_set hash_net_same_set | 37 | #define hash_net4_same_set hash_net_same_set |
38 | #define hash_net6_same_set hash_net_same_set | 38 | #define hash_net6_same_set hash_net_same_set |
39 | 39 | ||
40 | /* The type variant functions: IPv4 */ | 40 | /* The type variant functions: IPv4 */ |
41 | 41 | ||
42 | /* Member elements without timeout */ | 42 | /* Member elements without timeout */ |
43 | struct hash_net4_elem { | 43 | struct hash_net4_elem { |
44 | __be32 ip; | 44 | __be32 ip; |
45 | u16 padding0; | 45 | u16 padding0; |
46 | u8 padding1; | 46 | u8 padding1; |
47 | u8 cidr; | 47 | u8 cidr; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | /* Member elements with timeout support */ | 50 | /* Member elements with timeout support */ |
51 | struct hash_net4_telem { | 51 | struct hash_net4_telem { |
52 | __be32 ip; | 52 | __be32 ip; |
53 | u16 padding0; | 53 | u16 padding0; |
54 | u8 padding1; | 54 | u8 padding1; |
55 | u8 cidr; | 55 | u8 cidr; |
56 | unsigned long timeout; | 56 | unsigned long timeout; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static inline bool | 59 | static inline bool |
60 | hash_net4_data_equal(const struct hash_net4_elem *ip1, | 60 | hash_net4_data_equal(const struct hash_net4_elem *ip1, |
61 | const struct hash_net4_elem *ip2) | 61 | const struct hash_net4_elem *ip2, |
| | 62 | u32 *multi) |
62 | { | 63 | { |
63 | return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr; | 64 | return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr; |
64 | } | 65 | } |
65 | 66 | ||
66 | static inline bool | 67 | static inline bool |
67 | hash_net4_data_isnull(const struct hash_net4_elem *elem) | 68 | hash_net4_data_isnull(const struct hash_net4_elem *elem) |
68 | { | 69 | { |
69 | return elem->cidr == 0; | 70 | return elem->cidr == 0; |
70 | } | 71 | } |
71 | 72 | ||
72 | static inline void | 73 | static inline void |
73 | hash_net4_data_copy(struct hash_net4_elem *dst, | 74 | hash_net4_data_copy(struct hash_net4_elem *dst, |
74 | const struct hash_net4_elem *src) | 75 | const struct hash_net4_elem *src) |
75 | { | 76 | { |
76 | dst->ip = src->ip; | 77 | dst->ip = src->ip; |
77 | dst->cidr = src->cidr; | 78 | dst->cidr = src->cidr; |
78 | } | 79 | } |
79 | 80 | ||
80 | static inline void | 81 | static inline void |
81 | hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr) | 82 | hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr) |
82 | { | 83 | { |
83 | elem->ip &= ip_set_netmask(cidr); | 84 | elem->ip &= ip_set_netmask(cidr); |
84 | elem->cidr = cidr; | 85 | elem->cidr = cidr; |
85 | } | 86 | } |
86 | 87 | ||
87 | /* Zero CIDR values cannot be stored */ | 88 | /* Zero CIDR values cannot be stored */ |
88 | static inline void | 89 | static inline void |
89 | hash_net4_data_zero_out(struct hash_net4_elem *elem) | 90 | hash_net4_data_zero_out(struct hash_net4_elem *elem) |
90 | { | 91 | { |
91 | elem->cidr = 0; | 92 | elem->cidr = 0; |
92 | } | 93 | } |
93 | 94 | ||
94 | static bool | 95 | static bool |
95 | hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data) | 96 | hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data) |
96 | { | 97 | { |
97 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); | 98 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); |
98 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 99 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
99 | return 0; | 100 | return 0; |
100 | 101 | ||
101 | nla_put_failure: | 102 | nla_put_failure: |
102 | return 1; | 103 | return 1; |
103 | } | 104 | } |
104 | 105 | ||
105 | static bool | 106 | static bool |
106 | hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data) | 107 | hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data) |
107 | { | 108 | { |
108 | const struct hash_net4_telem *tdata = | 109 | const struct hash_net4_telem *tdata = |
109 | (const struct hash_net4_telem *)data; | 110 | (const struct hash_net4_telem *)data; |
110 | 111 | ||
111 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); | 112 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); |
112 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr); | 113 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr); |
113 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 114 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
114 | htonl(ip_set_timeout_get(tdata->timeout))); | 115 | htonl(ip_set_timeout_get(tdata->timeout))); |
115 | 116 | ||
116 | return 0; | 117 | return 0; |
117 | 118 | ||
118 | nla_put_failure: | 119 | nla_put_failure: |
119 | return 1; | 120 | return 1; |
120 | } | 121 | } |
121 | 122 | ||
122 | #define IP_SET_HASH_WITH_NETS | 123 | #define IP_SET_HASH_WITH_NETS |
123 | 124 | ||
124 | #define PF 4 | 125 | #define PF 4 |
125 | #define HOST_MASK 32 | 126 | #define HOST_MASK 32 |
126 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 127 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
127 | 128 | ||
128 | static inline void | 129 | static inline void |
129 | hash_net4_data_next(struct ip_set_hash *h, | 130 | hash_net4_data_next(struct ip_set_hash *h, |
130 | const struct hash_net4_elem *d) | 131 | const struct hash_net4_elem *d) |
131 | { | 132 | { |
132 | h->next.ip = ntohl(d->ip); | 133 | h->next.ip = ntohl(d->ip); |
133 | } | 134 | } |
134 | 135 | ||
135 | static int | 136 | static int |
136 | hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb, | 137 | hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb, |
137 | const struct xt_action_param *par, | 138 | const struct xt_action_param *par, |
138 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 139 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
139 | { | 140 | { |
140 | const struct ip_set_hash *h = set->data; | 141 | const struct ip_set_hash *h = set->data; |
141 | ipset_adtfn adtfn = set->variant->adt[adt]; | 142 | ipset_adtfn adtfn = set->variant->adt[adt]; |
142 | struct hash_net4_elem data = { | 143 | struct hash_net4_elem data = { |
143 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK | 144 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK |
144 | }; | 145 | }; |
145 | 146 | ||
146 | if (data.cidr == 0) | 147 | if (data.cidr == 0) |
147 | return -EINVAL; | 148 | return -EINVAL; |
148 | if (adt == IPSET_TEST) | 149 | if (adt == IPSET_TEST) |
149 | data.cidr = HOST_MASK; | 150 | data.cidr = HOST_MASK; |
150 | 151 | ||
151 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); | 152 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); |
152 | data.ip &= ip_set_netmask(data.cidr); | 153 | data.ip &= ip_set_netmask(data.cidr); |
153 | 154 | ||
154 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 155 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
155 | } | 156 | } |
156 | 157 | ||
157 | static int | 158 | static int |
158 | hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], | 159 | hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], |
159 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 160 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
160 | { | 161 | { |
161 | const struct ip_set_hash *h = set->data; | 162 | const struct ip_set_hash *h = set->data; |
162 | ipset_adtfn adtfn = set->variant->adt[adt]; | 163 | ipset_adtfn adtfn = set->variant->adt[adt]; |
163 | struct hash_net4_elem data = { .cidr = HOST_MASK }; | 164 | struct hash_net4_elem data = { .cidr = HOST_MASK }; |
164 | u32 timeout = h->timeout; | 165 | u32 timeout = h->timeout; |
165 | u32 ip = 0, ip_to, last; | 166 | u32 ip = 0, ip_to, last; |
166 | int ret; | 167 | int ret; |
167 | 168 | ||
168 | if (unlikely(!tb[IPSET_ATTR_IP] || | 169 | if (unlikely(!tb[IPSET_ATTR_IP] || |
169 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 170 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
170 | return -IPSET_ERR_PROTOCOL; | 171 | return -IPSET_ERR_PROTOCOL; |
171 | 172 | ||
172 | if (tb[IPSET_ATTR_LINENO]) | 173 | if (tb[IPSET_ATTR_LINENO]) |
173 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 174 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
174 | 175 | ||
175 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); | 176 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); |
176 | if (ret) | 177 | if (ret) |
177 | return ret; | 178 | return ret; |
178 | 179 | ||
179 | if (tb[IPSET_ATTR_CIDR]) { | 180 | if (tb[IPSET_ATTR_CIDR]) { |
180 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 181 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
181 | if (!data.cidr) | 182 | if (!data.cidr) |
182 | return -IPSET_ERR_INVALID_CIDR; | 183 | return -IPSET_ERR_INVALID_CIDR; |
183 | } | 184 | } |
184 | 185 | ||
185 | if (tb[IPSET_ATTR_TIMEOUT]) { | 186 | if (tb[IPSET_ATTR_TIMEOUT]) { |
186 | if (!with_timeout(h->timeout)) | 187 | if (!with_timeout(h->timeout)) |
187 | return -IPSET_ERR_TIMEOUT; | 188 | return -IPSET_ERR_TIMEOUT; |
188 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 189 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
189 | } | 190 | } |
190 | 191 | ||
191 | if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { | 192 | if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { |
192 | data.ip = htonl(ip & ip_set_hostmask(data.cidr)); | 193 | data.ip = htonl(ip & ip_set_hostmask(data.cidr)); |
193 | ret = adtfn(set, &data, timeout, flags); | 194 | ret = adtfn(set, &data, timeout, flags); |
194 | return ip_set_eexist(ret, flags) ? 0 : ret; | 195 | return ip_set_eexist(ret, flags) ? 0 : ret; |
195 | } | 196 | } |
196 | 197 | ||
197 | ip_to = ip; | 198 | ip_to = ip; |
198 | if (tb[IPSET_ATTR_IP_TO]) { | 199 | if (tb[IPSET_ATTR_IP_TO]) { |
199 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 200 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
200 | if (ret) | 201 | if (ret) |
201 | return ret; | 202 | return ret; |
202 | if (ip_to < ip) | 203 | if (ip_to < ip) |
203 | swap(ip, ip_to); | 204 | swap(ip, ip_to); |
204 | if (ip + UINT_MAX == ip_to) | 205 | if (ip + UINT_MAX == ip_to) |
205 | return -IPSET_ERR_HASH_RANGE; | 206 | return -IPSET_ERR_HASH_RANGE; |
206 | } | 207 | } |
207 | if (retried) | 208 | if (retried) |
208 | ip = h->next.ip; | 209 | ip = h->next.ip; |
209 | while (!after(ip, ip_to)) { | 210 | while (!after(ip, ip_to)) { |
210 | data.ip = htonl(ip); | 211 | data.ip = htonl(ip); |
211 | last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); | 212 | last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); |
212 | ret = adtfn(set, &data, timeout, flags); | 213 | ret = adtfn(set, &data, timeout, flags); |
213 | if (ret && !ip_set_eexist(ret, flags)) | 214 | if (ret && !ip_set_eexist(ret, flags)) |
214 | return ret; | 215 | return ret; |
215 | else | 216 | else |
216 | ret = 0; | 217 | ret = 0; |
217 | ip = last + 1; | 218 | ip = last + 1; |
218 | } | 219 | } |
219 | return ret; | 220 | return ret; |
220 | } | 221 | } |
221 | 222 | ||
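The IPv4 userspace add path above accepts an IP-to-IP range and, in its final loop, splits the range into aligned CIDR blocks with ip_set_range_to_cidr(), adding one block per iteration. A minimal user-space sketch of that decomposition follows; the helper and main() are illustrative only, not the kernel's implementation:

#include <stdio.h>
#include <stdint.h>

/* Return the last address of the largest CIDR block that starts at
 * 'from', stays aligned, and does not run past 'to'; store the prefix. */
static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	uint8_t prefix = 32;
	uint64_t size = 1;		/* number of addresses in the block */

	while (prefix > 0) {
		uint64_t bigger = size << 1;

		/* stop when the next power-of-two block would be
		 * misaligned on 'from' or overshoot the range end */
		if ((from & (bigger - 1)) != 0 || from + bigger - 1 > to)
			break;
		size = bigger;
		prefix--;
	}
	*cidr = prefix;
	return (uint32_t)(from + size - 1);
}

int main(void)
{
	uint32_t ip = 0x0A000003, ip_to = 0x0A000011;	/* 10.0.0.3 - 10.0.0.17 */
	uint8_t cidr;

	while (ip <= ip_to) {
		uint32_t last = range_to_cidr(ip, ip_to, &cidr);

		printf("%u.%u.%u.%u/%u\n", ip >> 24, (ip >> 16) & 0xFF,
		       (ip >> 8) & 0xFF, ip & 0xFF, cidr);
		if (last == UINT32_MAX)	/* avoid wrap-around, cf. after() */
			break;
		ip = last + 1;
	}
	return 0;
}

For 10.0.0.3-10.0.0.17 this prints 10.0.0.3/32, 10.0.0.4/30, 10.0.0.8/29 and 10.0.0.16/31, which is the kind of block list the adtfn() calls in the loop above receive.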
222 | static bool | 223 | static bool |
223 | hash_net_same_set(const struct ip_set *a, const struct ip_set *b) | 224 | hash_net_same_set(const struct ip_set *a, const struct ip_set *b) |
224 | { | 225 | { |
225 | const struct ip_set_hash *x = a->data; | 226 | const struct ip_set_hash *x = a->data; |
226 | const struct ip_set_hash *y = b->data; | 227 | const struct ip_set_hash *y = b->data; |
227 | 228 | ||
228 | /* Resizing changes htable_bits, so we ignore it */ | 229 | /* Resizing changes htable_bits, so we ignore it */ |
229 | return x->maxelem == y->maxelem && | 230 | return x->maxelem == y->maxelem && |
230 | x->timeout == y->timeout; | 231 | x->timeout == y->timeout; |
231 | } | 232 | } |
232 | 233 | ||
233 | /* The type variant functions: IPv6 */ | 234 | /* The type variant functions: IPv6 */ |
234 | 235 | ||
235 | struct hash_net6_elem { | 236 | struct hash_net6_elem { |
236 | union nf_inet_addr ip; | 237 | union nf_inet_addr ip; |
237 | u16 padding0; | 238 | u16 padding0; |
238 | u8 padding1; | 239 | u8 padding1; |
239 | u8 cidr; | 240 | u8 cidr; |
240 | }; | 241 | }; |
241 | 242 | ||
242 | struct hash_net6_telem { | 243 | struct hash_net6_telem { |
243 | union nf_inet_addr ip; | 244 | union nf_inet_addr ip; |
244 | u16 padding0; | 245 | u16 padding0; |
245 | u8 padding1; | 246 | u8 padding1; |
246 | u8 cidr; | 247 | u8 cidr; |
247 | unsigned long timeout; | 248 | unsigned long timeout; |
248 | }; | 249 | }; |
249 | 250 | ||
250 | static inline bool | 251 | static inline bool |
251 | hash_net6_data_equal(const struct hash_net6_elem *ip1, | 252 | hash_net6_data_equal(const struct hash_net6_elem *ip1, |
252 | const struct hash_net6_elem *ip2) | 253 | const struct hash_net6_elem *ip2, |
254 | u32 *multi) | ||
253 | { | 255 | { |
254 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && | 256 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
255 | ip1->cidr == ip2->cidr; | 257 | ip1->cidr == ip2->cidr; |
256 | } | 258 | } |
257 | 259 | ||
258 | static inline bool | 260 | static inline bool |
259 | hash_net6_data_isnull(const struct hash_net6_elem *elem) | 261 | hash_net6_data_isnull(const struct hash_net6_elem *elem) |
260 | { | 262 | { |
261 | return elem->cidr == 0; | 263 | return elem->cidr == 0; |
262 | } | 264 | } |
263 | 265 | ||
264 | static inline void | 266 | static inline void |
265 | hash_net6_data_copy(struct hash_net6_elem *dst, | 267 | hash_net6_data_copy(struct hash_net6_elem *dst, |
266 | const struct hash_net6_elem *src) | 268 | const struct hash_net6_elem *src) |
267 | { | 269 | { |
268 | ipv6_addr_copy(&dst->ip.in6, &src->ip.in6); | 270 | ipv6_addr_copy(&dst->ip.in6, &src->ip.in6); |
269 | dst->cidr = src->cidr; | 271 | dst->cidr = src->cidr; |
270 | } | 272 | } |
271 | 273 | ||
272 | static inline void | 274 | static inline void |
273 | hash_net6_data_zero_out(struct hash_net6_elem *elem) | 275 | hash_net6_data_zero_out(struct hash_net6_elem *elem) |
274 | { | 276 | { |
275 | elem->cidr = 0; | 277 | elem->cidr = 0; |
276 | } | 278 | } |
277 | 279 | ||
278 | static inline void | 280 | static inline void |
279 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) | 281 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) |
280 | { | 282 | { |
281 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; | 283 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; |
282 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; | 284 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; |
283 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; | 285 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; |
284 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; | 286 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; |
285 | } | 287 | } |
286 | 288 | ||
287 | static inline void | 289 | static inline void |
288 | hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr) | 290 | hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr) |
289 | { | 291 | { |
290 | ip6_netmask(&elem->ip, cidr); | 292 | ip6_netmask(&elem->ip, cidr); |
291 | elem->cidr = cidr; | 293 | elem->cidr = cidr; |
292 | } | 294 | } |
293 | 295 | ||
294 | static bool | 296 | static bool |
295 | hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data) | 297 | hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data) |
296 | { | 298 | { |
297 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); | 299 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); |
298 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 300 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
299 | return 0; | 301 | return 0; |
300 | 302 | ||
301 | nla_put_failure: | 303 | nla_put_failure: |
302 | return 1; | 304 | return 1; |
303 | } | 305 | } |
304 | 306 | ||
305 | static bool | 307 | static bool |
306 | hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data) | 308 | hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data) |
307 | { | 309 | { |
308 | const struct hash_net6_telem *e = | 310 | const struct hash_net6_telem *e = |
309 | (const struct hash_net6_telem *)data; | 311 | (const struct hash_net6_telem *)data; |
310 | 312 | ||
311 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); | 313 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); |
312 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr); | 314 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr); |
313 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 315 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
314 | htonl(ip_set_timeout_get(e->timeout))); | 316 | htonl(ip_set_timeout_get(e->timeout))); |
315 | return 0; | 317 | return 0; |
316 | 318 | ||
317 | nla_put_failure: | 319 | nla_put_failure: |
318 | return 1; | 320 | return 1; |
319 | } | 321 | } |
320 | 322 | ||
321 | #undef PF | 323 | #undef PF |
322 | #undef HOST_MASK | 324 | #undef HOST_MASK |
323 | 325 | ||
324 | #define PF 6 | 326 | #define PF 6 |
325 | #define HOST_MASK 128 | 327 | #define HOST_MASK 128 |
326 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 328 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
327 | 329 | ||
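Redefining PF and HOST_MASK and re-including ip_set_ahash.h, as done just above, stamps out the IPv6 variant of the set from the same template header that already produced the IPv4 one. A stand-alone illustration of the same preprocessor technique, compressed into a single macro rather than a re-included header (all names in the sketch are made up):

#include <stdio.h>

/* One "template" body expanded for two address families; the real code
 * achieves this by re-including ip_set_ahash.h with PF redefined. */
#define DEFINE_NET_INFO(pf, host_mask)					\
static void hash_net##pf##_info(void)					\
{									\
	printf("family PF=%d, host mask /%d\n", pf, host_mask);	\
}

DEFINE_NET_INFO(4, 32)
DEFINE_NET_INFO(6, 128)

int main(void)
{
	hash_net4_info();
	hash_net6_info();
	return 0;
}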
328 | static inline void | 330 | static inline void |
329 | hash_net6_data_next(struct ip_set_hash *h, | 331 | hash_net6_data_next(struct ip_set_hash *h, |
330 | const struct hash_net6_elem *d) | 332 | const struct hash_net6_elem *d) |
331 | { | 333 | { |
332 | } | 334 | } |
333 | 335 | ||
334 | static int | 336 | static int |
335 | hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb, | 337 | hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb, |
336 | const struct xt_action_param *par, | 338 | const struct xt_action_param *par, |
337 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 339 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
338 | { | 340 | { |
339 | const struct ip_set_hash *h = set->data; | 341 | const struct ip_set_hash *h = set->data; |
340 | ipset_adtfn adtfn = set->variant->adt[adt]; | 342 | ipset_adtfn adtfn = set->variant->adt[adt]; |
341 | struct hash_net6_elem data = { | 343 | struct hash_net6_elem data = { |
342 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK | 344 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK |
343 | }; | 345 | }; |
344 | 346 | ||
345 | if (data.cidr == 0) | 347 | if (data.cidr == 0) |
346 | return -EINVAL; | 348 | return -EINVAL; |
347 | if (adt == IPSET_TEST) | 349 | if (adt == IPSET_TEST) |
348 | data.cidr = HOST_MASK; | 350 | data.cidr = HOST_MASK; |
349 | 351 | ||
350 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); | 352 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); |
351 | ip6_netmask(&data.ip, data.cidr); | 353 | ip6_netmask(&data.ip, data.cidr); |
352 | 354 | ||
353 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 355 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
354 | } | 356 | } |
355 | 357 | ||
356 | static int | 358 | static int |
357 | hash_net6_uadt(struct ip_set *set, struct nlattr *tb[], | 359 | hash_net6_uadt(struct ip_set *set, struct nlattr *tb[], |
358 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 360 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
359 | { | 361 | { |
360 | const struct ip_set_hash *h = set->data; | 362 | const struct ip_set_hash *h = set->data; |
361 | ipset_adtfn adtfn = set->variant->adt[adt]; | 363 | ipset_adtfn adtfn = set->variant->adt[adt]; |
362 | struct hash_net6_elem data = { .cidr = HOST_MASK }; | 364 | struct hash_net6_elem data = { .cidr = HOST_MASK }; |
363 | u32 timeout = h->timeout; | 365 | u32 timeout = h->timeout; |
364 | int ret; | 366 | int ret; |
365 | 367 | ||
366 | if (unlikely(!tb[IPSET_ATTR_IP] || | 368 | if (unlikely(!tb[IPSET_ATTR_IP] || |
367 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 369 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
368 | return -IPSET_ERR_PROTOCOL; | 370 | return -IPSET_ERR_PROTOCOL; |
369 | if (unlikely(tb[IPSET_ATTR_IP_TO])) | 371 | if (unlikely(tb[IPSET_ATTR_IP_TO])) |
370 | return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; | 372 | return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; |
371 | 373 | ||
372 | if (tb[IPSET_ATTR_LINENO]) | 374 | if (tb[IPSET_ATTR_LINENO]) |
373 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 375 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
374 | 376 | ||
375 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); | 377 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); |
376 | if (ret) | 378 | if (ret) |
377 | return ret; | 379 | return ret; |
378 | 380 | ||
379 | if (tb[IPSET_ATTR_CIDR]) | 381 | if (tb[IPSET_ATTR_CIDR]) |
380 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 382 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
381 | 383 | ||
382 | if (!data.cidr) | 384 | if (!data.cidr) |
383 | return -IPSET_ERR_INVALID_CIDR; | 385 | return -IPSET_ERR_INVALID_CIDR; |
384 | 386 | ||
385 | ip6_netmask(&data.ip, data.cidr); | 387 | ip6_netmask(&data.ip, data.cidr); |
386 | 388 | ||
387 | if (tb[IPSET_ATTR_TIMEOUT]) { | 389 | if (tb[IPSET_ATTR_TIMEOUT]) { |
388 | if (!with_timeout(h->timeout)) | 390 | if (!with_timeout(h->timeout)) |
389 | return -IPSET_ERR_TIMEOUT; | 391 | return -IPSET_ERR_TIMEOUT; |
390 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 392 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
391 | } | 393 | } |
392 | 394 | ||
393 | ret = adtfn(set, &data, timeout, flags); | 395 | ret = adtfn(set, &data, timeout, flags); |
394 | 396 | ||
395 | return ip_set_eexist(ret, flags) ? 0 : ret; | 397 | return ip_set_eexist(ret, flags) ? 0 : ret; |
396 | } | 398 | } |
397 | 399 | ||
398 | /* Create hash:net type of sets */ | 400 | /* Create hash:net type of sets */ |
399 | 401 | ||
400 | static int | 402 | static int |
401 | hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags) | 403 | hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags) |
402 | { | 404 | { |
403 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; | 405 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; |
404 | struct ip_set_hash *h; | 406 | struct ip_set_hash *h; |
405 | u8 hbits; | 407 | u8 hbits; |
406 | 408 | ||
407 | if (!(set->family == AF_INET || set->family == AF_INET6)) | 409 | if (!(set->family == AF_INET || set->family == AF_INET6)) |
408 | return -IPSET_ERR_INVALID_FAMILY; | 410 | return -IPSET_ERR_INVALID_FAMILY; |
409 | 411 | ||
410 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || | 412 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || |
411 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || | 413 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || |
412 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 414 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
413 | return -IPSET_ERR_PROTOCOL; | 415 | return -IPSET_ERR_PROTOCOL; |
414 | 416 | ||
415 | if (tb[IPSET_ATTR_HASHSIZE]) { | 417 | if (tb[IPSET_ATTR_HASHSIZE]) { |
416 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); | 418 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); |
417 | if (hashsize < IPSET_MIMINAL_HASHSIZE) | 419 | if (hashsize < IPSET_MIMINAL_HASHSIZE) |
418 | hashsize = IPSET_MIMINAL_HASHSIZE; | 420 | hashsize = IPSET_MIMINAL_HASHSIZE; |
419 | } | 421 | } |
420 | 422 | ||
421 | if (tb[IPSET_ATTR_MAXELEM]) | 423 | if (tb[IPSET_ATTR_MAXELEM]) |
422 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); | 424 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); |
423 | 425 | ||
424 | h = kzalloc(sizeof(*h) | 426 | h = kzalloc(sizeof(*h) |
425 | + sizeof(struct ip_set_hash_nets) | 427 | + sizeof(struct ip_set_hash_nets) |
426 | * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); | 428 | * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); |
427 | if (!h) | 429 | if (!h) |
428 | return -ENOMEM; | 430 | return -ENOMEM; |
429 | 431 | ||
430 | h->maxelem = maxelem; | 432 | h->maxelem = maxelem; |
431 | get_random_bytes(&h->initval, sizeof(h->initval)); | 433 | get_random_bytes(&h->initval, sizeof(h->initval)); |
432 | h->timeout = IPSET_NO_TIMEOUT; | 434 | h->timeout = IPSET_NO_TIMEOUT; |
433 | 435 | ||
434 | hbits = htable_bits(hashsize); | 436 | hbits = htable_bits(hashsize); |
435 | h->table = ip_set_alloc( | 437 | h->table = ip_set_alloc( |
436 | sizeof(struct htable) | 438 | sizeof(struct htable) |
437 | + jhash_size(hbits) * sizeof(struct hbucket)); | 439 | + jhash_size(hbits) * sizeof(struct hbucket)); |
438 | if (!h->table) { | 440 | if (!h->table) { |
439 | kfree(h); | 441 | kfree(h); |
440 | return -ENOMEM; | 442 | return -ENOMEM; |
441 | } | 443 | } |
442 | h->table->htable_bits = hbits; | 444 | h->table->htable_bits = hbits; |
443 | 445 | ||
444 | set->data = h; | 446 | set->data = h; |
445 | 447 | ||
446 | if (tb[IPSET_ATTR_TIMEOUT]) { | 448 | if (tb[IPSET_ATTR_TIMEOUT]) { |
447 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 449 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
448 | 450 | ||
449 | set->variant = set->family == AF_INET | 451 | set->variant = set->family == AF_INET |
450 | ? &hash_net4_tvariant : &hash_net6_tvariant; | 452 | ? &hash_net4_tvariant : &hash_net6_tvariant; |
451 | 453 | ||
452 | if (set->family == AF_INET) | 454 | if (set->family == AF_INET) |
453 | hash_net4_gc_init(set); | 455 | hash_net4_gc_init(set); |
454 | else | 456 | else |
455 | hash_net6_gc_init(set); | 457 | hash_net6_gc_init(set); |
456 | } else { | 458 | } else { |
457 | set->variant = set->family == AF_INET | 459 | set->variant = set->family == AF_INET |
458 | ? &hash_net4_variant : &hash_net6_variant; | 460 | ? &hash_net4_variant : &hash_net6_variant; |
459 | } | 461 | } |
460 | 462 | ||
461 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", | 463 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", |
462 | set->name, jhash_size(h->table->htable_bits), | 464 | set->name, jhash_size(h->table->htable_bits), |
463 | h->table->htable_bits, h->maxelem, set->data, h->table); | 465 | h->table->htable_bits, h->maxelem, set->data, h->table); |
464 | 466 | ||
465 | return 0; | 467 | return 0; |
466 | } | 468 | } |
467 | 469 | ||
468 | static struct ip_set_type hash_net_type __read_mostly = { | 470 | static struct ip_set_type hash_net_type __read_mostly = { |
469 | .name = "hash:net", | 471 | .name = "hash:net", |
470 | .protocol = IPSET_PROTOCOL, | 472 | .protocol = IPSET_PROTOCOL, |
471 | .features = IPSET_TYPE_IP, | 473 | .features = IPSET_TYPE_IP, |
472 | .dimension = IPSET_DIM_ONE, | 474 | .dimension = IPSET_DIM_ONE, |
473 | .family = AF_UNSPEC, | 475 | .family = AF_UNSPEC, |
474 | .revision_min = 0, | 476 | .revision_min = 0, |
475 | .revision_max = 1, /* Range as input support for IPv4 added */ | 477 | .revision_max = 1, /* Range as input support for IPv4 added */ |
476 | .create = hash_net_create, | 478 | .create = hash_net_create, |
477 | .create_policy = { | 479 | .create_policy = { |
478 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, | 480 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, |
479 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, | 481 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, |
480 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, | 482 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, |
481 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, | 483 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, |
482 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 484 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
483 | }, | 485 | }, |
484 | .adt_policy = { | 486 | .adt_policy = { |
485 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, | 487 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, |
486 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, | 488 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, |
487 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, | 489 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, |
488 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 490 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
489 | }, | 491 | }, |
490 | .me = THIS_MODULE, | 492 | .me = THIS_MODULE, |
491 | }; | 493 | }; |
492 | 494 | ||
493 | static int __init | 495 | static int __init |
494 | hash_net_init(void) | 496 | hash_net_init(void) |
495 | { | 497 | { |
496 | return ip_set_type_register(&hash_net_type); | 498 | return ip_set_type_register(&hash_net_type); |
497 | } | 499 | } |
498 | 500 | ||
499 | static void __exit | 501 | static void __exit |
500 | hash_net_fini(void) | 502 | hash_net_fini(void) |
501 | { | 503 | { |
502 | ip_set_type_unregister(&hash_net_type); | 504 | ip_set_type_unregister(&hash_net_type); |
503 | } | 505 | } |
504 | 506 | ||
505 | module_init(hash_net_init); | 507 | module_init(hash_net_init); |
506 | module_exit(hash_net_fini); | 508 | module_exit(hash_net_fini); |
507 | 509 |
net/netfilter/ipset/ip_set_hash_netiface.c
1 | /* Copyright (C) 2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 1 | /* Copyright (C) 2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 as | 4 | * it under the terms of the GNU General Public License version 2 as |
5 | * published by the Free Software Foundation. | 5 | * published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | /* Kernel module implementing an IP set type: the hash:net,iface type */ | 8 | /* Kernel module implementing an IP set type: the hash:net,iface type */ |
9 | 9 | ||
10 | #include <linux/jhash.h> | 10 | #include <linux/jhash.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <linux/rbtree.h> | 16 | #include <linux/rbtree.h> |
17 | #include <net/ip.h> | 17 | #include <net/ip.h> |
18 | #include <net/ipv6.h> | 18 | #include <net/ipv6.h> |
19 | #include <net/netlink.h> | 19 | #include <net/netlink.h> |
20 | 20 | ||
21 | #include <linux/netfilter.h> | 21 | #include <linux/netfilter.h> |
22 | #include <linux/netfilter/ipset/pfxlen.h> | 22 | #include <linux/netfilter/ipset/pfxlen.h> |
23 | #include <linux/netfilter/ipset/ip_set.h> | 23 | #include <linux/netfilter/ipset/ip_set.h> |
24 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 24 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
25 | #include <linux/netfilter/ipset/ip_set_hash.h> | 25 | #include <linux/netfilter/ipset/ip_set_hash.h> |
26 | 26 | ||
27 | MODULE_LICENSE("GPL"); | 27 | MODULE_LICENSE("GPL"); |
28 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); | 28 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); |
29 | MODULE_DESCRIPTION("hash:net,iface type of IP sets"); | 29 | MODULE_DESCRIPTION("hash:net,iface type of IP sets"); |
30 | MODULE_ALIAS("ip_set_hash:net,iface"); | 30 | MODULE_ALIAS("ip_set_hash:net,iface"); |
31 | 31 | ||
32 | /* Interface name rbtree */ | 32 | /* Interface name rbtree */ |
33 | 33 | ||
34 | struct iface_node { | 34 | struct iface_node { |
35 | struct rb_node node; | 35 | struct rb_node node; |
36 | char iface[IFNAMSIZ]; | 36 | char iface[IFNAMSIZ]; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | #define iface_data(n) (rb_entry(n, struct iface_node, node)->iface) | 39 | #define iface_data(n) (rb_entry(n, struct iface_node, node)->iface) |
40 | 40 | ||
41 | static inline long | 41 | static inline long |
42 | ifname_compare(const char *_a, const char *_b) | 42 | ifname_compare(const char *_a, const char *_b) |
43 | { | 43 | { |
44 | const long *a = (const long *)_a; | 44 | const long *a = (const long *)_a; |
45 | const long *b = (const long *)_b; | 45 | const long *b = (const long *)_b; |
46 | 46 | ||
47 | BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); | 47 | BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); |
48 | if (a[0] != b[0]) | 48 | if (a[0] != b[0]) |
49 | return a[0] - b[0]; | 49 | return a[0] - b[0]; |
50 | if (IFNAMSIZ > sizeof(long)) { | 50 | if (IFNAMSIZ > sizeof(long)) { |
51 | if (a[1] != b[1]) | 51 | if (a[1] != b[1]) |
52 | return a[1] - b[1]; | 52 | return a[1] - b[1]; |
53 | } | 53 | } |
54 | if (IFNAMSIZ > 2 * sizeof(long)) { | 54 | if (IFNAMSIZ > 2 * sizeof(long)) { |
55 | if (a[2] != b[2]) | 55 | if (a[2] != b[2]) |
56 | return a[2] - b[2]; | 56 | return a[2] - b[2]; |
57 | } | 57 | } |
58 | if (IFNAMSIZ > 3 * sizeof(long)) { | 58 | if (IFNAMSIZ > 3 * sizeof(long)) { |
59 | if (a[3] != b[3]) | 59 | if (a[3] != b[3]) |
60 | return a[3] - b[3]; | 60 | return a[3] - b[3]; |
61 | } | 61 | } |
62 | return 0; | 62 | return 0; |
63 | } | 63 | } |
64 | 64 | ||
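ifname_compare() above treats the two IFNAMSIZ-byte buffers as arrays of long and compares them word by word, which is only safe because every name handed to it lives in a zero-filled, suitably aligned IFNAMSIZ buffer (kzalloc() in iface_add(), char iface[IFNAMSIZ] = {} in the uadt functions). A small user-space sketch of the same idea, with illustrative names:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* Force alignment and zero padding, as the kernel buffers provide. */
union name_buf {
	char s[IFNAMSIZ];
	long align[IFNAMSIZ / sizeof(long)];
};

static long word_compare(const union name_buf *a, const union name_buf *b)
{
	size_t i;

	/* Compare sizeof(long) bytes at a time instead of strcmp(). */
	for (i = 0; i < IFNAMSIZ / sizeof(long); i++)
		if (a->align[i] != b->align[i])
			return a->align[i] - b->align[i];
	return 0;
}

int main(void)
{
	union name_buf a = { .s = "eth0" }, b = { .s = "eth0" };

	/* Equal names compare equal because the padding bytes are zero. */
	printf("%ld\n", word_compare(&a, &b));
	return 0;
}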
65 | static void | 65 | static void |
66 | rbtree_destroy(struct rb_root *root) | 66 | rbtree_destroy(struct rb_root *root) |
67 | { | 67 | { |
68 | struct rb_node *p, *n = root->rb_node; | 68 | struct rb_node *p, *n = root->rb_node; |
69 | struct iface_node *node; | 69 | struct iface_node *node; |
70 | 70 | ||
71 | /* Non-recursive destroy, like in ext3 */ | 71 | /* Non-recursive destroy, like in ext3 */ |
72 | while (n) { | 72 | while (n) { |
73 | if (n->rb_left) { | 73 | if (n->rb_left) { |
74 | n = n->rb_left; | 74 | n = n->rb_left; |
75 | continue; | 75 | continue; |
76 | } | 76 | } |
77 | if (n->rb_right) { | 77 | if (n->rb_right) { |
78 | n = n->rb_right; | 78 | n = n->rb_right; |
79 | continue; | 79 | continue; |
80 | } | 80 | } |
81 | p = rb_parent(n); | 81 | p = rb_parent(n); |
82 | node = rb_entry(n, struct iface_node, node); | 82 | node = rb_entry(n, struct iface_node, node); |
83 | if (!p) | 83 | if (!p) |
84 | *root = RB_ROOT; | 84 | *root = RB_ROOT; |
85 | else if (p->rb_left == n) | 85 | else if (p->rb_left == n) |
86 | p->rb_left = NULL; | 86 | p->rb_left = NULL; |
87 | else if (p->rb_right == n) | 87 | else if (p->rb_right == n) |
88 | p->rb_right = NULL; | 88 | p->rb_right = NULL; |
89 | 89 | ||
90 | kfree(node); | 90 | kfree(node); |
91 | n = p; | 91 | n = p; |
92 | } | 92 | } |
93 | } | 93 | } |
94 | 94 | ||
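rbtree_destroy() above tears the tree down iteratively: walk to a leaf, unlink it from its parent, free it, then climb back up, so no recursion depth is needed however large the tree grows. The same pattern on a plain binary tree with explicit parent pointers (a sketch, not kernel code):

#include <stdlib.h>

struct node {
	struct node *left, *right, *parent;
};

static void tree_destroy(struct node **root)
{
	struct node *n = *root, *p;

	while (n) {
		if (n->left) {		/* descend as deep as possible... */
			n = n->left;
			continue;
		}
		if (n->right) {
			n = n->right;
			continue;
		}
		p = n->parent;		/* ...then unlink the leaf, */
		if (!p)
			*root = NULL;
		else if (p->left == n)
			p->left = NULL;
		else
			p->right = NULL;
		free(n);		/* free it and climb back up */
		n = p;
	}
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));

	root->left = calloc(1, sizeof(*root));
	root->left->parent = root;
	tree_destroy(&root);
	return root != NULL;		/* 0: the whole tree was freed */
}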
95 | static int | 95 | static int |
96 | iface_test(struct rb_root *root, const char **iface) | 96 | iface_test(struct rb_root *root, const char **iface) |
97 | { | 97 | { |
98 | struct rb_node *n = root->rb_node; | 98 | struct rb_node *n = root->rb_node; |
99 | 99 | ||
100 | while (n) { | 100 | while (n) { |
101 | const char *d = iface_data(n); | 101 | const char *d = iface_data(n); |
102 | int res = ifname_compare(*iface, d); | 102 | long res = ifname_compare(*iface, d); |
103 | 103 | ||
104 | if (res < 0) | 104 | if (res < 0) |
105 | n = n->rb_left; | 105 | n = n->rb_left; |
106 | else if (res > 0) | 106 | else if (res > 0) |
107 | n = n->rb_right; | 107 | n = n->rb_right; |
108 | else { | 108 | else { |
109 | *iface = d; | 109 | *iface = d; |
110 | return 1; | 110 | return 1; |
111 | } | 111 | } |
112 | } | 112 | } |
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
115 | 115 | ||
116 | static int | 116 | static int |
117 | iface_add(struct rb_root *root, const char **iface) | 117 | iface_add(struct rb_root *root, const char **iface) |
118 | { | 118 | { |
119 | struct rb_node **n = &(root->rb_node), *p = NULL; | 119 | struct rb_node **n = &(root->rb_node), *p = NULL; |
120 | struct iface_node *d; | 120 | struct iface_node *d; |
121 | 121 | ||
122 | while (*n) { | 122 | while (*n) { |
123 | char *ifname = iface_data(*n); | 123 | char *ifname = iface_data(*n); |
124 | int res = ifname_compare(*iface, ifname); | 124 | long res = ifname_compare(*iface, ifname); |
125 | 125 | ||
126 | p = *n; | 126 | p = *n; |
127 | if (res < 0) | 127 | if (res < 0) |
128 | n = &((*n)->rb_left); | 128 | n = &((*n)->rb_left); |
129 | else if (res > 0) | 129 | else if (res > 0) |
130 | n = &((*n)->rb_right); | 130 | n = &((*n)->rb_right); |
131 | else { | 131 | else { |
132 | *iface = ifname; | 132 | *iface = ifname; |
133 | return 0; | 133 | return 0; |
134 | } | 134 | } |
135 | } | 135 | } |
136 | 136 | ||
137 | d = kzalloc(sizeof(*d), GFP_ATOMIC); | 137 | d = kzalloc(sizeof(*d), GFP_ATOMIC); |
138 | if (!d) | 138 | if (!d) |
139 | return -ENOMEM; | 139 | return -ENOMEM; |
140 | strcpy(d->iface, *iface); | 140 | strcpy(d->iface, *iface); |
141 | 141 | ||
142 | rb_link_node(&d->node, p, n); | 142 | rb_link_node(&d->node, p, n); |
143 | rb_insert_color(&d->node, root); | 143 | rb_insert_color(&d->node, root); |
144 | 144 | ||
145 | *iface = d->iface; | 145 | *iface = d->iface; |
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
148 | 148 | ||
149 | /* Type specific function prefix */ | 149 | /* Type specific function prefix */ |
150 | #define TYPE hash_netiface | 150 | #define TYPE hash_netiface |
151 | 151 | ||
152 | static bool | 152 | static bool |
153 | hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b); | 153 | hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b); |
154 | 154 | ||
155 | #define hash_netiface4_same_set hash_netiface_same_set | 155 | #define hash_netiface4_same_set hash_netiface_same_set |
156 | #define hash_netiface6_same_set hash_netiface_same_set | 156 | #define hash_netiface6_same_set hash_netiface_same_set |
157 | 157 | ||
158 | #define STREQ(a, b) (strcmp(a, b) == 0) | 158 | #define STREQ(a, b) (strcmp(a, b) == 0) |
159 | 159 | ||
160 | /* The type variant functions: IPv4 */ | 160 | /* The type variant functions: IPv4 */ |
161 | 161 | ||
162 | struct hash_netiface4_elem_hashed { | ||
163 | __be32 ip; | ||
164 | u8 physdev; | ||
165 | u8 cidr; | ||
166 | u16 padding; | ||
167 | }; | ||
168 | |||
169 | #define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed) | ||
170 | |||
162 | /* Member elements without timeout */ | 171 | /* Member elements without timeout */ |
163 | struct hash_netiface4_elem { | 172 | struct hash_netiface4_elem { |
164 | __be32 ip; | 173 | __be32 ip; |
165 | const char *iface; | ||
166 | u8 physdev; | 174 | u8 physdev; |
167 | u8 cidr; | 175 | u8 cidr; |
168 | u16 padding; | 176 | u16 padding; |
177 | const char *iface; | ||
169 | }; | 178 | }; |
170 | 179 | ||
171 | /* Member elements with timeout support */ | 180 | /* Member elements with timeout support */ |
172 | struct hash_netiface4_telem { | 181 | struct hash_netiface4_telem { |
173 | __be32 ip; | 182 | __be32 ip; |
174 | const char *iface; | ||
175 | u8 physdev; | 183 | u8 physdev; |
176 | u8 cidr; | 184 | u8 cidr; |
177 | u16 padding; | 185 | u16 padding; |
186 | const char *iface; | ||
178 | unsigned long timeout; | 187 | unsigned long timeout; |
179 | }; | 188 | }; |
180 | 189 | ||
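The new hash_netiface4_elem_hashed structure and HKEY_DATALEN above limit the bytes that feed the hash to the leading ip/physdev/cidr/padding fields, which is why the iface pointer is moved behind them in both element layouts: entries that differ only in the trailing pointer (or timeout) still land in the same bucket and are told apart by the equality callback. A hedged sketch of the idea, using a toy FNV-1a hash in place of the kernel's jhash:

#include <stdio.h>
#include <stdint.h>

struct elem_hashed {		/* the bytes that feed the hash */
	uint32_t ip;
	uint8_t  physdev;
	uint8_t  cidr;
	uint16_t padding;
};

struct elem {			/* full element: hashed part first */
	struct elem_hashed k;
	const char *iface;	/* compared, but not hashed */
};

/* Toy FNV-1a stand-in for the kernel's jhash over HKEY_DATALEN bytes. */
static uint32_t hash_bytes(const void *data, size_t len)
{
	const unsigned char *p = data;
	uint32_t h = 2166136261u;

	while (len--) {
		h ^= *p++;
		h *= 16777619u;
	}
	return h;
}

int main(void)
{
	struct elem a = { .k = { .ip = 0x0A000000, .cidr = 24 }, .iface = "eth0" };
	struct elem b = { .k = { .ip = 0x0A000000, .cidr = 24 }, .iface = "eth1" };

	/* Same key bytes, so the same bucket, even though the trailing
	 * iface pointers differ; the equality callback sorts them out. */
	printf("%u %u\n", hash_bytes(&a.k, sizeof(a.k)),
	       hash_bytes(&b.k, sizeof(b.k)));
	return 0;
}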
181 | static inline bool | 190 | static inline bool |
182 | hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1, | 191 | hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1, |
183 | const struct hash_netiface4_elem *ip2) | 192 | const struct hash_netiface4_elem *ip2, |
193 | u32 *multi) | ||
184 | { | 194 | { |
185 | return ip1->ip == ip2->ip && | 195 | return ip1->ip == ip2->ip && |
186 | ip1->cidr == ip2->cidr && | 196 | ip1->cidr == ip2->cidr && |
197 | (++*multi) && | ||
187 | ip1->physdev == ip2->physdev && | 198 | ip1->physdev == ip2->physdev && |
188 | ip1->iface == ip2->iface; | 199 | ip1->iface == ip2->iface; |
189 | } | 200 | } |
190 | 201 | ||
191 | static inline bool | 202 | static inline bool |
192 | hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem) | 203 | hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem) |
193 | { | 204 | { |
194 | return elem->cidr == 0; | 205 | return elem->cidr == 0; |
195 | } | 206 | } |
196 | 207 | ||
197 | static inline void | 208 | static inline void |
198 | hash_netiface4_data_copy(struct hash_netiface4_elem *dst, | 209 | hash_netiface4_data_copy(struct hash_netiface4_elem *dst, |
199 | const struct hash_netiface4_elem *src) { | 210 | const struct hash_netiface4_elem *src) { |
200 | dst->ip = src->ip; | 211 | dst->ip = src->ip; |
201 | dst->cidr = src->cidr; | 212 | dst->cidr = src->cidr; |
202 | dst->physdev = src->physdev; | 213 | dst->physdev = src->physdev; |
203 | dst->iface = src->iface; | 214 | dst->iface = src->iface; |
204 | } | 215 | } |
205 | 216 | ||
206 | static inline void | 217 | static inline void |
207 | hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr) | 218 | hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr) |
208 | { | 219 | { |
209 | elem->ip &= ip_set_netmask(cidr); | 220 | elem->ip &= ip_set_netmask(cidr); |
210 | elem->cidr = cidr; | 221 | elem->cidr = cidr; |
211 | } | 222 | } |
212 | 223 | ||
213 | static inline void | 224 | static inline void |
214 | hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem) | 225 | hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem) |
215 | { | 226 | { |
216 | elem->cidr = 0; | 227 | elem->cidr = 0; |
217 | } | 228 | } |
218 | 229 | ||
219 | static bool | 230 | static bool |
220 | hash_netiface4_data_list(struct sk_buff *skb, | 231 | hash_netiface4_data_list(struct sk_buff *skb, |
221 | const struct hash_netiface4_elem *data) | 232 | const struct hash_netiface4_elem *data) |
222 | { | 233 | { |
223 | u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; | 234 | u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; |
224 | 235 | ||
225 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); | 236 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); |
226 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 237 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
227 | NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); | 238 | NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); |
228 | if (flags) | 239 | if (flags) |
229 | NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); | 240 | NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); |
230 | return 0; | 241 | return 0; |
231 | 242 | ||
232 | nla_put_failure: | 243 | nla_put_failure: |
233 | return 1; | 244 | return 1; |
234 | } | 245 | } |
235 | 246 | ||
236 | static bool | 247 | static bool |
237 | hash_netiface4_data_tlist(struct sk_buff *skb, | 248 | hash_netiface4_data_tlist(struct sk_buff *skb, |
238 | const struct hash_netiface4_elem *data) | 249 | const struct hash_netiface4_elem *data) |
239 | { | 250 | { |
240 | const struct hash_netiface4_telem *tdata = | 251 | const struct hash_netiface4_telem *tdata = |
241 | (const struct hash_netiface4_telem *)data; | 252 | (const struct hash_netiface4_telem *)data; |
242 | u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; | 253 | u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; |
243 | 254 | ||
244 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); | 255 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); |
245 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 256 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
246 | NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); | 257 | NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); |
247 | if (flags) | 258 | if (flags) |
248 | NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); | 259 | NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); |
249 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 260 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
250 | htonl(ip_set_timeout_get(tdata->timeout))); | 261 | htonl(ip_set_timeout_get(tdata->timeout))); |
251 | 262 | ||
252 | return 0; | 263 | return 0; |
253 | 264 | ||
254 | nla_put_failure: | 265 | nla_put_failure: |
255 | return 1; | 266 | return 1; |
256 | } | 267 | } |
257 | 268 | ||
258 | #define IP_SET_HASH_WITH_NETS | 269 | #define IP_SET_HASH_WITH_NETS |
259 | #define IP_SET_HASH_WITH_RBTREE | 270 | #define IP_SET_HASH_WITH_RBTREE |
271 | #define IP_SET_HASH_WITH_MULTI | ||
260 | 272 | ||
261 | #define PF 4 | 273 | #define PF 4 |
262 | #define HOST_MASK 32 | 274 | #define HOST_MASK 32 |
263 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 275 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
264 | 276 | ||
265 | static inline void | 277 | static inline void |
266 | hash_netiface4_data_next(struct ip_set_hash *h, | 278 | hash_netiface4_data_next(struct ip_set_hash *h, |
267 | const struct hash_netiface4_elem *d) | 279 | const struct hash_netiface4_elem *d) |
268 | { | 280 | { |
269 | h->next.ip = ntohl(d->ip); | 281 | h->next.ip = ntohl(d->ip); |
270 | } | 282 | } |
271 | 283 | ||
272 | static int | 284 | static int |
273 | hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, | 285 | hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, |
274 | const struct xt_action_param *par, | 286 | const struct xt_action_param *par, |
275 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 287 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
276 | { | 288 | { |
277 | struct ip_set_hash *h = set->data; | 289 | struct ip_set_hash *h = set->data; |
278 | ipset_adtfn adtfn = set->variant->adt[adt]; | 290 | ipset_adtfn adtfn = set->variant->adt[adt]; |
279 | struct hash_netiface4_elem data = { | 291 | struct hash_netiface4_elem data = { |
280 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK | 292 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK |
281 | }; | 293 | }; |
282 | int ret; | 294 | int ret; |
283 | 295 | ||
284 | if (data.cidr == 0) | 296 | if (data.cidr == 0) |
285 | return -EINVAL; | 297 | return -EINVAL; |
286 | if (adt == IPSET_TEST) | 298 | if (adt == IPSET_TEST) |
287 | data.cidr = HOST_MASK; | 299 | data.cidr = HOST_MASK; |
288 | 300 | ||
289 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); | 301 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); |
290 | data.ip &= ip_set_netmask(data.cidr); | 302 | data.ip &= ip_set_netmask(data.cidr); |
291 | 303 | ||
292 | #define IFACE(dir) (par->dir ? par->dir->name : NULL) | 304 | #define IFACE(dir) (par->dir ? par->dir->name : NULL) |
293 | #define PHYSDEV(dir) (nf_bridge->dir ? nf_bridge->dir->name : NULL) | 305 | #define PHYSDEV(dir) (nf_bridge->dir ? nf_bridge->dir->name : NULL) |
294 | #define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC) | 306 | #define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC) |
295 | 307 | ||
296 | if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { | 308 | if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { |
297 | #ifdef CONFIG_BRIDGE_NETFILTER | 309 | #ifdef CONFIG_BRIDGE_NETFILTER |
298 | const struct nf_bridge_info *nf_bridge = skb->nf_bridge; | 310 | const struct nf_bridge_info *nf_bridge = skb->nf_bridge; |
299 | 311 | ||
300 | if (!nf_bridge) | 312 | if (!nf_bridge) |
301 | return -EINVAL; | 313 | return -EINVAL; |
302 | data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev); | 314 | data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev); |
303 | data.physdev = 1; | 315 | data.physdev = 1; |
304 | #else | 316 | #else |
305 | data.iface = NULL; | 317 | data.iface = NULL; |
306 | #endif | 318 | #endif |
307 | } else | 319 | } else |
308 | data.iface = SRCDIR ? IFACE(in) : IFACE(out); | 320 | data.iface = SRCDIR ? IFACE(in) : IFACE(out); |
309 | 321 | ||
310 | if (!data.iface) | 322 | if (!data.iface) |
311 | return -EINVAL; | 323 | return -EINVAL; |
312 | ret = iface_test(&h->rbtree, &data.iface); | 324 | ret = iface_test(&h->rbtree, &data.iface); |
313 | if (adt == IPSET_ADD) { | 325 | if (adt == IPSET_ADD) { |
314 | if (!ret) { | 326 | if (!ret) { |
315 | ret = iface_add(&h->rbtree, &data.iface); | 327 | ret = iface_add(&h->rbtree, &data.iface); |
316 | if (ret) | 328 | if (ret) |
317 | return ret; | 329 | return ret; |
318 | } | 330 | } |
319 | } else if (!ret) | 331 | } else if (!ret) |
320 | return ret; | 332 | return ret; |
321 | 333 | ||
322 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 334 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
323 | } | 335 | } |
324 | 336 | ||
325 | static int | 337 | static int |
326 | hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], | 338 | hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], |
327 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 339 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
328 | { | 340 | { |
329 | struct ip_set_hash *h = set->data; | 341 | struct ip_set_hash *h = set->data; |
330 | ipset_adtfn adtfn = set->variant->adt[adt]; | 342 | ipset_adtfn adtfn = set->variant->adt[adt]; |
331 | struct hash_netiface4_elem data = { .cidr = HOST_MASK }; | 343 | struct hash_netiface4_elem data = { .cidr = HOST_MASK }; |
332 | u32 ip = 0, ip_to, last; | 344 | u32 ip = 0, ip_to, last; |
333 | u32 timeout = h->timeout; | 345 | u32 timeout = h->timeout; |
334 | char iface[IFNAMSIZ] = {}; | 346 | char iface[IFNAMSIZ] = {}; |
335 | int ret; | 347 | int ret; |
336 | 348 | ||
337 | if (unlikely(!tb[IPSET_ATTR_IP] || | 349 | if (unlikely(!tb[IPSET_ATTR_IP] || |
338 | !tb[IPSET_ATTR_IFACE] || | 350 | !tb[IPSET_ATTR_IFACE] || |
339 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || | 351 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || |
340 | !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) | 352 | !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) |
341 | return -IPSET_ERR_PROTOCOL; | 353 | return -IPSET_ERR_PROTOCOL; |
342 | 354 | ||
343 | if (tb[IPSET_ATTR_LINENO]) | 355 | if (tb[IPSET_ATTR_LINENO]) |
344 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 356 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
345 | 357 | ||
346 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); | 358 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); |
347 | if (ret) | 359 | if (ret) |
348 | return ret; | 360 | return ret; |
349 | 361 | ||
350 | if (tb[IPSET_ATTR_CIDR]) { | 362 | if (tb[IPSET_ATTR_CIDR]) { |
351 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 363 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
352 | if (!data.cidr) | 364 | if (!data.cidr) |
353 | return -IPSET_ERR_INVALID_CIDR; | 365 | return -IPSET_ERR_INVALID_CIDR; |
354 | } | 366 | } |
355 | 367 | ||
356 | if (tb[IPSET_ATTR_TIMEOUT]) { | 368 | if (tb[IPSET_ATTR_TIMEOUT]) { |
357 | if (!with_timeout(h->timeout)) | 369 | if (!with_timeout(h->timeout)) |
358 | return -IPSET_ERR_TIMEOUT; | 370 | return -IPSET_ERR_TIMEOUT; |
359 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 371 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
360 | } | 372 | } |
361 | 373 | ||
362 | strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); | 374 | strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); |
363 | data.iface = iface; | 375 | data.iface = iface; |
364 | ret = iface_test(&h->rbtree, &data.iface); | 376 | ret = iface_test(&h->rbtree, &data.iface); |
365 | if (adt == IPSET_ADD) { | 377 | if (adt == IPSET_ADD) { |
366 | if (!ret) { | 378 | if (!ret) { |
367 | ret = iface_add(&h->rbtree, &data.iface); | 379 | ret = iface_add(&h->rbtree, &data.iface); |
368 | if (ret) | 380 | if (ret) |
369 | return ret; | 381 | return ret; |
370 | } | 382 | } |
371 | } else if (!ret) | 383 | } else if (!ret) |
372 | return ret; | 384 | return ret; |
373 | 385 | ||
374 | if (tb[IPSET_ATTR_CADT_FLAGS]) { | 386 | if (tb[IPSET_ATTR_CADT_FLAGS]) { |
375 | u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); | 387 | u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); |
376 | if (cadt_flags & IPSET_FLAG_PHYSDEV) | 388 | if (cadt_flags & IPSET_FLAG_PHYSDEV) |
377 | data.physdev = 1; | 389 | data.physdev = 1; |
378 | } | 390 | } |
379 | 391 | ||
380 | if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { | 392 | if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { |
381 | data.ip = htonl(ip & ip_set_hostmask(data.cidr)); | 393 | data.ip = htonl(ip & ip_set_hostmask(data.cidr)); |
382 | ret = adtfn(set, &data, timeout, flags); | 394 | ret = adtfn(set, &data, timeout, flags); |
383 | return ip_set_eexist(ret, flags) ? 0 : ret; | 395 | return ip_set_eexist(ret, flags) ? 0 : ret; |
384 | } | 396 | } |
385 | 397 | ||
386 | if (tb[IPSET_ATTR_IP_TO]) { | 398 | if (tb[IPSET_ATTR_IP_TO]) { |
387 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 399 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
388 | if (ret) | 400 | if (ret) |
389 | return ret; | 401 | return ret; |
390 | if (ip_to < ip) | 402 | if (ip_to < ip) |
391 | swap(ip, ip_to); | 403 | swap(ip, ip_to); |
392 | if (ip + UINT_MAX == ip_to) | 404 | if (ip + UINT_MAX == ip_to) |
393 | return -IPSET_ERR_HASH_RANGE; | 405 | return -IPSET_ERR_HASH_RANGE; |
394 | } else { | 406 | } else { |
395 | ip_set_mask_from_to(ip, ip_to, data.cidr); | 407 | ip_set_mask_from_to(ip, ip_to, data.cidr); |
396 | } | 408 | } |
397 | 409 | ||
398 | if (retried) | 410 | if (retried) |
399 | ip = h->next.ip; | 411 | ip = h->next.ip; |
400 | while (!after(ip, ip_to)) { | 412 | while (!after(ip, ip_to)) { |
401 | data.ip = htonl(ip); | 413 | data.ip = htonl(ip); |
402 | last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); | 414 | last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); |
403 | ret = adtfn(set, &data, timeout, flags); | 415 | ret = adtfn(set, &data, timeout, flags); |
404 | 416 | ||
405 | if (ret && !ip_set_eexist(ret, flags)) | 417 | if (ret && !ip_set_eexist(ret, flags)) |
406 | return ret; | 418 | return ret; |
407 | else | 419 | else |
408 | ret = 0; | 420 | ret = 0; |
409 | ip = last + 1; | 421 | ip = last + 1; |
410 | } | 422 | } |
411 | return ret; | 423 | return ret; |
412 | } | 424 | } |
413 | 425 | ||
414 | static bool | 426 | static bool |
415 | hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b) | 427 | hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b) |
416 | { | 428 | { |
417 | const struct ip_set_hash *x = a->data; | 429 | const struct ip_set_hash *x = a->data; |
418 | const struct ip_set_hash *y = b->data; | 430 | const struct ip_set_hash *y = b->data; |
419 | 431 | ||
420 | /* Resizing changes htable_bits, so we ignore it */ | 432 | /* Resizing changes htable_bits, so we ignore it */ |
421 | return x->maxelem == y->maxelem && | 433 | return x->maxelem == y->maxelem && |
422 | x->timeout == y->timeout; | 434 | x->timeout == y->timeout; |
423 | } | 435 | } |
424 | 436 | ||
425 | /* The type variant functions: IPv6 */ | 437 | /* The type variant functions: IPv6 */ |
426 | 438 | ||
439 | struct hash_netiface6_elem_hashed { | ||
440 | union nf_inet_addr ip; | ||
441 | u8 physdev; | ||
442 | u8 cidr; | ||
443 | u16 padding; | ||
444 | }; | ||
445 | |||
446 | #define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed) | ||
447 | |||
427 | struct hash_netiface6_elem { | 448 | struct hash_netiface6_elem { |
428 | union nf_inet_addr ip; | 449 | union nf_inet_addr ip; |
429 | const char *iface; | ||
430 | u8 physdev; | 450 | u8 physdev; |
431 | u8 cidr; | 451 | u8 cidr; |
432 | u16 padding; | 452 | u16 padding; |
453 | const char *iface; | ||
433 | }; | 454 | }; |
434 | 455 | ||
435 | struct hash_netiface6_telem { | 456 | struct hash_netiface6_telem { |
436 | union nf_inet_addr ip; | 457 | union nf_inet_addr ip; |
437 | const char *iface; | ||
438 | u8 physdev; | 458 | u8 physdev; |
439 | u8 cidr; | 459 | u8 cidr; |
440 | u16 padding; | 460 | u16 padding; |
461 | const char *iface; | ||
441 | unsigned long timeout; | 462 | unsigned long timeout; |
442 | }; | 463 | }; |
443 | 464 | ||
444 | static inline bool | 465 | static inline bool |
445 | hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1, | 466 | hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1, |
446 | const struct hash_netiface6_elem *ip2) | 467 | const struct hash_netiface6_elem *ip2, |
468 | u32 *multi) | ||
447 | { | 469 | { |
448 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && | 470 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
449 | ip1->cidr == ip2->cidr && | 471 | ip1->cidr == ip2->cidr && |
472 | (++*multi) && | ||
450 | ip1->physdev == ip2->physdev && | 473 | ip1->physdev == ip2->physdev && |
451 | ip1->iface == ip2->iface; | 474 | ip1->iface == ip2->iface; |
452 | } | 475 | } |
453 | 476 | ||
454 | static inline bool | 477 | static inline bool |
455 | hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem) | 478 | hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem) |
456 | { | 479 | { |
457 | return elem->cidr == 0; | 480 | return elem->cidr == 0; |
458 | } | 481 | } |
459 | 482 | ||
460 | static inline void | 483 | static inline void |
461 | hash_netiface6_data_copy(struct hash_netiface6_elem *dst, | 484 | hash_netiface6_data_copy(struct hash_netiface6_elem *dst, |
462 | const struct hash_netiface6_elem *src) | 485 | const struct hash_netiface6_elem *src) |
463 | { | 486 | { |
464 | memcpy(dst, src, sizeof(*dst)); | 487 | memcpy(dst, src, sizeof(*dst)); |
465 | } | 488 | } |
466 | 489 | ||
467 | static inline void | 490 | static inline void |
468 | hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) | 491 | hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) |
469 | { | 492 | { |
470 | } | 493 | } |
471 | 494 | ||
472 | static inline void | 495 | static inline void |
473 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) | 496 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) |
474 | { | 497 | { |
475 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; | 498 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; |
476 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; | 499 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; |
477 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; | 500 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; |
478 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; | 501 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; |
479 | } | 502 | } |
480 | 503 | ||
481 | static inline void | 504 | static inline void |
482 | hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr) | 505 | hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr) |
483 | { | 506 | { |
484 | ip6_netmask(&elem->ip, cidr); | 507 | ip6_netmask(&elem->ip, cidr); |
485 | elem->cidr = cidr; | 508 | elem->cidr = cidr; |
486 | } | 509 | } |
487 | 510 | ||
488 | static bool | 511 | static bool |
489 | hash_netiface6_data_list(struct sk_buff *skb, | 512 | hash_netiface6_data_list(struct sk_buff *skb, |
490 | const struct hash_netiface6_elem *data) | 513 | const struct hash_netiface6_elem *data) |
491 | { | 514 | { |
492 | u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; | 515 | u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; |
493 | 516 | ||
494 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); | 517 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); |
495 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 518 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
496 | NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); | 519 | NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); |
497 | if (flags) | 520 | if (flags) |
498 | NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); | 521 | NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); |
499 | return 0; | 522 | return 0; |
500 | 523 | ||
501 | nla_put_failure: | 524 | nla_put_failure: |
502 | return 1; | 525 | return 1; |
503 | } | 526 | } |
504 | 527 | ||
505 | static bool | 528 | static bool |
506 | hash_netiface6_data_tlist(struct sk_buff *skb, | 529 | hash_netiface6_data_tlist(struct sk_buff *skb, |
507 | const struct hash_netiface6_elem *data) | 530 | const struct hash_netiface6_elem *data) |
508 | { | 531 | { |
509 | const struct hash_netiface6_telem *e = | 532 | const struct hash_netiface6_telem *e = |
510 | (const struct hash_netiface6_telem *)data; | 533 | (const struct hash_netiface6_telem *)data; |
511 | u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; | 534 | u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; |
512 | 535 | ||
513 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); | 536 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); |
514 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 537 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
515 | NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); | 538 | NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); |
516 | if (flags) | 539 | if (flags) |
517 | NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); | 540 | NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); |
518 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 541 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
519 | htonl(ip_set_timeout_get(e->timeout))); | 542 | htonl(ip_set_timeout_get(e->timeout))); |
520 | return 0; | 543 | return 0; |
521 | 544 | ||
522 | nla_put_failure: | 545 | nla_put_failure: |
523 | return 1; | 546 | return 1; |
524 | } | 547 | } |
525 | 548 | ||
526 | #undef PF | 549 | #undef PF |
527 | #undef HOST_MASK | 550 | #undef HOST_MASK |
528 | 551 | ||
529 | #define PF 6 | 552 | #define PF 6 |
530 | #define HOST_MASK 128 | 553 | #define HOST_MASK 128 |
531 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 554 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
532 | 555 | ||
533 | static inline void | 556 | static inline void |
534 | hash_netiface6_data_next(struct ip_set_hash *h, | 557 | hash_netiface6_data_next(struct ip_set_hash *h, |
535 | const struct hash_netiface6_elem *d) | 558 | const struct hash_netiface6_elem *d) |
536 | { | 559 | { |
537 | } | 560 | } |
538 | 561 | ||
539 | static int | 562 | static int |
540 | hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, | 563 | hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, |
541 | const struct xt_action_param *par, | 564 | const struct xt_action_param *par, |
542 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 565 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
543 | { | 566 | { |
544 | struct ip_set_hash *h = set->data; | 567 | struct ip_set_hash *h = set->data; |
545 | ipset_adtfn adtfn = set->variant->adt[adt]; | 568 | ipset_adtfn adtfn = set->variant->adt[adt]; |
546 | struct hash_netiface6_elem data = { | 569 | struct hash_netiface6_elem data = { |
547 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK | 570 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK |
548 | }; | 571 | }; |
549 | int ret; | 572 | int ret; |
550 | 573 | ||
551 | if (data.cidr == 0) | 574 | if (data.cidr == 0) |
552 | return -EINVAL; | 575 | return -EINVAL; |
553 | if (adt == IPSET_TEST) | 576 | if (adt == IPSET_TEST) |
554 | data.cidr = HOST_MASK; | 577 | data.cidr = HOST_MASK; |
555 | 578 | ||
556 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); | 579 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); |
557 | ip6_netmask(&data.ip, data.cidr); | 580 | ip6_netmask(&data.ip, data.cidr); |
558 | 581 | ||
559 | if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { | 582 | if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { |
560 | #ifdef CONFIG_BRIDGE_NETFILTER | 583 | #ifdef CONFIG_BRIDGE_NETFILTER |
561 | const struct nf_bridge_info *nf_bridge = skb->nf_bridge; | 584 | const struct nf_bridge_info *nf_bridge = skb->nf_bridge; |
562 | 585 | ||
563 | if (!nf_bridge) | 586 | if (!nf_bridge) |
564 | return -EINVAL; | 587 | return -EINVAL; |
565 | data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev); | 588 | data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev); |
566 | data.physdev = 1; | 589 | data.physdev = 1; |
567 | #else | 590 | #else |
568 | data.iface = NULL; | 591 | data.iface = NULL; |
569 | #endif | 592 | #endif |
570 | } else | 593 | } else |
571 | data.iface = SRCDIR ? IFACE(in) : IFACE(out); | 594 | data.iface = SRCDIR ? IFACE(in) : IFACE(out); |
572 | 595 | ||
573 | if (!data.iface) | 596 | if (!data.iface) |
574 | return -EINVAL; | 597 | return -EINVAL; |
575 | ret = iface_test(&h->rbtree, &data.iface); | 598 | ret = iface_test(&h->rbtree, &data.iface); |
576 | if (adt == IPSET_ADD) { | 599 | if (adt == IPSET_ADD) { |
577 | if (!ret) { | 600 | if (!ret) { |
578 | ret = iface_add(&h->rbtree, &data.iface); | 601 | ret = iface_add(&h->rbtree, &data.iface); |
579 | if (ret) | 602 | if (ret) |
580 | return ret; | 603 | return ret; |
581 | } | 604 | } |
582 | } else if (!ret) | 605 | } else if (!ret) |
583 | return ret; | 606 | return ret; |
584 | 607 | ||
585 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 608 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
586 | } | 609 | } |
587 | 610 | ||
588 | static int | 611 | static int |
589 | hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], | 612 | hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], |
590 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 613 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
591 | { | 614 | { |
592 | struct ip_set_hash *h = set->data; | 615 | struct ip_set_hash *h = set->data; |
593 | ipset_adtfn adtfn = set->variant->adt[adt]; | 616 | ipset_adtfn adtfn = set->variant->adt[adt]; |
594 | struct hash_netiface6_elem data = { .cidr = HOST_MASK }; | 617 | struct hash_netiface6_elem data = { .cidr = HOST_MASK }; |
595 | u32 timeout = h->timeout; | 618 | u32 timeout = h->timeout; |
596 | char iface[IFNAMSIZ] = {}; | 619 | char iface[IFNAMSIZ] = {}; |
597 | int ret; | 620 | int ret; |
598 | 621 | ||
599 | if (unlikely(!tb[IPSET_ATTR_IP] || | 622 | if (unlikely(!tb[IPSET_ATTR_IP] || |
600 | !tb[IPSET_ATTR_IFACE] || | 623 | !tb[IPSET_ATTR_IFACE] || |
601 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || | 624 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || |
602 | !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) | 625 | !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) |
603 | return -IPSET_ERR_PROTOCOL; | 626 | return -IPSET_ERR_PROTOCOL; |
604 | if (unlikely(tb[IPSET_ATTR_IP_TO])) | 627 | if (unlikely(tb[IPSET_ATTR_IP_TO])) |
605 | return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; | 628 | return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; |
606 | 629 | ||
607 | if (tb[IPSET_ATTR_LINENO]) | 630 | if (tb[IPSET_ATTR_LINENO]) |
608 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 631 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
609 | 632 | ||
610 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); | 633 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); |
611 | if (ret) | 634 | if (ret) |
612 | return ret; | 635 | return ret; |
613 | 636 | ||
614 | if (tb[IPSET_ATTR_CIDR]) | 637 | if (tb[IPSET_ATTR_CIDR]) |
615 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 638 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
616 | if (!data.cidr) | 639 | if (!data.cidr) |
617 | return -IPSET_ERR_INVALID_CIDR; | 640 | return -IPSET_ERR_INVALID_CIDR; |
618 | ip6_netmask(&data.ip, data.cidr); | 641 | ip6_netmask(&data.ip, data.cidr); |
619 | 642 | ||
620 | if (tb[IPSET_ATTR_TIMEOUT]) { | 643 | if (tb[IPSET_ATTR_TIMEOUT]) { |
621 | if (!with_timeout(h->timeout)) | 644 | if (!with_timeout(h->timeout)) |
622 | return -IPSET_ERR_TIMEOUT; | 645 | return -IPSET_ERR_TIMEOUT; |
623 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 646 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
624 | } | 647 | } |
625 | 648 | ||
626 | strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); | 649 | strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); |
627 | data.iface = iface; | 650 | data.iface = iface; |
628 | ret = iface_test(&h->rbtree, &data.iface); | 651 | ret = iface_test(&h->rbtree, &data.iface); |
629 | if (adt == IPSET_ADD) { | 652 | if (adt == IPSET_ADD) { |
630 | if (!ret) { | 653 | if (!ret) { |
631 | ret = iface_add(&h->rbtree, &data.iface); | 654 | ret = iface_add(&h->rbtree, &data.iface); |
632 | if (ret) | 655 | if (ret) |
633 | return ret; | 656 | return ret; |
634 | } | 657 | } |
635 | } else if (!ret) | 658 | } else if (!ret) |
636 | return ret; | 659 | return ret; |
637 | 660 | ||
638 | if (tb[IPSET_ATTR_CADT_FLAGS]) { | 661 | if (tb[IPSET_ATTR_CADT_FLAGS]) { |
639 | u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); | 662 | u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); |
640 | if (cadt_flags & IPSET_FLAG_PHYSDEV) | 663 | if (cadt_flags & IPSET_FLAG_PHYSDEV) |
641 | data.physdev = 1; | 664 | data.physdev = 1; |
642 | } | 665 | } |
643 | 666 | ||
644 | ret = adtfn(set, &data, timeout, flags); | 667 | ret = adtfn(set, &data, timeout, flags); |
645 | 668 | ||
646 | return ip_set_eexist(ret, flags) ? 0 : ret; | 669 | return ip_set_eexist(ret, flags) ? 0 : ret; |
647 | } | 670 | } |
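Both the packet path (kadt) and the netlink path (uadt) above treat the per-set interface-name cache the same way: IPSET_ADD caches a name it has not seen before, while IPSET_TEST and IPSET_DEL return early on an unknown name, since the element cannot be in the set. The sketch below mirrors that policy in userspace with a flat array standing in for the kernel's rbtree (iface_test()/iface_add()); it illustrates the control flow only, not the kernel implementation.

#include <stdio.h>
#include <string.h>

#define CACHE_MAX 64

/* Stand-in for the rbtree of interface names kept in struct ip_set_hash. */
static const char *cache[CACHE_MAX];
static int cache_len;

static int iface_test(const char *name)
{
	int i;

	for (i = 0; i < cache_len; i++)
		if (strcmp(cache[i], name) == 0)
			return 1;	/* name already cached */
	return 0;
}

static int iface_add(const char *name)
{
	if (cache_len == CACHE_MAX)
		return -1;		/* the kernel returns -ENOMEM here */
	cache[cache_len++] = name;
	return 0;
}

enum adt { ADT_ADD, ADT_DEL, ADT_TEST };

/* Mirrors the "if (adt == IPSET_ADD) ... else if (!ret) return ret;" blocks. */
static int lookup_or_cache(enum adt adt, const char *name)
{
	int ret = iface_test(name);

	if (adt == ADT_ADD) {
		if (!ret && iface_add(name) < 0)
			return -1;	/* cache full: fail the add */
	} else if (!ret) {
		return 0;		/* unknown name: cannot match or delete */
	}
	return 1;			/* proceed to the hash operation */
}

int main(void)
{
	printf("test eth0 before add: %d\n", lookup_or_cache(ADT_TEST, "eth0"));
	printf("add eth0:             %d\n", lookup_or_cache(ADT_ADD, "eth0"));
	printf("test eth0 after add:  %d\n", lookup_or_cache(ADT_TEST, "eth0"));
	return 0;
}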
648 | 671 | ||
649 | /* Create hash:net,iface type of sets */ | 672 | /* Create hash:net,iface type of sets */ |
650 | 673 | ||
651 | static int | 674 | static int |
652 | hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags) | 675 | hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags) |
653 | { | 676 | { |
654 | struct ip_set_hash *h; | 677 | struct ip_set_hash *h; |
655 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; | 678 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; |
656 | u8 hbits; | 679 | u8 hbits; |
657 | 680 | ||
658 | if (!(set->family == AF_INET || set->family == AF_INET6)) | 681 | if (!(set->family == AF_INET || set->family == AF_INET6)) |
659 | return -IPSET_ERR_INVALID_FAMILY; | 682 | return -IPSET_ERR_INVALID_FAMILY; |
660 | 683 | ||
661 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || | 684 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || |
662 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || | 685 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || |
663 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 686 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
664 | return -IPSET_ERR_PROTOCOL; | 687 | return -IPSET_ERR_PROTOCOL; |
665 | 688 | ||
666 | if (tb[IPSET_ATTR_HASHSIZE]) { | 689 | if (tb[IPSET_ATTR_HASHSIZE]) { |
667 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); | 690 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); |
668 | if (hashsize < IPSET_MIMINAL_HASHSIZE) | 691 | if (hashsize < IPSET_MIMINAL_HASHSIZE) |
669 | hashsize = IPSET_MIMINAL_HASHSIZE; | 692 | hashsize = IPSET_MIMINAL_HASHSIZE; |
670 | } | 693 | } |
671 | 694 | ||
672 | if (tb[IPSET_ATTR_MAXELEM]) | 695 | if (tb[IPSET_ATTR_MAXELEM]) |
673 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); | 696 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); |
674 | 697 | ||
675 | h = kzalloc(sizeof(*h) | 698 | h = kzalloc(sizeof(*h) |
676 | + sizeof(struct ip_set_hash_nets) | 699 | + sizeof(struct ip_set_hash_nets) |
677 | * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); | 700 | * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); |
678 | if (!h) | 701 | if (!h) |
679 | return -ENOMEM; | 702 | return -ENOMEM; |
680 | 703 | ||
681 | h->maxelem = maxelem; | 704 | h->maxelem = maxelem; |
682 | get_random_bytes(&h->initval, sizeof(h->initval)); | 705 | get_random_bytes(&h->initval, sizeof(h->initval)); |
683 | h->timeout = IPSET_NO_TIMEOUT; | 706 | h->timeout = IPSET_NO_TIMEOUT; |
707 | h->ahash_max = AHASH_MAX_SIZE; | ||
684 | 708 | ||
685 | hbits = htable_bits(hashsize); | 709 | hbits = htable_bits(hashsize); |
686 | h->table = ip_set_alloc( | 710 | h->table = ip_set_alloc( |
687 | sizeof(struct htable) | 711 | sizeof(struct htable) |
688 | + jhash_size(hbits) * sizeof(struct hbucket)); | 712 | + jhash_size(hbits) * sizeof(struct hbucket)); |
689 | if (!h->table) { | 713 | if (!h->table) { |
690 | kfree(h); | 714 | kfree(h); |
691 | return -ENOMEM; | 715 | return -ENOMEM; |
692 | } | 716 | } |
693 | h->table->htable_bits = hbits; | 717 | h->table->htable_bits = hbits; |
694 | h->rbtree = RB_ROOT; | 718 | h->rbtree = RB_ROOT; |
695 | 719 | ||
696 | set->data = h; | 720 | set->data = h; |
697 | 721 | ||
698 | if (tb[IPSET_ATTR_TIMEOUT]) { | 722 | if (tb[IPSET_ATTR_TIMEOUT]) { |
699 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 723 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
700 | 724 | ||
701 | set->variant = set->family == AF_INET | 725 | set->variant = set->family == AF_INET |
702 | ? &hash_netiface4_tvariant : &hash_netiface6_tvariant; | 726 | ? &hash_netiface4_tvariant : &hash_netiface6_tvariant; |
703 | 727 | ||
704 | if (set->family == AF_INET) | 728 | if (set->family == AF_INET) |
705 | hash_netiface4_gc_init(set); | 729 | hash_netiface4_gc_init(set); |
706 | else | 730 | else |
707 | hash_netiface6_gc_init(set); | 731 | hash_netiface6_gc_init(set); |
708 | } else { | 732 | } else { |
709 | set->variant = set->family == AF_INET | 733 | set->variant = set->family == AF_INET |
710 | ? &hash_netiface4_variant : &hash_netiface6_variant; | 734 | ? &hash_netiface4_variant : &hash_netiface6_variant; |
711 | } | 735 | } |
712 | 736 | ||
713 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", | 737 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", |
714 | set->name, jhash_size(h->table->htable_bits), | 738 | set->name, jhash_size(h->table->htable_bits), |
715 | h->table->htable_bits, h->maxelem, set->data, h->table); | 739 | h->table->htable_bits, h->maxelem, set->data, h->table); |
716 | 740 | ||
717 | return 0; | 741 | return 0; |
718 | } | 742 | } |
719 | 743 | ||
720 | static struct ip_set_type hash_netiface_type __read_mostly = { | 744 | static struct ip_set_type hash_netiface_type __read_mostly = { |
721 | .name = "hash:net,iface", | 745 | .name = "hash:net,iface", |
722 | .protocol = IPSET_PROTOCOL, | 746 | .protocol = IPSET_PROTOCOL, |
723 | .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE, | 747 | .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE, |
724 | .dimension = IPSET_DIM_TWO, | 748 | .dimension = IPSET_DIM_TWO, |
725 | .family = AF_UNSPEC, | 749 | .family = AF_UNSPEC, |
726 | .revision_min = 0, | 750 | .revision_min = 0, |
727 | .create = hash_netiface_create, | 751 | .create = hash_netiface_create, |
728 | .create_policy = { | 752 | .create_policy = { |
729 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, | 753 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, |
730 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, | 754 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, |
731 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, | 755 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, |
732 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, | 756 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, |
733 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, | 757 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, |
734 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 758 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
735 | }, | 759 | }, |
736 | .adt_policy = { | 760 | .adt_policy = { |
737 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, | 761 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, |
738 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, | 762 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, |
739 | [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING, | 763 | [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING, |
740 | .len = IPSET_MAXNAMELEN - 1 }, | 764 | .len = IPSET_MAXNAMELEN - 1 }, |
741 | [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, | 765 | [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, |
742 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, | 766 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, |
743 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 767 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
744 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, | 768 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, |
745 | }, | 769 | }, |
746 | .me = THIS_MODULE, | 770 | .me = THIS_MODULE, |
747 | }; | 771 | }; |
748 | 772 | ||
749 | static int __init | 773 | static int __init |
750 | hash_netiface_init(void) | 774 | hash_netiface_init(void) |
751 | { | 775 | { |
752 | return ip_set_type_register(&hash_netiface_type); | 776 | return ip_set_type_register(&hash_netiface_type); |
753 | } | 777 | } |
754 | 778 | ||
755 | static void __exit | 779 | static void __exit |
756 | hash_netiface_fini(void) | 780 | hash_netiface_fini(void) |
757 | { | 781 | { |
758 | ip_set_type_unregister(&hash_netiface_type); | 782 | ip_set_type_unregister(&hash_netiface_type); |
759 | } | 783 | } |
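Throughout the IPv6 variants above, ip6_netmask() masks the stored address down to its /cidr network using the per-word masks supplied by ip_set_netmask6(). A minimal standalone sketch of the same operation, building the mask directly from the prefix length and assuming the four-word, network-byte-order layout that union nf_inet_addr uses:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* inet_pton, inet_ntop, htonl */

/* Apply a /prefix mask to an IPv6 address stored as four 32-bit words in
 * network byte order. */
static void ip6_apply_prefix(uint32_t ip6[4], unsigned int prefix)
{
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int bits = prefix > (unsigned int)i * 32 ?
				    prefix - (unsigned int)i * 32 : 0;
		uint32_t mask;

		if (bits >= 32)
			mask = 0xffffffffu;		/* word fully inside the prefix */
		else if (bits == 0)
			mask = 0;			/* word fully outside */
		else
			mask = ~(0xffffffffu >> bits);	/* top 'bits' bits set */
		ip6[i] &= htonl(mask);
	}
}

int main(void)
{
	struct in6_addr addr;
	uint32_t words[4];
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET6, "2001:db8:85a3::8a2e:370:7334", &addr);
	memcpy(words, &addr, sizeof(words));
	ip6_apply_prefix(words, 64);		/* keep the /64 network part */
	memcpy(&addr, words, sizeof(addr));
	printf("%s\n", inet_ntop(AF_INET6, &addr, buf, sizeof(buf)));
	return 0;
}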
net/netfilter/ipset/ip_set_hash_netport.c
1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 1 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 as | 4 | * it under the terms of the GNU General Public License version 2 as |
5 | * published by the Free Software Foundation. | 5 | * published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | /* Kernel module implementing an IP set type: the hash:net,port type */ | 8 | /* Kernel module implementing an IP set type: the hash:net,port type */ |
9 | 9 | ||
10 | #include <linux/jhash.h> | 10 | #include <linux/jhash.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <net/ip.h> | 16 | #include <net/ip.h> |
17 | #include <net/ipv6.h> | 17 | #include <net/ipv6.h> |
18 | #include <net/netlink.h> | 18 | #include <net/netlink.h> |
19 | 19 | ||
20 | #include <linux/netfilter.h> | 20 | #include <linux/netfilter.h> |
21 | #include <linux/netfilter/ipset/pfxlen.h> | 21 | #include <linux/netfilter/ipset/pfxlen.h> |
22 | #include <linux/netfilter/ipset/ip_set.h> | 22 | #include <linux/netfilter/ipset/ip_set.h> |
23 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 23 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
24 | #include <linux/netfilter/ipset/ip_set_getport.h> | 24 | #include <linux/netfilter/ipset/ip_set_getport.h> |
25 | #include <linux/netfilter/ipset/ip_set_hash.h> | 25 | #include <linux/netfilter/ipset/ip_set_hash.h> |
26 | 26 | ||
27 | MODULE_LICENSE("GPL"); | 27 | MODULE_LICENSE("GPL"); |
28 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); | 28 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); |
29 | MODULE_DESCRIPTION("hash:net,port type of IP sets"); | 29 | MODULE_DESCRIPTION("hash:net,port type of IP sets"); |
30 | MODULE_ALIAS("ip_set_hash:net,port"); | 30 | MODULE_ALIAS("ip_set_hash:net,port"); |
31 | 31 | ||
32 | /* Type specific function prefix */ | 32 | /* Type specific function prefix */ |
33 | #define TYPE hash_netport | 33 | #define TYPE hash_netport |
34 | 34 | ||
35 | static bool | 35 | static bool |
36 | hash_netport_same_set(const struct ip_set *a, const struct ip_set *b); | 36 | hash_netport_same_set(const struct ip_set *a, const struct ip_set *b); |
37 | 37 | ||
38 | #define hash_netport4_same_set hash_netport_same_set | 38 | #define hash_netport4_same_set hash_netport_same_set |
39 | #define hash_netport6_same_set hash_netport_same_set | 39 | #define hash_netport6_same_set hash_netport_same_set |
40 | 40 | ||
41 | /* The type variant functions: IPv4 */ | 41 | /* The type variant functions: IPv4 */ |
42 | 42 | ||
43 | /* Member elements without timeout */ | 43 | /* Member elements without timeout */ |
44 | struct hash_netport4_elem { | 44 | struct hash_netport4_elem { |
45 | __be32 ip; | 45 | __be32 ip; |
46 | __be16 port; | 46 | __be16 port; |
47 | u8 proto; | 47 | u8 proto; |
48 | u8 cidr; | 48 | u8 cidr; |
49 | }; | 49 | }; |
50 | 50 | ||
51 | /* Member elements with timeout support */ | 51 | /* Member elements with timeout support */ |
52 | struct hash_netport4_telem { | 52 | struct hash_netport4_telem { |
53 | __be32 ip; | 53 | __be32 ip; |
54 | __be16 port; | 54 | __be16 port; |
55 | u8 proto; | 55 | u8 proto; |
56 | u8 cidr; | 56 | u8 cidr; |
57 | unsigned long timeout; | 57 | unsigned long timeout; |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static inline bool | 60 | static inline bool |
61 | hash_netport4_data_equal(const struct hash_netport4_elem *ip1, | 61 | hash_netport4_data_equal(const struct hash_netport4_elem *ip1, |
62 | const struct hash_netport4_elem *ip2) | 62 | const struct hash_netport4_elem *ip2, |
63 | u32 *multi) | ||
63 | { | 64 | { |
64 | return ip1->ip == ip2->ip && | 65 | return ip1->ip == ip2->ip && |
65 | ip1->port == ip2->port && | 66 | ip1->port == ip2->port && |
66 | ip1->proto == ip2->proto && | 67 | ip1->proto == ip2->proto && |
67 | ip1->cidr == ip2->cidr; | 68 | ip1->cidr == ip2->cidr; |
68 | } | 69 | } |
69 | 70 | ||
70 | static inline bool | 71 | static inline bool |
71 | hash_netport4_data_isnull(const struct hash_netport4_elem *elem) | 72 | hash_netport4_data_isnull(const struct hash_netport4_elem *elem) |
72 | { | 73 | { |
73 | return elem->proto == 0; | 74 | return elem->proto == 0; |
74 | } | 75 | } |
75 | 76 | ||
76 | static inline void | 77 | static inline void |
77 | hash_netport4_data_copy(struct hash_netport4_elem *dst, | 78 | hash_netport4_data_copy(struct hash_netport4_elem *dst, |
78 | const struct hash_netport4_elem *src) | 79 | const struct hash_netport4_elem *src) |
79 | { | 80 | { |
80 | dst->ip = src->ip; | 81 | dst->ip = src->ip; |
81 | dst->port = src->port; | 82 | dst->port = src->port; |
82 | dst->proto = src->proto; | 83 | dst->proto = src->proto; |
83 | dst->cidr = src->cidr; | 84 | dst->cidr = src->cidr; |
84 | } | 85 | } |
85 | 86 | ||
86 | static inline void | 87 | static inline void |
87 | hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr) | 88 | hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr) |
88 | { | 89 | { |
89 | elem->ip &= ip_set_netmask(cidr); | 90 | elem->ip &= ip_set_netmask(cidr); |
90 | elem->cidr = cidr; | 91 | elem->cidr = cidr; |
91 | } | 92 | } |
92 | 93 | ||
93 | static inline void | 94 | static inline void |
94 | hash_netport4_data_zero_out(struct hash_netport4_elem *elem) | 95 | hash_netport4_data_zero_out(struct hash_netport4_elem *elem) |
95 | { | 96 | { |
96 | elem->proto = 0; | 97 | elem->proto = 0; |
97 | } | 98 | } |
98 | 99 | ||
99 | static bool | 100 | static bool |
100 | hash_netport4_data_list(struct sk_buff *skb, | 101 | hash_netport4_data_list(struct sk_buff *skb, |
101 | const struct hash_netport4_elem *data) | 102 | const struct hash_netport4_elem *data) |
102 | { | 103 | { |
103 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); | 104 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); |
104 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 105 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
105 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 106 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
106 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 107 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
107 | return 0; | 108 | return 0; |
108 | 109 | ||
109 | nla_put_failure: | 110 | nla_put_failure: |
110 | return 1; | 111 | return 1; |
111 | } | 112 | } |
112 | 113 | ||
113 | static bool | 114 | static bool |
114 | hash_netport4_data_tlist(struct sk_buff *skb, | 115 | hash_netport4_data_tlist(struct sk_buff *skb, |
115 | const struct hash_netport4_elem *data) | 116 | const struct hash_netport4_elem *data) |
116 | { | 117 | { |
117 | const struct hash_netport4_telem *tdata = | 118 | const struct hash_netport4_telem *tdata = |
118 | (const struct hash_netport4_telem *)data; | 119 | (const struct hash_netport4_telem *)data; |
119 | 120 | ||
120 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); | 121 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); |
121 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); | 122 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); |
122 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 123 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
123 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 124 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
124 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 125 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
125 | htonl(ip_set_timeout_get(tdata->timeout))); | 126 | htonl(ip_set_timeout_get(tdata->timeout))); |
126 | 127 | ||
127 | return 0; | 128 | return 0; |
128 | 129 | ||
129 | nla_put_failure: | 130 | nla_put_failure: |
130 | return 1; | 131 | return 1; |
131 | } | 132 | } |
132 | 133 | ||
133 | #define IP_SET_HASH_WITH_PROTO | 134 | #define IP_SET_HASH_WITH_PROTO |
134 | #define IP_SET_HASH_WITH_NETS | 135 | #define IP_SET_HASH_WITH_NETS |
135 | 136 | ||
136 | #define PF 4 | 137 | #define PF 4 |
137 | #define HOST_MASK 32 | 138 | #define HOST_MASK 32 |
138 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 139 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
139 | 140 | ||
140 | static inline void | 141 | static inline void |
141 | hash_netport4_data_next(struct ip_set_hash *h, | 142 | hash_netport4_data_next(struct ip_set_hash *h, |
142 | const struct hash_netport4_elem *d) | 143 | const struct hash_netport4_elem *d) |
143 | { | 144 | { |
144 | h->next.ip = ntohl(d->ip); | 145 | h->next.ip = ntohl(d->ip); |
145 | h->next.port = ntohs(d->port); | 146 | h->next.port = ntohs(d->port); |
146 | } | 147 | } |
147 | 148 | ||
148 | static int | 149 | static int |
149 | hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb, | 150 | hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb, |
150 | const struct xt_action_param *par, | 151 | const struct xt_action_param *par, |
151 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 152 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
152 | { | 153 | { |
153 | const struct ip_set_hash *h = set->data; | 154 | const struct ip_set_hash *h = set->data; |
154 | ipset_adtfn adtfn = set->variant->adt[adt]; | 155 | ipset_adtfn adtfn = set->variant->adt[adt]; |
155 | struct hash_netport4_elem data = { | 156 | struct hash_netport4_elem data = { |
156 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK | 157 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK |
157 | }; | 158 | }; |
158 | 159 | ||
159 | if (data.cidr == 0) | 160 | if (data.cidr == 0) |
160 | return -EINVAL; | 161 | return -EINVAL; |
161 | if (adt == IPSET_TEST) | 162 | if (adt == IPSET_TEST) |
162 | data.cidr = HOST_MASK; | 163 | data.cidr = HOST_MASK; |
163 | 164 | ||
164 | if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, | 165 | if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, |
165 | &data.port, &data.proto)) | 166 | &data.port, &data.proto)) |
166 | return -EINVAL; | 167 | return -EINVAL; |
167 | 168 | ||
168 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); | 169 | ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); |
169 | data.ip &= ip_set_netmask(data.cidr); | 170 | data.ip &= ip_set_netmask(data.cidr); |
170 | 171 | ||
171 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 172 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
172 | } | 173 | } |
173 | 174 | ||
174 | static int | 175 | static int |
175 | hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], | 176 | hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], |
176 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 177 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
177 | { | 178 | { |
178 | const struct ip_set_hash *h = set->data; | 179 | const struct ip_set_hash *h = set->data; |
179 | ipset_adtfn adtfn = set->variant->adt[adt]; | 180 | ipset_adtfn adtfn = set->variant->adt[adt]; |
180 | struct hash_netport4_elem data = { .cidr = HOST_MASK }; | 181 | struct hash_netport4_elem data = { .cidr = HOST_MASK }; |
181 | u32 port, port_to, p = 0, ip = 0, ip_to, last; | 182 | u32 port, port_to, p = 0, ip = 0, ip_to, last; |
182 | u32 timeout = h->timeout; | 183 | u32 timeout = h->timeout; |
183 | bool with_ports = false; | 184 | bool with_ports = false; |
184 | int ret; | 185 | int ret; |
185 | 186 | ||
186 | if (unlikely(!tb[IPSET_ATTR_IP] || | 187 | if (unlikely(!tb[IPSET_ATTR_IP] || |
187 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || | 188 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || |
188 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || | 189 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || |
189 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 190 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
190 | return -IPSET_ERR_PROTOCOL; | 191 | return -IPSET_ERR_PROTOCOL; |
191 | 192 | ||
192 | if (tb[IPSET_ATTR_LINENO]) | 193 | if (tb[IPSET_ATTR_LINENO]) |
193 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 194 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
194 | 195 | ||
195 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); | 196 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); |
196 | if (ret) | 197 | if (ret) |
197 | return ret; | 198 | return ret; |
198 | 199 | ||
199 | if (tb[IPSET_ATTR_CIDR]) { | 200 | if (tb[IPSET_ATTR_CIDR]) { |
200 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 201 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
201 | if (!data.cidr) | 202 | if (!data.cidr) |
202 | return -IPSET_ERR_INVALID_CIDR; | 203 | return -IPSET_ERR_INVALID_CIDR; |
203 | } | 204 | } |
204 | 205 | ||
205 | if (tb[IPSET_ATTR_PORT]) | 206 | if (tb[IPSET_ATTR_PORT]) |
206 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); | 207 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); |
207 | else | 208 | else |
208 | return -IPSET_ERR_PROTOCOL; | 209 | return -IPSET_ERR_PROTOCOL; |
209 | 210 | ||
210 | if (tb[IPSET_ATTR_PROTO]) { | 211 | if (tb[IPSET_ATTR_PROTO]) { |
211 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); | 212 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); |
212 | with_ports = ip_set_proto_with_ports(data.proto); | 213 | with_ports = ip_set_proto_with_ports(data.proto); |
213 | 214 | ||
214 | if (data.proto == 0) | 215 | if (data.proto == 0) |
215 | return -IPSET_ERR_INVALID_PROTO; | 216 | return -IPSET_ERR_INVALID_PROTO; |
216 | } else | 217 | } else |
217 | return -IPSET_ERR_MISSING_PROTO; | 218 | return -IPSET_ERR_MISSING_PROTO; |
218 | 219 | ||
219 | if (!(with_ports || data.proto == IPPROTO_ICMP)) | 220 | if (!(with_ports || data.proto == IPPROTO_ICMP)) |
220 | data.port = 0; | 221 | data.port = 0; |
221 | 222 | ||
222 | if (tb[IPSET_ATTR_TIMEOUT]) { | 223 | if (tb[IPSET_ATTR_TIMEOUT]) { |
223 | if (!with_timeout(h->timeout)) | 224 | if (!with_timeout(h->timeout)) |
224 | return -IPSET_ERR_TIMEOUT; | 225 | return -IPSET_ERR_TIMEOUT; |
225 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 226 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
226 | } | 227 | } |
227 | 228 | ||
228 | with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; | 229 | with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; |
229 | if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { | 230 | if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { |
230 | data.ip = htonl(ip & ip_set_hostmask(data.cidr)); | 231 | data.ip = htonl(ip & ip_set_hostmask(data.cidr)); |
231 | ret = adtfn(set, &data, timeout, flags); | 232 | ret = adtfn(set, &data, timeout, flags); |
232 | return ip_set_eexist(ret, flags) ? 0 : ret; | 233 | return ip_set_eexist(ret, flags) ? 0 : ret; |
233 | } | 234 | } |
234 | 235 | ||
235 | port = port_to = ntohs(data.port); | 236 | port = port_to = ntohs(data.port); |
236 | if (tb[IPSET_ATTR_PORT_TO]) { | 237 | if (tb[IPSET_ATTR_PORT_TO]) { |
237 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); | 238 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); |
238 | if (port_to < port) | 239 | if (port_to < port) |
239 | swap(port, port_to); | 240 | swap(port, port_to); |
240 | } | 241 | } |
241 | if (tb[IPSET_ATTR_IP_TO]) { | 242 | if (tb[IPSET_ATTR_IP_TO]) { |
242 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 243 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
243 | if (ret) | 244 | if (ret) |
244 | return ret; | 245 | return ret; |
245 | if (ip_to < ip) | 246 | if (ip_to < ip) |
246 | swap(ip, ip_to); | 247 | swap(ip, ip_to); |
247 | if (ip + UINT_MAX == ip_to) | 248 | if (ip + UINT_MAX == ip_to) |
248 | return -IPSET_ERR_HASH_RANGE; | 249 | return -IPSET_ERR_HASH_RANGE; |
249 | } else { | 250 | } else { |
250 | ip_set_mask_from_to(ip, ip_to, data.cidr); | 251 | ip_set_mask_from_to(ip, ip_to, data.cidr); |
251 | } | 252 | } |
252 | 253 | ||
253 | if (retried) | 254 | if (retried) |
254 | ip = h->next.ip; | 255 | ip = h->next.ip; |
255 | while (!after(ip, ip_to)) { | 256 | while (!after(ip, ip_to)) { |
256 | data.ip = htonl(ip); | 257 | data.ip = htonl(ip); |
257 | last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); | 258 | last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); |
258 | p = retried && ip == h->next.ip ? h->next.port : port; | 259 | p = retried && ip == h->next.ip ? h->next.port : port; |
259 | for (; p <= port_to; p++) { | 260 | for (; p <= port_to; p++) { |
260 | data.port = htons(p); | 261 | data.port = htons(p); |
261 | ret = adtfn(set, &data, timeout, flags); | 262 | ret = adtfn(set, &data, timeout, flags); |
262 | 263 | ||
263 | if (ret && !ip_set_eexist(ret, flags)) | 264 | if (ret && !ip_set_eexist(ret, flags)) |
264 | return ret; | 265 | return ret; |
265 | else | 266 | else |
266 | ret = 0; | 267 | ret = 0; |
267 | } | 268 | } |
268 | ip = last + 1; | 269 | ip = last + 1; |
269 | } | 270 | } |
270 | return ret; | 271 | return ret; |
271 | } | 272 | } |
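When an IP range (IPSET_ATTR_IP_TO) is supplied, the loop above leans on ip_set_range_to_cidr() to carve the interval into the largest aligned CIDR blocks, so each iteration stores one net element and, on a retry, h->next supplies the resume point. A simplified userspace take on that greedy decomposition (an illustration only; the real helper lives in the ipset headers):

#include <stdio.h>
#include <stdint.h>

/* Return the last address of the largest CIDR block that starts at 'from'
 * and does not run past 'to'; store its prefix length in *cidr.
 * Host-order IPv4, simplified. */
static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	uint8_t prefix = 32;

	while (prefix > 1) {
		uint32_t size = 1u << (32 - (prefix - 1));	/* candidate wider block */

		if (from & (size - 1))
			break;		/* 'from' not aligned to the wider block */
		if (size - 1 > to - from)
			break;		/* wider block would overshoot 'to' */
		prefix--;
	}
	*cidr = prefix;
	return from + ((prefix == 32 ? 1u : 1u << (32 - prefix)) - 1);
}

int main(void)
{
	uint32_t from = 0x0a000001, to = 0x0a000010;	/* 10.0.0.1 - 10.0.0.16 */
	uint8_t cidr;

	for (;;) {
		uint32_t last = range_to_cidr(from, to, &cidr);

		printf("%u.%u.%u.%u/%u\n",
		       (unsigned)(from >> 24), (unsigned)((from >> 16) & 0xff),
		       (unsigned)((from >> 8) & 0xff), (unsigned)(from & 0xff),
		       (unsigned)cidr);
		if (last >= to)
			break;
		from = last + 1;
	}
	return 0;
}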
272 | 273 | ||
273 | static bool | 274 | static bool |
274 | hash_netport_same_set(const struct ip_set *a, const struct ip_set *b) | 275 | hash_netport_same_set(const struct ip_set *a, const struct ip_set *b) |
275 | { | 276 | { |
276 | const struct ip_set_hash *x = a->data; | 277 | const struct ip_set_hash *x = a->data; |
277 | const struct ip_set_hash *y = b->data; | 278 | const struct ip_set_hash *y = b->data; |
278 | 279 | ||
279 | /* Resizing changes htable_bits, so we ignore it */ | 280 | /* Resizing changes htable_bits, so we ignore it */ |
280 | return x->maxelem == y->maxelem && | 281 | return x->maxelem == y->maxelem && |
281 | x->timeout == y->timeout; | 282 | x->timeout == y->timeout; |
282 | } | 283 | } |
283 | 284 | ||
284 | /* The type variant functions: IPv6 */ | 285 | /* The type variant functions: IPv6 */ |
285 | 286 | ||
286 | struct hash_netport6_elem { | 287 | struct hash_netport6_elem { |
287 | union nf_inet_addr ip; | 288 | union nf_inet_addr ip; |
288 | __be16 port; | 289 | __be16 port; |
289 | u8 proto; | 290 | u8 proto; |
290 | u8 cidr; | 291 | u8 cidr; |
291 | }; | 292 | }; |
292 | 293 | ||
293 | struct hash_netport6_telem { | 294 | struct hash_netport6_telem { |
294 | union nf_inet_addr ip; | 295 | union nf_inet_addr ip; |
295 | __be16 port; | 296 | __be16 port; |
296 | u8 proto; | 297 | u8 proto; |
297 | u8 cidr; | 298 | u8 cidr; |
298 | unsigned long timeout; | 299 | unsigned long timeout; |
299 | }; | 300 | }; |
300 | 301 | ||
301 | static inline bool | 302 | static inline bool |
302 | hash_netport6_data_equal(const struct hash_netport6_elem *ip1, | 303 | hash_netport6_data_equal(const struct hash_netport6_elem *ip1, |
303 | const struct hash_netport6_elem *ip2) | 304 | const struct hash_netport6_elem *ip2, |
305 | u32 *multi) | ||
304 | { | 306 | { |
305 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && | 307 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
306 | ip1->port == ip2->port && | 308 | ip1->port == ip2->port && |
307 | ip1->proto == ip2->proto && | 309 | ip1->proto == ip2->proto && |
308 | ip1->cidr == ip2->cidr; | 310 | ip1->cidr == ip2->cidr; |
309 | } | 311 | } |
310 | 312 | ||
311 | static inline bool | 313 | static inline bool |
312 | hash_netport6_data_isnull(const struct hash_netport6_elem *elem) | 314 | hash_netport6_data_isnull(const struct hash_netport6_elem *elem) |
313 | { | 315 | { |
314 | return elem->proto == 0; | 316 | return elem->proto == 0; |
315 | } | 317 | } |
316 | 318 | ||
317 | static inline void | 319 | static inline void |
318 | hash_netport6_data_copy(struct hash_netport6_elem *dst, | 320 | hash_netport6_data_copy(struct hash_netport6_elem *dst, |
319 | const struct hash_netport6_elem *src) | 321 | const struct hash_netport6_elem *src) |
320 | { | 322 | { |
321 | memcpy(dst, src, sizeof(*dst)); | 323 | memcpy(dst, src, sizeof(*dst)); |
322 | } | 324 | } |
323 | 325 | ||
324 | static inline void | 326 | static inline void |
325 | hash_netport6_data_zero_out(struct hash_netport6_elem *elem) | 327 | hash_netport6_data_zero_out(struct hash_netport6_elem *elem) |
326 | { | 328 | { |
327 | elem->proto = 0; | 329 | elem->proto = 0; |
328 | } | 330 | } |
329 | 331 | ||
330 | static inline void | 332 | static inline void |
331 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) | 333 | ip6_netmask(union nf_inet_addr *ip, u8 prefix) |
332 | { | 334 | { |
333 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; | 335 | ip->ip6[0] &= ip_set_netmask6(prefix)[0]; |
334 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; | 336 | ip->ip6[1] &= ip_set_netmask6(prefix)[1]; |
335 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; | 337 | ip->ip6[2] &= ip_set_netmask6(prefix)[2]; |
336 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; | 338 | ip->ip6[3] &= ip_set_netmask6(prefix)[3]; |
337 | } | 339 | } |
338 | 340 | ||
339 | static inline void | 341 | static inline void |
340 | hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr) | 342 | hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr) |
341 | { | 343 | { |
342 | ip6_netmask(&elem->ip, cidr); | 344 | ip6_netmask(&elem->ip, cidr); |
343 | elem->cidr = cidr; | 345 | elem->cidr = cidr; |
344 | } | 346 | } |
345 | 347 | ||
346 | static bool | 348 | static bool |
347 | hash_netport6_data_list(struct sk_buff *skb, | 349 | hash_netport6_data_list(struct sk_buff *skb, |
348 | const struct hash_netport6_elem *data) | 350 | const struct hash_netport6_elem *data) |
349 | { | 351 | { |
350 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); | 352 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); |
351 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 353 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
352 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 354 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
353 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 355 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
354 | return 0; | 356 | return 0; |
355 | 357 | ||
356 | nla_put_failure: | 358 | nla_put_failure: |
357 | return 1; | 359 | return 1; |
358 | } | 360 | } |
359 | 361 | ||
360 | static bool | 362 | static bool |
361 | hash_netport6_data_tlist(struct sk_buff *skb, | 363 | hash_netport6_data_tlist(struct sk_buff *skb, |
362 | const struct hash_netport6_elem *data) | 364 | const struct hash_netport6_elem *data) |
363 | { | 365 | { |
364 | const struct hash_netport6_telem *e = | 366 | const struct hash_netport6_telem *e = |
365 | (const struct hash_netport6_telem *)data; | 367 | (const struct hash_netport6_telem *)data; |
366 | 368 | ||
367 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); | 369 | NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); |
368 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); | 370 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); |
369 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); | 371 | NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); |
370 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); | 372 | NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); |
371 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, | 373 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, |
372 | htonl(ip_set_timeout_get(e->timeout))); | 374 | htonl(ip_set_timeout_get(e->timeout))); |
373 | return 0; | 375 | return 0; |
374 | 376 | ||
375 | nla_put_failure: | 377 | nla_put_failure: |
376 | return 1; | 378 | return 1; |
377 | } | 379 | } |
378 | 380 | ||
379 | #undef PF | 381 | #undef PF |
380 | #undef HOST_MASK | 382 | #undef HOST_MASK |
381 | 383 | ||
382 | #define PF 6 | 384 | #define PF 6 |
383 | #define HOST_MASK 128 | 385 | #define HOST_MASK 128 |
384 | #include <linux/netfilter/ipset/ip_set_ahash.h> | 386 | #include <linux/netfilter/ipset/ip_set_ahash.h> |
385 | 387 | ||
386 | static inline void | 388 | static inline void |
387 | hash_netport6_data_next(struct ip_set_hash *h, | 389 | hash_netport6_data_next(struct ip_set_hash *h, |
388 | const struct hash_netport6_elem *d) | 390 | const struct hash_netport6_elem *d) |
389 | { | 391 | { |
390 | h->next.port = ntohs(d->port); | 392 | h->next.port = ntohs(d->port); |
391 | } | 393 | } |
392 | 394 | ||
393 | static int | 395 | static int |
394 | hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb, | 396 | hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb, |
395 | const struct xt_action_param *par, | 397 | const struct xt_action_param *par, |
396 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) | 398 | enum ipset_adt adt, const struct ip_set_adt_opt *opt) |
397 | { | 399 | { |
398 | const struct ip_set_hash *h = set->data; | 400 | const struct ip_set_hash *h = set->data; |
399 | ipset_adtfn adtfn = set->variant->adt[adt]; | 401 | ipset_adtfn adtfn = set->variant->adt[adt]; |
400 | struct hash_netport6_elem data = { | 402 | struct hash_netport6_elem data = { |
401 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK | 403 | .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK |
402 | }; | 404 | }; |
403 | 405 | ||
404 | if (data.cidr == 0) | 406 | if (data.cidr == 0) |
405 | return -EINVAL; | 407 | return -EINVAL; |
406 | if (adt == IPSET_TEST) | 408 | if (adt == IPSET_TEST) |
407 | data.cidr = HOST_MASK; | 409 | data.cidr = HOST_MASK; |
408 | 410 | ||
409 | if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, | 411 | if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, |
410 | &data.port, &data.proto)) | 412 | &data.port, &data.proto)) |
411 | return -EINVAL; | 413 | return -EINVAL; |
412 | 414 | ||
413 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); | 415 | ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); |
414 | ip6_netmask(&data.ip, data.cidr); | 416 | ip6_netmask(&data.ip, data.cidr); |
415 | 417 | ||
416 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); | 418 | return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); |
417 | } | 419 | } |
418 | 420 | ||
419 | static int | 421 | static int |
420 | hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[], | 422 | hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[], |
421 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) | 423 | enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) |
422 | { | 424 | { |
423 | const struct ip_set_hash *h = set->data; | 425 | const struct ip_set_hash *h = set->data; |
424 | ipset_adtfn adtfn = set->variant->adt[adt]; | 426 | ipset_adtfn adtfn = set->variant->adt[adt]; |
425 | struct hash_netport6_elem data = { .cidr = HOST_MASK }; | 427 | struct hash_netport6_elem data = { .cidr = HOST_MASK }; |
426 | u32 port, port_to; | 428 | u32 port, port_to; |
427 | u32 timeout = h->timeout; | 429 | u32 timeout = h->timeout; |
428 | bool with_ports = false; | 430 | bool with_ports = false; |
429 | int ret; | 431 | int ret; |
430 | 432 | ||
431 | if (unlikely(!tb[IPSET_ATTR_IP] || | 433 | if (unlikely(!tb[IPSET_ATTR_IP] || |
432 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || | 434 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || |
433 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || | 435 | !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || |
434 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 436 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
435 | return -IPSET_ERR_PROTOCOL; | 437 | return -IPSET_ERR_PROTOCOL; |
436 | if (unlikely(tb[IPSET_ATTR_IP_TO])) | 438 | if (unlikely(tb[IPSET_ATTR_IP_TO])) |
437 | return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; | 439 | return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; |
438 | 440 | ||
439 | if (tb[IPSET_ATTR_LINENO]) | 441 | if (tb[IPSET_ATTR_LINENO]) |
440 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); | 442 | *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); |
441 | 443 | ||
442 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); | 444 | ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); |
443 | if (ret) | 445 | if (ret) |
444 | return ret; | 446 | return ret; |
445 | 447 | ||
446 | if (tb[IPSET_ATTR_CIDR]) | 448 | if (tb[IPSET_ATTR_CIDR]) |
447 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); | 449 | data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); |
448 | if (!data.cidr) | 450 | if (!data.cidr) |
449 | return -IPSET_ERR_INVALID_CIDR; | 451 | return -IPSET_ERR_INVALID_CIDR; |
450 | ip6_netmask(&data.ip, data.cidr); | 452 | ip6_netmask(&data.ip, data.cidr); |
451 | 453 | ||
452 | if (tb[IPSET_ATTR_PORT]) | 454 | if (tb[IPSET_ATTR_PORT]) |
453 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); | 455 | data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); |
454 | else | 456 | else |
455 | return -IPSET_ERR_PROTOCOL; | 457 | return -IPSET_ERR_PROTOCOL; |
456 | 458 | ||
457 | if (tb[IPSET_ATTR_PROTO]) { | 459 | if (tb[IPSET_ATTR_PROTO]) { |
458 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); | 460 | data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); |
459 | with_ports = ip_set_proto_with_ports(data.proto); | 461 | with_ports = ip_set_proto_with_ports(data.proto); |
460 | 462 | ||
461 | if (data.proto == 0) | 463 | if (data.proto == 0) |
462 | return -IPSET_ERR_INVALID_PROTO; | 464 | return -IPSET_ERR_INVALID_PROTO; |
463 | } else | 465 | } else |
464 | return -IPSET_ERR_MISSING_PROTO; | 466 | return -IPSET_ERR_MISSING_PROTO; |
465 | 467 | ||
466 | if (!(with_ports || data.proto == IPPROTO_ICMPV6)) | 468 | if (!(with_ports || data.proto == IPPROTO_ICMPV6)) |
467 | data.port = 0; | 469 | data.port = 0; |
468 | 470 | ||
469 | if (tb[IPSET_ATTR_TIMEOUT]) { | 471 | if (tb[IPSET_ATTR_TIMEOUT]) { |
470 | if (!with_timeout(h->timeout)) | 472 | if (!with_timeout(h->timeout)) |
471 | return -IPSET_ERR_TIMEOUT; | 473 | return -IPSET_ERR_TIMEOUT; |
472 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 474 | timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
473 | } | 475 | } |
474 | 476 | ||
475 | if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { | 477 | if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { |
476 | ret = adtfn(set, &data, timeout, flags); | 478 | ret = adtfn(set, &data, timeout, flags); |
477 | return ip_set_eexist(ret, flags) ? 0 : ret; | 479 | return ip_set_eexist(ret, flags) ? 0 : ret; |
478 | } | 480 | } |
479 | 481 | ||
480 | port = ntohs(data.port); | 482 | port = ntohs(data.port); |
481 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); | 483 | port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); |
482 | if (port > port_to) | 484 | if (port > port_to) |
483 | swap(port, port_to); | 485 | swap(port, port_to); |
484 | 486 | ||
485 | if (retried) | 487 | if (retried) |
486 | port = h->next.port; | 488 | port = h->next.port; |
487 | for (; port <= port_to; port++) { | 489 | for (; port <= port_to; port++) { |
488 | data.port = htons(port); | 490 | data.port = htons(port); |
489 | ret = adtfn(set, &data, timeout, flags); | 491 | ret = adtfn(set, &data, timeout, flags); |
490 | 492 | ||
491 | if (ret && !ip_set_eexist(ret, flags)) | 493 | if (ret && !ip_set_eexist(ret, flags)) |
492 | return ret; | 494 | return ret; |
493 | else | 495 | else |
494 | ret = 0; | 496 | ret = 0; |
495 | } | 497 | } |
496 | return ret; | 498 | return ret; |
497 | } | 499 | } |
498 | 500 | ||
499 | /* Create hash:net,port type of sets */ | 501 | /* Create hash:net,port type of sets */ |
500 | 502 | ||
501 | static int | 503 | static int |
502 | hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags) | 504 | hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags) |
503 | { | 505 | { |
504 | struct ip_set_hash *h; | 506 | struct ip_set_hash *h; |
505 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; | 507 | u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; |
506 | u8 hbits; | 508 | u8 hbits; |
507 | 509 | ||
508 | if (!(set->family == AF_INET || set->family == AF_INET6)) | 510 | if (!(set->family == AF_INET || set->family == AF_INET6)) |
509 | return -IPSET_ERR_INVALID_FAMILY; | 511 | return -IPSET_ERR_INVALID_FAMILY; |
510 | 512 | ||
511 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || | 513 | if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || |
512 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || | 514 | !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || |
513 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) | 515 | !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) |
514 | return -IPSET_ERR_PROTOCOL; | 516 | return -IPSET_ERR_PROTOCOL; |
515 | 517 | ||
516 | if (tb[IPSET_ATTR_HASHSIZE]) { | 518 | if (tb[IPSET_ATTR_HASHSIZE]) { |
517 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); | 519 | hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); |
518 | if (hashsize < IPSET_MIMINAL_HASHSIZE) | 520 | if (hashsize < IPSET_MIMINAL_HASHSIZE) |
519 | hashsize = IPSET_MIMINAL_HASHSIZE; | 521 | hashsize = IPSET_MIMINAL_HASHSIZE; |
520 | } | 522 | } |
521 | 523 | ||
522 | if (tb[IPSET_ATTR_MAXELEM]) | 524 | if (tb[IPSET_ATTR_MAXELEM]) |
523 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); | 525 | maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); |
524 | 526 | ||
525 | h = kzalloc(sizeof(*h) | 527 | h = kzalloc(sizeof(*h) |
526 | + sizeof(struct ip_set_hash_nets) | 528 | + sizeof(struct ip_set_hash_nets) |
527 | * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); | 529 | * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); |
528 | if (!h) | 530 | if (!h) |
529 | return -ENOMEM; | 531 | return -ENOMEM; |
530 | 532 | ||
531 | h->maxelem = maxelem; | 533 | h->maxelem = maxelem; |
532 | get_random_bytes(&h->initval, sizeof(h->initval)); | 534 | get_random_bytes(&h->initval, sizeof(h->initval)); |
533 | h->timeout = IPSET_NO_TIMEOUT; | 535 | h->timeout = IPSET_NO_TIMEOUT; |
534 | 536 | ||
535 | hbits = htable_bits(hashsize); | 537 | hbits = htable_bits(hashsize); |
536 | h->table = ip_set_alloc( | 538 | h->table = ip_set_alloc( |
537 | sizeof(struct htable) | 539 | sizeof(struct htable) |
538 | + jhash_size(hbits) * sizeof(struct hbucket)); | 540 | + jhash_size(hbits) * sizeof(struct hbucket)); |
539 | if (!h->table) { | 541 | if (!h->table) { |
540 | kfree(h); | 542 | kfree(h); |
541 | return -ENOMEM; | 543 | return -ENOMEM; |
542 | } | 544 | } |
543 | h->table->htable_bits = hbits; | 545 | h->table->htable_bits = hbits; |
544 | 546 | ||
545 | set->data = h; | 547 | set->data = h; |
546 | 548 | ||
547 | if (tb[IPSET_ATTR_TIMEOUT]) { | 549 | if (tb[IPSET_ATTR_TIMEOUT]) { |
548 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); | 550 | h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); |
549 | 551 | ||
550 | set->variant = set->family == AF_INET | 552 | set->variant = set->family == AF_INET |
551 | ? &hash_netport4_tvariant : &hash_netport6_tvariant; | 553 | ? &hash_netport4_tvariant : &hash_netport6_tvariant; |
552 | 554 | ||
553 | if (set->family == AF_INET) | 555 | if (set->family == AF_INET) |
554 | hash_netport4_gc_init(set); | 556 | hash_netport4_gc_init(set); |
555 | else | 557 | else |
556 | hash_netport6_gc_init(set); | 558 | hash_netport6_gc_init(set); |
557 | } else { | 559 | } else { |
558 | set->variant = set->family == AF_INET | 560 | set->variant = set->family == AF_INET |
559 | ? &hash_netport4_variant : &hash_netport6_variant; | 561 | ? &hash_netport4_variant : &hash_netport6_variant; |
560 | } | 562 | } |
561 | 563 | ||
562 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", | 564 | pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", |
563 | set->name, jhash_size(h->table->htable_bits), | 565 | set->name, jhash_size(h->table->htable_bits), |
564 | h->table->htable_bits, h->maxelem, set->data, h->table); | 566 | h->table->htable_bits, h->maxelem, set->data, h->table); |
565 | 567 | ||
566 | return 0; | 568 | return 0; |
567 | } | 569 | } |
568 | 570 | ||
569 | static struct ip_set_type hash_netport_type __read_mostly = { | 571 | static struct ip_set_type hash_netport_type __read_mostly = { |
570 | .name = "hash:net,port", | 572 | .name = "hash:net,port", |
571 | .protocol = IPSET_PROTOCOL, | 573 | .protocol = IPSET_PROTOCOL, |
572 | .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, | 574 | .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, |
573 | .dimension = IPSET_DIM_TWO, | 575 | .dimension = IPSET_DIM_TWO, |
574 | .family = AF_UNSPEC, | 576 | .family = AF_UNSPEC, |
575 | .revision_min = 0, | 577 | .revision_min = 0, |
576 | /* 1 SCTP and UDPLITE support added */ | 578 | /* 1 SCTP and UDPLITE support added */ |
577 | .revision_max = 2, /* Range as input support for IPv4 added */ | 579 | .revision_max = 2, /* Range as input support for IPv4 added */ |
578 | .create = hash_netport_create, | 580 | .create = hash_netport_create, |
579 | .create_policy = { | 581 | .create_policy = { |
580 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, | 582 | [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, |
581 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, | 583 | [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, |
582 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, | 584 | [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, |
583 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, | 585 | [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, |
584 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, | 586 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, |
585 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 587 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
586 | }, | 588 | }, |
587 | .adt_policy = { | 589 | .adt_policy = { |
588 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, | 590 | [IPSET_ATTR_IP] = { .type = NLA_NESTED }, |
589 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, | 591 | [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, |
590 | [IPSET_ATTR_PORT] = { .type = NLA_U16 }, | 592 | [IPSET_ATTR_PORT] = { .type = NLA_U16 }, |
591 | [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, | 593 | [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, |
592 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, | 594 | [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, |
593 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, | 595 | [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, |
594 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, | 596 | [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, |
595 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, | 597 | [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, |
596 | }, | 598 | }, |
597 | .me = THIS_MODULE, | 599 | .me = THIS_MODULE, |
598 | }; | 600 | }; |
599 | 601 | ||
600 | static int __init | 602 | static int __init |
601 | hash_netport_init(void) | 603 | hash_netport_init(void) |
602 | { | 604 | { |
603 | return ip_set_type_register(&hash_netport_type); | 605 | return ip_set_type_register(&hash_netport_type); |
604 | } | 606 | } |
605 | 607 | ||
606 | static void __exit | 608 | static void __exit |
607 | hash_netport_fini(void) | 609 | hash_netport_fini(void) |
608 | { | 610 | { |
609 | ip_set_type_unregister(&hash_netport_type); | 611 | ip_set_type_unregister(&hash_netport_type); |
610 | } | 612 | } |
611 | 613 | ||
612 | module_init(hash_netport_init); | 614 | module_init(hash_netport_init); |
613 | module_exit(hash_netport_fini); | 615 | module_exit(hash_netport_fini); |
614 | 616 |
net/netfilter/nfnetlink.c
1 | /* Netfilter messages via netlink socket. Allows for user space | 1 | /* Netfilter messages via netlink socket. Allows for user space |
2 | * protocol helpers and general trouble making from userspace. | 2 | * protocol helpers and general trouble making from userspace. |
3 | * | 3 | * |
4 | * (C) 2001 by Jay Schulist <jschlst@samba.org>, | 4 | * (C) 2001 by Jay Schulist <jschlst@samba.org>, |
5 | * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org> | 5 | * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org> |
6 | * (C) 2005,2007 by Pablo Neira Ayuso <pablo@netfilter.org> | 6 | * (C) 2005,2007 by Pablo Neira Ayuso <pablo@netfilter.org> |
7 | * | 7 | * |
8 | * Initial netfilter messages via netlink development funded and | 8 | * Initial netfilter messages via netlink development funded and |
9 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) | 9 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) |
10 | * | 10 | * |
11 | * Further development of this code funded by Astaro AG (http://www.astaro.com) | 11 | * Further development of this code funded by Astaro AG (http://www.astaro.com) |
12 | * | 12 | * |
13 | * This software may be used and distributed according to the terms | 13 | * This software may be used and distributed according to the terms |
14 | * of the GNU General Public License, incorporated herein by reference. | 14 | * of the GNU General Public License, incorporated herein by reference. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/socket.h> | 19 | #include <linux/socket.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | #include <linux/sockios.h> | 22 | #include <linux/sockios.h> |
23 | #include <linux/net.h> | 23 | #include <linux/net.h> |
24 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
25 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
26 | #include <asm/system.h> | 26 | #include <asm/system.h> |
27 | #include <net/sock.h> | 27 | #include <net/sock.h> |
28 | #include <net/netlink.h> | 28 | #include <net/netlink.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | 30 | ||
31 | #include <linux/netlink.h> | 31 | #include <linux/netlink.h> |
32 | #include <linux/netfilter/nfnetlink.h> | 32 | #include <linux/netfilter/nfnetlink.h> |
33 | 33 | ||
34 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
35 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 35 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
36 | MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER); | 36 | MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER); |
37 | 37 | ||
38 | static char __initdata nfversion[] = "0.30"; | 38 | static char __initdata nfversion[] = "0.30"; |
39 | 39 | ||
40 | static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; | 40 | static const struct nfnetlink_subsystem __rcu *subsys_table[NFNL_SUBSYS_COUNT]; |
41 | static DEFINE_MUTEX(nfnl_mutex); | 41 | static DEFINE_MUTEX(nfnl_mutex); |
42 | 42 | ||
43 | void nfnl_lock(void) | 43 | void nfnl_lock(void) |
44 | { | 44 | { |
45 | mutex_lock(&nfnl_mutex); | 45 | mutex_lock(&nfnl_mutex); |
46 | } | 46 | } |
47 | EXPORT_SYMBOL_GPL(nfnl_lock); | 47 | EXPORT_SYMBOL_GPL(nfnl_lock); |
48 | 48 | ||
49 | void nfnl_unlock(void) | 49 | void nfnl_unlock(void) |
50 | { | 50 | { |
51 | mutex_unlock(&nfnl_mutex); | 51 | mutex_unlock(&nfnl_mutex); |
52 | } | 52 | } |
53 | EXPORT_SYMBOL_GPL(nfnl_unlock); | 53 | EXPORT_SYMBOL_GPL(nfnl_unlock); |
54 | 54 | ||
55 | int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) | 55 | int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) |
56 | { | 56 | { |
57 | nfnl_lock(); | 57 | nfnl_lock(); |
58 | if (subsys_table[n->subsys_id]) { | 58 | if (subsys_table[n->subsys_id]) { |
59 | nfnl_unlock(); | 59 | nfnl_unlock(); |
60 | return -EBUSY; | 60 | return -EBUSY; |
61 | } | 61 | } |
62 | subsys_table[n->subsys_id] = n; | 62 | rcu_assign_pointer(subsys_table[n->subsys_id], n); |
63 | nfnl_unlock(); | 63 | nfnl_unlock(); |
64 | 64 | ||
65 | return 0; | 65 | return 0; |
66 | } | 66 | } |
67 | EXPORT_SYMBOL_GPL(nfnetlink_subsys_register); | 67 | EXPORT_SYMBOL_GPL(nfnetlink_subsys_register); |
68 | 68 | ||
69 | int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n) | 69 | int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n) |
70 | { | 70 | { |
71 | nfnl_lock(); | 71 | nfnl_lock(); |
72 | subsys_table[n->subsys_id] = NULL; | 72 | subsys_table[n->subsys_id] = NULL; |
73 | nfnl_unlock(); | 73 | nfnl_unlock(); |
74 | 74 | synchronize_rcu(); | |
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister); | 77 | EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister); |
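The right-hand side of this hunk switches the subsystem table to RCU: registration publishes the pointer with rcu_assign_pointer(), lookups run under rcu_read_lock()/rcu_dereference(), and unregistration follows the NULL store with synchronize_rcu() so no reader can still be holding the entry. The userspace sketch below shows only the publication ordering, with a C11 release store and acquire load standing in for rcu_assign_pointer()/rcu_dereference(); the grace-period side has no equally small stand-in and is only noted in a comment.

#include <stdatomic.h>
#include <stdio.h>

struct subsys {
	const char *name;
	int cb_count;
};

/* Shared slot, analogous to one entry of subsys_table[]. */
static _Atomic(struct subsys *) slot;

/* Writer side: fill in the object, then publish it.  The release store
 * guarantees that readers who see the pointer also see the initialised
 * fields, which is the ordering rcu_assign_pointer() provides. */
static void subsys_register(struct subsys *s)
{
	atomic_store_explicit(&slot, s, memory_order_release);
}

/* Reader side: the acquire load plays the role of rcu_dereference(). */
static const struct subsys *subsys_lookup(void)
{
	return atomic_load_explicit(&slot, memory_order_acquire);
}

static void subsys_unregister(void)
{
	atomic_store_explicit(&slot, NULL, memory_order_release);
	/* In the kernel, synchronize_rcu() is called here so that every
	 * reader still using the old pointer has finished first. */
}

int main(void)
{
	static struct subsys queue = { .name = "nfnetlink_queue", .cb_count = 3 };
	const struct subsys *ss;

	subsys_register(&queue);
	ss = subsys_lookup();
	if (ss)
		printf("%s: %d callbacks\n", ss->name, ss->cb_count);
	subsys_unregister();
	return 0;
}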
78 | 78 | ||
79 | static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type) | 79 | static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type) |
80 | { | 80 | { |
81 | u_int8_t subsys_id = NFNL_SUBSYS_ID(type); | 81 | u_int8_t subsys_id = NFNL_SUBSYS_ID(type); |
82 | 82 | ||
83 | if (subsys_id >= NFNL_SUBSYS_COUNT) | 83 | if (subsys_id >= NFNL_SUBSYS_COUNT) |
84 | return NULL; | 84 | return NULL; |
85 | 85 | ||
86 | return subsys_table[subsys_id]; | 86 | return rcu_dereference(subsys_table[subsys_id]); |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline const struct nfnl_callback * | 89 | static inline const struct nfnl_callback * |
90 | nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss) | 90 | nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss) |
91 | { | 91 | { |
92 | u_int8_t cb_id = NFNL_MSG_TYPE(type); | 92 | u_int8_t cb_id = NFNL_MSG_TYPE(type); |
93 | 93 | ||
94 | if (cb_id >= ss->cb_count) | 94 | if (cb_id >= ss->cb_count) |
95 | return NULL; | 95 | return NULL; |
96 | 96 | ||
97 | return &ss->cb[cb_id]; | 97 | return &ss->cb[cb_id]; |
98 | } | 98 | } |
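nfnetlink_get_subsys() and nfnetlink_find_client() split the 16-bit nlmsg_type in two: NFNL_SUBSYS_ID() takes the high byte to pick the subsystem, NFNL_MSG_TYPE() the low byte to pick the callback within it (both macros come from include/linux/netfilter/nfnetlink.h). A standalone illustration of that split, using the queue subsystem's verdict message as the example value:

#include <stdio.h>
#include <stdint.h>

/* Same bit layout as NFNL_SUBSYS_ID()/NFNL_MSG_TYPE(): subsystem id in the
 * high byte, per-subsystem message type in the low byte. */
#define SUBSYS_ID(type)	(((type) & 0xff00) >> 8)
#define MSG_TYPE(type)	((type) & 0x00ff)

int main(void)
{
	/* NFNL_SUBSYS_QUEUE (3) << 8 | NFQNL_MSG_VERDICT (1) */
	uint16_t type = 0x0301;

	printf("nlmsg_type 0x%04x -> subsys %u, message %u\n",
	       type, (unsigned)SUBSYS_ID(type), (unsigned)MSG_TYPE(type));
	return 0;
}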
99 | 99 | ||
100 | int nfnetlink_has_listeners(struct net *net, unsigned int group) | 100 | int nfnetlink_has_listeners(struct net *net, unsigned int group) |
101 | { | 101 | { |
102 | return netlink_has_listeners(net->nfnl, group); | 102 | return netlink_has_listeners(net->nfnl, group); |
103 | } | 103 | } |
104 | EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); | 104 | EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); |
105 | 105 | ||
106 | int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, | 106 | int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, |
107 | unsigned group, int echo, gfp_t flags) | 107 | unsigned group, int echo, gfp_t flags) |
108 | { | 108 | { |
109 | return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); | 109 | return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); |
110 | } | 110 | } |
111 | EXPORT_SYMBOL_GPL(nfnetlink_send); | 111 | EXPORT_SYMBOL_GPL(nfnetlink_send); |
112 | 112 | ||
113 | int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) | 113 | int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) |
114 | { | 114 | { |
115 | return netlink_set_err(net->nfnl, pid, group, error); | 115 | return netlink_set_err(net->nfnl, pid, group, error); |
116 | } | 116 | } |
117 | EXPORT_SYMBOL_GPL(nfnetlink_set_err); | 117 | EXPORT_SYMBOL_GPL(nfnetlink_set_err); |
118 | 118 | ||
119 | int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags) | 119 | int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags) |
120 | { | 120 | { |
121 | return netlink_unicast(net->nfnl, skb, pid, flags); | 121 | return netlink_unicast(net->nfnl, skb, pid, flags); |
122 | } | 122 | } |
123 | EXPORT_SYMBOL_GPL(nfnetlink_unicast); | 123 | EXPORT_SYMBOL_GPL(nfnetlink_unicast); |
124 | 124 | ||
125 | /* Process one complete nfnetlink message. */ | 125 | /* Process one complete nfnetlink message. */ |
126 | static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | 126 | static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
127 | { | 127 | { |
128 | struct net *net = sock_net(skb->sk); | 128 | struct net *net = sock_net(skb->sk); |
129 | const struct nfnl_callback *nc; | 129 | const struct nfnl_callback *nc; |
130 | const struct nfnetlink_subsystem *ss; | 130 | const struct nfnetlink_subsystem *ss; |
131 | int type, err; | 131 | int type, err; |
132 | 132 | ||
133 | if (security_netlink_recv(skb, CAP_NET_ADMIN)) | 133 | if (security_netlink_recv(skb, CAP_NET_ADMIN)) |
134 | return -EPERM; | 134 | return -EPERM; |
135 | 135 | ||
136 | /* All the messages must at least contain nfgenmsg */ | 136 | /* All the messages must at least contain nfgenmsg */ |
137 | if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct nfgenmsg))) | 137 | if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct nfgenmsg))) |
138 | return 0; | 138 | return 0; |
139 | 139 | ||
140 | type = nlh->nlmsg_type; | 140 | type = nlh->nlmsg_type; |
141 | replay: | 141 | replay: |
142 | rcu_read_lock(); | ||
142 | ss = nfnetlink_get_subsys(type); | 143 | ss = nfnetlink_get_subsys(type); |
143 | if (!ss) { | 144 | if (!ss) { |
144 | #ifdef CONFIG_MODULES | 145 | #ifdef CONFIG_MODULES |
145 | nfnl_unlock(); | 146 | rcu_read_unlock(); |
146 | request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type)); | 147 | request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type)); |
147 | nfnl_lock(); | 148 | rcu_read_lock(); |
148 | ss = nfnetlink_get_subsys(type); | 149 | ss = nfnetlink_get_subsys(type); |
149 | if (!ss) | 150 | if (!ss) |
150 | #endif | 151 | #endif |
152 | { | ||
153 | rcu_read_unlock(); | ||
151 | return -EINVAL; | 154 | return -EINVAL; |
155 | } | ||
152 | } | 156 | } |
153 | 157 | ||
154 | nc = nfnetlink_find_client(type, ss); | 158 | nc = nfnetlink_find_client(type, ss); |
155 | if (!nc) | 159 | if (!nc) { |
160 | rcu_read_unlock(); | ||
156 | return -EINVAL; | 161 | return -EINVAL; |
162 | } | ||
157 | 163 | ||
158 | { | 164 | { |
159 | int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); | 165 | int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); |
160 | u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); | 166 | u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); |
161 | struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; | 167 | struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; |
162 | struct nlattr *attr = (void *)nlh + min_len; | 168 | struct nlattr *attr = (void *)nlh + min_len; |
163 | int attrlen = nlh->nlmsg_len - min_len; | 169 | int attrlen = nlh->nlmsg_len - min_len; |
164 | 170 | ||
165 | err = nla_parse(cda, ss->cb[cb_id].attr_count, | 171 | err = nla_parse(cda, ss->cb[cb_id].attr_count, |
166 | attr, attrlen, ss->cb[cb_id].policy); | 172 | attr, attrlen, ss->cb[cb_id].policy); |
167 | if (err < 0) | 173 | if (err < 0) |
168 | return err; | 174 | return err; |
169 | 175 | ||
170 | err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda); | 176 | if (nc->call_rcu) { |
177 | err = nc->call_rcu(net->nfnl, skb, nlh, | ||
178 | (const struct nlattr **)cda); | ||
179 | rcu_read_unlock(); | ||
180 | } else { | ||
181 | rcu_read_unlock(); | ||
182 | nfnl_lock(); | ||
183 | if (rcu_dereference_protected( | ||
184 | subsys_table[NFNL_SUBSYS_ID(type)], | ||
185 | lockdep_is_held(&nfnl_mutex)) != ss || | ||
186 | nfnetlink_find_client(type, ss) != nc) | ||
187 | err = -EAGAIN; | ||
188 | else | ||
189 | err = nc->call(net->nfnl, skb, nlh, | ||
190 | (const struct nlattr **)cda); | ||
191 | nfnl_unlock(); | ||
192 | } | ||
171 | if (err == -EAGAIN) | 193 | if (err == -EAGAIN) |
172 | goto replay; | 194 | goto replay; |
173 | return err; | 195 | return err; |
174 | } | 196 | } |
175 | } | 197 | } |
176 | 198 | ||
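The dispatch above is why struct nfnl_callback gains a call_rcu member in this commit: a handler registered through .call_rcu runs entirely inside the rcu_read_lock() section and must not sleep, while a handler left on .call still executes with nfnl_mutex held, but only after re-checking that neither the subsystem nor the callback was replaced while the locks were switched; if they were, the message is replayed via -EAGAIN. A hypothetical callback table mixing both modes (the example_* names are illustrative, not part of this commit):

	static const struct nfnl_callback example_cb[EXAMPLE_MSG_MAX] = {
		[EXAMPLE_MSG_FAST]   = { .call_rcu   = example_recv_fast,
					 .attr_count = EXAMPLE_ATTR_MAX,
					 .policy     = example_policy },
		[EXAMPLE_MSG_CONFIG] = { .call       = example_recv_config,	/* may sleep */
					 .attr_count = EXAMPLE_ATTR_MAX,
					 .policy     = example_policy },
	};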
177 | static void nfnetlink_rcv(struct sk_buff *skb) | 199 | static void nfnetlink_rcv(struct sk_buff *skb) |
178 | { | 200 | { |
179 | nfnl_lock(); | ||
180 | netlink_rcv_skb(skb, &nfnetlink_rcv_msg); | 201 | netlink_rcv_skb(skb, &nfnetlink_rcv_msg); |
181 | nfnl_unlock(); | ||
182 | } | 202 | } |
183 | 203 | ||
184 | static int __net_init nfnetlink_net_init(struct net *net) | 204 | static int __net_init nfnetlink_net_init(struct net *net) |
185 | { | 205 | { |
186 | struct sock *nfnl; | 206 | struct sock *nfnl; |
187 | 207 | ||
188 | nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX, | 208 | nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX, |
189 | nfnetlink_rcv, NULL, THIS_MODULE); | 209 | nfnetlink_rcv, NULL, THIS_MODULE); |
190 | if (!nfnl) | 210 | if (!nfnl) |
191 | return -ENOMEM; | 211 | return -ENOMEM; |
192 | net->nfnl_stash = nfnl; | 212 | net->nfnl_stash = nfnl; |
193 | rcu_assign_pointer(net->nfnl, nfnl); | 213 | rcu_assign_pointer(net->nfnl, nfnl); |
194 | return 0; | 214 | return 0; |
195 | } | 215 | } |
196 | 216 | ||
197 | static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list) | 217 | static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list) |
198 | { | 218 | { |
199 | struct net *net; | 219 | struct net *net; |
200 | 220 | ||
201 | list_for_each_entry(net, net_exit_list, exit_list) | 221 | list_for_each_entry(net, net_exit_list, exit_list) |
202 | rcu_assign_pointer(net->nfnl, NULL); | 222 | rcu_assign_pointer(net->nfnl, NULL); |
203 | synchronize_net(); | 223 | synchronize_net(); |
204 | list_for_each_entry(net, net_exit_list, exit_list) | 224 | list_for_each_entry(net, net_exit_list, exit_list) |
205 | netlink_kernel_release(net->nfnl_stash); | 225 | netlink_kernel_release(net->nfnl_stash); |
206 | } | 226 | } |
207 | 227 | ||
208 | static struct pernet_operations nfnetlink_net_ops = { | 228 | static struct pernet_operations nfnetlink_net_ops = { |
209 | .init = nfnetlink_net_init, | 229 | .init = nfnetlink_net_init, |
210 | .exit_batch = nfnetlink_net_exit_batch, | 230 | .exit_batch = nfnetlink_net_exit_batch, |
211 | }; | 231 | }; |
212 | 232 | ||
213 | static int __init nfnetlink_init(void) | 233 | static int __init nfnetlink_init(void) |
214 | { | 234 | { |
215 | pr_info("Netfilter messages via NETLINK v%s.\n", nfversion); | 235 | pr_info("Netfilter messages via NETLINK v%s.\n", nfversion); |
216 | return register_pernet_subsys(&nfnetlink_net_ops); | 236 | return register_pernet_subsys(&nfnetlink_net_ops); |
217 | } | 237 | } |
218 | 238 | ||
219 | static void __exit nfnetlink_exit(void) | 239 | static void __exit nfnetlink_exit(void) |
220 | { | 240 | { |
221 | pr_info("Removing netfilter NETLINK layer.\n"); | 241 | pr_info("Removing netfilter NETLINK layer.\n"); |
222 | unregister_pernet_subsys(&nfnetlink_net_ops); | 242 | unregister_pernet_subsys(&nfnetlink_net_ops); |
223 | } | 243 | } |
224 | module_init(nfnetlink_init); | 244 | module_init(nfnetlink_init); |
net/netfilter/nfnetlink_queue.c
1 | /* | 1 | /* |
2 | * This is a module which is used for queueing packets and communicating with | 2 | * This is a module which is used for queueing packets and communicating with |
3 | * userspace via nfnetlink. | 3 | * userspace via nfnetlink. |
4 | * | 4 | * |
5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> | 5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> |
6 | * (C) 2007 by Patrick McHardy <kaber@trash.net> | 6 | * (C) 2007 by Patrick McHardy <kaber@trash.net> |
7 | * | 7 | * |
8 | * Based on the old ipv4-only ip_queue.c: | 8 | * Based on the old ipv4-only ip_queue.c: |
9 | * (C) 2000-2002 James Morris <jmorris@intercode.com.au> | 9 | * (C) 2000-2002 James Morris <jmorris@intercode.com.au> |
10 | * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> | 10 | * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 13 | * it under the terms of the GNU General Public License version 2 as |
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/notifier.h> | 22 | #include <linux/notifier.h> |
23 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
24 | #include <linux/netfilter.h> | 24 | #include <linux/netfilter.h> |
25 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
26 | #include <linux/netfilter_ipv4.h> | 26 | #include <linux/netfilter_ipv4.h> |
27 | #include <linux/netfilter_ipv6.h> | 27 | #include <linux/netfilter_ipv6.h> |
28 | #include <linux/netfilter/nfnetlink.h> | 28 | #include <linux/netfilter/nfnetlink.h> |
29 | #include <linux/netfilter/nfnetlink_queue.h> | 29 | #include <linux/netfilter/nfnetlink_queue.h> |
30 | #include <linux/list.h> | 30 | #include <linux/list.h> |
31 | #include <net/sock.h> | 31 | #include <net/sock.h> |
32 | #include <net/netfilter/nf_queue.h> | 32 | #include <net/netfilter/nf_queue.h> |
33 | 33 | ||
34 | #include <asm/atomic.h> | 34 | #include <asm/atomic.h> |
35 | 35 | ||
36 | #ifdef CONFIG_BRIDGE_NETFILTER | 36 | #ifdef CONFIG_BRIDGE_NETFILTER |
37 | #include "../bridge/br_private.h" | 37 | #include "../bridge/br_private.h" |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | #define NFQNL_QMAX_DEFAULT 1024 | 40 | #define NFQNL_QMAX_DEFAULT 1024 |
41 | 41 | ||
42 | struct nfqnl_instance { | 42 | struct nfqnl_instance { |
43 | struct hlist_node hlist; /* global list of queues */ | 43 | struct hlist_node hlist; /* global list of queues */ |
44 | struct rcu_head rcu; | 44 | struct rcu_head rcu; |
45 | 45 | ||
46 | int peer_pid; | 46 | int peer_pid; |
47 | unsigned int queue_maxlen; | 47 | unsigned int queue_maxlen; |
48 | unsigned int copy_range; | 48 | unsigned int copy_range; |
49 | unsigned int queue_dropped; | 49 | unsigned int queue_dropped; |
50 | unsigned int queue_user_dropped; | 50 | unsigned int queue_user_dropped; |
51 | 51 | ||
52 | 52 | ||
53 | u_int16_t queue_num; /* number of this queue */ | 53 | u_int16_t queue_num; /* number of this queue */ |
54 | u_int8_t copy_mode; | 54 | u_int8_t copy_mode; |
55 | /* | 55 | /* |
56 | * Following fields are dirtied for each queued packet, | 56 | * Following fields are dirtied for each queued packet, |
57 | * keep them in same cache line if possible. | 57 | * keep them in same cache line if possible. |
58 | */ | 58 | */ |
59 | spinlock_t lock; | 59 | spinlock_t lock; |
60 | unsigned int queue_total; | 60 | unsigned int queue_total; |
61 | atomic_t id_sequence; /* 'sequence' of pkt ids */ | 61 | unsigned int id_sequence; /* 'sequence' of pkt ids */ |
62 | struct list_head queue_list; /* packets in queue */ | 62 | struct list_head queue_list; /* packets in queue */ |
63 | }; | 63 | }; |
64 | 64 | ||
65 | typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); | 65 | typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); |
66 | 66 | ||
67 | static DEFINE_SPINLOCK(instances_lock); | 67 | static DEFINE_SPINLOCK(instances_lock); |
68 | 68 | ||
69 | #define INSTANCE_BUCKETS 16 | 69 | #define INSTANCE_BUCKETS 16 |
70 | static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly; | 70 | static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly; |
71 | 71 | ||
72 | static inline u_int8_t instance_hashfn(u_int16_t queue_num) | 72 | static inline u_int8_t instance_hashfn(u_int16_t queue_num) |
73 | { | 73 | { |
74 | return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS; | 74 | return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS; |
75 | } | 75 | } |
76 | 76 | ||
77 | static struct nfqnl_instance * | 77 | static struct nfqnl_instance * |
78 | instance_lookup(u_int16_t queue_num) | 78 | instance_lookup(u_int16_t queue_num) |
79 | { | 79 | { |
80 | struct hlist_head *head; | 80 | struct hlist_head *head; |
81 | struct hlist_node *pos; | 81 | struct hlist_node *pos; |
82 | struct nfqnl_instance *inst; | 82 | struct nfqnl_instance *inst; |
83 | 83 | ||
84 | head = &instance_table[instance_hashfn(queue_num)]; | 84 | head = &instance_table[instance_hashfn(queue_num)]; |
85 | hlist_for_each_entry_rcu(inst, pos, head, hlist) { | 85 | hlist_for_each_entry_rcu(inst, pos, head, hlist) { |
86 | if (inst->queue_num == queue_num) | 86 | if (inst->queue_num == queue_num) |
87 | return inst; | 87 | return inst; |
88 | } | 88 | } |
89 | return NULL; | 89 | return NULL; |
90 | } | 90 | } |
91 | 91 | ||
92 | static struct nfqnl_instance * | 92 | static struct nfqnl_instance * |
93 | instance_create(u_int16_t queue_num, int pid) | 93 | instance_create(u_int16_t queue_num, int pid) |
94 | { | 94 | { |
95 | struct nfqnl_instance *inst; | 95 | struct nfqnl_instance *inst; |
96 | unsigned int h; | 96 | unsigned int h; |
97 | int err; | 97 | int err; |
98 | 98 | ||
99 | spin_lock(&instances_lock); | 99 | spin_lock(&instances_lock); |
100 | if (instance_lookup(queue_num)) { | 100 | if (instance_lookup(queue_num)) { |
101 | err = -EEXIST; | 101 | err = -EEXIST; |
102 | goto out_unlock; | 102 | goto out_unlock; |
103 | } | 103 | } |
104 | 104 | ||
105 | inst = kzalloc(sizeof(*inst), GFP_ATOMIC); | 105 | inst = kzalloc(sizeof(*inst), GFP_ATOMIC); |
106 | if (!inst) { | 106 | if (!inst) { |
107 | err = -ENOMEM; | 107 | err = -ENOMEM; |
108 | goto out_unlock; | 108 | goto out_unlock; |
109 | } | 109 | } |
110 | 110 | ||
111 | inst->queue_num = queue_num; | 111 | inst->queue_num = queue_num; |
112 | inst->peer_pid = pid; | 112 | inst->peer_pid = pid; |
113 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; | 113 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; |
114 | inst->copy_range = 0xfffff; | 114 | inst->copy_range = 0xfffff; |
115 | inst->copy_mode = NFQNL_COPY_NONE; | 115 | inst->copy_mode = NFQNL_COPY_NONE; |
116 | spin_lock_init(&inst->lock); | 116 | spin_lock_init(&inst->lock); |
117 | INIT_LIST_HEAD(&inst->queue_list); | 117 | INIT_LIST_HEAD(&inst->queue_list); |
118 | 118 | ||
119 | if (!try_module_get(THIS_MODULE)) { | 119 | if (!try_module_get(THIS_MODULE)) { |
120 | err = -EAGAIN; | 120 | err = -EAGAIN; |
121 | goto out_free; | 121 | goto out_free; |
122 | } | 122 | } |
123 | 123 | ||
124 | h = instance_hashfn(queue_num); | 124 | h = instance_hashfn(queue_num); |
125 | hlist_add_head_rcu(&inst->hlist, &instance_table[h]); | 125 | hlist_add_head_rcu(&inst->hlist, &instance_table[h]); |
126 | 126 | ||
127 | spin_unlock(&instances_lock); | 127 | spin_unlock(&instances_lock); |
128 | 128 | ||
129 | return inst; | 129 | return inst; |
130 | 130 | ||
131 | out_free: | 131 | out_free: |
132 | kfree(inst); | 132 | kfree(inst); |
133 | out_unlock: | 133 | out_unlock: |
134 | spin_unlock(&instances_lock); | 134 | spin_unlock(&instances_lock); |
135 | return ERR_PTR(err); | 135 | return ERR_PTR(err); |
136 | } | 136 | } |
137 | 137 | ||
138 | static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, | 138 | static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, |
139 | unsigned long data); | 139 | unsigned long data); |
140 | 140 | ||
141 | static void | 141 | static void |
142 | instance_destroy_rcu(struct rcu_head *head) | 142 | instance_destroy_rcu(struct rcu_head *head) |
143 | { | 143 | { |
144 | struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, | 144 | struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, |
145 | rcu); | 145 | rcu); |
146 | 146 | ||
147 | nfqnl_flush(inst, NULL, 0); | 147 | nfqnl_flush(inst, NULL, 0); |
148 | kfree(inst); | 148 | kfree(inst); |
149 | module_put(THIS_MODULE); | 149 | module_put(THIS_MODULE); |
150 | } | 150 | } |
151 | 151 | ||
152 | static void | 152 | static void |
153 | __instance_destroy(struct nfqnl_instance *inst) | 153 | __instance_destroy(struct nfqnl_instance *inst) |
154 | { | 154 | { |
155 | hlist_del_rcu(&inst->hlist); | 155 | hlist_del_rcu(&inst->hlist); |
156 | call_rcu(&inst->rcu, instance_destroy_rcu); | 156 | call_rcu(&inst->rcu, instance_destroy_rcu); |
157 | } | 157 | } |
158 | 158 | ||
159 | static void | 159 | static void |
160 | instance_destroy(struct nfqnl_instance *inst) | 160 | instance_destroy(struct nfqnl_instance *inst) |
161 | { | 161 | { |
162 | spin_lock(&instances_lock); | 162 | spin_lock(&instances_lock); |
163 | __instance_destroy(inst); | 163 | __instance_destroy(inst); |
164 | spin_unlock(&instances_lock); | 164 | spin_unlock(&instances_lock); |
165 | } | 165 | } |
166 | 166 | ||
167 | static inline void | 167 | static inline void |
168 | __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) | 168 | __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) |
169 | { | 169 | { |
170 | list_add_tail(&entry->list, &queue->queue_list); | 170 | list_add_tail(&entry->list, &queue->queue_list); |
171 | queue->queue_total++; | 171 | queue->queue_total++; |
172 | } | 172 | } |
173 | 173 | ||
174 | static void | ||
175 | __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) | ||
176 | { | ||
177 | list_del(&entry->list); | ||
178 | queue->queue_total--; | ||
179 | } | ||
180 | |||
174 | static struct nf_queue_entry * | 181 | static struct nf_queue_entry * |
175 | find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) | 182 | find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) |
176 | { | 183 | { |
177 | struct nf_queue_entry *entry = NULL, *i; | 184 | struct nf_queue_entry *entry = NULL, *i; |
178 | 185 | ||
179 | spin_lock_bh(&queue->lock); | 186 | spin_lock_bh(&queue->lock); |
180 | 187 | ||
181 | list_for_each_entry(i, &queue->queue_list, list) { | 188 | list_for_each_entry(i, &queue->queue_list, list) { |
182 | if (i->id == id) { | 189 | if (i->id == id) { |
183 | entry = i; | 190 | entry = i; |
184 | break; | 191 | break; |
185 | } | 192 | } |
186 | } | 193 | } |
187 | 194 | ||
188 | if (entry) { | 195 | if (entry) |
189 | list_del(&entry->list); | 196 | __dequeue_entry(queue, entry); |
190 | queue->queue_total--; | ||
191 | } | ||
192 | 197 | ||
193 | spin_unlock_bh(&queue->lock); | 198 | spin_unlock_bh(&queue->lock); |
194 | 199 | ||
195 | return entry; | 200 | return entry; |
196 | } | 201 | } |
197 | 202 | ||
198 | static void | 203 | static void |
199 | nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) | 204 | nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) |
200 | { | 205 | { |
201 | struct nf_queue_entry *entry, *next; | 206 | struct nf_queue_entry *entry, *next; |
202 | 207 | ||
203 | spin_lock_bh(&queue->lock); | 208 | spin_lock_bh(&queue->lock); |
204 | list_for_each_entry_safe(entry, next, &queue->queue_list, list) { | 209 | list_for_each_entry_safe(entry, next, &queue->queue_list, list) { |
205 | if (!cmpfn || cmpfn(entry, data)) { | 210 | if (!cmpfn || cmpfn(entry, data)) { |
206 | list_del(&entry->list); | 211 | list_del(&entry->list); |
207 | queue->queue_total--; | 212 | queue->queue_total--; |
208 | nf_reinject(entry, NF_DROP); | 213 | nf_reinject(entry, NF_DROP); |
209 | } | 214 | } |
210 | } | 215 | } |
211 | spin_unlock_bh(&queue->lock); | 216 | spin_unlock_bh(&queue->lock); |
212 | } | 217 | } |
213 | 218 | ||
214 | static struct sk_buff * | 219 | static struct sk_buff * |
215 | nfqnl_build_packet_message(struct nfqnl_instance *queue, | 220 | nfqnl_build_packet_message(struct nfqnl_instance *queue, |
216 | struct nf_queue_entry *entry) | 221 | struct nf_queue_entry *entry, |
222 | __be32 **packet_id_ptr) | ||
217 | { | 223 | { |
218 | sk_buff_data_t old_tail; | 224 | sk_buff_data_t old_tail; |
219 | size_t size; | 225 | size_t size; |
220 | size_t data_len = 0; | 226 | size_t data_len = 0; |
221 | struct sk_buff *skb; | 227 | struct sk_buff *skb; |
222 | struct nfqnl_msg_packet_hdr pmsg; | 228 | struct nlattr *nla; |
229 | struct nfqnl_msg_packet_hdr *pmsg; | ||
223 | struct nlmsghdr *nlh; | 230 | struct nlmsghdr *nlh; |
224 | struct nfgenmsg *nfmsg; | 231 | struct nfgenmsg *nfmsg; |
225 | struct sk_buff *entskb = entry->skb; | 232 | struct sk_buff *entskb = entry->skb; |
226 | struct net_device *indev; | 233 | struct net_device *indev; |
227 | struct net_device *outdev; | 234 | struct net_device *outdev; |
228 | 235 | ||
229 | size = NLMSG_SPACE(sizeof(struct nfgenmsg)) | 236 | size = NLMSG_SPACE(sizeof(struct nfgenmsg)) |
230 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) | 237 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) |
231 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 238 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
232 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 239 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
233 | #ifdef CONFIG_BRIDGE_NETFILTER | 240 | #ifdef CONFIG_BRIDGE_NETFILTER |
234 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 241 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
235 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 242 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
236 | #endif | 243 | #endif |
237 | + nla_total_size(sizeof(u_int32_t)) /* mark */ | 244 | + nla_total_size(sizeof(u_int32_t)) /* mark */ |
238 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) | 245 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) |
239 | + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); | 246 | + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); |
240 | 247 | ||
241 | outdev = entry->outdev; | 248 | outdev = entry->outdev; |
242 | 249 | ||
243 | switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { | 250 | switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { |
244 | case NFQNL_COPY_META: | 251 | case NFQNL_COPY_META: |
245 | case NFQNL_COPY_NONE: | 252 | case NFQNL_COPY_NONE: |
246 | break; | 253 | break; |
247 | 254 | ||
248 | case NFQNL_COPY_PACKET: | 255 | case NFQNL_COPY_PACKET: |
249 | if (entskb->ip_summed == CHECKSUM_PARTIAL && | 256 | if (entskb->ip_summed == CHECKSUM_PARTIAL && |
250 | skb_checksum_help(entskb)) | 257 | skb_checksum_help(entskb)) |
251 | return NULL; | 258 | return NULL; |
252 | 259 | ||
253 | data_len = ACCESS_ONCE(queue->copy_range); | 260 | data_len = ACCESS_ONCE(queue->copy_range); |
254 | if (data_len == 0 || data_len > entskb->len) | 261 | if (data_len == 0 || data_len > entskb->len) |
255 | data_len = entskb->len; | 262 | data_len = entskb->len; |
256 | 263 | ||
257 | size += nla_total_size(data_len); | 264 | size += nla_total_size(data_len); |
258 | break; | 265 | break; |
259 | } | 266 | } |
260 | 267 | ||
261 | 268 | ||
262 | skb = alloc_skb(size, GFP_ATOMIC); | 269 | skb = alloc_skb(size, GFP_ATOMIC); |
263 | if (!skb) | 270 | if (!skb) |
264 | goto nlmsg_failure; | 271 | goto nlmsg_failure; |
265 | 272 | ||
266 | old_tail = skb->tail; | 273 | old_tail = skb->tail; |
267 | nlh = NLMSG_PUT(skb, 0, 0, | 274 | nlh = NLMSG_PUT(skb, 0, 0, |
268 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, | 275 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, |
269 | sizeof(struct nfgenmsg)); | 276 | sizeof(struct nfgenmsg)); |
270 | nfmsg = NLMSG_DATA(nlh); | 277 | nfmsg = NLMSG_DATA(nlh); |
271 | nfmsg->nfgen_family = entry->pf; | 278 | nfmsg->nfgen_family = entry->pf; |
272 | nfmsg->version = NFNETLINK_V0; | 279 | nfmsg->version = NFNETLINK_V0; |
273 | nfmsg->res_id = htons(queue->queue_num); | 280 | nfmsg->res_id = htons(queue->queue_num); |
274 | 281 | ||
275 | entry->id = atomic_inc_return(&queue->id_sequence); | 282 | nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg)); |
276 | pmsg.packet_id = htonl(entry->id); | 283 | pmsg = nla_data(nla); |
277 | pmsg.hw_protocol = entskb->protocol; | 284 | pmsg->hw_protocol = entskb->protocol; |
278 | pmsg.hook = entry->hook; | 285 | pmsg->hook = entry->hook; |
286 | *packet_id_ptr = &pmsg->packet_id; | ||
279 | 287 | ||
280 | NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg); | ||
281 | |||
282 | indev = entry->indev; | 288 | indev = entry->indev; |
283 | if (indev) { | 289 | if (indev) { |
284 | #ifndef CONFIG_BRIDGE_NETFILTER | 290 | #ifndef CONFIG_BRIDGE_NETFILTER |
285 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); | 291 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); |
286 | #else | 292 | #else |
287 | if (entry->pf == PF_BRIDGE) { | 293 | if (entry->pf == PF_BRIDGE) { |
288 | /* Case 1: indev is physical input device, we need to | 294 | /* Case 1: indev is physical input device, we need to |
289 | * look for bridge group (when called from | 295 | * look for bridge group (when called from |
290 | * netfilter_bridge) */ | 296 | * netfilter_bridge) */ |
291 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, | 297 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, |
292 | htonl(indev->ifindex)); | 298 | htonl(indev->ifindex)); |
293 | /* this is the bridge group "brX" */ | 299 | /* this is the bridge group "brX" */ |
294 | /* rcu_read_lock()ed by __nf_queue */ | 300 | /* rcu_read_lock()ed by __nf_queue */ |
295 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, | 301 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, |
296 | htonl(br_port_get_rcu(indev)->br->dev->ifindex)); | 302 | htonl(br_port_get_rcu(indev)->br->dev->ifindex)); |
297 | } else { | 303 | } else { |
298 | /* Case 2: indev is bridge group, we need to look for | 304 | /* Case 2: indev is bridge group, we need to look for |
299 | * physical device (when called from ipv4) */ | 305 | * physical device (when called from ipv4) */ |
300 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, | 306 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, |
301 | htonl(indev->ifindex)); | 307 | htonl(indev->ifindex)); |
302 | if (entskb->nf_bridge && entskb->nf_bridge->physindev) | 308 | if (entskb->nf_bridge && entskb->nf_bridge->physindev) |
303 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, | 309 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, |
304 | htonl(entskb->nf_bridge->physindev->ifindex)); | 310 | htonl(entskb->nf_bridge->physindev->ifindex)); |
305 | } | 311 | } |
306 | #endif | 312 | #endif |
307 | } | 313 | } |
308 | 314 | ||
309 | if (outdev) { | 315 | if (outdev) { |
310 | #ifndef CONFIG_BRIDGE_NETFILTER | 316 | #ifndef CONFIG_BRIDGE_NETFILTER |
311 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); | 317 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); |
312 | #else | 318 | #else |
313 | if (entry->pf == PF_BRIDGE) { | 319 | if (entry->pf == PF_BRIDGE) { |
314 | /* Case 1: outdev is physical output device, we need to | 320 | /* Case 1: outdev is physical output device, we need to |
315 | * look for bridge group (when called from | 321 | * look for bridge group (when called from |
316 | * netfilter_bridge) */ | 322 | * netfilter_bridge) */ |
317 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, | 323 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, |
318 | htonl(outdev->ifindex)); | 324 | htonl(outdev->ifindex)); |
319 | /* this is the bridge group "brX" */ | 325 | /* this is the bridge group "brX" */ |
320 | /* rcu_read_lock()ed by __nf_queue */ | 326 | /* rcu_read_lock()ed by __nf_queue */ |
321 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, | 327 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, |
322 | htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); | 328 | htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); |
323 | } else { | 329 | } else { |
324 | /* Case 2: outdev is bridge group, we need to look for | 330 | /* Case 2: outdev is bridge group, we need to look for |
325 | * physical output device (when called from ipv4) */ | 331 | * physical output device (when called from ipv4) */ |
326 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, | 332 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, |
327 | htonl(outdev->ifindex)); | 333 | htonl(outdev->ifindex)); |
328 | if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) | 334 | if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) |
329 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, | 335 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, |
330 | htonl(entskb->nf_bridge->physoutdev->ifindex)); | 336 | htonl(entskb->nf_bridge->physoutdev->ifindex)); |
331 | } | 337 | } |
332 | #endif | 338 | #endif |
333 | } | 339 | } |
334 | 340 | ||
335 | if (entskb->mark) | 341 | if (entskb->mark) |
336 | NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); | 342 | NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); |
337 | 343 | ||
338 | if (indev && entskb->dev && | 344 | if (indev && entskb->dev && |
339 | entskb->mac_header != entskb->network_header) { | 345 | entskb->mac_header != entskb->network_header) { |
340 | struct nfqnl_msg_packet_hw phw; | 346 | struct nfqnl_msg_packet_hw phw; |
341 | int len = dev_parse_header(entskb, phw.hw_addr); | 347 | int len = dev_parse_header(entskb, phw.hw_addr); |
342 | if (len) { | 348 | if (len) { |
343 | phw.hw_addrlen = htons(len); | 349 | phw.hw_addrlen = htons(len); |
344 | NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); | 350 | NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); |
345 | } | 351 | } |
346 | } | 352 | } |
347 | 353 | ||
348 | if (entskb->tstamp.tv64) { | 354 | if (entskb->tstamp.tv64) { |
349 | struct nfqnl_msg_packet_timestamp ts; | 355 | struct nfqnl_msg_packet_timestamp ts; |
350 | struct timeval tv = ktime_to_timeval(entskb->tstamp); | 356 | struct timeval tv = ktime_to_timeval(entskb->tstamp); |
351 | ts.sec = cpu_to_be64(tv.tv_sec); | 357 | ts.sec = cpu_to_be64(tv.tv_sec); |
352 | ts.usec = cpu_to_be64(tv.tv_usec); | 358 | ts.usec = cpu_to_be64(tv.tv_usec); |
353 | 359 | ||
354 | NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); | 360 | NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); |
355 | } | 361 | } |
356 | 362 | ||
357 | if (data_len) { | 363 | if (data_len) { |
358 | struct nlattr *nla; | 364 | struct nlattr *nla; |
359 | int sz = nla_attr_size(data_len); | 365 | int sz = nla_attr_size(data_len); |
360 | 366 | ||
361 | if (skb_tailroom(skb) < nla_total_size(data_len)) { | 367 | if (skb_tailroom(skb) < nla_total_size(data_len)) { |
362 | printk(KERN_WARNING "nf_queue: no tailroom!\n"); | 368 | printk(KERN_WARNING "nf_queue: no tailroom!\n"); |
363 | goto nlmsg_failure; | 369 | goto nlmsg_failure; |
364 | } | 370 | } |
365 | 371 | ||
366 | nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len)); | 372 | nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len)); |
367 | nla->nla_type = NFQA_PAYLOAD; | 373 | nla->nla_type = NFQA_PAYLOAD; |
368 | nla->nla_len = sz; | 374 | nla->nla_len = sz; |
369 | 375 | ||
370 | if (skb_copy_bits(entskb, 0, nla_data(nla), data_len)) | 376 | if (skb_copy_bits(entskb, 0, nla_data(nla), data_len)) |
371 | BUG(); | 377 | BUG(); |
372 | } | 378 | } |
373 | 379 | ||
374 | nlh->nlmsg_len = skb->tail - old_tail; | 380 | nlh->nlmsg_len = skb->tail - old_tail; |
375 | return skb; | 381 | return skb; |
376 | 382 | ||
377 | nlmsg_failure: | 383 | nlmsg_failure: |
378 | nla_put_failure: | 384 | nla_put_failure: |
379 | if (skb) | 385 | if (skb) |
380 | kfree_skb(skb); | 386 | kfree_skb(skb); |
381 | if (net_ratelimit()) | 387 | if (net_ratelimit()) |
382 | printk(KERN_ERR "nf_queue: error creating packet message\n"); | 388 | printk(KERN_ERR "nf_queue: error creating packet message\n"); |
383 | return NULL; | 389 | return NULL; |
384 | } | 390 | } |
385 | 391 | ||
386 | static int | 392 | static int |
387 | nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) | 393 | nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) |
388 | { | 394 | { |
389 | struct sk_buff *nskb; | 395 | struct sk_buff *nskb; |
390 | struct nfqnl_instance *queue; | 396 | struct nfqnl_instance *queue; |
391 | int err = -ENOBUFS; | 397 | int err = -ENOBUFS; |
398 | __be32 *packet_id_ptr; | ||
392 | 399 | ||
393 | /* rcu_read_lock()ed by nf_hook_slow() */ | 400 | /* rcu_read_lock()ed by nf_hook_slow() */ |
394 | queue = instance_lookup(queuenum); | 401 | queue = instance_lookup(queuenum); |
395 | if (!queue) { | 402 | if (!queue) { |
396 | err = -ESRCH; | 403 | err = -ESRCH; |
397 | goto err_out; | 404 | goto err_out; |
398 | } | 405 | } |
399 | 406 | ||
400 | if (queue->copy_mode == NFQNL_COPY_NONE) { | 407 | if (queue->copy_mode == NFQNL_COPY_NONE) { |
401 | err = -EINVAL; | 408 | err = -EINVAL; |
402 | goto err_out; | 409 | goto err_out; |
403 | } | 410 | } |
404 | 411 | ||
405 | nskb = nfqnl_build_packet_message(queue, entry); | 412 | nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr); |
406 | if (nskb == NULL) { | 413 | if (nskb == NULL) { |
407 | err = -ENOMEM; | 414 | err = -ENOMEM; |
408 | goto err_out; | 415 | goto err_out; |
409 | } | 416 | } |
410 | spin_lock_bh(&queue->lock); | 417 | spin_lock_bh(&queue->lock); |
411 | 418 | ||
412 | if (!queue->peer_pid) { | 419 | if (!queue->peer_pid) { |
413 | err = -EINVAL; | 420 | err = -EINVAL; |
414 | goto err_out_free_nskb; | 421 | goto err_out_free_nskb; |
415 | } | 422 | } |
416 | if (queue->queue_total >= queue->queue_maxlen) { | 423 | if (queue->queue_total >= queue->queue_maxlen) { |
417 | queue->queue_dropped++; | 424 | queue->queue_dropped++; |
418 | if (net_ratelimit()) | 425 | if (net_ratelimit()) |
419 | printk(KERN_WARNING "nf_queue: full at %d entries, " | 426 | printk(KERN_WARNING "nf_queue: full at %d entries, " |
420 | "dropping packet(s).\n", | 427 | "dropping packet(s).\n", |

421 | queue->queue_total); | 428 | queue->queue_total); |
422 | goto err_out_free_nskb; | 429 | goto err_out_free_nskb; |
423 | } | 430 | } |
431 | entry->id = ++queue->id_sequence; | ||
432 | *packet_id_ptr = htonl(entry->id); | ||
424 | 433 | ||
425 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ | 434 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ |
426 | err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT); | 435 | err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT); |
427 | if (err < 0) { | 436 | if (err < 0) { |
428 | queue->queue_user_dropped++; | 437 | queue->queue_user_dropped++; |
429 | goto err_out_unlock; | 438 | goto err_out_unlock; |
430 | } | 439 | } |
431 | 440 | ||
432 | __enqueue_entry(queue, entry); | 441 | __enqueue_entry(queue, entry); |
433 | 442 | ||
434 | spin_unlock_bh(&queue->lock); | 443 | spin_unlock_bh(&queue->lock); |
435 | return 0; | 444 | return 0; |
436 | 445 | ||
437 | err_out_free_nskb: | 446 | err_out_free_nskb: |
438 | kfree_skb(nskb); | 447 | kfree_skb(nskb); |
439 | err_out_unlock: | 448 | err_out_unlock: |
440 | spin_unlock_bh(&queue->lock); | 449 | spin_unlock_bh(&queue->lock); |
441 | err_out: | 450 | err_out: |
442 | return err; | 451 | return err; |
443 | } | 452 | } |
444 | 453 | ||
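Note the ordering change this enables: id_sequence is no longer an atomic counter bumped while the message is built. The id is assigned under queue->lock, immediately before the entry is appended, and is patched into the already-reserved NFQA_PACKET_HDR attribute through the packet_id_ptr returned by nfqnl_build_packet_message(). Entries on queue_list are therefore strictly ordered by id, which is the property the batch verdict handler below relies on when it stops at the first id after the requested maximum.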
445 | static int | 454 | static int |
446 | nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e) | 455 | nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e) |
447 | { | 456 | { |
448 | struct sk_buff *nskb; | 457 | struct sk_buff *nskb; |
449 | int diff; | 458 | int diff; |
450 | 459 | ||
451 | diff = data_len - e->skb->len; | 460 | diff = data_len - e->skb->len; |
452 | if (diff < 0) { | 461 | if (diff < 0) { |
453 | if (pskb_trim(e->skb, data_len)) | 462 | if (pskb_trim(e->skb, data_len)) |
454 | return -ENOMEM; | 463 | return -ENOMEM; |
455 | } else if (diff > 0) { | 464 | } else if (diff > 0) { |
456 | if (data_len > 0xFFFF) | 465 | if (data_len > 0xFFFF) |
457 | return -EINVAL; | 466 | return -EINVAL; |
458 | if (diff > skb_tailroom(e->skb)) { | 467 | if (diff > skb_tailroom(e->skb)) { |
459 | nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), | 468 | nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), |
460 | diff, GFP_ATOMIC); | 469 | diff, GFP_ATOMIC); |
461 | if (!nskb) { | 470 | if (!nskb) { |
462 | printk(KERN_WARNING "nf_queue: OOM " | 471 | printk(KERN_WARNING "nf_queue: OOM " |
463 | "in mangle, dropping packet\n"); | 472 | "in mangle, dropping packet\n"); |
464 | return -ENOMEM; | 473 | return -ENOMEM; |
465 | } | 474 | } |
466 | kfree_skb(e->skb); | 475 | kfree_skb(e->skb); |
467 | e->skb = nskb; | 476 | e->skb = nskb; |
468 | } | 477 | } |
469 | skb_put(e->skb, diff); | 478 | skb_put(e->skb, diff); |
470 | } | 479 | } |
471 | if (!skb_make_writable(e->skb, data_len)) | 480 | if (!skb_make_writable(e->skb, data_len)) |
472 | return -ENOMEM; | 481 | return -ENOMEM; |
473 | skb_copy_to_linear_data(e->skb, data, data_len); | 482 | skb_copy_to_linear_data(e->skb, data, data_len); |
474 | e->skb->ip_summed = CHECKSUM_NONE; | 483 | e->skb->ip_summed = CHECKSUM_NONE; |
475 | return 0; | 484 | return 0; |
476 | } | 485 | } |
477 | 486 | ||
478 | static int | 487 | static int |
479 | nfqnl_set_mode(struct nfqnl_instance *queue, | 488 | nfqnl_set_mode(struct nfqnl_instance *queue, |
480 | unsigned char mode, unsigned int range) | 489 | unsigned char mode, unsigned int range) |
481 | { | 490 | { |
482 | int status = 0; | 491 | int status = 0; |
483 | 492 | ||
484 | spin_lock_bh(&queue->lock); | 493 | spin_lock_bh(&queue->lock); |
485 | switch (mode) { | 494 | switch (mode) { |
486 | case NFQNL_COPY_NONE: | 495 | case NFQNL_COPY_NONE: |
487 | case NFQNL_COPY_META: | 496 | case NFQNL_COPY_META: |
488 | queue->copy_mode = mode; | 497 | queue->copy_mode = mode; |
489 | queue->copy_range = 0; | 498 | queue->copy_range = 0; |
490 | break; | 499 | break; |
491 | 500 | ||
492 | case NFQNL_COPY_PACKET: | 501 | case NFQNL_COPY_PACKET: |
493 | queue->copy_mode = mode; | 502 | queue->copy_mode = mode; |
494 | /* we're using struct nlattr which has 16bit nla_len */ | 503 | /* we're using struct nlattr which has 16bit nla_len */ |
495 | if (range > 0xffff) | 504 | if (range > 0xffff) |
496 | queue->copy_range = 0xffff; | 505 | queue->copy_range = 0xffff; |
497 | else | 506 | else |
498 | queue->copy_range = range; | 507 | queue->copy_range = range; |
499 | break; | 508 | break; |
500 | 509 | ||
501 | default: | 510 | default: |
502 | status = -EINVAL; | 511 | status = -EINVAL; |
503 | 512 | ||
504 | } | 513 | } |
505 | spin_unlock_bh(&queue->lock); | 514 | spin_unlock_bh(&queue->lock); |
506 | 515 | ||
507 | return status; | 516 | return status; |
508 | } | 517 | } |
509 | 518 | ||
510 | static int | 519 | static int |
511 | dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) | 520 | dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) |
512 | { | 521 | { |
513 | if (entry->indev) | 522 | if (entry->indev) |
514 | if (entry->indev->ifindex == ifindex) | 523 | if (entry->indev->ifindex == ifindex) |
515 | return 1; | 524 | return 1; |
516 | if (entry->outdev) | 525 | if (entry->outdev) |
517 | if (entry->outdev->ifindex == ifindex) | 526 | if (entry->outdev->ifindex == ifindex) |
518 | return 1; | 527 | return 1; |
519 | #ifdef CONFIG_BRIDGE_NETFILTER | 528 | #ifdef CONFIG_BRIDGE_NETFILTER |
520 | if (entry->skb->nf_bridge) { | 529 | if (entry->skb->nf_bridge) { |
521 | if (entry->skb->nf_bridge->physindev && | 530 | if (entry->skb->nf_bridge->physindev && |
522 | entry->skb->nf_bridge->physindev->ifindex == ifindex) | 531 | entry->skb->nf_bridge->physindev->ifindex == ifindex) |
523 | return 1; | 532 | return 1; |
524 | if (entry->skb->nf_bridge->physoutdev && | 533 | if (entry->skb->nf_bridge->physoutdev && |
525 | entry->skb->nf_bridge->physoutdev->ifindex == ifindex) | 534 | entry->skb->nf_bridge->physoutdev->ifindex == ifindex) |
526 | return 1; | 535 | return 1; |
527 | } | 536 | } |
528 | #endif | 537 | #endif |
529 | return 0; | 538 | return 0; |
530 | } | 539 | } |
531 | 540 | ||
532 | /* drop all packets with either indev or outdev == ifindex from all queue | 541 | /* drop all packets with either indev or outdev == ifindex from all queue |
533 | * instances */ | 542 | * instances */ |
534 | static void | 543 | static void |
535 | nfqnl_dev_drop(int ifindex) | 544 | nfqnl_dev_drop(int ifindex) |
536 | { | 545 | { |
537 | int i; | 546 | int i; |
538 | 547 | ||
539 | rcu_read_lock(); | 548 | rcu_read_lock(); |
540 | 549 | ||
541 | for (i = 0; i < INSTANCE_BUCKETS; i++) { | 550 | for (i = 0; i < INSTANCE_BUCKETS; i++) { |
542 | struct hlist_node *tmp; | 551 | struct hlist_node *tmp; |
543 | struct nfqnl_instance *inst; | 552 | struct nfqnl_instance *inst; |
544 | struct hlist_head *head = &instance_table[i]; | 553 | struct hlist_head *head = &instance_table[i]; |
545 | 554 | ||
546 | hlist_for_each_entry_rcu(inst, tmp, head, hlist) | 555 | hlist_for_each_entry_rcu(inst, tmp, head, hlist) |
547 | nfqnl_flush(inst, dev_cmp, ifindex); | 556 | nfqnl_flush(inst, dev_cmp, ifindex); |
548 | } | 557 | } |
549 | 558 | ||
550 | rcu_read_unlock(); | 559 | rcu_read_unlock(); |
551 | } | 560 | } |
552 | 561 | ||
553 | #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) | 562 | #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) |
554 | 563 | ||
555 | static int | 564 | static int |
556 | nfqnl_rcv_dev_event(struct notifier_block *this, | 565 | nfqnl_rcv_dev_event(struct notifier_block *this, |
557 | unsigned long event, void *ptr) | 566 | unsigned long event, void *ptr) |
558 | { | 567 | { |
559 | struct net_device *dev = ptr; | 568 | struct net_device *dev = ptr; |
560 | 569 | ||
561 | if (!net_eq(dev_net(dev), &init_net)) | 570 | if (!net_eq(dev_net(dev), &init_net)) |
562 | return NOTIFY_DONE; | 571 | return NOTIFY_DONE; |
563 | 572 | ||
564 | /* Drop any packets associated with the downed device */ | 573 | /* Drop any packets associated with the downed device */ |
565 | if (event == NETDEV_DOWN) | 574 | if (event == NETDEV_DOWN) |
566 | nfqnl_dev_drop(dev->ifindex); | 575 | nfqnl_dev_drop(dev->ifindex); |
567 | return NOTIFY_DONE; | 576 | return NOTIFY_DONE; |
568 | } | 577 | } |
569 | 578 | ||
570 | static struct notifier_block nfqnl_dev_notifier = { | 579 | static struct notifier_block nfqnl_dev_notifier = { |
571 | .notifier_call = nfqnl_rcv_dev_event, | 580 | .notifier_call = nfqnl_rcv_dev_event, |
572 | }; | 581 | }; |
573 | 582 | ||
574 | static int | 583 | static int |
575 | nfqnl_rcv_nl_event(struct notifier_block *this, | 584 | nfqnl_rcv_nl_event(struct notifier_block *this, |
576 | unsigned long event, void *ptr) | 585 | unsigned long event, void *ptr) |
577 | { | 586 | { |
578 | struct netlink_notify *n = ptr; | 587 | struct netlink_notify *n = ptr; |
579 | 588 | ||
580 | if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { | 589 | if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { |
581 | int i; | 590 | int i; |
582 | 591 | ||
583 | /* destroy all instances for this pid */ | 592 | /* destroy all instances for this pid */ |
584 | spin_lock(&instances_lock); | 593 | spin_lock(&instances_lock); |
585 | for (i = 0; i < INSTANCE_BUCKETS; i++) { | 594 | for (i = 0; i < INSTANCE_BUCKETS; i++) { |
586 | struct hlist_node *tmp, *t2; | 595 | struct hlist_node *tmp, *t2; |
587 | struct nfqnl_instance *inst; | 596 | struct nfqnl_instance *inst; |
588 | struct hlist_head *head = &instance_table[i]; | 597 | struct hlist_head *head = &instance_table[i]; |
589 | 598 | ||
590 | hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { | 599 | hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { |
591 | if ((n->net == &init_net) && | 600 | if ((n->net == &init_net) && |
592 | (n->pid == inst->peer_pid)) | 601 | (n->pid == inst->peer_pid)) |
593 | __instance_destroy(inst); | 602 | __instance_destroy(inst); |
594 | } | 603 | } |
595 | } | 604 | } |
596 | spin_unlock(&instances_lock); | 605 | spin_unlock(&instances_lock); |
597 | } | 606 | } |
598 | return NOTIFY_DONE; | 607 | return NOTIFY_DONE; |
599 | } | 608 | } |
600 | 609 | ||
601 | static struct notifier_block nfqnl_rtnl_notifier = { | 610 | static struct notifier_block nfqnl_rtnl_notifier = { |
602 | .notifier_call = nfqnl_rcv_nl_event, | 611 | .notifier_call = nfqnl_rcv_nl_event, |
603 | }; | 612 | }; |
604 | 613 | ||
605 | static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { | 614 | static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { |
606 | [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, | 615 | [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, |
607 | [NFQA_MARK] = { .type = NLA_U32 }, | 616 | [NFQA_MARK] = { .type = NLA_U32 }, |
608 | [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, | 617 | [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, |
609 | }; | 618 | }; |
610 | 619 | ||
620 | static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = { | ||
621 | [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, | ||
622 | [NFQA_MARK] = { .type = NLA_U32 }, | ||
623 | }; | ||
624 | |||
625 | static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid) | ||
626 | { | ||
627 | struct nfqnl_instance *queue; | ||
628 | |||
629 | queue = instance_lookup(queue_num); | ||
630 | if (!queue) | ||
631 | return ERR_PTR(-ENODEV); | ||
632 | |||
633 | if (queue->peer_pid != nlpid) | ||
634 | return ERR_PTR(-EPERM); | ||
635 | |||
636 | return queue; | ||
637 | } | ||
638 | |||
639 | static struct nfqnl_msg_verdict_hdr* | ||
640 | verdicthdr_get(const struct nlattr * const nfqa[]) | ||
641 | { | ||
642 | struct nfqnl_msg_verdict_hdr *vhdr; | ||
643 | unsigned int verdict; | ||
644 | |||
645 | if (!nfqa[NFQA_VERDICT_HDR]) | ||
646 | return NULL; | ||
647 | |||
648 | vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); | ||
649 | verdict = ntohl(vhdr->verdict); | ||
650 | if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) | ||
651 | return NULL; | ||
652 | return vhdr; | ||
653 | } | ||
654 | |||
655 | static int nfq_id_after(unsigned int id, unsigned int max) | ||
656 | { | ||
657 | return (int)(id - max) > 0; | ||
658 | } | ||
659 | |||
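nfq_id_after() uses the standard sequence-number idiom: the unsigned subtraction wraps modulo 2^32 and the cast to int reads the result as a signed distance, so ordering is preserved across a wrap of the id counter. A worked example (the values are illustrative):

	/*
	 *	maxid = 0xfffffffe	last id named in a batch verdict
	 *	id    = 3		allocated after the counter wrapped
	 *
	 *	id - maxid = 5 (mod 2^32); (int)5 > 0, so id is "after" maxid.
	 *
	 *	With id = 0xfffffff0 instead, id - maxid = 0xfffffff2, which is
	 *	-14 as a signed int, so that id is correctly treated as older.
	 */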
611 | static int | 660 | static int |
661 | nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb, | ||
662 | const struct nlmsghdr *nlh, | ||
663 | const struct nlattr * const nfqa[]) | ||
664 | { | ||
665 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); | ||
666 | struct nf_queue_entry *entry, *tmp; | ||
667 | unsigned int verdict, maxid; | ||
668 | struct nfqnl_msg_verdict_hdr *vhdr; | ||
669 | struct nfqnl_instance *queue; | ||
670 | LIST_HEAD(batch_list); | ||
671 | u16 queue_num = ntohs(nfmsg->res_id); | ||
672 | |||
673 | queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid); | ||
674 | if (IS_ERR(queue)) | ||
675 | return PTR_ERR(queue); | ||
676 | |||
677 | vhdr = verdicthdr_get(nfqa); | ||
678 | if (!vhdr) | ||
679 | return -EINVAL; | ||
680 | |||
681 | verdict = ntohl(vhdr->verdict); | ||
682 | maxid = ntohl(vhdr->id); | ||
683 | |||
684 | spin_lock_bh(&queue->lock); | ||
685 | |||
686 | list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) { | ||
687 | if (nfq_id_after(entry->id, maxid)) | ||
688 | break; | ||
689 | __dequeue_entry(queue, entry); | ||
690 | list_add_tail(&entry->list, &batch_list); | ||
691 | } | ||
692 | |||
693 | spin_unlock_bh(&queue->lock); | ||
694 | |||
695 | if (list_empty(&batch_list)) | ||
696 | return -ENOENT; | ||
697 | |||
698 | list_for_each_entry_safe(entry, tmp, &batch_list, list) { | ||
699 | if (nfqa[NFQA_MARK]) | ||
700 | entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); | ||
701 | nf_reinject(entry, verdict); | ||
702 | } | ||
703 | return 0; | ||
704 | } | ||
705 | |||
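The batch handler splices every queued entry whose id does not come after vhdr->id onto a private list while holding queue->lock, then reinjects them all with the same verdict (and optional mark) outside the lock, so a single netlink message can resolve an arbitrary run of packets. A sketch of the payload such an NFQNL_MSG_VERDICT_BATCH request carries ('highest_id' is an illustrative variable, not part of this commit):

	struct nfqnl_msg_verdict_hdr vh = {
		.verdict = htonl(NF_ACCEPT),
		.id	 = htonl(highest_id),	/* newest packet the verdict covers */
	};
	/* attached as NFQA_VERDICT_HDR, optionally together with NFQA_MARK */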
706 | static int | ||
612 | nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | 707 | nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, |
613 | const struct nlmsghdr *nlh, | 708 | const struct nlmsghdr *nlh, |
614 | const struct nlattr * const nfqa[]) | 709 | const struct nlattr * const nfqa[]) |
615 | { | 710 | { |
616 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); | 711 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); |
617 | u_int16_t queue_num = ntohs(nfmsg->res_id); | 712 | u_int16_t queue_num = ntohs(nfmsg->res_id); |
618 | 713 | ||
619 | struct nfqnl_msg_verdict_hdr *vhdr; | 714 | struct nfqnl_msg_verdict_hdr *vhdr; |
620 | struct nfqnl_instance *queue; | 715 | struct nfqnl_instance *queue; |
621 | unsigned int verdict; | 716 | unsigned int verdict; |
622 | struct nf_queue_entry *entry; | 717 | struct nf_queue_entry *entry; |
623 | int err; | ||
624 | 718 | ||
625 | rcu_read_lock(); | ||
626 | queue = instance_lookup(queue_num); | 719 | queue = instance_lookup(queue_num); |
627 | if (!queue) { | 720 | if (!queue) |
628 | err = -ENODEV; | ||
629 | goto err_out_unlock; | ||
630 | } | ||
631 | 721 | ||
632 | if (queue->peer_pid != NETLINK_CB(skb).pid) { | 722 | queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid); |
633 | err = -EPERM; | 723 | if (IS_ERR(queue)) |
634 | goto err_out_unlock; | 724 | return PTR_ERR(queue); |
635 | } | ||
636 | 725 | ||
637 | if (!nfqa[NFQA_VERDICT_HDR]) { | 726 | vhdr = verdicthdr_get(nfqa); |
638 | err = -EINVAL; | 727 | if (!vhdr) |
639 | goto err_out_unlock; | 728 | return -EINVAL; |
640 | } | ||
641 | 729 | ||
642 | vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); | ||
643 | verdict = ntohl(vhdr->verdict); | 730 | verdict = ntohl(vhdr->verdict); |
644 | 731 | ||
645 | if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) { | ||
646 | err = -EINVAL; | ||
647 | goto err_out_unlock; | ||
648 | } | ||
649 | |||
650 | entry = find_dequeue_entry(queue, ntohl(vhdr->id)); | 732 | entry = find_dequeue_entry(queue, ntohl(vhdr->id)); |
651 | if (entry == NULL) { | 733 | if (entry == NULL) |
652 | err = -ENOENT; | 734 | return -ENOENT; |
653 | goto err_out_unlock; | ||
654 | } | ||
655 | rcu_read_unlock(); | ||
656 | 735 | ||
657 | if (nfqa[NFQA_PAYLOAD]) { | 736 | if (nfqa[NFQA_PAYLOAD]) { |
658 | if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), | 737 | if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), |
659 | nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0) | 738 | nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0) |
660 | verdict = NF_DROP; | 739 | verdict = NF_DROP; |
661 | } | 740 | } |
662 | 741 | ||
663 | if (nfqa[NFQA_MARK]) | 742 | if (nfqa[NFQA_MARK]) |
664 | entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); | 743 | entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); |
665 | 744 | ||
666 | nf_reinject(entry, verdict); | 745 | nf_reinject(entry, verdict); |
667 | return 0; | 746 | return 0; |
668 | |||
669 | err_out_unlock: | ||
670 | rcu_read_unlock(); | ||
671 | return err; | ||
672 | } | 747 | } |
673 | 748 | ||
674 | static int | 749 | static int |
675 | nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, | 750 | nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, |
676 | const struct nlmsghdr *nlh, | 751 | const struct nlmsghdr *nlh, |
677 | const struct nlattr * const nfqa[]) | 752 | const struct nlattr * const nfqa[]) |
678 | { | 753 | { |
679 | return -ENOTSUPP; | 754 | return -ENOTSUPP; |
680 | } | 755 | } |
681 | 756 | ||
682 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { | 757 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { |
683 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, | 758 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, |
684 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, | 759 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, |
685 | }; | 760 | }; |
686 | 761 | ||
687 | static const struct nf_queue_handler nfqh = { | 762 | static const struct nf_queue_handler nfqh = { |
688 | .name = "nf_queue", | 763 | .name = "nf_queue", |
689 | .outfn = &nfqnl_enqueue_packet, | 764 | .outfn = &nfqnl_enqueue_packet, |
690 | }; | 765 | }; |
691 | 766 | ||
692 | static int | 767 | static int |
693 | nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | 768 | nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, |
694 | const struct nlmsghdr *nlh, | 769 | const struct nlmsghdr *nlh, |
695 | const struct nlattr * const nfqa[]) | 770 | const struct nlattr * const nfqa[]) |
696 | { | 771 | { |
697 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); | 772 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); |
698 | u_int16_t queue_num = ntohs(nfmsg->res_id); | 773 | u_int16_t queue_num = ntohs(nfmsg->res_id); |
699 | struct nfqnl_instance *queue; | 774 | struct nfqnl_instance *queue; |
700 | struct nfqnl_msg_config_cmd *cmd = NULL; | 775 | struct nfqnl_msg_config_cmd *cmd = NULL; |
701 | int ret = 0; | 776 | int ret = 0; |
702 | 777 | ||
703 | if (nfqa[NFQA_CFG_CMD]) { | 778 | if (nfqa[NFQA_CFG_CMD]) { |
704 | cmd = nla_data(nfqa[NFQA_CFG_CMD]); | 779 | cmd = nla_data(nfqa[NFQA_CFG_CMD]); |
705 | 780 | ||
706 | /* Commands without queue context - might sleep */ | 781 | /* Commands without queue context - might sleep */ |
707 | switch (cmd->command) { | 782 | switch (cmd->command) { |
708 | case NFQNL_CFG_CMD_PF_BIND: | 783 | case NFQNL_CFG_CMD_PF_BIND: |
709 | return nf_register_queue_handler(ntohs(cmd->pf), | 784 | return nf_register_queue_handler(ntohs(cmd->pf), |
710 | &nfqh); | 785 | &nfqh); |
711 | case NFQNL_CFG_CMD_PF_UNBIND: | 786 | case NFQNL_CFG_CMD_PF_UNBIND: |
712 | return nf_unregister_queue_handler(ntohs(cmd->pf), | 787 | return nf_unregister_queue_handler(ntohs(cmd->pf), |
713 | &nfqh); | 788 | &nfqh); |
714 | } | 789 | } |
715 | } | 790 | } |
716 | 791 | ||
717 | rcu_read_lock(); | 792 | rcu_read_lock(); |
718 | queue = instance_lookup(queue_num); | 793 | queue = instance_lookup(queue_num); |
719 | if (queue && queue->peer_pid != NETLINK_CB(skb).pid) { | 794 | if (queue && queue->peer_pid != NETLINK_CB(skb).pid) { |
720 | ret = -EPERM; | 795 | ret = -EPERM; |
721 | goto err_out_unlock; | 796 | goto err_out_unlock; |
722 | } | 797 | } |
723 | 798 | ||
724 | if (cmd != NULL) { | 799 | if (cmd != NULL) { |
725 | switch (cmd->command) { | 800 | switch (cmd->command) { |
726 | case NFQNL_CFG_CMD_BIND: | 801 | case NFQNL_CFG_CMD_BIND: |
727 | if (queue) { | 802 | if (queue) { |
728 | ret = -EBUSY; | 803 | ret = -EBUSY; |
729 | goto err_out_unlock; | 804 | goto err_out_unlock; |
730 | } | 805 | } |
731 | queue = instance_create(queue_num, NETLINK_CB(skb).pid); | 806 | queue = instance_create(queue_num, NETLINK_CB(skb).pid); |
732 | if (IS_ERR(queue)) { | 807 | if (IS_ERR(queue)) { |
733 | ret = PTR_ERR(queue); | 808 | ret = PTR_ERR(queue); |
734 | goto err_out_unlock; | 809 | goto err_out_unlock; |
735 | } | 810 | } |
736 | break; | 811 | break; |
737 | case NFQNL_CFG_CMD_UNBIND: | 812 | case NFQNL_CFG_CMD_UNBIND: |
738 | if (!queue) { | 813 | if (!queue) { |
739 | ret = -ENODEV; | 814 | ret = -ENODEV; |
740 | goto err_out_unlock; | 815 | goto err_out_unlock; |
741 | } | 816 | } |
742 | instance_destroy(queue); | 817 | instance_destroy(queue); |
743 | break; | 818 | break; |
744 | case NFQNL_CFG_CMD_PF_BIND: | 819 | case NFQNL_CFG_CMD_PF_BIND: |
745 | case NFQNL_CFG_CMD_PF_UNBIND: | 820 | case NFQNL_CFG_CMD_PF_UNBIND: |
746 | break; | 821 | break; |
747 | default: | 822 | default: |
748 | ret = -ENOTSUPP; | 823 | ret = -ENOTSUPP; |
749 | break; | 824 | break; |
750 | } | 825 | } |
751 | } | 826 | } |
752 | 827 | ||
753 | if (nfqa[NFQA_CFG_PARAMS]) { | 828 | if (nfqa[NFQA_CFG_PARAMS]) { |
754 | struct nfqnl_msg_config_params *params; | 829 | struct nfqnl_msg_config_params *params; |
755 | 830 | ||
756 | if (!queue) { | 831 | if (!queue) { |
757 | ret = -ENODEV; | 832 | ret = -ENODEV; |
758 | goto err_out_unlock; | 833 | goto err_out_unlock; |
759 | } | 834 | } |
760 | params = nla_data(nfqa[NFQA_CFG_PARAMS]); | 835 | params = nla_data(nfqa[NFQA_CFG_PARAMS]); |
761 | nfqnl_set_mode(queue, params->copy_mode, | 836 | nfqnl_set_mode(queue, params->copy_mode, |
762 | ntohl(params->copy_range)); | 837 | ntohl(params->copy_range)); |
763 | } | 838 | } |
764 | 839 | ||
765 | if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) { | 840 | if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) { |
766 | __be32 *queue_maxlen; | 841 | __be32 *queue_maxlen; |
767 | 842 | ||
768 | if (!queue) { | 843 | if (!queue) { |
769 | ret = -ENODEV; | 844 | ret = -ENODEV; |
770 | goto err_out_unlock; | 845 | goto err_out_unlock; |
771 | } | 846 | } |
772 | queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]); | 847 | queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]); |
773 | spin_lock_bh(&queue->lock); | 848 | spin_lock_bh(&queue->lock); |
774 | queue->queue_maxlen = ntohl(*queue_maxlen); | 849 | queue->queue_maxlen = ntohl(*queue_maxlen); |
775 | spin_unlock_bh(&queue->lock); | 850 | spin_unlock_bh(&queue->lock); |
776 | } | 851 | } |
777 | 852 | ||
778 | err_out_unlock: | 853 | err_out_unlock: |
779 | rcu_read_unlock(); | 854 | rcu_read_unlock(); |
780 | return ret; | 855 | return ret; |
781 | } | 856 | } |
782 | 857 | ||
783 | static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { | 858 | static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { |
784 | [NFQNL_MSG_PACKET] = { .call = nfqnl_recv_unsupp, | 859 | [NFQNL_MSG_PACKET] = { .call_rcu = nfqnl_recv_unsupp, |
785 | .attr_count = NFQA_MAX, }, | 860 | .attr_count = NFQA_MAX, }, |
786 | [NFQNL_MSG_VERDICT] = { .call = nfqnl_recv_verdict, | 861 | [NFQNL_MSG_VERDICT] = { .call_rcu = nfqnl_recv_verdict, |
787 | .attr_count = NFQA_MAX, | 862 | .attr_count = NFQA_MAX, |
788 | .policy = nfqa_verdict_policy }, | 863 | .policy = nfqa_verdict_policy }, |
789 | [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, | 864 | [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, |
790 | .attr_count = NFQA_CFG_MAX, | 865 | .attr_count = NFQA_CFG_MAX, |
791 | .policy = nfqa_cfg_policy }, | 866 | .policy = nfqa_cfg_policy }, |
867 | [NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch, | ||
868 | .attr_count = NFQA_MAX, | ||
869 | .policy = nfqa_verdict_batch_policy }, | ||
792 | }; | 870 | }; |
793 | 871 | ||
794 | static const struct nfnetlink_subsystem nfqnl_subsys = { | 872 | static const struct nfnetlink_subsystem nfqnl_subsys = { |
795 | .name = "nf_queue", | 873 | .name = "nf_queue", |
796 | .subsys_id = NFNL_SUBSYS_QUEUE, | 874 | .subsys_id = NFNL_SUBSYS_QUEUE, |
797 | .cb_count = NFQNL_MSG_MAX, | 875 | .cb_count = NFQNL_MSG_MAX, |
798 | .cb = nfqnl_cb, | 876 | .cb = nfqnl_cb, |
799 | }; | 877 | }; |
800 | 878 | ||
801 | #ifdef CONFIG_PROC_FS | 879 | #ifdef CONFIG_PROC_FS |
802 | struct iter_state { | 880 | struct iter_state { |
803 | unsigned int bucket; | 881 | unsigned int bucket; |
804 | }; | 882 | }; |
805 | 883 | ||
806 | static struct hlist_node *get_first(struct seq_file *seq) | 884 | static struct hlist_node *get_first(struct seq_file *seq) |
807 | { | 885 | { |
808 | struct iter_state *st = seq->private; | 886 | struct iter_state *st = seq->private; |
809 | 887 | ||
810 | if (!st) | 888 | if (!st) |
811 | return NULL; | 889 | return NULL; |
812 | 890 | ||
813 | for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { | 891 | for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { |
814 | if (!hlist_empty(&instance_table[st->bucket])) | 892 | if (!hlist_empty(&instance_table[st->bucket])) |
815 | return instance_table[st->bucket].first; | 893 | return instance_table[st->bucket].first; |
816 | } | 894 | } |
817 | return NULL; | 895 | return NULL; |
818 | } | 896 | } |
819 | 897 | ||
820 | static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) | 898 | static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) |
821 | { | 899 | { |
822 | struct iter_state *st = seq->private; | 900 | struct iter_state *st = seq->private; |
823 | 901 | ||
824 | h = h->next; | 902 | h = h->next; |
825 | while (!h) { | 903 | while (!h) { |
826 | if (++st->bucket >= INSTANCE_BUCKETS) | 904 | if (++st->bucket >= INSTANCE_BUCKETS) |
827 | return NULL; | 905 | return NULL; |
828 | 906 | ||
829 | h = instance_table[st->bucket].first; | 907 | h = instance_table[st->bucket].first; |
830 | } | 908 | } |
831 | return h; | 909 | return h; |
832 | } | 910 | } |
833 | 911 | ||
834 | static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos) | 912 | static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos) |
835 | { | 913 | { |
836 | struct hlist_node *head; | 914 | struct hlist_node *head; |
837 | head = get_first(seq); | 915 | head = get_first(seq); |
838 | 916 | ||
839 | if (head) | 917 | if (head) |
840 | while (pos && (head = get_next(seq, head))) | 918 | while (pos && (head = get_next(seq, head))) |
841 | pos--; | 919 | pos--; |
842 | return pos ? NULL : head; | 920 | return pos ? NULL : head; |
843 | } | 921 | } |
844 | 922 | ||
845 | static void *seq_start(struct seq_file *seq, loff_t *pos) | 923 | static void *seq_start(struct seq_file *seq, loff_t *pos) |
846 | __acquires(instances_lock) | 924 | __acquires(instances_lock) |
847 | { | 925 | { |
848 | spin_lock(&instances_lock); | 926 | spin_lock(&instances_lock); |
849 | return get_idx(seq, *pos); | 927 | return get_idx(seq, *pos); |
850 | } | 928 | } |
851 | 929 | ||
852 | static void *seq_next(struct seq_file *s, void *v, loff_t *pos) | 930 | static void *seq_next(struct seq_file *s, void *v, loff_t *pos) |
853 | { | 931 | { |
854 | (*pos)++; | 932 | (*pos)++; |
855 | return get_next(s, v); | 933 | return get_next(s, v); |
856 | } | 934 | } |
857 | 935 | ||
858 | static void seq_stop(struct seq_file *s, void *v) | 936 | static void seq_stop(struct seq_file *s, void *v) |
859 | __releases(instances_lock) | 937 | __releases(instances_lock) |
860 | { | 938 | { |
861 | spin_unlock(&instances_lock); | 939 | spin_unlock(&instances_lock); |
862 | } | 940 | } |
863 | 941 | ||
864 | static int seq_show(struct seq_file *s, void *v) | 942 | static int seq_show(struct seq_file *s, void *v) |
865 | { | 943 | { |
866 | const struct nfqnl_instance *inst = v; | 944 | const struct nfqnl_instance *inst = v; |
867 | 945 | ||
868 | return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", | 946 | return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", |
869 | inst->queue_num, | 947 | inst->queue_num, |
870 | inst->peer_pid, inst->queue_total, | 948 | inst->peer_pid, inst->queue_total, |
871 | inst->copy_mode, inst->copy_range, | 949 | inst->copy_mode, inst->copy_range, |
872 | inst->queue_dropped, inst->queue_user_dropped, | 950 | inst->queue_dropped, inst->queue_user_dropped, |
873 | atomic_read(&inst->id_sequence), 1); | 951 | inst->id_sequence, 1); |
874 | } | 952 | } |
875 | 953 | ||
876 | static const struct seq_operations nfqnl_seq_ops = { | 954 | static const struct seq_operations nfqnl_seq_ops = { |
877 | .start = seq_start, | 955 | .start = seq_start, |
878 | .next = seq_next, | 956 | .next = seq_next, |
879 | .stop = seq_stop, | 957 | .stop = seq_stop, |
880 | .show = seq_show, | 958 | .show = seq_show, |
881 | }; | 959 | }; |
882 | 960 | ||
883 | static int nfqnl_open(struct inode *inode, struct file *file) | 961 | static int nfqnl_open(struct inode *inode, struct file *file) |
884 | { | 962 | { |
885 | return seq_open_private(file, &nfqnl_seq_ops, | 963 | return seq_open_private(file, &nfqnl_seq_ops, |
886 | sizeof(struct iter_state)); | 964 | sizeof(struct iter_state)); |
887 | } | 965 | } |
888 | 966 | ||
889 | static const struct file_operations nfqnl_file_ops = { | 967 | static const struct file_operations nfqnl_file_ops = { |
890 | .owner = THIS_MODULE, | 968 | .owner = THIS_MODULE, |
891 | .open = nfqnl_open, | 969 | .open = nfqnl_open, |
892 | .read = seq_read, | 970 | .read = seq_read, |
893 | .llseek = seq_lseek, | 971 | .llseek = seq_lseek, |
894 | .release = seq_release_private, | 972 | .release = seq_release_private, |
895 | }; | 973 | }; |
896 | 974 | ||
897 | #endif /* PROC_FS */ | 975 | #endif /* PROC_FS */ |
898 | 976 | ||
899 | static int __init nfnetlink_queue_init(void) | 977 | static int __init nfnetlink_queue_init(void) |
900 | { | 978 | { |
901 | int i, status = -ENOMEM; | 979 | int i, status = -ENOMEM; |
902 | 980 | ||
903 | for (i = 0; i < INSTANCE_BUCKETS; i++) | 981 | for (i = 0; i < INSTANCE_BUCKETS; i++) |
904 | INIT_HLIST_HEAD(&instance_table[i]); | 982 | INIT_HLIST_HEAD(&instance_table[i]); |
905 | 983 | ||
906 | netlink_register_notifier(&nfqnl_rtnl_notifier); | 984 | netlink_register_notifier(&nfqnl_rtnl_notifier); |
907 | status = nfnetlink_subsys_register(&nfqnl_subsys); | 985 | status = nfnetlink_subsys_register(&nfqnl_subsys); |
908 | if (status < 0) { | 986 | if (status < 0) { |
909 | printk(KERN_ERR "nf_queue: failed to create netlink socket\n"); | 987 | printk(KERN_ERR "nf_queue: failed to create netlink socket\n"); |
910 | goto cleanup_netlink_notifier; | 988 | goto cleanup_netlink_notifier; |
911 | } | 989 | } |
912 | 990 | ||
913 | #ifdef CONFIG_PROC_FS | 991 | #ifdef CONFIG_PROC_FS |
914 | if (!proc_create("nfnetlink_queue", 0440, | 992 | if (!proc_create("nfnetlink_queue", 0440, |
915 | proc_net_netfilter, &nfqnl_file_ops)) | 993 | proc_net_netfilter, &nfqnl_file_ops)) |
916 | goto cleanup_subsys; | 994 | goto cleanup_subsys; |
917 | #endif | 995 | #endif |
918 | 996 | ||
919 | register_netdevice_notifier(&nfqnl_dev_notifier); | 997 | register_netdevice_notifier(&nfqnl_dev_notifier); |
920 | return status; | 998 | return status; |
921 | 999 | ||
922 | #ifdef CONFIG_PROC_FS | 1000 | #ifdef CONFIG_PROC_FS |
923 | cleanup_subsys: | 1001 | cleanup_subsys: |
924 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 1002 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
925 | #endif | 1003 | #endif |
926 | cleanup_netlink_notifier: | 1004 | cleanup_netlink_notifier: |
927 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1005 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
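
The configuration path in nfqnl_recv_config() above is normally driven from userspace through libnetfilter_queue rather than with hand-built nfnetlink messages. The following is a minimal userspace sketch, not part of this commit, assuming only the stock libnetfilter_queue API: nfq_create_queue() issues NFQNL_CFG_CMD_BIND, nfq_set_mode() carries the NFQA_CFG_PARAMS copy_mode/copy_range pair, nfq_set_queue_maxlen() sets NFQA_CFG_QUEUE_MAXLEN, and the verdict sent from the callback is what nfqnl_recv_verdict() (now dispatched via .call_rcu) processes. Error handling is trimmed for brevity.

/* Minimal userspace sketch (assumes libnetfilter_queue; not part of this
 * commit).  Binds queue 0, requests full packet copies, raises the queue
 * length limit, and accepts every packet it sees. */
#include <stdlib.h>
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/netfilter.h>		/* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
	      struct nfq_data *nfa, void *data)
{
	struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
	uint32_t id = ph ? ntohl(ph->packet_id) : 0;

	/* NFQNL_MSG_VERDICT back to the kernel (nfqnl_recv_verdict). */
	return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
	struct nfq_handle *h = nfq_open();
	struct nfq_q_handle *qh;
	char buf[65536];
	int fd, n;

	if (!h)
		exit(1);
	/* NFQNL_CFG_CMD_PF_(UN)BIND: the "commands without queue context". */
	nfq_unbind_pf(h, AF_INET);
	nfq_bind_pf(h, AF_INET);
	/* NFQNL_CFG_CMD_BIND for queue 0, handled by nfqnl_recv_config(). */
	qh = nfq_create_queue(h, 0, &cb, NULL);
	if (!qh)
		exit(1);
	/* NFQA_CFG_PARAMS: copy_mode / copy_range. */
	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
	/* NFQA_CFG_QUEUE_MAXLEN: queue->queue_maxlen. */
	nfq_set_queue_maxlen(qh, 4096);

	fd = nfq_fd(h);
	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
		nfq_handle_packet(h, buf, n);

	nfq_destroy_queue(qh);
	nfq_close(h);
	return 0;
}

The NFQNL_MSG_VERDICT_BATCH handler added by this commit applies one verdict to every queued packet whose id is at or below the given one; later libnetfilter_queue releases expose this through a matching nfq_set_verdict_batch() helper.
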
net/netfilter/xt_AUDIT.c
1 | /* | 1 | /* |
2 | * Creates audit record for dropped/accepted packets | 2 | * Creates audit record for dropped/accepted packets |
3 | * | 3 | * |
4 | * (C) 2010-2011 Thomas Graf <tgraf@redhat.com> | 4 | * (C) 2010-2011 Thomas Graf <tgraf@redhat.com> |
5 | * (C) 2010-2011 Red Hat, Inc. | 5 | * (C) 2010-2011 Red Hat, Inc. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | 13 | ||
14 | #include <linux/audit.h> | 14 | #include <linux/audit.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
17 | #include <linux/tcp.h> | 17 | #include <linux/tcp.h> |
18 | #include <linux/udp.h> | 18 | #include <linux/udp.h> |
19 | #include <linux/if_arp.h> | 19 | #include <linux/if_arp.h> |
20 | #include <linux/netfilter/x_tables.h> | 20 | #include <linux/netfilter/x_tables.h> |
21 | #include <linux/netfilter/xt_AUDIT.h> | 21 | #include <linux/netfilter/xt_AUDIT.h> |
22 | #include <linux/netfilter_bridge/ebtables.h> | 22 | #include <linux/netfilter_bridge/ebtables.h> |
23 | #include <net/ipv6.h> | 23 | #include <net/ipv6.h> |
24 | #include <net/ip.h> | 24 | #include <net/ip.h> |
25 | 25 | ||
26 | MODULE_LICENSE("GPL"); | 26 | MODULE_LICENSE("GPL"); |
27 | MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>"); | 27 | MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>"); |
28 | MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets"); | 28 | MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets"); |
29 | MODULE_ALIAS("ipt_AUDIT"); | 29 | MODULE_ALIAS("ipt_AUDIT"); |
30 | MODULE_ALIAS("ip6t_AUDIT"); | 30 | MODULE_ALIAS("ip6t_AUDIT"); |
31 | MODULE_ALIAS("ebt_AUDIT"); | 31 | MODULE_ALIAS("ebt_AUDIT"); |
32 | MODULE_ALIAS("arpt_AUDIT"); | 32 | MODULE_ALIAS("arpt_AUDIT"); |
33 | 33 | ||
34 | static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb, | 34 | static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb, |
35 | unsigned int proto, unsigned int offset) | 35 | unsigned int proto, unsigned int offset) |
36 | { | 36 | { |
37 | switch (proto) { | 37 | switch (proto) { |
38 | case IPPROTO_TCP: | 38 | case IPPROTO_TCP: |
39 | case IPPROTO_UDP: | 39 | case IPPROTO_UDP: |
40 | case IPPROTO_UDPLITE: { | 40 | case IPPROTO_UDPLITE: { |
41 | const __be16 *pptr; | 41 | const __be16 *pptr; |
42 | __be16 _ports[2]; | 42 | __be16 _ports[2]; |
43 | 43 | ||
44 | pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports); | 44 | pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports); |
45 | if (pptr == NULL) { | 45 | if (pptr == NULL) { |
46 | audit_log_format(ab, " truncated=1"); | 46 | audit_log_format(ab, " truncated=1"); |
47 | return; | 47 | return; |
48 | } | 48 | } |
49 | 49 | ||
50 | audit_log_format(ab, " sport=%hu dport=%hu", | 50 | audit_log_format(ab, " sport=%hu dport=%hu", |
51 | ntohs(pptr[0]), ntohs(pptr[1])); | 51 | ntohs(pptr[0]), ntohs(pptr[1])); |
52 | } | 52 | } |
53 | break; | 53 | break; |
54 | 54 | ||
55 | case IPPROTO_ICMP: | 55 | case IPPROTO_ICMP: |
56 | case IPPROTO_ICMPV6: { | 56 | case IPPROTO_ICMPV6: { |
57 | const u8 *iptr; | 57 | const u8 *iptr; |
58 | u8 _ih[2]; | 58 | u8 _ih[2]; |
59 | 59 | ||
60 | iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih); | 60 | iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih); |
61 | if (iptr == NULL) { | 61 | if (iptr == NULL) { |
62 | audit_log_format(ab, " truncated=1"); | 62 | audit_log_format(ab, " truncated=1"); |
63 | return; | 63 | return; |
64 | } | 64 | } |
65 | 65 | ||
66 | audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu", | 66 | audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu", |
67 | iptr[0], iptr[1]); | 67 | iptr[0], iptr[1]); |
68 | 68 | ||
69 | } | 69 | } |
70 | break; | 70 | break; |
71 | } | 71 | } |
72 | } | 72 | } |
73 | 73 | ||
74 | static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb) | 74 | static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb) |
75 | { | 75 | { |
76 | struct iphdr _iph; | 76 | struct iphdr _iph; |
77 | const struct iphdr *ih; | 77 | const struct iphdr *ih; |
78 | 78 | ||
79 | ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); | 79 | ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); |
80 | if (!ih) { | 80 | if (!ih) { |
81 | audit_log_format(ab, " truncated=1"); | 81 | audit_log_format(ab, " truncated=1"); |
82 | return; | 82 | return; |
83 | } | 83 | } |
84 | 84 | ||
85 | audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu", | 85 | audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu", |
86 | &ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol); | 86 | &ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol); |
87 | 87 | ||
88 | if (ntohs(ih->frag_off) & IP_OFFSET) { | 88 | if (ntohs(ih->frag_off) & IP_OFFSET) { |
89 | audit_log_format(ab, " frag=1"); | 89 | audit_log_format(ab, " frag=1"); |
90 | return; | 90 | return; |
91 | } | 91 | } |
92 | 92 | ||
93 | audit_proto(ab, skb, ih->protocol, ih->ihl * 4); | 93 | audit_proto(ab, skb, ih->protocol, ih->ihl * 4); |
94 | } | 94 | } |
95 | 95 | ||
96 | static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb) | 96 | static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb) |
97 | { | 97 | { |
98 | struct ipv6hdr _ip6h; | 98 | struct ipv6hdr _ip6h; |
99 | const struct ipv6hdr *ih; | 99 | const struct ipv6hdr *ih; |
100 | u8 nexthdr; | 100 | u8 nexthdr; |
101 | int offset; | 101 | int offset; |
102 | 102 | ||
103 | ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h); | 103 | ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h); |
104 | if (!ih) { | 104 | if (!ih) { |
105 | audit_log_format(ab, " truncated=1"); | 105 | audit_log_format(ab, " truncated=1"); |
106 | return; | 106 | return; |
107 | } | 107 | } |
108 | 108 | ||
109 | nexthdr = ih->nexthdr; | 109 | nexthdr = ih->nexthdr; |
110 | offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), | 110 | offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), |
111 | &nexthdr); | 111 | &nexthdr); |
112 | 112 | ||
113 | audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu", | 113 | audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu", |
114 | &ih->saddr, &ih->daddr, nexthdr); | 114 | &ih->saddr, &ih->daddr, nexthdr); |
115 | 115 | ||
116 | if (offset) | 116 | if (offset) |
117 | audit_proto(ab, skb, nexthdr, offset); | 117 | audit_proto(ab, skb, nexthdr, offset); |
118 | } | 118 | } |
119 | 119 | ||
120 | static unsigned int | 120 | static unsigned int |
121 | audit_tg(struct sk_buff *skb, const struct xt_action_param *par) | 121 | audit_tg(struct sk_buff *skb, const struct xt_action_param *par) |
122 | { | 122 | { |
123 | const struct xt_audit_info *info = par->targinfo; | 123 | const struct xt_audit_info *info = par->targinfo; |
124 | struct audit_buffer *ab; | 124 | struct audit_buffer *ab; |
125 | 125 | ||
126 | ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); | 126 | ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); |
127 | if (ab == NULL) | 127 | if (ab == NULL) |
128 | goto errout; | 128 | goto errout; |
129 | 129 | ||
130 | audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s", | 130 | audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s", |
131 | info->type, par->hooknum, skb->len, | 131 | info->type, par->hooknum, skb->len, |
132 | par->in ? par->in->name : "?", | 132 | par->in ? par->in->name : "?", |
133 | par->out ? par->out->name : "?"); | 133 | par->out ? par->out->name : "?"); |
134 | 134 | ||
135 | if (skb->mark) | 135 | if (skb->mark) |
136 | audit_log_format(ab, " mark=%#x", skb->mark); | 136 | audit_log_format(ab, " mark=%#x", skb->mark); |
137 | 137 | ||
138 | if (skb->dev && skb->dev->type == ARPHRD_ETHER) { | 138 | if (skb->dev && skb->dev->type == ARPHRD_ETHER) { |
139 | audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x", | 139 | audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x", |
140 | eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, | 140 | eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, |
141 | ntohs(eth_hdr(skb)->h_proto)); | 141 | ntohs(eth_hdr(skb)->h_proto)); |
142 | 142 | ||
143 | if (par->family == NFPROTO_BRIDGE) { | 143 | if (par->family == NFPROTO_BRIDGE) { |
144 | switch (eth_hdr(skb)->h_proto) { | 144 | switch (eth_hdr(skb)->h_proto) { |
145 | case __constant_htons(ETH_P_IP): | 145 | case __constant_htons(ETH_P_IP): |
146 | audit_ip4(ab, skb); | 146 | audit_ip4(ab, skb); |
147 | break; | 147 | break; |
148 | 148 | ||
149 | case __constant_htons(ETH_P_IPV6): | 149 | case __constant_htons(ETH_P_IPV6): |
150 | audit_ip6(ab, skb); | 150 | audit_ip6(ab, skb); |
151 | break; | 151 | break; |
152 | } | 152 | } |
153 | } | 153 | } |
154 | } | 154 | } |
155 | 155 | ||
156 | switch (par->family) { | 156 | switch (par->family) { |
157 | case NFPROTO_IPV4: | 157 | case NFPROTO_IPV4: |
158 | audit_ip4(ab, skb); | 158 | audit_ip4(ab, skb); |
159 | break; | 159 | break; |
160 | 160 | ||
161 | case NFPROTO_IPV6: | 161 | case NFPROTO_IPV6: |
162 | audit_ip6(ab, skb); | 162 | audit_ip6(ab, skb); |
163 | break; | 163 | break; |
164 | } | 164 | } |
165 | 165 | ||
166 | #ifdef CONFIG_NETWORK_SECMARK | ||
167 | if (skb->secmark) | ||
168 | audit_log_secctx(ab, skb->secmark); | ||
169 | #endif | ||
170 | |||
166 | audit_log_end(ab); | 171 | audit_log_end(ab); |
167 | 172 | ||
168 | errout: | 173 | errout: |
169 | return XT_CONTINUE; | 174 | return XT_CONTINUE; |
170 | } | 175 | } |
171 | 176 | ||
172 | static unsigned int | 177 | static unsigned int |
173 | audit_tg_ebt(struct sk_buff *skb, const struct xt_action_param *par) | 178 | audit_tg_ebt(struct sk_buff *skb, const struct xt_action_param *par) |
174 | { | 179 | { |
175 | audit_tg(skb, par); | 180 | audit_tg(skb, par); |
176 | return EBT_CONTINUE; | 181 | return EBT_CONTINUE; |
177 | } | 182 | } |
178 | 183 | ||
179 | static int audit_tg_check(const struct xt_tgchk_param *par) | 184 | static int audit_tg_check(const struct xt_tgchk_param *par) |
180 | { | 185 | { |
181 | const struct xt_audit_info *info = par->targinfo; | 186 | const struct xt_audit_info *info = par->targinfo; |
182 | 187 | ||
183 | if (info->type > XT_AUDIT_TYPE_MAX) { | 188 | if (info->type > XT_AUDIT_TYPE_MAX) { |
184 | pr_info("Audit type out of range (valid range: 0..%hhu)\n", | 189 | pr_info("Audit type out of range (valid range: 0..%hhu)\n", |
185 | XT_AUDIT_TYPE_MAX); | 190 | XT_AUDIT_TYPE_MAX); |
186 | return -ERANGE; | 191 | return -ERANGE; |
187 | } | 192 | } |
188 | 193 | ||
189 | return 0; | 194 | return 0; |
190 | } | 195 | } |
191 | 196 | ||
192 | static struct xt_target audit_tg_reg[] __read_mostly = { | 197 | static struct xt_target audit_tg_reg[] __read_mostly = { |
193 | { | 198 | { |
194 | .name = "AUDIT", | 199 | .name = "AUDIT", |
195 | .family = NFPROTO_UNSPEC, | 200 | .family = NFPROTO_UNSPEC, |
196 | .target = audit_tg, | 201 | .target = audit_tg, |
197 | .targetsize = sizeof(struct xt_audit_info), | 202 | .targetsize = sizeof(struct xt_audit_info), |
198 | .checkentry = audit_tg_check, | 203 | .checkentry = audit_tg_check, |
199 | .me = THIS_MODULE, | 204 | .me = THIS_MODULE, |
200 | }, | 205 | }, |
201 | { | 206 | { |
202 | .name = "AUDIT", | 207 | .name = "AUDIT", |
203 | .family = NFPROTO_BRIDGE, | 208 | .family = NFPROTO_BRIDGE, |
204 | .target = audit_tg_ebt, | 209 | .target = audit_tg_ebt, |
205 | .targetsize = sizeof(struct xt_audit_info), | 210 | .targetsize = sizeof(struct xt_audit_info), |
206 | .checkentry = audit_tg_check, | 211 | .checkentry = audit_tg_check, |
207 | .me = THIS_MODULE, | 212 | .me = THIS_MODULE, |
208 | }, | 213 | }, |
209 | }; | 214 | }; |
210 | 215 | ||
211 | static int __init audit_tg_init(void) | 216 | static int __init audit_tg_init(void) |
212 | { | 217 | { |
213 | return xt_register_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg)); | 218 | return xt_register_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg)); |
214 | } | 219 | } |
215 | 220 | ||
216 | static void __exit audit_tg_exit(void) | 221 | static void __exit audit_tg_exit(void) |
217 | { | 222 | { |
218 | xt_unregister_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg)); | 223 | xt_unregister_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg)); |
219 | } | 224 | } |
220 | 225 | ||
221 | module_init(audit_tg_init); | 226 | module_init(audit_tg_init); |
222 | module_exit(audit_tg_exit); | 227 | module_exit(audit_tg_exit); |
223 | 228 | ||
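
In practice the AUDIT target above is attached from userspace with a rule such as "iptables -A INPUT -j AUDIT --type drop"; the --type value only selects the action= number that is logged, while the target itself always returns XT_CONTINUE (or EBT_CONTINUE) and never drops anything. The fragment below is a minimal sketch of the same audit_log_start()/audit_log_format()/audit_log_end() pattern used by audit_tg(), reduced to its skeleton; my_audit_pkt() is a hypothetical helper and not part of this commit.

/* Minimal sketch of the audit logging pattern used by audit_tg() above
 * (kernel context, CONFIG_AUDIT assumed; my_audit_pkt() is hypothetical
 * and not part of this commit). */
#include <linux/audit.h>
#include <linux/skbuff.h>

static void my_audit_pkt(const struct sk_buff *skb, unsigned int hooknum)
{
	struct audit_buffer *ab;

	/* May run in softirq context, hence GFP_ATOMIC. */
	ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
	if (ab == NULL)
		return;		/* auditing disabled or no memory */

	audit_log_format(ab, "hook=%u len=%u", hooknum, skb->len);
	if (skb->mark)
		audit_log_format(ab, " mark=%#x", skb->mark);

	audit_log_end(ab);
}

Records built this way reach userspace as NETFILTER_PKT audit events once auditd is running.
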