Commit f5caadbb3d8fc0b71533e880c684b2230bdb76ac
Exists in master and in 6 other branches
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
Showing 15 changed files
- include/linux/audit.h
- include/linux/netfilter/ipset/ip_set_ahash.h
- include/linux/netfilter/nfnetlink.h
- include/linux/netfilter/nfnetlink_queue.h
- kernel/audit.c
- net/netfilter/ipset/ip_set_hash_ip.c
- net/netfilter/ipset/ip_set_hash_ipport.c
- net/netfilter/ipset/ip_set_hash_ipportip.c
- net/netfilter/ipset/ip_set_hash_ipportnet.c
- net/netfilter/ipset/ip_set_hash_net.c
- net/netfilter/ipset/ip_set_hash_netiface.c
- net/netfilter/ipset/ip_set_hash_netport.c
- net/netfilter/nfnetlink.c
- net/netfilter/nfnetlink_queue.c
- net/netfilter/xt_AUDIT.c
include/linux/audit.h
... | ... | @@ -613,6 +613,12 @@ |
613 | 613 | extern void audit_log_key(struct audit_buffer *ab, |
614 | 614 | char *key); |
615 | 615 | extern void audit_log_lost(const char *message); |
616 | +#ifdef CONFIG_SECURITY | |
617 | +extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); | |
618 | +#else | |
619 | +#define audit_log_secctx(b,s) do { ; } while (0) | |
620 | +#endif | |
621 | + | |
616 | 622 | extern int audit_update_lsm_rules(void); |
617 | 623 | |
618 | 624 | /* Private API (for audit.c only) */ |
... | ... | @@ -635,6 +641,7 @@ |
635 | 641 | #define audit_log_untrustedstring(a,s) do { ; } while (0) |
636 | 642 | #define audit_log_d_path(b, p, d) do { ; } while (0) |
637 | 643 | #define audit_log_key(b, k) do { ; } while (0) |
644 | +#define audit_log_secctx(b,s) do { ; } while (0) | |
638 | 645 | #define audit_enabled 0 |
639 | 646 | #endif |
640 | 647 | #endif |
include/linux/netfilter/ipset/ip_set_ahash.h
... | ... | @@ -28,8 +28,33 @@ |
28 | 28 | /* Number of elements to store in an initial array block */ |
29 | 29 | #define AHASH_INIT_SIZE 4 |
30 | 30 | /* Max number of elements to store in an array block */ |
31 | -#define AHASH_MAX_SIZE (3*4) | |
31 | +#define AHASH_MAX_SIZE (3*AHASH_INIT_SIZE) | |
32 | 32 | |
33 | +/* Max number of elements can be tuned */ | |
34 | +#ifdef IP_SET_HASH_WITH_MULTI | |
35 | +#define AHASH_MAX(h) ((h)->ahash_max) | |
36 | + | |
37 | +static inline u8 | |
38 | +tune_ahash_max(u8 curr, u32 multi) | |
39 | +{ | |
40 | + u32 n; | |
41 | + | |
42 | + if (multi < curr) | |
43 | + return curr; | |
44 | + | |
45 | + n = curr + AHASH_INIT_SIZE; | |
46 | + /* Currently, at listing one hash bucket must fit into a message. | |
47 | + * Therefore we have a hard limit here. | |
48 | + */ | |
49 | + return n > curr && n <= 64 ? n : curr; | |
50 | +} | |
51 | +#define TUNE_AHASH_MAX(h, multi) \ | |
52 | + ((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi)) | |
53 | +#else | |
54 | +#define AHASH_MAX(h) AHASH_MAX_SIZE | |
55 | +#define TUNE_AHASH_MAX(h, multi) | |
56 | +#endif | |
57 | + | |
33 | 58 | /* A hash bucket */ |
34 | 59 | struct hbucket { |
35 | 60 | void *value; /* the array of the values */ |
... | ... | @@ -60,6 +85,9 @@ |
60 | 85 | u32 timeout; /* timeout value, if enabled */ |
61 | 86 | struct timer_list gc; /* garbage collection when timeout enabled */ |
62 | 87 | struct type_pf_next next; /* temporary storage for uadd */ |
88 | +#ifdef IP_SET_HASH_WITH_MULTI | |
89 | + u8 ahash_max; /* max elements in an array block */ | |
90 | +#endif | |
63 | 91 | #ifdef IP_SET_HASH_WITH_NETMASK |
64 | 92 | u8 netmask; /* netmask value for subnets to store */ |
65 | 93 | #endif |
66 | 94 | |
... | ... | @@ -211,12 +239,16 @@ |
211 | 239 | set->data = NULL; |
212 | 240 | } |
213 | 241 | |
214 | -#define HKEY(data, initval, htable_bits) \ | |
215 | -(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \ | |
216 | - & jhash_mask(htable_bits)) | |
217 | - | |
218 | 242 | #endif /* _IP_SET_AHASH_H */ |
219 | 243 | |
244 | +#ifndef HKEY_DATALEN | |
245 | +#define HKEY_DATALEN sizeof(struct type_pf_elem) | |
246 | +#endif | |
247 | + | |
248 | +#define HKEY(data, initval, htable_bits) \ | |
249 | +(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \ | |
250 | + & jhash_mask(htable_bits)) | |
251 | + | |
220 | 252 | #define CONCAT(a, b, c) a##b##c |
221 | 253 | #define TOKEN(a, b, c) CONCAT(a, b, c) |
222 | 254 | |
223 | 255 | |
... | ... | @@ -275,12 +307,13 @@ |
275 | 307 | /* Add an element to the hash table when resizing the set: |
276 | 308 | * we spare the maintenance of the internal counters. */ |
277 | 309 | static int |
278 | -type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value) | |
310 | +type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value, | |
311 | + u8 ahash_max) | |
279 | 312 | { |
280 | 313 | if (n->pos >= n->size) { |
281 | 314 | void *tmp; |
282 | 315 | |
283 | - if (n->size >= AHASH_MAX_SIZE) | |
316 | + if (n->size >= ahash_max) | |
284 | 317 | /* Trigger rehashing */ |
285 | 318 | return -EAGAIN; |
286 | 319 | |
... | ... | @@ -335,7 +368,7 @@ |
335 | 368 | for (j = 0; j < n->pos; j++) { |
336 | 369 | data = ahash_data(n, j); |
337 | 370 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); |
338 | - ret = type_pf_elem_add(m, data); | |
371 | + ret = type_pf_elem_add(m, data, AHASH_MAX(h)); | |
339 | 372 | if (ret < 0) { |
340 | 373 | read_unlock_bh(&set->lock); |
341 | 374 | ahash_destroy(t); |
... | ... | @@ -359,7 +392,7 @@ |
359 | 392 | return 0; |
360 | 393 | } |
361 | 394 | |
362 | -static void | |
395 | +static inline void | |
363 | 396 | type_pf_data_next(struct ip_set_hash *h, const struct type_pf_elem *d); |
364 | 397 | |
365 | 398 | /* Add an element to a hash and update the internal counters when succeeded, |
... | ... | @@ -372,7 +405,7 @@ |
372 | 405 | const struct type_pf_elem *d = value; |
373 | 406 | struct hbucket *n; |
374 | 407 | int i, ret = 0; |
375 | - u32 key; | |
408 | + u32 key, multi = 0; | |
376 | 409 | |
377 | 410 | if (h->elements >= h->maxelem) |
378 | 411 | return -IPSET_ERR_HASH_FULL; |
379 | 412 | |
... | ... | @@ -382,12 +415,12 @@ |
382 | 415 | key = HKEY(value, h->initval, t->htable_bits); |
383 | 416 | n = hbucket(t, key); |
384 | 417 | for (i = 0; i < n->pos; i++) |
385 | - if (type_pf_data_equal(ahash_data(n, i), d)) { | |
418 | + if (type_pf_data_equal(ahash_data(n, i), d, &multi)) { | |
386 | 419 | ret = -IPSET_ERR_EXIST; |
387 | 420 | goto out; |
388 | 421 | } |
389 | - | |
390 | - ret = type_pf_elem_add(n, value); | |
422 | + TUNE_AHASH_MAX(h, multi); | |
423 | + ret = type_pf_elem_add(n, value, AHASH_MAX(h)); | |
391 | 424 | if (ret != 0) { |
392 | 425 | if (ret == -EAGAIN) |
393 | 426 | type_pf_data_next(h, d); |
394 | 427 | |
... | ... | @@ -415,13 +448,13 @@ |
415 | 448 | struct hbucket *n; |
416 | 449 | int i; |
417 | 450 | struct type_pf_elem *data; |
418 | - u32 key; | |
451 | + u32 key, multi = 0; | |
419 | 452 | |
420 | 453 | key = HKEY(value, h->initval, t->htable_bits); |
421 | 454 | n = hbucket(t, key); |
422 | 455 | for (i = 0; i < n->pos; i++) { |
423 | 456 | data = ahash_data(n, i); |
424 | - if (!type_pf_data_equal(data, d)) | |
457 | + if (!type_pf_data_equal(data, d, &multi)) | |
425 | 458 | continue; |
426 | 459 | if (i != n->pos - 1) |
427 | 460 | /* Not last one */ |
428 | 461 | |
429 | 462 | |
... | ... | @@ -462,17 +495,17 @@ |
462 | 495 | struct hbucket *n; |
463 | 496 | const struct type_pf_elem *data; |
464 | 497 | int i, j = 0; |
465 | - u32 key; | |
498 | + u32 key, multi = 0; | |
466 | 499 | u8 host_mask = SET_HOST_MASK(set->family); |
467 | 500 | |
468 | 501 | pr_debug("test by nets\n"); |
469 | - for (; j < host_mask && h->nets[j].cidr; j++) { | |
502 | + for (; j < host_mask && h->nets[j].cidr && !multi; j++) { | |
470 | 503 | type_pf_data_netmask(d, h->nets[j].cidr); |
471 | 504 | key = HKEY(d, h->initval, t->htable_bits); |
472 | 505 | n = hbucket(t, key); |
473 | 506 | for (i = 0; i < n->pos; i++) { |
474 | 507 | data = ahash_data(n, i); |
475 | - if (type_pf_data_equal(data, d)) | |
508 | + if (type_pf_data_equal(data, d, &multi)) | |
476 | 509 | return 1; |
477 | 510 | } |
478 | 511 | } |
... | ... | @@ -490,7 +523,7 @@ |
490 | 523 | struct hbucket *n; |
491 | 524 | const struct type_pf_elem *data; |
492 | 525 | int i; |
493 | - u32 key; | |
526 | + u32 key, multi = 0; | |
494 | 527 | |
495 | 528 | #ifdef IP_SET_HASH_WITH_NETS |
496 | 529 | /* If we test an IP address and not a network address, |
... | ... | @@ -503,7 +536,7 @@ |
503 | 536 | n = hbucket(t, key); |
504 | 537 | for (i = 0; i < n->pos; i++) { |
505 | 538 | data = ahash_data(n, i); |
506 | - if (type_pf_data_equal(data, d)) | |
539 | + if (type_pf_data_equal(data, d, &multi)) | |
507 | 540 | return 1; |
508 | 541 | } |
509 | 542 | return 0; |
510 | 543 | |
... | ... | @@ -660,14 +693,14 @@ |
660 | 693 | |
661 | 694 | static int |
662 | 695 | type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value, |
663 | - u32 timeout) | |
696 | + u8 ahash_max, u32 timeout) | |
664 | 697 | { |
665 | 698 | struct type_pf_elem *data; |
666 | 699 | |
667 | 700 | if (n->pos >= n->size) { |
668 | 701 | void *tmp; |
669 | 702 | |
670 | - if (n->size >= AHASH_MAX_SIZE) | |
703 | + if (n->size >= ahash_max) | |
671 | 704 | /* Trigger rehashing */ |
672 | 705 | return -EAGAIN; |
673 | 706 | |
... | ... | @@ -772,7 +805,7 @@ |
772 | 805 | for (j = 0; j < n->pos; j++) { |
773 | 806 | data = ahash_tdata(n, j); |
774 | 807 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); |
775 | - ret = type_pf_elem_tadd(m, data, | |
808 | + ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), | |
776 | 809 | type_pf_data_timeout(data)); |
777 | 810 | if (ret < 0) { |
778 | 811 | read_unlock_bh(&set->lock); |
779 | 812 | |
... | ... | @@ -803,9 +836,9 @@ |
803 | 836 | const struct type_pf_elem *d = value; |
804 | 837 | struct hbucket *n; |
805 | 838 | struct type_pf_elem *data; |
806 | - int ret = 0, i, j = AHASH_MAX_SIZE + 1; | |
839 | + int ret = 0, i, j = AHASH_MAX(h) + 1; | |
807 | 840 | bool flag_exist = flags & IPSET_FLAG_EXIST; |
808 | - u32 key; | |
841 | + u32 key, multi = 0; | |
809 | 842 | |
810 | 843 | if (h->elements >= h->maxelem) |
811 | 844 | /* FIXME: when set is full, we slow down here */ |
812 | 845 | |
813 | 846 | |
... | ... | @@ -819,18 +852,18 @@ |
819 | 852 | n = hbucket(t, key); |
820 | 853 | for (i = 0; i < n->pos; i++) { |
821 | 854 | data = ahash_tdata(n, i); |
822 | - if (type_pf_data_equal(data, d)) { | |
855 | + if (type_pf_data_equal(data, d, &multi)) { | |
823 | 856 | if (type_pf_data_expired(data) || flag_exist) |
824 | 857 | j = i; |
825 | 858 | else { |
826 | 859 | ret = -IPSET_ERR_EXIST; |
827 | 860 | goto out; |
828 | 861 | } |
829 | - } else if (j == AHASH_MAX_SIZE + 1 && | |
862 | + } else if (j == AHASH_MAX(h) + 1 && | |
830 | 863 | type_pf_data_expired(data)) |
831 | 864 | j = i; |
832 | 865 | } |
833 | - if (j != AHASH_MAX_SIZE + 1) { | |
866 | + if (j != AHASH_MAX(h) + 1) { | |
834 | 867 | data = ahash_tdata(n, j); |
835 | 868 | #ifdef IP_SET_HASH_WITH_NETS |
836 | 869 | del_cidr(h, data->cidr, HOST_MASK); |
... | ... | @@ -840,7 +873,8 @@ |
840 | 873 | type_pf_data_timeout_set(data, timeout); |
841 | 874 | goto out; |
842 | 875 | } |
843 | - ret = type_pf_elem_tadd(n, d, timeout); | |
876 | + TUNE_AHASH_MAX(h, multi); | |
877 | + ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), timeout); | |
844 | 878 | if (ret != 0) { |
845 | 879 | if (ret == -EAGAIN) |
846 | 880 | type_pf_data_next(h, d); |
847 | 881 | |
... | ... | @@ -865,13 +899,13 @@ |
865 | 899 | struct hbucket *n; |
866 | 900 | int i; |
867 | 901 | struct type_pf_elem *data; |
868 | - u32 key; | |
902 | + u32 key, multi = 0; | |
869 | 903 | |
870 | 904 | key = HKEY(value, h->initval, t->htable_bits); |
871 | 905 | n = hbucket(t, key); |
872 | 906 | for (i = 0; i < n->pos; i++) { |
873 | 907 | data = ahash_tdata(n, i); |
874 | - if (!type_pf_data_equal(data, d)) | |
908 | + if (!type_pf_data_equal(data, d, &multi)) | |
875 | 909 | continue; |
876 | 910 | if (type_pf_data_expired(data)) |
877 | 911 | return -IPSET_ERR_EXIST; |
878 | 912 | |
879 | 913 | |
... | ... | @@ -911,16 +945,16 @@ |
911 | 945 | struct type_pf_elem *data; |
912 | 946 | struct hbucket *n; |
913 | 947 | int i, j = 0; |
914 | - u32 key; | |
948 | + u32 key, multi = 0; | |
915 | 949 | u8 host_mask = SET_HOST_MASK(set->family); |
916 | 950 | |
917 | - for (; j < host_mask && h->nets[j].cidr; j++) { | |
951 | + for (; j < host_mask && h->nets[j].cidr && !multi; j++) { | |
918 | 952 | type_pf_data_netmask(d, h->nets[j].cidr); |
919 | 953 | key = HKEY(d, h->initval, t->htable_bits); |
920 | 954 | n = hbucket(t, key); |
921 | 955 | for (i = 0; i < n->pos; i++) { |
922 | 956 | data = ahash_tdata(n, i); |
923 | - if (type_pf_data_equal(data, d)) | |
957 | + if (type_pf_data_equal(data, d, &multi)) | |
924 | 958 | return !type_pf_data_expired(data); |
925 | 959 | } |
926 | 960 | } |
... | ... | @@ -936,7 +970,7 @@ |
936 | 970 | struct type_pf_elem *data, *d = value; |
937 | 971 | struct hbucket *n; |
938 | 972 | int i; |
939 | - u32 key; | |
973 | + u32 key, multi = 0; | |
940 | 974 | |
941 | 975 | #ifdef IP_SET_HASH_WITH_NETS |
942 | 976 | if (d->cidr == SET_HOST_MASK(set->family)) |
... | ... | @@ -946,7 +980,7 @@ |
946 | 980 | n = hbucket(t, key); |
947 | 981 | for (i = 0; i < n->pos; i++) { |
948 | 982 | data = ahash_tdata(n, i); |
949 | - if (type_pf_data_equal(data, d)) | |
983 | + if (type_pf_data_equal(data, d, &multi)) | |
950 | 984 | return !type_pf_data_expired(data); |
951 | 985 | } |
952 | 986 | return 0; |
... | ... | @@ -1054,6 +1088,8 @@ |
1054 | 1088 | IPSET_GC_PERIOD(h->timeout)); |
1055 | 1089 | } |
1056 | 1090 | |
1091 | +#undef HKEY_DATALEN | |
1092 | +#undef HKEY | |
1057 | 1093 | #undef type_pf_data_equal |
1058 | 1094 | #undef type_pf_data_isnull |
1059 | 1095 | #undef type_pf_data_copy |
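
The new u32 *multi argument threaded through every type_pf_data_equal() call lets a set type flag elements that rehashing can never separate: a type built with IP_SET_HASH_WITH_MULTI bumps the counter once the fields that enter the hash key have matched, and the add path then grows the per-set bucket limit via TUNE_AHASH_MAX() (in AHASH_INIT_SIZE steps, capped at 64 so one bucket still fits into a netlink message when listing) instead of triggering a pointless resize. A sketch of an equality helper that participates, using an invented element type rather than one from this commit:

static inline bool
hash_foo4_data_equal(const struct hash_foo4_elem *e1,
		     const struct hash_foo4_elem *e2,
		     u32 *multi)
{
	return e1->ip == e2->ip &&
	       e1->cidr == e2->cidr &&
	       (++*multi) &&		/* hashed fields match: record a "multi" hit */
	       e1->iface == e2->iface;	/* compared, but not part of the hash key */
}

Types that do not define IP_SET_HASH_WITH_MULTI simply ignore the pointer, so their equality helpers only gain the extra parameter.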
include/linux/netfilter/nfnetlink.h
... | ... | @@ -60,6 +60,9 @@ |
60 | 60 | int (*call)(struct sock *nl, struct sk_buff *skb, |
61 | 61 | const struct nlmsghdr *nlh, |
62 | 62 | const struct nlattr * const cda[]); |
63 | + int (*call_rcu)(struct sock *nl, struct sk_buff *skb, | |
64 | + const struct nlmsghdr *nlh, | |
65 | + const struct nlattr * const cda[]); | |
63 | 66 | const struct nla_policy *policy; /* netlink attribute policy */ |
64 | 67 | const u_int16_t attr_count; /* number of nlattr's */ |
65 | 68 | }; |
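
A message type can now register its handler through either field: .call handlers keep running under the nfnl mutex as before, while .call_rcu handlers are invoked under rcu_read_lock() only and therefore must not sleep. A sketch of a callback table using both, with made-up names standing in for a real subsystem's:

static const struct nfnl_callback example_cb[EXAMPLE_MSG_MAX] = {
	[EXAMPLE_MSG_LOOKUP] = {		/* hot path: lockless, RCU only */
		.call_rcu   = example_recv_lookup,
		.attr_count = EXAMPLE_ATTR_MAX,
		.policy     = example_policy,
	},
	[EXAMPLE_MSG_CONFIG] = {		/* rare path: serialized by nfnl_mutex */
		.call       = example_recv_config,
		.attr_count = EXAMPLE_ATTR_MAX,
		.policy     = example_policy,
	},
};

The nfqnl_cb table later in this commit shows the same split in real code.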
include/linux/netfilter/nfnetlink_queue.h
... | ... | @@ -8,6 +8,7 @@ |
8 | 8 | NFQNL_MSG_PACKET, /* packet from kernel to userspace */ |
9 | 9 | NFQNL_MSG_VERDICT, /* verdict from userspace to kernel */ |
10 | 10 | NFQNL_MSG_CONFIG, /* connect to a particular queue */ |
11 | + NFQNL_MSG_VERDICT_BATCH, /* batchv from userspace to kernel */ | |
11 | 12 | |
12 | 13 | NFQNL_MSG_MAX |
13 | 14 | }; |
kernel/audit.c
... | ... | @@ -55,6 +55,9 @@ |
55 | 55 | #include <net/sock.h> |
56 | 56 | #include <net/netlink.h> |
57 | 57 | #include <linux/skbuff.h> |
58 | +#ifdef CONFIG_SECURITY | |
59 | +#include <linux/security.h> | |
60 | +#endif | |
58 | 61 | #include <linux/netlink.h> |
59 | 62 | #include <linux/freezer.h> |
60 | 63 | #include <linux/tty.h> |
... | ... | @@ -1501,6 +1504,32 @@ |
1501 | 1504 | audit_log_end(ab); |
1502 | 1505 | } |
1503 | 1506 | } |
1507 | + | |
1508 | +#ifdef CONFIG_SECURITY | |
1509 | +/** | |
1510 | + * audit_log_secctx - Converts and logs SELinux context | |
1511 | + * @ab: audit_buffer | |
1512 | + * @secid: security number | |
1513 | + * | |
1514 | + * This is a helper function that calls security_secid_to_secctx to convert | |
1515 | + * secid to secctx and then adds the (converted) SELinux context to the audit | |
1516 | + * log by calling audit_log_format, thus also preventing leak of internal secid | |
1517 | + * to userspace. If secid cannot be converted audit_panic is called. | |
1518 | + */ | |
1519 | +void audit_log_secctx(struct audit_buffer *ab, u32 secid) | |
1520 | +{ | |
1521 | + u32 len; | |
1522 | + char *secctx; | |
1523 | + | |
1524 | + if (security_secid_to_secctx(secid, &secctx, &len)) { | |
1525 | + audit_panic("Cannot convert secid to context"); | |
1526 | + } else { | |
1527 | + audit_log_format(ab, " obj=%s", secctx); | |
1528 | + security_release_secctx(secctx, len); | |
1529 | + } | |
1530 | +} | |
1531 | +EXPORT_SYMBOL(audit_log_secctx); | |
1532 | +#endif | |
1504 | 1533 | |
1505 | 1534 | EXPORT_SYMBOL(audit_log_start); |
1506 | 1535 | EXPORT_SYMBOL(audit_log_end); |
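
audit_log_secctx() is exported so netfilter code can append the SELinux context behind a packet's secmark to an audit record instead of leaking the raw secid; net/netfilter/xt_AUDIT.c is among the files this commit touches. A minimal usage sketch, assuming a caller with an skb in scope; the surrounding calls are illustrative rather than copied from the xt_AUDIT hunk, which is not shown here:

	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
	if (ab) {
		audit_log_format(ab, "mark=%#x", skb->mark);
		if (skb->secmark)
			audit_log_secctx(ab, skb->secmark); /* appends " obj=<context>" */
		audit_log_end(ab);
	}

With CONFIG_SECURITY (or CONFIG_AUDIT) disabled, the stubs added to audit.h compile the call away, so callers need no #ifdef of their own.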
net/netfilter/ipset/ip_set_hash_ip.c
... | ... | @@ -53,7 +53,8 @@ |
53 | 53 | |
54 | 54 | static inline bool |
55 | 55 | hash_ip4_data_equal(const struct hash_ip4_elem *ip1, |
56 | - const struct hash_ip4_elem *ip2) | |
56 | + const struct hash_ip4_elem *ip2, | |
57 | + u32 *multi) | |
57 | 58 | { |
58 | 59 | return ip1->ip == ip2->ip; |
59 | 60 | } |
... | ... | @@ -225,7 +226,8 @@ |
225 | 226 | |
226 | 227 | static inline bool |
227 | 228 | hash_ip6_data_equal(const struct hash_ip6_elem *ip1, |
228 | - const struct hash_ip6_elem *ip2) | |
229 | + const struct hash_ip6_elem *ip2, | |
230 | + u32 *multi) | |
229 | 231 | { |
230 | 232 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0; |
231 | 233 | } |
net/netfilter/ipset/ip_set_hash_ipport.c
... | ... | @@ -60,7 +60,8 @@ |
60 | 60 | |
61 | 61 | static inline bool |
62 | 62 | hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1, |
63 | - const struct hash_ipport4_elem *ip2) | |
63 | + const struct hash_ipport4_elem *ip2, | |
64 | + u32 *multi) | |
64 | 65 | { |
65 | 66 | return ip1->ip == ip2->ip && |
66 | 67 | ip1->port == ip2->port && |
... | ... | @@ -276,7 +277,8 @@ |
276 | 277 | |
277 | 278 | static inline bool |
278 | 279 | hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1, |
279 | - const struct hash_ipport6_elem *ip2) | |
280 | + const struct hash_ipport6_elem *ip2, | |
281 | + u32 *multi) | |
280 | 282 | { |
281 | 283 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
282 | 284 | ip1->port == ip2->port && |
net/netfilter/ipset/ip_set_hash_ipportip.c
... | ... | @@ -62,7 +62,8 @@ |
62 | 62 | |
63 | 63 | static inline bool |
64 | 64 | hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1, |
65 | - const struct hash_ipportip4_elem *ip2) | |
65 | + const struct hash_ipportip4_elem *ip2, | |
66 | + u32 *multi) | |
66 | 67 | { |
67 | 68 | return ip1->ip == ip2->ip && |
68 | 69 | ip1->ip2 == ip2->ip2 && |
... | ... | @@ -286,7 +287,8 @@ |
286 | 287 | |
287 | 288 | static inline bool |
288 | 289 | hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1, |
289 | - const struct hash_ipportip6_elem *ip2) | |
290 | + const struct hash_ipportip6_elem *ip2, | |
291 | + u32 *multi) | |
290 | 292 | { |
291 | 293 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
292 | 294 | ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 && |
net/netfilter/ipset/ip_set_hash_ipportnet.c
... | ... | @@ -62,7 +62,8 @@ |
62 | 62 | |
63 | 63 | static inline bool |
64 | 64 | hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1, |
65 | - const struct hash_ipportnet4_elem *ip2) | |
65 | + const struct hash_ipportnet4_elem *ip2, | |
66 | + u32 *multi) | |
66 | 67 | { |
67 | 68 | return ip1->ip == ip2->ip && |
68 | 69 | ip1->ip2 == ip2->ip2 && |
... | ... | @@ -335,7 +336,8 @@ |
335 | 336 | |
336 | 337 | static inline bool |
337 | 338 | hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1, |
338 | - const struct hash_ipportnet6_elem *ip2) | |
339 | + const struct hash_ipportnet6_elem *ip2, | |
340 | + u32 *multi) | |
339 | 341 | { |
340 | 342 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
341 | 343 | ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 && |
net/netfilter/ipset/ip_set_hash_net.c
... | ... | @@ -58,7 +58,8 @@ |
58 | 58 | |
59 | 59 | static inline bool |
60 | 60 | hash_net4_data_equal(const struct hash_net4_elem *ip1, |
61 | - const struct hash_net4_elem *ip2) | |
61 | + const struct hash_net4_elem *ip2, | |
62 | + u32 *multi) | |
62 | 63 | { |
63 | 64 | return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr; |
64 | 65 | } |
... | ... | @@ -249,7 +250,8 @@ |
249 | 250 | |
250 | 251 | static inline bool |
251 | 252 | hash_net6_data_equal(const struct hash_net6_elem *ip1, |
252 | - const struct hash_net6_elem *ip2) | |
253 | + const struct hash_net6_elem *ip2, | |
254 | + u32 *multi) | |
253 | 255 | { |
254 | 256 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
255 | 257 | ip1->cidr == ip2->cidr; |
net/netfilter/ipset/ip_set_hash_netiface.c
... | ... | @@ -99,7 +99,7 @@ |
99 | 99 | |
100 | 100 | while (n) { |
101 | 101 | const char *d = iface_data(n); |
102 | - int res = ifname_compare(*iface, d); | |
102 | + long res = ifname_compare(*iface, d); | |
103 | 103 | |
104 | 104 | if (res < 0) |
105 | 105 | n = n->rb_left; |
... | ... | @@ -121,7 +121,7 @@ |
121 | 121 | |
122 | 122 | while (*n) { |
123 | 123 | char *ifname = iface_data(*n); |
124 | - int res = ifname_compare(*iface, ifname); | |
124 | + long res = ifname_compare(*iface, ifname); | |
125 | 125 | |
126 | 126 | p = *n; |
127 | 127 | if (res < 0) |
... | ... | @@ -159,31 +159,42 @@ |
159 | 159 | |
160 | 160 | /* The type variant functions: IPv4 */ |
161 | 161 | |
162 | +struct hash_netiface4_elem_hashed { | |
163 | + __be32 ip; | |
164 | + u8 physdev; | |
165 | + u8 cidr; | |
166 | + u16 padding; | |
167 | +}; | |
168 | + | |
169 | +#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed) | |
170 | + | |
162 | 171 | /* Member elements without timeout */ |
163 | 172 | struct hash_netiface4_elem { |
164 | 173 | __be32 ip; |
165 | - const char *iface; | |
166 | 174 | u8 physdev; |
167 | 175 | u8 cidr; |
168 | 176 | u16 padding; |
177 | + const char *iface; | |
169 | 178 | }; |
170 | 179 | |
171 | 180 | /* Member elements with timeout support */ |
172 | 181 | struct hash_netiface4_telem { |
173 | 182 | __be32 ip; |
174 | - const char *iface; | |
175 | 183 | u8 physdev; |
176 | 184 | u8 cidr; |
177 | 185 | u16 padding; |
186 | + const char *iface; | |
178 | 187 | unsigned long timeout; |
179 | 188 | }; |
180 | 189 | |
181 | 190 | static inline bool |
182 | 191 | hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1, |
183 | - const struct hash_netiface4_elem *ip2) | |
192 | + const struct hash_netiface4_elem *ip2, | |
193 | + u32 *multi) | |
184 | 194 | { |
185 | 195 | return ip1->ip == ip2->ip && |
186 | 196 | ip1->cidr == ip2->cidr && |
197 | + (++*multi) && | |
187 | 198 | ip1->physdev == ip2->physdev && |
188 | 199 | ip1->iface == ip2->iface; |
189 | 200 | } |
... | ... | @@ -257,6 +268,7 @@ |
257 | 268 | |
258 | 269 | #define IP_SET_HASH_WITH_NETS |
259 | 270 | #define IP_SET_HASH_WITH_RBTREE |
271 | +#define IP_SET_HASH_WITH_MULTI | |
260 | 272 | |
261 | 273 | #define PF 4 |
262 | 274 | #define HOST_MASK 32 |
... | ... | @@ -424,29 +436,40 @@ |
424 | 436 | |
425 | 437 | /* The type variant functions: IPv6 */ |
426 | 438 | |
439 | +struct hash_netiface6_elem_hashed { | |
440 | + union nf_inet_addr ip; | |
441 | + u8 physdev; | |
442 | + u8 cidr; | |
443 | + u16 padding; | |
444 | +}; | |
445 | + | |
446 | +#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed) | |
447 | + | |
427 | 448 | struct hash_netiface6_elem { |
428 | 449 | union nf_inet_addr ip; |
429 | - const char *iface; | |
430 | 450 | u8 physdev; |
431 | 451 | u8 cidr; |
432 | 452 | u16 padding; |
453 | + const char *iface; | |
433 | 454 | }; |
434 | 455 | |
435 | 456 | struct hash_netiface6_telem { |
436 | 457 | union nf_inet_addr ip; |
437 | - const char *iface; | |
438 | 458 | u8 physdev; |
439 | 459 | u8 cidr; |
440 | 460 | u16 padding; |
461 | + const char *iface; | |
441 | 462 | unsigned long timeout; |
442 | 463 | }; |
443 | 464 | |
444 | 465 | static inline bool |
445 | 466 | hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1, |
446 | - const struct hash_netiface6_elem *ip2) | |
467 | + const struct hash_netiface6_elem *ip2, | |
468 | + u32 *multi) | |
447 | 469 | { |
448 | 470 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
449 | 471 | ip1->cidr == ip2->cidr && |
472 | + (++*multi) && | |
450 | 473 | ip1->physdev == ip2->physdev && |
451 | 474 | ip1->iface == ip2->iface; |
452 | 475 | } |
... | ... | @@ -681,6 +704,7 @@ |
681 | 704 | h->maxelem = maxelem; |
682 | 705 | get_random_bytes(&h->initval, sizeof(h->initval)); |
683 | 706 | h->timeout = IPSET_NO_TIMEOUT; |
707 | + h->ahash_max = AHASH_MAX_SIZE; | |
684 | 708 | |
685 | 709 | hbits = htable_bits(hashsize); |
686 | 710 | h->table = ip_set_alloc( |
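
Moving the iface pointer behind the fixed-size fields is what makes the new HKEY_DATALEN override useful here: hash_netiface hashes only the leading hash_netiface*_elem_hashed portion of each element (ip, physdev, cidr, padding), while the interned interface-name pointer is compared in data_equal() but kept out of the key, so elements that agree on the hashed fields and differ only in interface necessarily share a bucket, which is exactly what the (++*multi) bookkeeping above reports back to the add path. A stripped-down sketch of the pattern with an invented type, not code from this commit:

struct hash_foo4_elem_hashed {	/* the part fed to jhash2() via HKEY() */
	__be32 ip;
	u8 physdev;
	u8 cidr;
	u16 padding;
};

struct hash_foo4_elem {		/* full element: hashed prefix first */
	__be32 ip;
	u8 physdev;
	u8 cidr;
	u16 padding;
	const char *iface;	/* compared, never hashed */
};

#define HKEY_DATALEN	sizeof(struct hash_foo4_elem_hashed)
/* ... type_pf_* definitions for the set type ... */
#include <linux/netfilter/ipset/ip_set_ahash.h>

Defining IP_SET_HASH_WITH_MULTI (as the IPv4 hunk above does) and seeding h->ahash_max = AHASH_MAX_SIZE at create time completes the opt-in.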
net/netfilter/ipset/ip_set_hash_netport.c
... | ... | @@ -59,7 +59,8 @@ |
59 | 59 | |
60 | 60 | static inline bool |
61 | 61 | hash_netport4_data_equal(const struct hash_netport4_elem *ip1, |
62 | - const struct hash_netport4_elem *ip2) | |
62 | + const struct hash_netport4_elem *ip2, | |
63 | + u32 *multi) | |
63 | 64 | { |
64 | 65 | return ip1->ip == ip2->ip && |
65 | 66 | ip1->port == ip2->port && |
... | ... | @@ -300,7 +301,8 @@ |
300 | 301 | |
301 | 302 | static inline bool |
302 | 303 | hash_netport6_data_equal(const struct hash_netport6_elem *ip1, |
303 | - const struct hash_netport6_elem *ip2) | |
304 | + const struct hash_netport6_elem *ip2, | |
305 | + u32 *multi) | |
304 | 306 | { |
305 | 307 | return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && |
306 | 308 | ip1->port == ip2->port && |
net/netfilter/nfnetlink.c
... | ... | @@ -37,7 +37,7 @@ |
37 | 37 | |
38 | 38 | static char __initdata nfversion[] = "0.30"; |
39 | 39 | |
40 | -static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; | |
40 | +static const struct nfnetlink_subsystem __rcu *subsys_table[NFNL_SUBSYS_COUNT]; | |
41 | 41 | static DEFINE_MUTEX(nfnl_mutex); |
42 | 42 | |
43 | 43 | void nfnl_lock(void) |
... | ... | @@ -59,7 +59,7 @@ |
59 | 59 | nfnl_unlock(); |
60 | 60 | return -EBUSY; |
61 | 61 | } |
62 | - subsys_table[n->subsys_id] = n; | |
62 | + rcu_assign_pointer(subsys_table[n->subsys_id], n); | |
63 | 63 | nfnl_unlock(); |
64 | 64 | |
65 | 65 | return 0; |
... | ... | @@ -71,7 +71,7 @@ |
71 | 71 | nfnl_lock(); |
72 | 72 | subsys_table[n->subsys_id] = NULL; |
73 | 73 | nfnl_unlock(); |
74 | - | |
74 | + synchronize_rcu(); | |
75 | 75 | return 0; |
76 | 76 | } |
77 | 77 | EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister); |
... | ... | @@ -83,7 +83,7 @@ |
83 | 83 | if (subsys_id >= NFNL_SUBSYS_COUNT) |
84 | 84 | return NULL; |
85 | 85 | |
86 | - return subsys_table[subsys_id]; | |
86 | + return rcu_dereference(subsys_table[subsys_id]); | |
87 | 87 | } |
88 | 88 | |
89 | 89 | static inline const struct nfnl_callback * |
... | ... | @@ -139,21 +139,27 @@ |
139 | 139 | |
140 | 140 | type = nlh->nlmsg_type; |
141 | 141 | replay: |
142 | + rcu_read_lock(); | |
142 | 143 | ss = nfnetlink_get_subsys(type); |
143 | 144 | if (!ss) { |
144 | 145 | #ifdef CONFIG_MODULES |
145 | - nfnl_unlock(); | |
146 | + rcu_read_unlock(); | |
146 | 147 | request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type)); |
147 | - nfnl_lock(); | |
148 | + rcu_read_lock(); | |
148 | 149 | ss = nfnetlink_get_subsys(type); |
149 | 150 | if (!ss) |
150 | 151 | #endif |
152 | + { | |
153 | + rcu_read_unlock(); | |
151 | 154 | return -EINVAL; |
155 | + } | |
152 | 156 | } |
153 | 157 | |
154 | 158 | nc = nfnetlink_find_client(type, ss); |
155 | - if (!nc) | |
159 | + if (!nc) { | |
160 | + rcu_read_unlock(); | |
156 | 161 | return -EINVAL; |
162 | + } | |
157 | 163 | |
158 | 164 | { |
159 | 165 | int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); |
... | ... | @@ -167,7 +173,23 @@ |
167 | 173 | if (err < 0) |
168 | 174 | return err; |
169 | 175 | |
170 | - err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda); | |
176 | + if (nc->call_rcu) { | |
177 | + err = nc->call_rcu(net->nfnl, skb, nlh, | |
178 | + (const struct nlattr **)cda); | |
179 | + rcu_read_unlock(); | |
180 | + } else { | |
181 | + rcu_read_unlock(); | |
182 | + nfnl_lock(); | |
183 | + if (rcu_dereference_protected( | |
184 | + subsys_table[NFNL_SUBSYS_ID(type)], | |
185 | + lockdep_is_held(&nfnl_mutex)) != ss || | |
186 | + nfnetlink_find_client(type, ss) != nc) | |
187 | + err = -EAGAIN; | |
188 | + else | |
189 | + err = nc->call(net->nfnl, skb, nlh, | |
190 | + (const struct nlattr **)cda); | |
191 | + nfnl_unlock(); | |
192 | + } | |
171 | 193 | if (err == -EAGAIN) |
172 | 194 | goto replay; |
173 | 195 | return err; |
174 | 196 | |
... | ... | @@ -176,9 +198,7 @@ |
176 | 198 | |
177 | 199 | static void nfnetlink_rcv(struct sk_buff *skb) |
178 | 200 | { |
179 | - nfnl_lock(); | |
180 | 201 | netlink_rcv_skb(skb, &nfnetlink_rcv_msg); |
181 | - nfnl_unlock(); | |
182 | 202 | } |
183 | 203 | |
184 | 204 | static int __net_init nfnetlink_net_init(struct net *net) |
net/netfilter/nfnetlink_queue.c
... | ... | @@ -58,7 +58,7 @@ |
58 | 58 | */ |
59 | 59 | spinlock_t lock; |
60 | 60 | unsigned int queue_total; |
61 | - atomic_t id_sequence; /* 'sequence' of pkt ids */ | |
61 | + unsigned int id_sequence; /* 'sequence' of pkt ids */ | |
62 | 62 | struct list_head queue_list; /* packets in queue */ |
63 | 63 | }; |
64 | 64 | |
... | ... | @@ -171,6 +171,13 @@ |
171 | 171 | queue->queue_total++; |
172 | 172 | } |
173 | 173 | |
174 | +static void | |
175 | +__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) | |
176 | +{ | |
177 | + list_del(&entry->list); | |
178 | + queue->queue_total--; | |
179 | +} | |
180 | + | |
174 | 181 | static struct nf_queue_entry * |
175 | 182 | find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) |
176 | 183 | { |
... | ... | @@ -185,10 +192,8 @@ |
185 | 192 | } |
186 | 193 | } |
187 | 194 | |
188 | - if (entry) { | |
189 | - list_del(&entry->list); | |
190 | - queue->queue_total--; | |
191 | - } | |
195 | + if (entry) | |
196 | + __dequeue_entry(queue, entry); | |
192 | 197 | |
193 | 198 | spin_unlock_bh(&queue->lock); |
194 | 199 | |
195 | 200 | |
... | ... | @@ -213,13 +218,15 @@ |
213 | 218 | |
214 | 219 | static struct sk_buff * |
215 | 220 | nfqnl_build_packet_message(struct nfqnl_instance *queue, |
216 | - struct nf_queue_entry *entry) | |
221 | + struct nf_queue_entry *entry, | |
222 | + __be32 **packet_id_ptr) | |
217 | 223 | { |
218 | 224 | sk_buff_data_t old_tail; |
219 | 225 | size_t size; |
220 | 226 | size_t data_len = 0; |
221 | 227 | struct sk_buff *skb; |
222 | - struct nfqnl_msg_packet_hdr pmsg; | |
228 | + struct nlattr *nla; | |
229 | + struct nfqnl_msg_packet_hdr *pmsg; | |
223 | 230 | struct nlmsghdr *nlh; |
224 | 231 | struct nfgenmsg *nfmsg; |
225 | 232 | struct sk_buff *entskb = entry->skb; |
226 | 233 | |
... | ... | @@ -272,13 +279,12 @@ |
272 | 279 | nfmsg->version = NFNETLINK_V0; |
273 | 280 | nfmsg->res_id = htons(queue->queue_num); |
274 | 281 | |
275 | - entry->id = atomic_inc_return(&queue->id_sequence); | |
276 | - pmsg.packet_id = htonl(entry->id); | |
277 | - pmsg.hw_protocol = entskb->protocol; | |
278 | - pmsg.hook = entry->hook; | |
282 | + nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg)); | |
283 | + pmsg = nla_data(nla); | |
284 | + pmsg->hw_protocol = entskb->protocol; | |
285 | + pmsg->hook = entry->hook; | |
286 | + *packet_id_ptr = &pmsg->packet_id; | |
279 | 287 | |
280 | - NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg); | |
281 | - | |
282 | 288 | indev = entry->indev; |
283 | 289 | if (indev) { |
284 | 290 | #ifndef CONFIG_BRIDGE_NETFILTER |
... | ... | @@ -389,6 +395,7 @@ |
389 | 395 | struct sk_buff *nskb; |
390 | 396 | struct nfqnl_instance *queue; |
391 | 397 | int err = -ENOBUFS; |
398 | + __be32 *packet_id_ptr; | |
392 | 399 | |
393 | 400 | /* rcu_read_lock()ed by nf_hook_slow() */ |
394 | 401 | queue = instance_lookup(queuenum); |
... | ... | @@ -402,7 +409,7 @@ |
402 | 409 | goto err_out; |
403 | 410 | } |
404 | 411 | |
405 | - nskb = nfqnl_build_packet_message(queue, entry); | |
412 | + nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr); | |
406 | 413 | if (nskb == NULL) { |
407 | 414 | err = -ENOMEM; |
408 | 415 | goto err_out; |
... | ... | @@ -421,6 +428,8 @@ |
421 | 428 | queue->queue_total); |
422 | 429 | goto err_out_free_nskb; |
423 | 430 | } |
431 | + entry->id = ++queue->id_sequence; | |
432 | + *packet_id_ptr = htonl(entry->id); | |
424 | 433 | |
425 | 434 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ |
426 | 435 | err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT); |
427 | 436 | |
... | ... | @@ -608,7 +617,93 @@ |
608 | 617 | [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, |
609 | 618 | }; |
610 | 619 | |
620 | +static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = { | |
621 | + [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, | |
622 | + [NFQA_MARK] = { .type = NLA_U32 }, | |
623 | +}; | |
624 | + | |
625 | +static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid) | |
626 | +{ | |
627 | + struct nfqnl_instance *queue; | |
628 | + | |
629 | + queue = instance_lookup(queue_num); | |
630 | + if (!queue) | |
631 | + return ERR_PTR(-ENODEV); | |
632 | + | |
633 | + if (queue->peer_pid != nlpid) | |
634 | + return ERR_PTR(-EPERM); | |
635 | + | |
636 | + return queue; | |
637 | +} | |
638 | + | |
639 | +static struct nfqnl_msg_verdict_hdr* | |
640 | +verdicthdr_get(const struct nlattr * const nfqa[]) | |
641 | +{ | |
642 | + struct nfqnl_msg_verdict_hdr *vhdr; | |
643 | + unsigned int verdict; | |
644 | + | |
645 | + if (!nfqa[NFQA_VERDICT_HDR]) | |
646 | + return NULL; | |
647 | + | |
648 | + vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); | |
649 | + verdict = ntohl(vhdr->verdict); | |
650 | + if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) | |
651 | + return NULL; | |
652 | + return vhdr; | |
653 | +} | |
654 | + | |
655 | +static int nfq_id_after(unsigned int id, unsigned int max) | |
656 | +{ | |
657 | + return (int)(id - max) > 0; | |
658 | +} | |
659 | + | |
611 | 660 | static int |
661 | +nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb, | |
662 | + const struct nlmsghdr *nlh, | |
663 | + const struct nlattr * const nfqa[]) | |
664 | +{ | |
665 | + struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); | |
666 | + struct nf_queue_entry *entry, *tmp; | |
667 | + unsigned int verdict, maxid; | |
668 | + struct nfqnl_msg_verdict_hdr *vhdr; | |
669 | + struct nfqnl_instance *queue; | |
670 | + LIST_HEAD(batch_list); | |
671 | + u16 queue_num = ntohs(nfmsg->res_id); | |
672 | + | |
673 | + queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid); | |
674 | + if (IS_ERR(queue)) | |
675 | + return PTR_ERR(queue); | |
676 | + | |
677 | + vhdr = verdicthdr_get(nfqa); | |
678 | + if (!vhdr) | |
679 | + return -EINVAL; | |
680 | + | |
681 | + verdict = ntohl(vhdr->verdict); | |
682 | + maxid = ntohl(vhdr->id); | |
683 | + | |
684 | + spin_lock_bh(&queue->lock); | |
685 | + | |
686 | + list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) { | |
687 | + if (nfq_id_after(entry->id, maxid)) | |
688 | + break; | |
689 | + __dequeue_entry(queue, entry); | |
690 | + list_add_tail(&entry->list, &batch_list); | |
691 | + } | |
692 | + | |
693 | + spin_unlock_bh(&queue->lock); | |
694 | + | |
695 | + if (list_empty(&batch_list)) | |
696 | + return -ENOENT; | |
697 | + | |
698 | + list_for_each_entry_safe(entry, tmp, &batch_list, list) { | |
699 | + if (nfqa[NFQA_MARK]) | |
700 | + entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); | |
701 | + nf_reinject(entry, verdict); | |
702 | + } | |
703 | + return 0; | |
704 | +} | |
705 | + | |
706 | +static int | |
612 | 707 | nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, |
613 | 708 | const struct nlmsghdr *nlh, |
614 | 709 | const struct nlattr * const nfqa[]) |
... | ... | @@ -620,39 +715,23 @@ |
620 | 715 | struct nfqnl_instance *queue; |
621 | 716 | unsigned int verdict; |
622 | 717 | struct nf_queue_entry *entry; |
623 | - int err; | |
624 | 718 | |
625 | - rcu_read_lock(); | |
626 | 719 | queue = instance_lookup(queue_num); |
627 | - if (!queue) { | |
628 | - err = -ENODEV; | |
629 | - goto err_out_unlock; | |
630 | - } | |
720 | + if (!queue) | |
631 | 721 | |
632 | - if (queue->peer_pid != NETLINK_CB(skb).pid) { | |
633 | - err = -EPERM; | |
634 | - goto err_out_unlock; | |
635 | - } | |
722 | + queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid); | |
723 | + if (IS_ERR(queue)) | |
724 | + return PTR_ERR(queue); | |
636 | 725 | |
637 | - if (!nfqa[NFQA_VERDICT_HDR]) { | |
638 | - err = -EINVAL; | |
639 | - goto err_out_unlock; | |
640 | - } | |
726 | + vhdr = verdicthdr_get(nfqa); | |
727 | + if (!vhdr) | |
728 | + return -EINVAL; | |
641 | 729 | |
642 | - vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); | |
643 | 730 | verdict = ntohl(vhdr->verdict); |
644 | 731 | |
645 | - if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) { | |
646 | - err = -EINVAL; | |
647 | - goto err_out_unlock; | |
648 | - } | |
649 | - | |
650 | 732 | entry = find_dequeue_entry(queue, ntohl(vhdr->id)); |
651 | - if (entry == NULL) { | |
652 | - err = -ENOENT; | |
653 | - goto err_out_unlock; | |
654 | - } | |
655 | - rcu_read_unlock(); | |
733 | + if (entry == NULL) | |
734 | + return -ENOENT; | |
656 | 735 | |
657 | 736 | if (nfqa[NFQA_PAYLOAD]) { |
658 | 737 | if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), |
... | ... | @@ -665,10 +744,6 @@ |
665 | 744 | |
666 | 745 | nf_reinject(entry, verdict); |
667 | 746 | return 0; |
668 | - | |
669 | -err_out_unlock: | |
670 | - rcu_read_unlock(); | |
671 | - return err; | |
672 | 747 | } |
673 | 748 | |
674 | 749 | static int |
675 | 750 | |
676 | 751 | |
... | ... | @@ -781,14 +856,17 @@ |
781 | 856 | } |
782 | 857 | |
783 | 858 | static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { |
784 | - [NFQNL_MSG_PACKET] = { .call = nfqnl_recv_unsupp, | |
859 | + [NFQNL_MSG_PACKET] = { .call_rcu = nfqnl_recv_unsupp, | |
785 | 860 | .attr_count = NFQA_MAX, }, |
786 | - [NFQNL_MSG_VERDICT] = { .call = nfqnl_recv_verdict, | |
861 | + [NFQNL_MSG_VERDICT] = { .call_rcu = nfqnl_recv_verdict, | |
787 | 862 | .attr_count = NFQA_MAX, |
788 | 863 | .policy = nfqa_verdict_policy }, |
789 | 864 | [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, |
790 | 865 | .attr_count = NFQA_CFG_MAX, |
791 | 866 | .policy = nfqa_cfg_policy }, |
867 | + [NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch, | |
868 | + .attr_count = NFQA_MAX, | |
869 | + .policy = nfqa_verdict_batch_policy }, | |
792 | 870 | }; |
793 | 871 | |
794 | 872 | static const struct nfnetlink_subsystem nfqnl_subsys = { |
... | ... | @@ -870,7 +948,7 @@ |
870 | 948 | inst->peer_pid, inst->queue_total, |
871 | 949 | inst->copy_mode, inst->copy_range, |
872 | 950 | inst->queue_dropped, inst->queue_user_dropped, |
873 | - atomic_read(&inst->id_sequence), 1); | |
951 | + inst->id_sequence, 1); | |
874 | 952 | } |
875 | 953 | |
876 | 954 | static const struct seq_operations nfqnl_seq_ops = { |
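
With id_sequence now a plain u32 assigned under the queue spinlock, the batch verdict path applies the verdict to every queued entry whose id is not after the maximum id named by userspace, and nfq_id_after() keeps that comparison correct across wraparound of the 32-bit sequence. A worked example of the test, with values chosen only to illustrate the arithmetic:

/* nfq_id_after(id, max) is (int)(id - max) > 0, evaluated in u32 arithmetic:
 *
 *   id = 0xfffffffd, max = 0xfffffffe:
 *       0xfffffffd - 0xfffffffe = 0xffffffff, (int)-1 > 0 is false
 *       -> id is not after max, the entry joins the batch
 *
 *   id = 0x00000002, max = 0xfffffffe (sequence has wrapped):
 *       2 - 0xfffffffe = 4, (int)4 > 0 is true
 *       -> the entry is newer than the batch and stays on the queue
 */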