Commit ba89966c1984513f4f2cc0a6c182266be44ddd03

Authored by Eric Dumazet
Committed by David S. Miller
1 parent 29cb9f9c55

[NET]: use __read_mostly on kmem_cache_t, DEFINE_SNMP_STAT pointers

This patch puts mostly-read-only data in the right section
(.data.read_mostly, via the __read_mostly annotation), so that CPUs can
share this data without cache-line ping-pongs.
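
As a reference, here is a minimal userspace sketch of what the annotation
does, assuming the i386-style definition of __read_mostly from this era
(architectures without a .data.read_mostly section simply define the macro
as empty); the variable names below are illustrative, not taken from the
patch:

/* read_mostly_sketch.c -- illustrative only, not kernel code */
#include <stdio.h>

/* Assumed definition: place the object in a dedicated data section so the
 * linker groups rarely-written data together, away from hot counters. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

static unsigned long hot_counter;                      /* written constantly  */
static const char *cache_name __read_mostly = "demo";  /* set once, then read */

int main(void)
{
	hot_counter++;
	printf("%s: %lu\n", cache_name, hot_counter);
	return 0;
}

Building this with gcc and listing the symbols (e.g. objdump -t) should show
cache_name placed in .data.read_mostly while hot_counter stays in .bss.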

On one of my production machines, tcp_statistics was sitting in a
heavily modified cache line, so *every* SNMP update forced a cache-line
reload.
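
To make the ping-pong concrete, here is a hypothetical sketch of the layout
problem (the 64-byte line size and all names are assumptions for
illustration, not from the patch): when a rarely-written pointer such as a
statistics pointer shares a cache line with a counter that another CPU keeps
dirtying, every reader has to pull the line back. The kernel gets the
"split" layout as a side effect of linking __read_mostly objects into their
own section, rather than by explicit alignment as sketched below.

/* false_sharing_sketch.c -- hypothetical illustration only */
#include <stdint.h>
#include <stdio.h>

struct stats { uint64_t in_pkts; };

/* Bad layout: the read-mostly pointer shares a cache line with a hot
 * counter, so remote writes to hot_counter keep invalidating the line
 * that also holds stats_ptr. */
struct shared_line {
	struct stats *stats_ptr;   /* read on every SNMP update         */
	uint64_t      hot_counter; /* written constantly by another CPU */
};

/* Better layout: each object sits on its own (assumed) 64-byte line. */
struct split_lines {
	struct stats *stats_ptr   __attribute__((aligned(64)));
	uint64_t      hot_counter __attribute__((aligned(64)));
};

int main(void)
{
	printf("shared: %zu bytes, split: %zu bytes\n",
	       sizeof(struct shared_line), sizeof(struct split_lines));
	return 0;
}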

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 29 changed files with 39 additions and 39 deletions

@@ -23,7 +23,7 @@
 #include <asm/atomic.h>
 #include "br_private.h"
 
-static kmem_cache_t *br_fdb_cache;
+static kmem_cache_t *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr);
 
@@ -42,7 +42,7 @@
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep;
+static kmem_cache_t *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
@@ -68,8 +68,8 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
-static kmem_cache_t *skbuff_head_cache;
-static kmem_cache_t *skbuff_fclone_cache;
+static kmem_cache_t *skbuff_head_cache __read_mostly;
+static kmem_cache_t *skbuff_fclone_cache __read_mostly;
 
 struct timeval __read_mostly skb_tv_base;
 
net/dccp/ccids/ccid3.c
@@ -85,7 +85,7 @@
 static struct dccp_tx_hist *ccid3_tx_hist;
 static struct dccp_rx_hist *ccid3_rx_hist;
 
-static kmem_cache_t *ccid3_loss_interval_hist_slab;
+static kmem_cache_t *ccid3_loss_interval_hist_slab __read_mostly;
 
 static inline struct ccid3_loss_interval_hist_entry *
 ccid3_loss_interval_hist_entry_new(const unsigned int __nocast prio)
@@ -39,7 +39,7 @@
 #include "ccid.h"
 #include "dccp.h"
 
-DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics);
+DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
 
 atomic_t dccp_orphan_count = ATOMIC_INIT(0);
 
net/decnet/dn_table.c
@@ -79,7 +79,7 @@
 static DEFINE_RWLOCK(dn_fib_tables_lock);
 struct dn_fib_table *dn_fib_tables[RT_TABLE_MAX + 1];
 
-static kmem_cache_t *dn_hash_kmem;
+static kmem_cache_t *dn_hash_kmem __read_mostly;
 static int dn_fib_hash_zombies;
 
 static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -113,7 +113,7 @@
 #include <linux/mroute.h>
 #endif
 
-DEFINE_SNMP_STAT(struct linux_mib, net_statistics);
+DEFINE_SNMP_STAT(struct linux_mib, net_statistics) __read_mostly;
 
 extern void ip_mc_drop_socket(struct sock *sk);
 
@@ -45,8 +45,8 @@
 
 #include "fib_lookup.h"
 
-static kmem_cache_t *fn_hash_kmem;
-static kmem_cache_t *fn_alias_kmem;
+static kmem_cache_t *fn_hash_kmem __read_mostly;
+static kmem_cache_t *fn_alias_kmem __read_mostly;
 
 struct fib_node {
 	struct hlist_node fn_hash;
@@ -166,7 +166,7 @@
 static void tnode_free(struct tnode *tn);
 static void trie_dump_seq(struct seq_file *seq, struct trie *t);
 
-static kmem_cache_t *fn_alias_kmem;
+static kmem_cache_t *fn_alias_kmem __read_mostly;
 static struct trie *trie_local = NULL, *trie_main = NULL;
 
 
@@ -114,7 +114,7 @@
 /*
  * Statistics
  */
-DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
+DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics) __read_mostly;
 
 /* An array of errno for error messages from dest unreach. */
 /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
@@ -73,7 +73,7 @@
 /* Exported for inet_getid inline function. */
 DEFINE_SPINLOCK(inet_peer_idlock);
 
-static kmem_cache_t *peer_cachep;
+static kmem_cache_t *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 static struct inet_peer peer_fake_node = {
@@ -150,7 +150,7 @@
  * SNMP management statistics
  */
 
-DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics);
+DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;
 
 /*
  * Process Router Attention IP option
@@ -103,7 +103,7 @@
    In this case data path is free of exclusive locks at all.
  */
 
-static kmem_cache_t *mrt_cachep;
+static kmem_cache_t *mrt_cachep __read_mostly;
 
 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
net/ipv4/ipvs/ip_vs_conn.c
@@ -40,7 +40,7 @@
 static struct list_head *ip_vs_conn_tab;
 
 /* SLAB cache for IPVS connections */
-static kmem_cache_t *ip_vs_conn_cachep;
+static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
 
 /* counter for current IPVS connections */
 static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
net/ipv4/netfilter/ip_conntrack_core.c
@@ -70,8 +70,8 @@
 unsigned int ip_conntrack_htable_size = 0;
 int ip_conntrack_max;
 struct list_head *ip_conntrack_hash;
-static kmem_cache_t *ip_conntrack_cachep;
-static kmem_cache_t *ip_conntrack_expect_cachep;
+static kmem_cache_t *ip_conntrack_cachep __read_mostly;
+static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
 struct ip_conntrack ip_conntrack_untracked;
 unsigned int ip_ct_log_invalid;
 static LIST_HEAD(unconfirmed);
net/ipv4/netfilter/ipt_hashlimit.c
@@ -94,7 +94,7 @@
 static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
 static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */
 static HLIST_HEAD(hashlimit_htables);
-static kmem_cache_t *hashlimit_cachep;
+static kmem_cache_t *hashlimit_cachep __read_mostly;
 
 static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
 {
@@ -269,7 +269,7 @@
 
 int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
 
-DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
+DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
 
 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
 
@@ -113,7 +113,7 @@
  * Snmp MIB for the UDP layer
  */
 
-DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
+DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
 
 struct hlist_head udp_hash[UDP_HTABLE_SIZE];
 DEFINE_RWLOCK(udp_hash_lock);
@@ -67,7 +67,7 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
-DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
+DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
 
 /*
  * The ICMP socket(s). This is the most convenient way to flow control
@@ -49,7 +49,7 @@
 
 struct rt6_statistics rt6_stats;
 
-static kmem_cache_t * fib6_node_kmem;
+static kmem_cache_t * fib6_node_kmem __read_mostly;
 
 enum fib_walk_state_t
 {
net/ipv6/ipv6_sockglue.c
@@ -55,7 +55,7 @@
 
 #include <asm/uaccess.h>
 
-DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
+DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
 
 static struct packet_type ipv6_packet_type = {
 	.type = __constant_htons(ETH_P_IPV6),
@@ -59,7 +59,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
-DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
+DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
 
 /* Grrr, addr_type already calculated by caller, but I don't want
  * to add some silly "cookie" argument to this method just for that.
net/ipv6/xfrm6_tunnel.c
@@ -79,7 +79,7 @@
 #define XFRM6_TUNNEL_SPI_MIN 1
 #define XFRM6_TUNNEL_SPI_MAX 0xffffffff
 
-static kmem_cache_t *xfrm6_tunnel_spi_kmem;
+static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
 
 #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
@@ -62,7 +62,7 @@
 /* Global data structures. */
 struct sctp_globals sctp_globals;
 struct proc_dir_entry *proc_net_sctp;
-DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
+DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly;
 
 struct idr sctp_assocs_id;
 DEFINE_SPINLOCK(sctp_assocs_id_lock);
@@ -78,8 +78,8 @@
 static struct sctp_af *sctp_af_v4_specific;
 static struct sctp_af *sctp_af_v6_specific;
 
-kmem_cache_t *sctp_chunk_cachep;
-kmem_cache_t *sctp_bucket_cachep;
+kmem_cache_t *sctp_chunk_cachep __read_mostly;
+kmem_cache_t *sctp_bucket_cachep __read_mostly;
 
 extern int sctp_snmp_proc_init(void);
 extern int sctp_snmp_proc_exit(void);
@@ -274,7 +274,7 @@
 
 #define SOCKFS_MAGIC 0x534F434B
 
-static kmem_cache_t * sock_inode_cachep;
+static kmem_cache_t * sock_inode_cachep __read_mostly;
 
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
@@ -333,7 +333,7 @@
 	return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
 }
 
-static struct vfsmount *sock_mnt;
+static struct vfsmount *sock_mnt __read_mostly;
 
 static struct file_system_type sock_fs_type = {
 	.name = "sockfs",
net/sunrpc/rpc_pipe.c
@@ -28,13 +28,13 @@
 #include <linux/workqueue.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
 
-static struct vfsmount *rpc_mount;
+static struct vfsmount *rpc_mount __read_mostly;
 static int rpc_mount_count;
 
 static struct file_system_type rpc_pipe_fs_type;
 
 
-static kmem_cache_t *rpc_inode_cachep;
+static kmem_cache_t *rpc_inode_cachep __read_mostly;
 
 #define RPC_UPCALL_TIMEOUT (30*HZ)
 
@@ -34,10 +34,10 @@
 #define RPC_BUFFER_MAXSIZE (2048)
 #define RPC_BUFFER_POOLSIZE (8)
 #define RPC_TASK_POOLSIZE (8)
-static kmem_cache_t *rpc_task_slabp;
-static kmem_cache_t *rpc_buffer_slabp;
-static mempool_t *rpc_task_mempool;
-static mempool_t *rpc_buffer_mempool;
+static kmem_cache_t *rpc_task_slabp __read_mostly;
+static kmem_cache_t *rpc_buffer_slabp __read_mostly;
+static mempool_t *rpc_task_mempool __read_mostly;
+static mempool_t *rpc_buffer_mempool __read_mostly;
 
 static void __rpc_default_timer(struct rpc_task *task);
 static void rpciod_killall(void);
net/xfrm/xfrm_input.c
@@ -12,7 +12,7 @@
 #include <net/ip.h>
 #include <net/xfrm.h>
 
-static kmem_cache_t *secpath_cachep;
+static kmem_cache_t *secpath_cachep __read_mostly;
 
 void __secpath_destroy(struct sec_path *sp)
 {
net/xfrm/xfrm_policy.c
@@ -37,7 +37,7 @@
 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
 
-static kmem_cache_t *xfrm_dst_cache;
+static kmem_cache_t *xfrm_dst_cache __read_mostly;
 
 static struct work_struct xfrm_policy_gc_work;
 static struct list_head xfrm_policy_gc_list =