Commit 5ffaf8554163d9f3873988ce2f9977f6c6f408d2
Committed by
Trond Myklebust
1 parent
cb9c1c4a88
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
NFS: replace global bl_wq with per-net one
This queue is used for sleeping in the kernel, and it has to be per-net, since we do not want to wake any waiters outside our own network namespace. Moving the wait queue into per-net data is easy, but some way of handling upcall timeouts has to be provided: when a message is destroyed because of a timeout, the tasks waiting for that message to be delivered should be awakened, so some data is required to locate the right wait queue. The chosen solution replaces the rpc_pipe_msg object with a newly introduced bl_pipe_msg object, containing the rpc_pipe_msg and the proper wait queue. Signed-off-by: Stanislav Kinsbursky <skinsbursky@parallels.com> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Showing 5 changed files with 39 additions and 31 deletions Side-by-side Diff
fs/nfs/blocklayout/blocklayout.c
... | ... | @@ -46,8 +46,6 @@ |
46 | 46 | MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>"); |
47 | 47 | MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); |
48 | 48 | |
49 | -wait_queue_head_t bl_wq; | |
50 | - | |
51 | 49 | static void print_page(struct page *page) |
52 | 50 | { |
53 | 51 | dprintk("PRINTPAGE page %p\n", page); |
... | ... | @@ -1117,6 +1115,7 @@ |
1117 | 1115 | struct nfs_net *nn = net_generic(net, nfs_net_id); |
1118 | 1116 | struct dentry *dentry; |
1119 | 1117 | |
1118 | + init_waitqueue_head(&nn->bl_wq); | |
1120 | 1119 | nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); |
1121 | 1120 | if (IS_ERR(nn->bl_device_pipe)) |
1122 | 1121 | return PTR_ERR(nn->bl_device_pipe); |
... | ... | @@ -1153,7 +1152,6 @@ |
1153 | 1152 | if (ret) |
1154 | 1153 | goto out; |
1155 | 1154 | |
1156 | - init_waitqueue_head(&bl_wq); | |
1157 | 1155 | ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block); |
1158 | 1156 | if (ret) |
1159 | 1157 | goto out_remove; |
fs/nfs/blocklayout/blocklayout.h
... | ... | @@ -153,12 +153,15 @@ |
153 | 153 | return BLK_LO2EXT(lseg->pls_layout); |
154 | 154 | } |
155 | 155 | |
156 | +struct bl_pipe_msg { | |
157 | + struct rpc_pipe_msg msg; | |
158 | + wait_queue_head_t *bl_wq; | |
159 | +}; | |
160 | + | |
156 | 161 | struct bl_msg_hdr { |
157 | 162 | u8 type; |
158 | 163 | u16 totallen; /* length of entire message, including hdr itself */ |
159 | 164 | }; |
160 | - | |
161 | -extern wait_queue_head_t bl_wq; | |
162 | 165 | |
163 | 166 | #define BL_DEVICE_UMOUNT 0x0 /* Umount--delete devices */ |
164 | 167 | #define BL_DEVICE_MOUNT 0x1 /* Mount--create devices*/ |
fs/nfs/blocklayout/blocklayoutdev.c
... | ... | @@ -91,16 +91,18 @@ |
91 | 91 | if (copy_from_user(&nn->bl_mount_reply, src, mlen) != 0) |
92 | 92 | return -EFAULT; |
93 | 93 | |
94 | - wake_up(&bl_wq); | |
94 | + wake_up(&nn->bl_wq); | |
95 | 95 | |
96 | 96 | return mlen; |
97 | 97 | } |
98 | 98 | |
99 | 99 | void bl_pipe_destroy_msg(struct rpc_pipe_msg *msg) |
100 | 100 | { |
101 | + struct bl_pipe_msg *bl_pipe_msg = container_of(msg, struct bl_pipe_msg, msg); | |
102 | + | |
101 | 103 | if (msg->errno >= 0) |
102 | 104 | return; |
103 | - wake_up(&bl_wq); | |
105 | + wake_up(bl_pipe_msg->bl_wq); | |
104 | 106 | } |
105 | 107 | |
106 | 108 | /* |
... | ... | @@ -112,7 +114,8 @@ |
112 | 114 | { |
113 | 115 | struct pnfs_block_dev *rv; |
114 | 116 | struct block_device *bd = NULL; |
115 | - struct rpc_pipe_msg msg; | |
117 | + struct bl_pipe_msg bl_pipe_msg; | |
118 | + struct rpc_pipe_msg *msg = &bl_pipe_msg.msg; | |
116 | 119 | struct bl_msg_hdr bl_msg = { |
117 | 120 | .type = BL_DEVICE_MOUNT, |
118 | 121 | .totallen = dev->mincount, |
119 | 122 | |
... | ... | @@ -128,15 +131,16 @@ |
128 | 131 | dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data, |
129 | 132 | dev->mincount); |
130 | 133 | |
131 | - memset(&msg, 0, sizeof(msg)); | |
132 | - msg.data = kzalloc(sizeof(bl_msg) + dev->mincount, GFP_NOFS); | |
133 | - if (!msg.data) { | |
134 | + bl_pipe_msg.bl_wq = &nn->bl_wq; | |
135 | + memset(msg, 0, sizeof(*msg)); | |
136 | + msg->data = kzalloc(sizeof(bl_msg) + dev->mincount, GFP_NOFS); | |
137 | + if (!msg->data) { | |
134 | 138 | rv = ERR_PTR(-ENOMEM); |
135 | 139 | goto out; |
136 | 140 | } |
137 | 141 | |
138 | - memcpy(msg.data, &bl_msg, sizeof(bl_msg)); | |
139 | - dataptr = (uint8_t *) msg.data; | |
142 | + memcpy(msg->data, &bl_msg, sizeof(bl_msg)); | |
143 | + dataptr = (uint8_t *) msg->data; | |
140 | 144 | len = dev->mincount; |
141 | 145 | offset = sizeof(bl_msg); |
142 | 146 | for (i = 0; len > 0; i++) { |
143 | 147 | |
144 | 148 | |
... | ... | @@ -145,13 +149,13 @@ |
145 | 149 | len -= PAGE_CACHE_SIZE; |
146 | 150 | offset += PAGE_CACHE_SIZE; |
147 | 151 | } |
148 | - msg.len = sizeof(bl_msg) + dev->mincount; | |
152 | + msg->len = sizeof(bl_msg) + dev->mincount; | |
149 | 153 | |
150 | 154 | dprintk("%s CALLING USERSPACE DAEMON\n", __func__); |
151 | - add_wait_queue(&bl_wq, &wq); | |
152 | - rc = rpc_queue_upcall(nn->bl_device_pipe, &msg); | |
155 | + add_wait_queue(&nn->bl_wq, &wq); | |
156 | + rc = rpc_queue_upcall(nn->bl_device_pipe, msg); | |
153 | 157 | if (rc < 0) { |
154 | - remove_wait_queue(&bl_wq, &wq); | |
158 | + remove_wait_queue(&nn->bl_wq, &wq); | |
155 | 159 | rv = ERR_PTR(rc); |
156 | 160 | goto out; |
157 | 161 | } |
... | ... | @@ -159,7 +163,7 @@ |
159 | 163 | set_current_state(TASK_UNINTERRUPTIBLE); |
160 | 164 | schedule(); |
161 | 165 | __set_current_state(TASK_RUNNING); |
162 | - remove_wait_queue(&bl_wq, &wq); | |
166 | + remove_wait_queue(&nn->bl_wq, &wq); | |
163 | 167 | |
164 | 168 | if (reply->status != BL_DEVICE_REQUEST_PROC) { |
165 | 169 | dprintk("%s failed to open device: %d\n", |
... | ... | @@ -191,7 +195,7 @@ |
191 | 195 | bd->bd_block_size); |
192 | 196 | |
193 | 197 | out: |
194 | - kfree(msg.data); | |
198 | + kfree(msg->data); | |
195 | 199 | return rv; |
196 | 200 | } |
197 | 201 |
fs/nfs/blocklayout/blocklayoutdm.c
... | ... | @@ -40,7 +40,8 @@ |
40 | 40 | |
41 | 41 | static void dev_remove(struct net *net, dev_t dev) |
42 | 42 | { |
43 | - struct rpc_pipe_msg msg; | |
43 | + struct bl_pipe_msg bl_pipe_msg; | |
44 | + struct rpc_pipe_msg *msg = &bl_pipe_msg.msg; | |
44 | 45 | struct bl_dev_msg bl_umount_request; |
45 | 46 | struct bl_msg_hdr bl_msg = { |
46 | 47 | .type = BL_DEVICE_UMOUNT, |
47 | 48 | |
48 | 49 | |
49 | 50 | |
50 | 51 | |
51 | 52 | |
... | ... | @@ -52,33 +53,34 @@ |
52 | 53 | |
53 | 54 | dprintk("Entering %s\n", __func__); |
54 | 55 | |
55 | - memset(&msg, 0, sizeof(msg)); | |
56 | - msg.data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); | |
57 | - if (!msg.data) | |
56 | + bl_pipe_msg.bl_wq = &nn->bl_wq; | |
57 | + memset(msg, 0, sizeof(*msg)); | 
58 | + msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); | |
59 | + if (!msg->data) | |
58 | 60 | goto out; |
59 | 61 | |
60 | 62 | memset(&bl_umount_request, 0, sizeof(bl_umount_request)); |
61 | 63 | bl_umount_request.major = MAJOR(dev); |
62 | 64 | bl_umount_request.minor = MINOR(dev); |
63 | 65 | |
64 | - memcpy(msg.data, &bl_msg, sizeof(bl_msg)); | |
65 | - dataptr = (uint8_t *) msg.data; | |
66 | + memcpy(msg->data, &bl_msg, sizeof(bl_msg)); | |
67 | + dataptr = (uint8_t *) msg->data; | |
66 | 68 | memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request)); |
67 | - msg.len = sizeof(bl_msg) + bl_msg.totallen; | |
69 | + msg->len = sizeof(bl_msg) + bl_msg.totallen; | |
68 | 70 | |
69 | - add_wait_queue(&bl_wq, &wq); | |
70 | - if (rpc_queue_upcall(nn->bl_device_pipe, &msg) < 0) { | |
71 | - remove_wait_queue(&bl_wq, &wq); | |
71 | + add_wait_queue(&nn->bl_wq, &wq); | |
72 | + if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) { | |
73 | + remove_wait_queue(&nn->bl_wq, &wq); | |
72 | 74 | goto out; |
73 | 75 | } |
74 | 76 | |
75 | 77 | set_current_state(TASK_UNINTERRUPTIBLE); |
76 | 78 | schedule(); |
77 | 79 | __set_current_state(TASK_RUNNING); |
78 | - remove_wait_queue(&bl_wq, &wq); | |
80 | + remove_wait_queue(&nn->bl_wq, &wq); | |
79 | 81 | |
80 | 82 | out: |
81 | - kfree(msg.data); | |
83 | + kfree(msg->data); | |
82 | 84 | } |
83 | 85 | |
84 | 86 | /* |
fs/nfs/netns.h