Commit 45e27161c62216c163880d7aed751cb55a65c8e9
Committed by: Konrad Rzeszutek Wilk
1 parent: 884ac2978a
Exists in: smarc-l5.0.0_1.0.0-ga and in 5 other branches
xenbus: fix compile failure on ARM with Xen enabled
Adding an include of linux/mm.h resolves this:

drivers/xen/xenbus/xenbus_client.c: In function ‘xenbus_map_ring_valloc_hvm’:
drivers/xen/xenbus/xenbus_client.c:532:66: error: implicit declaration of function ‘page_to_section’ [-Werror=implicit-function-declaration]

CC: stable@vger.kernel.org
Signed-off-by: Steven Noonan <steven@uplinklabs.net>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
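For context: the failing statement is "addr = pfn_to_kaddr(page_to_pfn(node->page));" in xenbus_map_ring_valloc_hvm(). On configurations such as ARM with CONFIG_SPARSEMEM (non-vmemmap), page_to_pfn() expands through a macro chain that calls page_to_section(), and page_to_section() lives in linux/mm.h, so the file must include that header directly rather than rely on it arriving indirectly. The sketch below paraphrases that chain from include/asm-generic/memory_model.h; the exact form varies by kernel version and configuration:

/* Paraphrased sketch of the SPARSEMEM (non-vmemmap) memory model's
 * page_to_pfn() expansion; details differ across kernel versions. */
#define __page_to_pfn(pg)					\
({	const struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);  /* declared in <linux/mm.h> */ \
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})

/* Without an explicit #include <linux/mm.h>, nothing else pulled that header
 * in on ARM, so page_to_section() was an implicit declaration, which
 * -Werror=implicit-function-declaration promotes to the error quoted above. */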
Showing 1 changed file with 1 addition and 0 deletions

Inline Diff
drivers/xen/xenbus/xenbus_client.c
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -1,749 +1,750 @@
 /******************************************************************************
  * Client-facing interface for the Xenbus driver. In other words, the
  * interface between the Xenbus and the device-specific code, be it the
  * frontend or the backend of that driver.
  *
  * Copyright (C) 2005 XenSource Ltd
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
  * as published by the Free Software Foundation; or, when distributed
  * separately from the Linux kernel or incorporated into other
  * software packages, subject to the following license:
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this source file (the "Software"), to deal in the Software without
  * restriction, including without limitation the rights to use, copy, modify,
  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  * and to permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
  * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  * IN THE SOFTWARE.
  */

+#include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
 #include <xen/balloon.h>
 #include <xen/events.h>
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
 #include <xen/xen.h>

 #include "xenbus_probe.h"

 struct xenbus_map_node {
         struct list_head next;
         union {
                 struct vm_struct *area; /* PV */
                 struct page *page;      /* HVM */
         };
         grant_handle_t handle;
 };

 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);

 struct xenbus_ring_ops {
         int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
         int (*unmap)(struct xenbus_device *dev, void *vaddr);
 };

 static const struct xenbus_ring_ops *ring_ops __read_mostly;

 const char *xenbus_strstate(enum xenbus_state state)
 {
         static const char *const name[] = {
                 [ XenbusStateUnknown ] = "Unknown",
                 [ XenbusStateInitialising ] = "Initialising",
                 [ XenbusStateInitWait ] = "InitWait",
                 [ XenbusStateInitialised ] = "Initialised",
                 [ XenbusStateConnected ] = "Connected",
                 [ XenbusStateClosing ] = "Closing",
                 [ XenbusStateClosed ] = "Closed",
                 [XenbusStateReconfiguring] = "Reconfiguring",
                 [XenbusStateReconfigured] = "Reconfigured",
         };
         return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
 }
 EXPORT_SYMBOL_GPL(xenbus_strstate);

 /**
  * xenbus_watch_path - register a watch
  * @dev: xenbus device
  * @path: path to watch
  * @watch: watch to register
  * @callback: callback to register
  *
  * Register a @watch on the given path, using the given xenbus_watch structure
  * for storage, and the given @callback function as the callback. Return 0 on
  * success, or -errno on error. On success, the given @path will be saved as
  * @watch->node, and remains the caller's to free. On error, @watch->node will
  * be NULL, the device will switch to %XenbusStateClosing, and the error will
  * be saved in the store.
  */
 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
                       struct xenbus_watch *watch,
                       void (*callback)(struct xenbus_watch *,
                                        const char **, unsigned int))
 {
         int err;

         watch->node = path;
         watch->callback = callback;

         err = register_xenbus_watch(watch);

         if (err) {
                 watch->node = NULL;
                 watch->callback = NULL;
                 xenbus_dev_fatal(dev, err, "adding watch on %s", path);
         }

         return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_watch_path);


 /**
  * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
  * @dev: xenbus device
  * @watch: watch to register
  * @callback: callback to register
  * @pathfmt: format of path to watch
  *
  * Register a watch on the given @path, using the given xenbus_watch
  * structure for storage, and the given @callback function as the callback.
  * Return 0 on success, or -errno on error. On success, the watched path
  * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
  * kfree(). On error, watch->node will be NULL, so the caller has nothing to
  * free, the device will switch to %XenbusStateClosing, and the error will be
  * saved in the store.
  */
 int xenbus_watch_pathfmt(struct xenbus_device *dev,
                          struct xenbus_watch *watch,
                          void (*callback)(struct xenbus_watch *,
                                           const char **, unsigned int),
                          const char *pathfmt, ...)
 {
         int err;
         va_list ap;
         char *path;

         va_start(ap, pathfmt);
         path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
         va_end(ap);

         if (!path) {
                 xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
                 return -ENOMEM;
         }
         err = xenbus_watch_path(dev, path, watch, callback);

         if (err)
                 kfree(path);
         return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

 static void xenbus_switch_fatal(struct xenbus_device *, int, int,
                                 const char *, ...);

 static int
 __xenbus_switch_state(struct xenbus_device *dev,
                       enum xenbus_state state, int depth)
 {
         /* We check whether the state is currently set to the given value, and
            if not, then the state is set.  We don't want to unconditionally
            write the given state, because we don't want to fire watches
            unnecessarily.  Furthermore, if the node has gone, we don't write
            to it, as the device will be tearing down, and we don't want to
            resurrect that directory.

            Note that, because of this cached value of our state, this
            function will not take a caller's Xenstore transaction
            (something it was trying to in the past) because dev->state
            would not get reset if the transaction was aborted.
          */

         struct xenbus_transaction xbt;
         int current_state;
         int err, abort;

         if (state == dev->state)
                 return 0;

 again:
         abort = 1;

         err = xenbus_transaction_start(&xbt);
         if (err) {
                 xenbus_switch_fatal(dev, depth, err, "starting transaction");
                 return 0;
         }

         err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
         if (err != 1)
                 goto abort;

         err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
         if (err) {
                 xenbus_switch_fatal(dev, depth, err, "writing new state");
                 goto abort;
         }

         abort = 0;
 abort:
         err = xenbus_transaction_end(xbt, abort);
         if (err) {
                 if (err == -EAGAIN && !abort)
                         goto again;
                 xenbus_switch_fatal(dev, depth, err, "ending transaction");
         } else
                 dev->state = state;

         return 0;
 }

 /**
  * xenbus_switch_state
  * @dev: xenbus device
  * @state: new state
  *
  * Advertise in the store a change of the given driver to the given new_state.
  * Return 0 on success, or -errno on error. On error, the device will switch
  * to XenbusStateClosing, and the error will be saved in the store.
  */
 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
 {
         return __xenbus_switch_state(dev, state, 0);
 }

 EXPORT_SYMBOL_GPL(xenbus_switch_state);

 int xenbus_frontend_closed(struct xenbus_device *dev)
 {
         xenbus_switch_state(dev, XenbusStateClosed);
         complete(&dev->down);
         return 0;
 }
 EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

 /**
  * Return the path to the error node for the given device, or NULL on failure.
  * If the value returned is non-NULL, then it is the caller's to kfree.
  */
 static char *error_path(struct xenbus_device *dev)
 {
         return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
 }


 static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
                                 const char *fmt, va_list ap)
 {
         int ret;
         unsigned int len;
         char *printf_buffer = NULL;
         char *path_buffer = NULL;

 #define PRINTF_BUFFER_SIZE 4096
         printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
         if (printf_buffer == NULL)
                 goto fail;

         len = sprintf(printf_buffer, "%i ", -err);
         ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

         BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

         dev_err(&dev->dev, "%s\n", printf_buffer);

         path_buffer = error_path(dev);

         if (path_buffer == NULL) {
                 dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
                         dev->nodename, printf_buffer);
                 goto fail;
         }

         if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
                 dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
                         dev->nodename, printf_buffer);
                 goto fail;
         }

 fail:
         kfree(printf_buffer);
         kfree(path_buffer);
 }


 /**
  * xenbus_dev_error
  * @dev: xenbus device
  * @err: error to report
  * @fmt: error message format
  *
  * Report the given negative errno into the store, along with the given
  * formatted message.
  */
 void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
 {
         va_list ap;

         va_start(ap, fmt);
         xenbus_va_dev_error(dev, err, fmt, ap);
         va_end(ap);
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_error);

 /**
  * xenbus_dev_fatal
  * @dev: xenbus device
  * @err: error to report
  * @fmt: error message format
  *
  * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
  * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
  * closedown of this driver and its peer.
  */

 void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
 {
         va_list ap;

         va_start(ap, fmt);
         xenbus_va_dev_error(dev, err, fmt, ap);
         va_end(ap);

         xenbus_switch_state(dev, XenbusStateClosing);
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

 /**
  * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
  * avoiding recursion within xenbus_switch_state.
  */
 static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
                                 const char *fmt, ...)
 {
         va_list ap;

         va_start(ap, fmt);
         xenbus_va_dev_error(dev, err, fmt, ap);
         va_end(ap);

         if (!depth)
                 __xenbus_switch_state(dev, XenbusStateClosing, 1);
 }

 /**
  * xenbus_grant_ring
  * @dev: xenbus device
  * @ring_mfn: mfn of ring to grant

  * Grant access to the given @ring_mfn to the peer of the given device. Return
  * 0 on success, or -errno on error. On error, the device will switch to
  * XenbusStateClosing, and the error will be saved in the store.
  */
 int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
 {
         int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
         if (err < 0)
                 xenbus_dev_fatal(dev, err, "granting access to ring page");
         return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_grant_ring);


 /**
  * Allocate an event channel for the given xenbus_device, assigning the newly
  * created local port to *port. Return 0 on success, or -errno on error. On
  * error, the device will switch to XenbusStateClosing, and the error will be
  * saved in the store.
  */
 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
 {
         struct evtchn_alloc_unbound alloc_unbound;
         int err;

         alloc_unbound.dom = DOMID_SELF;
         alloc_unbound.remote_dom = dev->otherend_id;

         err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                           &alloc_unbound);
         if (err)
                 xenbus_dev_fatal(dev, err, "allocating event channel");
         else
                 *port = alloc_unbound.port;

         return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);


 /**
  * Bind to an existing interdomain event channel in another domain. Returns 0
  * on success and stores the local port in *port. On error, returns -errno,
  * switches the device to XenbusStateClosing, and saves the error in XenStore.
  */
 int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
 {
         struct evtchn_bind_interdomain bind_interdomain;
         int err;

         bind_interdomain.remote_dom = dev->otherend_id;
         bind_interdomain.remote_port = remote_port;

         err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                           &bind_interdomain);
         if (err)
                 xenbus_dev_fatal(dev, err,
                                  "binding to event channel %d from domain %d",
                                  remote_port, dev->otherend_id);
         else
                 *port = bind_interdomain.local_port;

         return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);


 /**
  * Free an existing event channel. Returns 0 on success or -errno on error.
  */
 int xenbus_free_evtchn(struct xenbus_device *dev, int port)
 {
         struct evtchn_close close;
         int err;

         close.port = port;

         err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
         if (err)
                 xenbus_dev_error(dev, err, "freeing event channel %d", port);

         return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


 /**
  * xenbus_map_ring_valloc
  * @dev: xenbus device
  * @gnt_ref: grant reference
  * @vaddr: pointer to address to be filled out by mapping
  *
  * Based on Rusty Russell's skeleton driver's map_page.
  * Map a page of memory into this domain from another domain's grant table.
  * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
  * page to that address, and sets *vaddr to that address.
  * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
  * or -ENOMEM on error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
 int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 {
         return ring_ops->map(dev, gnt_ref, vaddr);
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

 static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
                                      int gnt_ref, void **vaddr)
 {
         struct gnttab_map_grant_ref op = {
                 .flags = GNTMAP_host_map | GNTMAP_contains_pte,
                 .ref = gnt_ref,
                 .dom = dev->otherend_id,
         };
         struct xenbus_map_node *node;
         struct vm_struct *area;
         pte_t *pte;

         *vaddr = NULL;

         node = kzalloc(sizeof(*node), GFP_KERNEL);
         if (!node)
                 return -ENOMEM;

         area = alloc_vm_area(PAGE_SIZE, &pte);
         if (!area) {
                 kfree(node);
                 return -ENOMEM;
         }

         op.host_addr = arbitrary_virt_to_machine(pte).maddr;

         gnttab_batch_map(&op, 1);

         if (op.status != GNTST_okay) {
                 free_vm_area(area);
                 kfree(node);
                 xenbus_dev_fatal(dev, op.status,
                                  "mapping in shared page %d from domain %d",
                                  gnt_ref, dev->otherend_id);
                 return op.status;
         }

         node->handle = op.handle;
         node->area = area;

         spin_lock(&xenbus_valloc_lock);
         list_add(&node->next, &xenbus_valloc_pages);
         spin_unlock(&xenbus_valloc_lock);

         *vaddr = area->addr;
         return 0;
 }

 static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
                                       int gnt_ref, void **vaddr)
 {
         struct xenbus_map_node *node;
         int err;
         void *addr;

         *vaddr = NULL;

         node = kzalloc(sizeof(*node), GFP_KERNEL);
         if (!node)
                 return -ENOMEM;

         err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
         if (err)
                 goto out_err;

         addr = pfn_to_kaddr(page_to_pfn(node->page));

         err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
         if (err)
                 goto out_err;

         spin_lock(&xenbus_valloc_lock);
         list_add(&node->next, &xenbus_valloc_pages);
         spin_unlock(&xenbus_valloc_lock);

         *vaddr = addr;
         return 0;

 out_err:
         free_xenballooned_pages(1, &node->page);
         kfree(node);
         return err;
 }


 /**
  * xenbus_map_ring
  * @dev: xenbus device
  * @gnt_ref: grant reference
  * @handle: pointer to grant handle to be filled
  * @vaddr: address to be mapped to
  *
  * Map a page of memory into this domain from another domain's grant table.
  * xenbus_map_ring does not allocate the virtual address space (you must do
  * this yourself!). It only maps in the page to the specified address.
  * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
  * or -ENOMEM on error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
 int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
                     grant_handle_t *handle, void *vaddr)
 {
         struct gnttab_map_grant_ref op;

         gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
                           dev->otherend_id);

         gnttab_batch_map(&op, 1);

         if (op.status != GNTST_okay) {
                 xenbus_dev_fatal(dev, op.status,
                                  "mapping in shared page %d from domain %d",
                                  gnt_ref, dev->otherend_id);
         } else
                 *handle = op.handle;

         return op.status;
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring);


 /**
  * xenbus_unmap_ring_vfree
  * @dev: xenbus device
  * @vaddr: addr to unmap
  *
  * Based on Rusty Russell's skeleton driver's unmap_page.
  * Unmap a page of memory in this domain that was imported from another domain.
  * Use xenbus_unmap_ring_vfree if you mapped in your memory with
  * xenbus_map_ring_valloc (it will free the virtual address space).
  * Returns 0 on success and returns GNTST_* on error
  * (see xen/include/interface/grant_table.h).
  */
 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 {
         return ring_ops->unmap(dev, vaddr);
 }
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 {
         struct xenbus_map_node *node;
         struct gnttab_unmap_grant_ref op = {
                 .host_addr = (unsigned long)vaddr,
         };
         unsigned int level;

         spin_lock(&xenbus_valloc_lock);
         list_for_each_entry(node, &xenbus_valloc_pages, next) {
                 if (node->area->addr == vaddr) {
                         list_del(&node->next);
                         goto found;
                 }
         }
         node = NULL;
 found:
         spin_unlock(&xenbus_valloc_lock);

         if (!node) {
                 xenbus_dev_error(dev, -ENOENT,
                                  "can't find mapped virtual address %p", vaddr);
                 return GNTST_bad_virt_addr;
         }

         op.handle = node->handle;
         op.host_addr = arbitrary_virt_to_machine(
                 lookup_address((unsigned long)vaddr, &level)).maddr;

         if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                 BUG();

         if (op.status == GNTST_okay)
                 free_vm_area(node->area);
         else
                 xenbus_dev_error(dev, op.status,
                                  "unmapping page at handle %d error %d",
                                  node->handle, op.status);

         kfree(node);
         return op.status;
 }

 static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
 {
         int rv;
         struct xenbus_map_node *node;
         void *addr;

         spin_lock(&xenbus_valloc_lock);
         list_for_each_entry(node, &xenbus_valloc_pages, next) {
                 addr = pfn_to_kaddr(page_to_pfn(node->page));
                 if (addr == vaddr) {
                         list_del(&node->next);
                         goto found;
                 }
         }
         node = addr = NULL;
 found:
         spin_unlock(&xenbus_valloc_lock);

         if (!node) {
                 xenbus_dev_error(dev, -ENOENT,
                                  "can't find mapped virtual address %p", vaddr);
                 return GNTST_bad_virt_addr;
         }

         rv = xenbus_unmap_ring(dev, node->handle, addr);

         if (!rv)
                 free_xenballooned_pages(1, &node->page);
         else
                 WARN(1, "Leaking %p\n", vaddr);

         kfree(node);
         return rv;
 }

 /**
  * xenbus_unmap_ring
  * @dev: xenbus device
  * @handle: grant handle
  * @vaddr: addr to unmap
  *
  * Unmap a page of memory in this domain that was imported from another domain.
  * Returns 0 on success and returns GNTST_* on error
  * (see xen/include/interface/grant_table.h).
  */
 int xenbus_unmap_ring(struct xenbus_device *dev,
                       grant_handle_t handle, void *vaddr)
 {
         struct gnttab_unmap_grant_ref op;

         gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);

         if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                 BUG();

         if (op.status != GNTST_okay)
                 xenbus_dev_error(dev, op.status,
                                  "unmapping page at handle %d error %d",
                                  handle, op.status);

         return op.status;
 }
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring);


 /**
  * xenbus_read_driver_state
  * @path: path for driver
  *
  * Return the state of the driver rooted at the given store path, or
  * XenbusStateUnknown if no state can be read.
  */
 enum xenbus_state xenbus_read_driver_state(const char *path)
 {
         enum xenbus_state result;
         int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
         if (err)
                 result = XenbusStateUnknown;

         return result;
 }
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);

 static const struct xenbus_ring_ops ring_ops_pv = {
         .map = xenbus_map_ring_valloc_pv,
         .unmap = xenbus_unmap_ring_vfree_pv,
 };

 static const struct xenbus_ring_ops ring_ops_hvm = {
         .map = xenbus_map_ring_valloc_hvm,
         .unmap = xenbus_unmap_ring_vfree_hvm,
 };

 void __init xenbus_ring_ops_init(void)
 {
         if (xen_pv_domain())
                 ring_ops = &ring_ops_pv;
         else
                 ring_ops = &ring_ops_hvm;
 }
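For readers unfamiliar with the API this file exports, the kernel-doc comments above suggest the usual pairing of the mapping helpers. The following is a minimal hypothetical frontend sketch (not part of this commit; my_frontend_connect and my_ring_ref are placeholder names):

/* Hypothetical sketch: map the backend's shared ring, use it, tear it down. */
static int my_frontend_connect(struct xenbus_device *dev, int my_ring_ref)
{
        void *ring;     /* will point at the mapped grant */
        int err;

        /* Allocates a page of address space and maps the grant into it; on
         * failure the helper switches the device to XenbusStateClosing and
         * records the error in XenStore, per the kernel-doc above. */
        err = xenbus_map_ring_valloc(dev, my_ring_ref, &ring);
        if (err)
                return err;

        /* ... use the ring ... */

        /* Unmap the grant and free the address space allocated above. */
        return xenbus_unmap_ring_vfree(dev, ring);
}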