Commit 9c6cd3b39048c8bbb83c5cd936f4dffc847321c6
Committed by: Greg Kroah-Hartman
Parent: 45acea5733
Exists in: smarc-imx_3.14.28_1.0.0_ga and 1 other branch
android/sync: use get_unused_fd_flags(O_CLOEXEC) instead of get_unused_fd()
The get_unused_fd() macro allocates a file descriptor with default flags. Those default flags (0) can be "unsafe": O_CLOEXEC should be the default so that file descriptors are not leaked across exec().

Instead of the get_unused_fd() macro, the functions anon_inode_getfd() or get_unused_fd_flags() should be used with flags supplied by userspace. If that is not possible, the flags should be set to O_CLOEXEC to give userspace a safe default behavior.

In a further patch, get_unused_fd() will be removed so that new code starts using anon_inode_getfd() or get_unused_fd_flags() with correct flags.

This patch replaces the call to get_unused_fd() with a call to get_unused_fd_flags(O_CLOEXEC), following advice from Erik Gilling.

Signed-off-by: Yann Droneaud <ydroneaud@opteya.com>
Cc: Erik Gilling <konkers@android.com>
Cc: Colin Cross <ccross@google.com>
Link: http://lkml.kernel.org/r/CACSP8SjXGMk2_kX_+RgzqqQwqKernvF1Wt3K5tw991W5dfAnCA@mail.gmail.com
Link: http://lkml.kernel.org/r/cover.1376327678.git.ydroneaud@opteya.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
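Why this matters: a descriptor created without O_CLOEXEC survives exec(), so any program the process subsequently executes inherits it. The following minimal userspace sketch is not part of this patch; /dev/null and ls are placeholders chosen only to make the difference visible.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int leaky = open("/dev/null", O_RDONLY);             /* inherited across exec() */
	int safe  = open("/dev/null", O_RDONLY | O_CLOEXEC); /* closed by the kernel on exec() */

	printf("before exec: leaky=%d safe=%d\n", leaky, safe);

	/* After execve(), the new program still holds 'leaky';
	 * 'safe' has already been closed. */
	execlp("ls", "ls", "-l", "/proc/self/fd", (char *)NULL);
	perror("execlp");	/* reached only if exec fails */
	return 1;
}

Listing /proc/self/fd from the exec'd program shows the O_CLOEXEC-less descriptor still open while the O_CLOEXEC one is gone; the same reasoning applies to the sync fence fd allocated in the diff below.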
Showing 1 changed file with 1 addition and 1 deletion. Inline diff:
drivers/staging/android/sync.c
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);

	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more that one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt was activated before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						== -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	struct list_head *pos;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_pt_activate(pt);
	}

	/*
	 * signal the fence in case one of it's pts were activated before
	 * they were activated
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		if (timeout > 0) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before droping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
-	int fd = get_unused_fd();
+	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;
	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];
		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_puts(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_puts(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];
		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_puts(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_puts(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_puts(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_puts(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif
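For comparison with the anon_inode_getfd() route named in the commit message, here is a hedged sketch, illustrative only and not how sync.c is structured: anon_inode_getfd() creates the backing struct file, reserves a descriptor with the given flags, and installs it in a single call, so the fd is visible to userspace as soon as it returns. sync_fence_ioctl_merge() keeps the two-step get_unused_fd_flags()/sync_fence_install() split precisely because it must be able to back out with put_unused_fd() if copy_to_user() fails before the descriptor is published.

	/* Illustrative sketch only: one-shot allocation via anon_inode_getfd().
	 * Unlike the reserve-then-install pattern above, the fd is live as
	 * soon as this returns, so there is nothing left to roll back. */
	int fd = anon_inode_getfd("sync_fence", &sync_fence_fops, fence,
				  O_CLOEXEC);
	if (fd < 0)
		return fd;	/* no descriptor was made visible to userspace */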