Commit a08727bae727fc2ca3a6ee9506d77786b71070b3

Authored by Linus Torvalds
1 parent 2f77d10705

Make workqueue bit operations work on "atomic_long_t"

On architectures where the atomicity of the bit operations is handled by
external means (i.e. a separate spinlock to protect concurrent accesses),
just doing a direct assignment on the workqueue data field (as done by
commit 4594bf159f1962cec3b727954b7c598b07e2e737) can cause the
assignment to be lost due to lack of serialization with the bitops on
the same word.

So we need to serialize the assignment with the locks on those
architectures (notably older ARM chips, PA-RISC and sparc32).
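
To make the failure mode concrete, here is a minimal sketch of how
such a spinlock-based set_bit() can lose a concurrent plain store.
The hashed_bit_lock() helper and the exact shape are illustrative
assumptions, not any architecture's real code:

    /*
     * Sketch of a bitop on an arch with no atomic RMW: the word is
     * updated non-atomically under a hashed per-word spinlock.
     * hashed_bit_lock() is a hypothetical stand-in for the arch's
     * lock hash.
     */
    static void sketch_set_bit(int nr, volatile unsigned long *addr)
    {
            spinlock_t *lock = hashed_bit_lock(addr);
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            *addr |= 1UL << nr;             /* load, OR, store */
            spin_unlock_irqrestore(lock, flags);
    }

If another CPU does a plain "work->data = new" between that load and
store, the locked path writes the stale word back and the assignment
is silently lost, because the plain store never takes the lock.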

Rather than using an "unsigned long", let's use "atomic_long_t",
which already has a safe assignment operation (atomic_long_set()) on
such architectures.

This requires that the atomic operations use the same atomicity locks as
the bit operations do, but that is largely the case anyway.  Sparc32
will probably need fixing.
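
On such an architecture the safe assignment has to take the same
per-word lock as the bitops, so the store serializes against a
concurrent set_bit()/clear_bit(). A hedged sketch of that shape
(reusing the hypothetical hashed_bit_lock() from above, not the real
asm-generic implementation):

    static inline void sketch_atomic_long_set(atomic_long_t *v, long i)
    {
            spinlock_t *lock = hashed_bit_lock((volatile unsigned long *)v);
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            v->counter = i;         /* store ordered against the bitops */
            spin_unlock_irqrestore(lock, flags);
    }

If an architecture's atomic operations hash to a different set of
locks than its bitops, this guarantee breaks, which is presumably why
sparc32 is flagged above as needing a fix.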

Architectures (including modern ARM with LL/SC) that implement sane
atomic operations for SMP won't see any of this matter.

Cc: Russell King <rmk+lkml@arm.linux.org.uk>
Cc: David Howells <dhowells@redhat.com>
Cc: David Miller <davem@davemloft.net>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Linux Arch Maintainers <linux-arch@vger.kernel.org>
Cc: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 2 changed files with 30 additions and 18 deletions

include/linux/workqueue.h
... ... @@ -8,16 +8,21 @@
8 8 #include <linux/timer.h>
9 9 #include <linux/linkage.h>
10 10 #include <linux/bitops.h>
  11 +#include <asm/atomic.h>
11 12  
12 13 struct workqueue_struct;
13 14  
14 15 struct work_struct;
15 16 typedef void (*work_func_t)(struct work_struct *work);
16 17  
  18 +/*
  19 + * The first word is the work queue pointer and the flags rolled into
  20 + * one
  21 + */
  22 +#define work_data_bits(work) ((unsigned long *)(&(work)->data))
  23 +
17 24 struct work_struct {
18   - /* the first word is the work queue pointer and the flags rolled into
19   - * one */
20   - unsigned long management;
  25 + atomic_long_t data;
21 26 #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
22 27 #define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */
23 28 #define WORK_STRUCT_FLAG_MASK (3UL)
... ... @@ -26,6 +31,9 @@
26 31 work_func_t func;
27 32 };
28 33  
  34 +#define WORK_DATA_INIT(autorelease) \
  35 + ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL)
  36 +
29 37 struct delayed_work {
30 38 struct work_struct work;
31 39 struct timer_list timer;
32 40  
... ... @@ -36,13 +44,13 @@
36 44 };
37 45  
38 46 #define __WORK_INITIALIZER(n, f) { \
39   - .management = 0, \
  47 + .data = WORK_DATA_INIT(0), \
40 48 .entry = { &(n).entry, &(n).entry }, \
41 49 .func = (f), \
42 50 }
43 51  
44 52 #define __WORK_INITIALIZER_NAR(n, f) { \
45   - .management = (1 << WORK_STRUCT_NOAUTOREL), \
  53 + .data = WORK_DATA_INIT(1), \
46 54 .entry = { &(n).entry, &(n).entry }, \
47 55 .func = (f), \
48 56 }
49 57  
50 58  
... ... @@ -82,17 +90,21 @@
82 90  
83 91 /*
84 92 * initialize all of a work item in one go
  93 + *
  94 + * NOTE! No point in using "atomic_long_set()": using a direct
  95 + * assignment of the work data initializer allows the compiler
  96 + * to generate better code.
85 97 */
86 98 #define INIT_WORK(_work, _func) \
87 99 do { \
88   - (_work)->management = 0; \
  100 + (_work)->data = (atomic_long_t) WORK_DATA_INIT(0); \
89 101 INIT_LIST_HEAD(&(_work)->entry); \
90 102 PREPARE_WORK((_work), (_func)); \
91 103 } while (0)
92 104  
93 105 #define INIT_WORK_NAR(_work, _func) \
94 106 do { \
95   - (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \
  107 + (_work)->data = (atomic_long_t) WORK_DATA_INIT(1); \
96 108 INIT_LIST_HEAD(&(_work)->entry); \
97 109 PREPARE_WORK((_work), (_func)); \
98 110 } while (0)
... ... @@ -114,7 +126,7 @@
114 126 * @work: The work item in question
115 127 */
116 128 #define work_pending(work) \
117   - test_bit(WORK_STRUCT_PENDING, &(work)->management)
  129 + test_bit(WORK_STRUCT_PENDING, work_data_bits(work))
118 130  
119 131 /**
120 132 * delayed_work_pending - Find out whether a delayable work item is currently
... ... @@ -143,7 +155,7 @@
143 155 * This should also be used to release a delayed work item.
144 156 */
145 157 #define work_release(work) \
146   - clear_bit(WORK_STRUCT_PENDING, &(work)->management)
  158 + clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
147 159  
148 160  
149 161 extern struct workqueue_struct *__create_workqueue(const char *name,
... ... @@ -188,7 +200,7 @@
188 200  
189 201 ret = del_timer_sync(&work->timer);
190 202 if (ret)
191   - clear_bit(WORK_STRUCT_PENDING, &work->work.management);
  203 + work_release(&work->work);
192 204 return ret;
193 205 }
194 206  

kernel/workqueue.c
... ... @@ -96,13 +96,13 @@
96 96 BUG_ON(!work_pending(work));
97 97  
98 98 new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
99   - new |= work->management & WORK_STRUCT_FLAG_MASK;
100   - work->management = new;
  99 + new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
  100 + atomic_long_set(&work->data, new);
101 101 }
102 102  
103 103 static inline void *get_wq_data(struct work_struct *work)
104 104 {
105   - return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
  105 + return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
106 106 }
107 107  
108 108 static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
... ... @@ -133,7 +133,7 @@
133 133 list_del_init(&work->entry);
134 134 spin_unlock_irqrestore(&cwq->lock, flags);
135 135  
136   - if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
  136 + if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
137 137 work_release(work);
138 138 f(work);
139 139  
... ... @@ -206,7 +206,7 @@
206 206 {
207 207 int ret = 0, cpu = get_cpu();
208 208  
209   - if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
  209 + if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
210 210 if (unlikely(is_single_threaded(wq)))
211 211 cpu = singlethread_cpu;
212 212 BUG_ON(!list_empty(&work->entry));
... ... @@ -248,7 +248,7 @@
248 248 if (delay == 0)
249 249 return queue_work(wq, work);
250 250  
251   - if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
  251 + if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
252 252 BUG_ON(timer_pending(timer));
253 253 BUG_ON(!list_empty(&work->entry));
254 254  
... ... @@ -280,7 +280,7 @@
280 280 struct timer_list *timer = &dwork->timer;
281 281 struct work_struct *work = &dwork->work;
282 282  
283   - if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
  283 + if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
284 284 BUG_ON(timer_pending(timer));
285 285 BUG_ON(!list_empty(&work->entry));
286 286  
... ... @@ -321,7 +321,7 @@
321 321 spin_unlock_irqrestore(&cwq->lock, flags);
322 322  
323 323 BUG_ON(get_wq_data(work) != cwq);
324   - if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
  324 + if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
325 325 work_release(work);
326 326 f(work);
327 327