/*
 * mm/pdflush.c - worker threads for writing back filesystem data
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * 09Apr2002	Andrew Morton
 *		Initial version
 * 29Feb2004	kaos@sgi.com
 *		Move worker thread creation to kthread to avoid chewing
 *		up stack space with nested calls to kernel_thread.
 */

#include <linux/sched.h>
#include <linux/list.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>		/* Needed by writeback.h	  */
#include <linux/writeback.h>	/* Prototypes pdflush_operation() */
#include <linux/kthread.h>
#include <linux/cpuset.h>
#include <linux/freezer.h>

/*
 * Minimum and maximum number of pdflush instances
 */
#define MIN_PDFLUSH_THREADS	2
#define MAX_PDFLUSH_THREADS	8

static void start_one_pdflush_thread(void);


/*
 * The pdflush threads are worker threads for writing back dirty data.
 * Ideally, we'd like one thread per active disk spindle.  But the disk
 * topology is very hard to divine at this level.  Instead, we take
 * care in various places to prevent more than one pdflush thread from
 * performing writeback against a single filesystem.  pdflush threads
 * have the PF_FLUSHER flag set in current->flags to aid in this.
 */

/*
 * All the pdflush threads.  Protected by pdflush_lock.
 */
static LIST_HEAD(pdflush_list);
static DEFINE_SPINLOCK(pdflush_lock);

/*
 * The count of currently-running pdflush threads.  Protected
 * by pdflush_lock.
 *
 * Readable by sysctl, but not writable.  Published to userspace at
 * /proc/sys/vm/nr_pdflush_threads.
 */
int nr_pdflush_threads = 0;

/*
 * The time at which the pdflush thread pool last went empty
 */
static unsigned long last_empty_jifs;

/*
 * The pdflush thread.
 *
 * Thread pool management algorithm:
 *
 * - The minimum and maximum number of pdflush instances are bound
 *   by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
 *
 * - If there have been no idle pdflush instances for 1 second, create
 *   a new one.
 *
 * - If the least-recently-went-to-sleep pdflush thread has been asleep
 *   for more than one second, terminate a thread.
 */

/*
 * A structure for passing work to a pdflush thread.  Also for passing
 * state information between pdflush threads.  Protected by pdflush_lock.
 */
struct pdflush_work {
	struct task_struct *who;	/* The thread */
	void (*fn)(unsigned long);	/* A callback function */
	unsigned long arg0;		/* An argument to the callback */
	struct list_head list;		/* On pdflush_list, when idle */
	unsigned long when_i_went_to_sleep;
};

static int __pdflush(struct pdflush_work *my_work)
{
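	/*
	 * PF_FLUSHER marks us for the one-flusher-per-filesystem checks
	 * described above; PF_SWAPWRITE allows this thread to write to
	 * swap.
	 */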
	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
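	/*
	 * Kernel threads are not freezable by default; opt in so the
	 * freezer can quiesce this thread for suspend/hibernate.
	 */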
	set_freezable();
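	/*
	 * Set up our control structure.  It is private until list_move()
	 * below publishes it on pdflush_list; after that it may only be
	 * touched under pdflush_lock.
	 */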
	my_work->fn = NULL;
	my_work->who = current;
	INIT_LIST_HEAD(&my_work->list);

	spin_lock_irq(&pdflush_lock);
	nr_pdflush_threads++;
	for ( ; ; ) {
		struct pdflush_work *pdf;

		set_current_state(TASK_INTERRUPTIBLE);
		list_move(&my_work->list, &pdflush_list);
		my_work->when_i_went_to_sleep = jiffies;
		spin_unlock_irq(&pdflush_lock);
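		/*
		 * Sleep.  A real wakeup comes from pdflush_operation(),
		 * which sets my_work->fn first; spurious and freezer
		 * wakeups are handled below.
		 */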
		schedule();
		try_to_freeze();
		spin_lock_irq(&pdflush_lock);
		if (!list_empty(&my_work->list)) {
			/*
			 * Someone woke us up, but without removing our control
			 * structure from the global list.  swsusp will do this
			 * in try_to_freeze()->refrigerator().  Handle it.
			 */
			my_work->fn = NULL;
			continue;
		}
		if (my_work->fn == NULL) {
			printk("pdflush: bogus wakeup\n");
			continue;
		}
		spin_unlock_irq(&pdflush_lock);

		(*my_work->fn)(my_work->arg0);

		/*
		 * Thread creation: For how long have there been zero
		 * available threads?
		 */
		if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
			/* unlocked list_empty() test is OK here */
			if (list_empty(&pdflush_list)) {
				/* unlocked test is OK here */
				if (nr_pdflush_threads < MAX_PDFLUSH_THREADS)
					start_one_pdflush_thread();
			}
		}

		spin_lock_irq(&pdflush_lock);
		my_work->fn = NULL;

		/*
		 * Thread destruction: For how long has the sleepiest
		 * thread slept?
		 */
		if (list_empty(&pdflush_list))
			continue;
		if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
			continue;
		pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
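		/*
		 * pdf is the list tail, i.e. the thread that has been
		 * idle longest (idle threads are added at the head).
		 */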
		if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
			/* Limit exit rate */
			pdf->when_i_went_to_sleep = jiffies;
			break;					/* exeunt */
		}
	}
	nr_pdflush_threads--;
	spin_unlock_irq(&pdflush_lock);
	return 0;
}

/*
 * Of course, my_work wants to be just a local in __pdflush().  It is
 * separated out in this manner to hopefully prevent the compiler from
 * performing unfortunate optimisations against the auto variables,
 * because these are visible to other tasks and CPUs.  (No problem has
 * actually been observed; this is just paranoia.)
 */
static int pdflush(void *dummy)
{
	struct pdflush_work my_work;
	cpumask_var_t cpus_allowed;

	/*
	 * Since the caller doesn't even check kthread_run() worked, let's not
	 * freak out too much if this fails.
	 */
	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
		return 0;
	}

	/*
	 * pdflush can spend a lot of time doing encryption via dm-crypt.  We
	 * don't want to do that at keventd's priority.
	 */
	set_user_nice(current, 0);

	/*
	 * Some configs put our parent kthread in a limited cpuset,
	 * which kthread() overrides, forcing cpus_allowed == CPU_MASK_ALL.
	 * Our needs are more modest - cut back to our cpuset's cpus_allowed.
	 * This is needed as pdflushes are dynamically created and destroyed.
	 * The boot-time pdflushes are easily placed without these two lines.
	 */
	cpuset_cpus_allowed(current, cpus_allowed);
	set_cpus_allowed_ptr(current, cpus_allowed);
	free_cpumask_var(cpus_allowed);

	return __pdflush(&my_work);
}

/*
 * Attempt to wake up a pdflush thread, and get it to do some work for you.
 * Returns zero if it managed to find a worker thread and passed your
 * payload to it, or -1 if no idle worker was available.
 */
int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(fn == NULL);	/* Hard to diagnose if it's deferred */

	spin_lock_irqsave(&pdflush_lock, flags);
	if (list_empty(&pdflush_list)) {
		ret = -1;
	} else {
		struct pdflush_work *pdf;

		pdf = list_entry(pdflush_list.next, struct pdflush_work, list);
		list_del_init(&pdf->list);
		if (list_empty(&pdflush_list))
			last_empty_jifs = jiffies;
		pdf->fn = fn;
		pdf->arg0 = arg0;
		wake_up_process(pdf->who);
	}
	spin_unlock_irqrestore(&pdflush_lock, flags);

	return ret;
}

static void start_one_pdflush_thread(void)
{
	kthread_run(pdflush, NULL, "pdflush");
}

static int __init pdflush_init(void)
{
	int i;

	for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
		start_one_pdflush_thread();
	return 0;
}
module_init(pdflush_init);
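
/*
 * Usage sketch (illustrative only, not part of this file): callers
 * elsewhere in mm/ hand work to the pool via pdflush_operation().
 * Assuming the background_writeout() callback from mm/page-writeback.c
 * of the same era:
 *
 *	int err = pdflush_operation(background_writeout, nr_pages);
 *
 * On success (zero) an idle worker is popped off pdflush_list, its
 * fn/arg0 are filled in, and it is woken to run
 * background_writeout(nr_pages) in process context.  On failure (-1)
 * every worker was busy and the request is dropped, not queued, so the
 * caller must cope, typically by letting a later attempt retry.
 */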