Blame view
net/sunrpc/cache.c
44.2 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 |
/* * net/sunrpc/cache.c * * Generic code for various authentication-related caches * used by sunrpc clients and servers. * * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au> * * Released under terms in GPL version 2. See COPYING. * */ #include <linux/types.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/module.h> #include <linux/ctype.h> |
1b2e122d1
|
23 |
#include <linux/string_helpers.h> |
7c0f6ba68
|
24 |
#include <linux/uaccess.h> |
1da177e4c
|
25 26 27 28 29 |
#include <linux/poll.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/net.h> #include <linux/workqueue.h> |
4a3e2f711
|
30 |
#include <linux/mutex.h> |
da77005f0
|
31 |
#include <linux/pagemap.h> |
1da177e4c
|
32 33 34 35 |
#include <asm/ioctls.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/cache.h> #include <linux/sunrpc/stats.h> |
8854e82d9
|
36 |
#include <linux/sunrpc/rpc_pipe_fs.h> |
4f42d0d53
|
37 |
#include "netns.h" |
1da177e4c
|
38 39 |
#define RPCDBG_FACILITY RPCDBG_CACHE |
d76d1815f
|
40 |
static bool cache_defer_req(struct cache_req *req, struct cache_head *item); |
1da177e4c
|
41 |
static void cache_revisit_request(struct cache_head *item); |
778620364
|
42 |
static void cache_init(struct cache_head *h, struct cache_detail *detail) |
1da177e4c
|
43 |
{ |
c5b29f885
|
44 |
time_t now = seconds_since_boot(); |
129e5824c
|
45 |
INIT_HLIST_NODE(&h->cache_list); |
1da177e4c
|
46 |
h->flags = 0; |
baab935ff
|
47 |
kref_init(&h->ref); |
1da177e4c
|
48 |
h->expiry_time = now + CACHE_NEW_EXPIRY; |
778620364
|
49 50 51 |
if (now <= detail->flush_time) /* ensure it isn't already expired */ now = detail->flush_time + 1; |
1da177e4c
|
52 53 |
h->last_refresh = now; } |
15a5f6bd2
|
54 55 56 |
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, struct cache_head *key, int hash) { |
129e5824c
|
57 58 |
struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL; struct hlist_head *head; |
15a5f6bd2
|
59 60 61 62 |
head = &detail->hash_table[hash]; read_lock(&detail->hash_lock); |
129e5824c
|
63 |
hlist_for_each_entry(tmp, head, cache_list) { |
15a5f6bd2
|
64 |
if (detail->match(tmp, key)) { |
d202cce89
|
65 66 67 |
if (cache_is_expired(detail, tmp)) /* This entry is expired, we will discard it. */ break; |
15a5f6bd2
|
68 69 70 71 72 73 74 75 76 77 78 |
cache_get(tmp); read_unlock(&detail->hash_lock); return tmp; } } read_unlock(&detail->hash_lock); /* Didn't find anything, insert an empty entry */ new = detail->alloc(); if (!new) return NULL; |
2f34931fd
|
79 80 81 82 |
/* must fully initialise 'new', else * we might get lose if we need to * cache_put it soon. */ |
778620364
|
83 |
cache_init(new, detail); |
2f34931fd
|
84 |
detail->init(new, key); |
15a5f6bd2
|
85 86 87 88 |
write_lock(&detail->hash_lock); /* check if entry appeared while we slept */ |
129e5824c
|
89 |
hlist_for_each_entry(tmp, head, cache_list) { |
15a5f6bd2
|
90 |
if (detail->match(tmp, key)) { |
d202cce89
|
91 |
if (cache_is_expired(detail, tmp)) { |
129e5824c
|
92 |
hlist_del_init(&tmp->cache_list); |
d202cce89
|
93 94 95 96 |
detail->entries --; freeme = tmp; break; } |
15a5f6bd2
|
97 98 |
cache_get(tmp); write_unlock(&detail->hash_lock); |
baab935ff
|
99 |
cache_put(new, detail); |
15a5f6bd2
|
100 101 102 |
return tmp; } } |
129e5824c
|
103 104 |
hlist_add_head(&new->cache_list, head); |
15a5f6bd2
|
105 106 107 |
detail->entries++; cache_get(new); write_unlock(&detail->hash_lock); |
d202cce89
|
108 109 |
if (freeme) cache_put(freeme, detail); |
15a5f6bd2
|
110 111 |
return new; } |
24c3767e4
|
112 |
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); |
15a5f6bd2
|
113 |
|
ebd0cb1af
|
114 |
|
f866a8194
|
115 |
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch); |
ebd0cb1af
|
116 |
|
778620364
|
117 118 |
static void cache_fresh_locked(struct cache_head *head, time_t expiry, struct cache_detail *detail) |
ebd0cb1af
|
119 |
{ |
778620364
|
120 121 122 123 |
time_t now = seconds_since_boot(); if (now <= detail->flush_time) /* ensure it isn't immediately treated as expired */ now = detail->flush_time + 1; |
ebd0cb1af
|
124 |
head->expiry_time = expiry; |
778620364
|
125 |
head->last_refresh = now; |
fdef7aa5d
|
126 |
smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ |
908329f2c
|
127 |
set_bit(CACHE_VALID, &head->flags); |
ebd0cb1af
|
128 129 130 |
} static void cache_fresh_unlocked(struct cache_head *head, |
908329f2c
|
131 |
struct cache_detail *detail) |
ebd0cb1af
|
132 |
{ |
ebd0cb1af
|
133 134 |
if (test_and_clear_bit(CACHE_PENDING, &head->flags)) { cache_revisit_request(head); |
f866a8194
|
135 |
cache_dequeue(detail, head); |
ebd0cb1af
|
136 137 |
} } |
15a5f6bd2
|
138 139 140 141 142 143 144 |
struct cache_head *sunrpc_cache_update(struct cache_detail *detail, struct cache_head *new, struct cache_head *old, int hash) { /* The 'old' entry is to be replaced by 'new'. * If 'old' is not VALID, we update it directly, * otherwise we need to replace it */ |
15a5f6bd2
|
145 146 147 148 149 150 151 152 153 |
struct cache_head *tmp; if (!test_bit(CACHE_VALID, &old->flags)) { write_lock(&detail->hash_lock); if (!test_bit(CACHE_VALID, &old->flags)) { if (test_bit(CACHE_NEGATIVE, &new->flags)) set_bit(CACHE_NEGATIVE, &old->flags); else detail->update(old, new); |
778620364
|
154 |
cache_fresh_locked(old, new->expiry_time, detail); |
15a5f6bd2
|
155 |
write_unlock(&detail->hash_lock); |
908329f2c
|
156 |
cache_fresh_unlocked(old, detail); |
15a5f6bd2
|
157 158 159 160 161 162 163 |
return old; } write_unlock(&detail->hash_lock); } /* We need to insert a new entry */ tmp = detail->alloc(); if (!tmp) { |
baab935ff
|
164 |
cache_put(old, detail); |
15a5f6bd2
|
165 166 |
return NULL; } |
778620364
|
167 |
cache_init(tmp, detail); |
15a5f6bd2
|
168 |
detail->init(tmp, old); |
15a5f6bd2
|
169 170 171 172 173 174 |
write_lock(&detail->hash_lock); if (test_bit(CACHE_NEGATIVE, &new->flags)) set_bit(CACHE_NEGATIVE, &tmp->flags); else detail->update(tmp, new); |
129e5824c
|
175 |
hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]); |
f2d395865
|
176 |
detail->entries++; |
15a5f6bd2
|
177 |
cache_get(tmp); |
778620364
|
178 179 |
cache_fresh_locked(tmp, new->expiry_time, detail); cache_fresh_locked(old, 0, detail); |
15a5f6bd2
|
180 |
write_unlock(&detail->hash_lock); |
908329f2c
|
181 182 |
cache_fresh_unlocked(tmp, detail); cache_fresh_unlocked(old, detail); |
baab935ff
|
183 |
cache_put(old, detail); |
15a5f6bd2
|
184 185 |
return tmp; } |
24c3767e4
|
186 |
EXPORT_SYMBOL_GPL(sunrpc_cache_update); |
1da177e4c
|
187 |
|
bc74b4f5e
|
188 189 |
static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) { |
2d4383383
|
190 191 |
if (cd->cache_upcall) return cd->cache_upcall(cd, h); |
21cd1254d
|
192 |
return sunrpc_cache_pipe_upcall(cd, h); |
bc74b4f5e
|
193 |
} |
989a19b9b
|
194 |
|
b6040f970
|
195 |
static inline int cache_is_valid(struct cache_head *h) |
989a19b9b
|
196 |
{ |
d202cce89
|
197 |
if (!test_bit(CACHE_VALID, &h->flags)) |
989a19b9b
|
198 199 200 201 202 |
return -EAGAIN; else { /* entry is valid */ if (test_bit(CACHE_NEGATIVE, &h->flags)) return -ENOENT; |
fdef7aa5d
|
203 204 205 206 207 208 209 210 |
else { /* * In combination with write barrier in * sunrpc_cache_update, ensures that anyone * using the cache entry after this sees the * updated contents: */ smp_rmb(); |
989a19b9b
|
211 |
return 0; |
fdef7aa5d
|
212 |
} |
989a19b9b
|
213 214 |
} } |
e9dc12216
|
215 |
|
6bab93f87
|
216 217 218 219 220 |
static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h) { int rv; write_lock(&detail->hash_lock); |
b6040f970
|
221 |
rv = cache_is_valid(h); |
2a1c7f53f
|
222 223 |
if (rv == -EAGAIN) { set_bit(CACHE_NEGATIVE, &h->flags); |
778620364
|
224 225 |
cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY, detail); |
2a1c7f53f
|
226 |
rv = -ENOENT; |
6bab93f87
|
227 |
} |
6bab93f87
|
228 229 |
write_unlock(&detail->hash_lock); cache_fresh_unlocked(h, detail); |
2a1c7f53f
|
230 |
return rv; |
6bab93f87
|
231 |
} |
1da177e4c
|
232 233 234 235 236 237 238 239 |
/* * This is the generic cache management routine for all * the authentication caches. * It checks the currency of a cache item and will (later) * initiate an upcall to fill it if needed. * * * Returns 0 if the cache_head can be used, or cache_puts it and returns |
989a19b9b
|
240 241 242 243 |
* -EAGAIN if upcall is pending and request has been queued * -ETIMEDOUT if upcall failed or request could not be queue or * upcall completed but item is still invalid (implying that * the cache item has been replaced with a newer one). |
1da177e4c
|
244 245 246 247 248 249 250 251 252 |
* -ENOENT if cache entry was negative */ int cache_check(struct cache_detail *detail, struct cache_head *h, struct cache_req *rqstp) { int rv; long refresh_age, age; /* First decide return status as best we can */ |
b6040f970
|
253 |
rv = cache_is_valid(h); |
1da177e4c
|
254 255 256 |
/* now see if we want to start an upcall */ refresh_age = (h->expiry_time - h->last_refresh); |
c5b29f885
|
257 |
age = seconds_since_boot() - h->last_refresh; |
1da177e4c
|
258 259 260 261 |
if (rqstp == NULL) { if (rv == -EAGAIN) rv = -ENOENT; |
0bebc633f
|
262 263 |
} else if (rv == -EAGAIN || (h->expiry_time != 0 && age > refresh_age/2)) { |
46121cf7d
|
264 265 266 |
dprintk("RPC: Want update, refage=%ld, age=%ld ", refresh_age, age); |
1da177e4c
|
267 268 269 |
if (!test_and_set_bit(CACHE_PENDING, &h->flags)) { switch (cache_make_upcall(detail, h)) { case -EINVAL: |
6bab93f87
|
270 |
rv = try_to_negate_entry(detail, h); |
1da177e4c
|
271 |
break; |
1da177e4c
|
272 |
case -EAGAIN: |
2a1c7f53f
|
273 |
cache_fresh_unlocked(h, detail); |
1da177e4c
|
274 275 276 277 |
break; } } } |
989a19b9b
|
278 |
if (rv == -EAGAIN) { |
d76d1815f
|
279 280 281 282 283 |
if (!cache_defer_req(rqstp, h)) { /* * Request was not deferred; handle it as best * we can ourselves: */ |
b6040f970
|
284 |
rv = cache_is_valid(h); |
989a19b9b
|
285 286 287 288 |
if (rv == -EAGAIN) rv = -ETIMEDOUT; } } |
4013edea9
|
289 |
if (rv) |
baab935ff
|
290 |
cache_put(h, detail); |
1da177e4c
|
291 292 |
return rv; } |
24c3767e4
|
293 |
EXPORT_SYMBOL_GPL(cache_check); |
1da177e4c
|
294 |
|
1da177e4c
|
295 296 297 298 299 300 |
/* * caches need to be periodically cleaned. * For this we maintain a list of cache_detail and * a current pointer into that list and into the table * for that entry. * |
013920eb5
|
301 |
* Each time cache_clean is called it finds the next non-empty entry |
1da177e4c
|
302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 |
* in the current table and walks the list in that entry * looking for entries that can be removed. * * An entry gets removed if: * - The expiry is before current time * - The last_refresh time is before the flush_time for that cache * * later we might drop old entries with non-NEVER expiry if that table * is getting 'full' for some definition of 'full' * * The question of "how often to scan a table" is an interesting one * and is answered in part by the use of the "nextcheck" field in the * cache_detail. * When a scan of a table begins, the nextcheck field is set to a time * that is well into the future. * While scanning, if an expiry time is found that is earlier than the * current nextcheck time, nextcheck is set to that expiry time. * If the flush_time is ever set to a time earlier than the nextcheck * time, the nextcheck time is then set to that flush_time. * * A table is then only scanned if the current time is at least * the nextcheck time. |
cca5172a7
|
324 |
* |
1da177e4c
|
325 326 327 328 329 330 |
*/ static LIST_HEAD(cache_list); static DEFINE_SPINLOCK(cache_list_lock); static struct cache_detail *current_detail; static int current_index; |
65f27f384
|
331 |
static void do_cache_clean(struct work_struct *work); |
8eab945c5
|
332 |
static struct delayed_work cache_cleaner; |
1da177e4c
|
333 |
|
820f9442e
|
334 |
void sunrpc_init_cache_detail(struct cache_detail *cd) |
ffe9386b6
|
335 |
{ |
1da177e4c
|
336 337 338 339 340 341 342 343 344 345 346 347 |
rwlock_init(&cd->hash_lock); INIT_LIST_HEAD(&cd->queue); spin_lock(&cache_list_lock); cd->nextcheck = 0; cd->entries = 0; atomic_set(&cd->readers, 0); cd->last_close = 0; cd->last_warn = -1; list_add(&cd->others, &cache_list); spin_unlock(&cache_list_lock); /* start the cleaning process */ |
77b00bc03
|
348 |
queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0); |
1da177e4c
|
349 |
} |
820f9442e
|
350 |
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail); |
1da177e4c
|
351 |
|
820f9442e
|
352 |
void sunrpc_destroy_cache_detail(struct cache_detail *cd) |
1da177e4c
|
353 354 355 356 |
{ cache_purge(cd); spin_lock(&cache_list_lock); write_lock(&cd->hash_lock); |
1da177e4c
|
357 358 359 360 361 |
if (current_detail == cd) current_detail = NULL; list_del_init(&cd->others); write_unlock(&cd->hash_lock); spin_unlock(&cache_list_lock); |
1da177e4c
|
362 363 |
if (list_empty(&cache_list)) { /* module must be being unloaded so its safe to kill the worker */ |
4011cd978
|
364 |
cancel_delayed_work_sync(&cache_cleaner); |
1da177e4c
|
365 |
} |
1da177e4c
|
366 |
} |
820f9442e
|
367 |
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail); |
1da177e4c
|
368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 |
/* clean cache tries to find something to clean * and cleans it. * It returns 1 if it cleaned something, * 0 if it didn't find anything this time * -1 if it fell off the end of the list. */ static int cache_clean(void) { int rv = 0; struct list_head *next; spin_lock(&cache_list_lock); /* find a suitable table if we don't already have one */ while (current_detail == NULL || current_index >= current_detail->hash_size) { if (current_detail) next = current_detail->others.next; else next = cache_list.next; if (next == &cache_list) { current_detail = NULL; spin_unlock(&cache_list_lock); return -1; } current_detail = list_entry(next, struct cache_detail, others); |
c5b29f885
|
395 |
if (current_detail->nextcheck > seconds_since_boot()) |
1da177e4c
|
396 397 398 |
current_index = current_detail->hash_size; else { current_index = 0; |
c5b29f885
|
399 |
current_detail->nextcheck = seconds_since_boot()+30*60; |
1da177e4c
|
400 401 402 403 404 405 |
} } /* find a non-empty bucket in the table */ while (current_detail && current_index < current_detail->hash_size && |
129e5824c
|
406 |
hlist_empty(¤t_detail->hash_table[current_index])) |
1da177e4c
|
407 408 409 |
current_index++; /* find a cleanable entry in the bucket and clean it, or set to next bucket */ |
cca5172a7
|
410 |
|
1da177e4c
|
411 |
if (current_detail && current_index < current_detail->hash_size) { |
129e5824c
|
412 |
struct cache_head *ch = NULL; |
1da177e4c
|
413 |
struct cache_detail *d; |
129e5824c
|
414 415 |
struct hlist_head *head; struct hlist_node *tmp; |
cca5172a7
|
416 |
|
1da177e4c
|
417 418 419 |
write_lock(¤t_detail->hash_lock); /* Ok, now to clean this strand */ |
cca5172a7
|
420 |
|
129e5824c
|
421 422 |
head = ¤t_detail->hash_table[current_index]; hlist_for_each_entry_safe(ch, tmp, head, cache_list) { |
1da177e4c
|
423 424 |
if (current_detail->nextcheck > ch->expiry_time) current_detail->nextcheck = ch->expiry_time+1; |
2f50d8b63
|
425 |
if (!cache_is_expired(current_detail, ch)) |
1da177e4c
|
426 |
continue; |
1da177e4c
|
427 |
|
129e5824c
|
428 |
hlist_del_init(&ch->cache_list); |
1da177e4c
|
429 430 |
current_detail->entries--; rv = 1; |
3af4974eb
|
431 |
break; |
1da177e4c
|
432 |
} |
3af4974eb
|
433 |
|
1da177e4c
|
434 435 436 437 438 |
write_unlock(¤t_detail->hash_lock); d = current_detail; if (!ch) current_index ++; spin_unlock(&cache_list_lock); |
5c4d26390
|
439 |
if (ch) { |
013920eb5
|
440 |
set_bit(CACHE_CLEANED, &ch->flags); |
2a1c7f53f
|
441 |
cache_fresh_unlocked(ch, d); |
baab935ff
|
442 |
cache_put(ch, d); |
5c4d26390
|
443 |
} |
1da177e4c
|
444 445 446 447 448 449 450 451 452 |
} else spin_unlock(&cache_list_lock); return rv; } /* * We want to regularly clean the cache, so we need to schedule some work ... */ |
65f27f384
|
453 |
static void do_cache_clean(struct work_struct *work) |
1da177e4c
|
454 455 456 |
{ int delay = 5; if (cache_clean() == -1) |
6aad89c83
|
457 |
delay = round_jiffies_relative(30*HZ); |
1da177e4c
|
458 459 460 461 462 |
if (list_empty(&cache_list)) delay = 0; if (delay) |
77b00bc03
|
463 464 |
queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay); |
1da177e4c
|
465 |
} |
cca5172a7
|
466 |
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);
1da177e4c
|
479 480 481 |
void cache_purge(struct cache_detail *detail) { |
471a930ad
|
482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 |
struct cache_head *ch = NULL; struct hlist_head *head = NULL; struct hlist_node *tmp = NULL; int i = 0; write_lock(&detail->hash_lock); if (!detail->entries) { write_unlock(&detail->hash_lock); return; } dprintk("RPC: %d entries in %s cache ", detail->entries, detail->name); for (i = 0; i < detail->hash_size; i++) { head = &detail->hash_table[i]; hlist_for_each_entry_safe(ch, tmp, head, cache_list) { hlist_del_init(&ch->cache_list); detail->entries--; set_bit(CACHE_CLEANED, &ch->flags); write_unlock(&detail->hash_lock); cache_fresh_unlocked(ch, detail); cache_put(ch, detail); write_lock(&detail->hash_lock); } } write_unlock(&detail->hash_lock); |
1da177e4c
|
509 |
} |
24c3767e4
|
510 |
EXPORT_SYMBOL_GPL(cache_purge); |
1da177e4c
|
511 512 513 514 515 516 517 518 519 520 |
/* * Deferral and Revisiting of Requests. * * If a cache lookup finds a pending entry, we * need to defer the request and revisit it later. * All deferred requests are stored in a hash table, * indexed by "struct cache_head *". * As it may be wasteful to store a whole request |
cca5172a7
|
521 |
* structure, we allow the request to provide a |
1da177e4c
|
522 523 524 525 526 527 528 529 530 531 532 533 534 |
* deferred form, which must contain a * 'struct cache_deferred_req' * This cache_deferred_req contains a method to allow * it to be revisited when cache info is available */ #define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head)) #define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE) #define DFR_MAX 300 /* ??? */ static DEFINE_SPINLOCK(cache_defer_lock); static LIST_HEAD(cache_defer_list); |
111744927
|
535 |
static struct hlist_head cache_defer_hash[DFR_HASHSIZE]; |
1da177e4c
|
536 |
static int cache_defer_cnt; |
6610f720e
|
537 538 |
static void __unhash_deferred_req(struct cache_deferred_req *dreq) { |
111744927
|
539 |
hlist_del_init(&dreq->hash); |
e33534d54
|
540 541 542 543 |
if (!list_empty(&dreq->recent)) { list_del_init(&dreq->recent); cache_defer_cnt--; } |
6610f720e
|
544 545 546 |
} static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item) |
1da177e4c
|
547 |
{ |
1da177e4c
|
548 |
int hash = DFR_HASH(item); |
e33534d54
|
549 |
INIT_LIST_HEAD(&dreq->recent); |
111744927
|
550 |
hlist_add_head(&dreq->hash, &cache_defer_hash[hash]); |
6610f720e
|
551 |
} |
e33534d54
|
552 553 554 |
static void setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item, int count_me) |
1da177e4c
|
555 |
{ |
1da177e4c
|
556 557 |
dreq->item = item; |
1da177e4c
|
558 559 |
spin_lock(&cache_defer_lock); |
6610f720e
|
560 |
__hash_deferred_req(dreq, item); |
1da177e4c
|
561 |
|
e33534d54
|
562 563 564 |
if (count_me) { cache_defer_cnt++; list_add(&dreq->recent, &cache_defer_list); |
1da177e4c
|
565 |
} |
e33534d54
|
566 |
|
1da177e4c
|
567 |
spin_unlock(&cache_defer_lock); |
3211af111
|
568 |
} |
f16b6e8d8
|
569 |
|
3211af111
|
570 571 572 573 574 575 576 577 578 579 580 |
struct thread_deferred_req { struct cache_deferred_req handle; struct completion completion; }; static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many) { struct thread_deferred_req *dr = container_of(dreq, struct thread_deferred_req, handle); complete(&dr->completion); } |
d29068c43
|
581 |
static void cache_wait_req(struct cache_req *req, struct cache_head *item) |
3211af111
|
582 583 584 |
{ struct thread_deferred_req sleeper; struct cache_deferred_req *dreq = &sleeper.handle; |
3211af111
|
585 586 587 |
sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion); dreq->revisit = cache_restart_thread; |
e33534d54
|
588 |
setup_deferral(dreq, item, 0); |
3211af111
|
589 |
|
d29068c43
|
590 |
if (!test_bit(CACHE_PENDING, &item->flags) || |
277f68dbb
|
591 |
wait_for_completion_interruptible_timeout( |
3211af111
|
592 593 594 595 596 |
&sleeper.completion, req->thread_wait) <= 0) { /* The completion wasn't completed, so we need * to clean up */ spin_lock(&cache_defer_lock); |
111744927
|
597 |
if (!hlist_unhashed(&sleeper.handle.hash)) { |
3211af111
|
598 599 600 601 602 603 604 |
__unhash_deferred_req(&sleeper.handle); spin_unlock(&cache_defer_lock); } else { /* cache_revisit_request already removed * this from the hash table, but hasn't * called ->revisit yet. It will very soon * and we need to wait for it. |
f16b6e8d8
|
605 |
*/ |
3211af111
|
606 607 |
spin_unlock(&cache_defer_lock); wait_for_completion(&sleeper.completion); |
f16b6e8d8
|
608 |
} |
3211af111
|
609 |
} |
3211af111
|
610 |
} |
e33534d54
|
611 |
static void cache_limit_defers(void) |
3211af111
|
612 |
{ |
e33534d54
|
613 614 615 616 |
/* Make sure we haven't exceed the limit of allowed deferred * requests. */ struct cache_deferred_req *discard = NULL; |
3211af111
|
617 |
|
e33534d54
|
618 619 |
if (cache_defer_cnt <= DFR_MAX) return; |
d29068c43
|
620 |
|
e33534d54
|
621 622 623 624 |
spin_lock(&cache_defer_lock); /* Consider removing either the first or the last */ if (cache_defer_cnt > DFR_MAX) { |
63862b5be
|
625 |
if (prandom_u32() & 1) |
e33534d54
|
626 627 628 629 630 631 632 633 |
discard = list_entry(cache_defer_list.next, struct cache_deferred_req, recent); else discard = list_entry(cache_defer_list.prev, struct cache_deferred_req, recent); __unhash_deferred_req(discard); } spin_unlock(&cache_defer_lock); |
cd68c374e
|
634 |
if (discard) |
cd68c374e
|
635 |
discard->revisit(discard, 1); |
e33534d54
|
636 |
} |
cd68c374e
|
637 |
|
d76d1815f
|
638 639 |
/* Return true if and only if a deferred request is queued. */ static bool cache_defer_req(struct cache_req *req, struct cache_head *item) |
e33534d54
|
640 641 |
{ struct cache_deferred_req *dreq; |
d29068c43
|
642 |
|
3211af111
|
643 |
if (req->thread_wait) { |
d29068c43
|
644 645 |
cache_wait_req(req, item); if (!test_bit(CACHE_PENDING, &item->flags)) |
d76d1815f
|
646 |
return false; |
1da177e4c
|
647 |
} |
3211af111
|
648 649 |
dreq = req->defer(req); if (dreq == NULL) |
d76d1815f
|
650 |
return false; |
e33534d54
|
651 |
setup_deferral(dreq, item, 1); |
d29068c43
|
652 653 654 655 656 |
if (!test_bit(CACHE_PENDING, &item->flags)) /* Bit could have been cleared before we managed to * set up the deferral, so need to revisit just in case */ cache_revisit_request(item); |
e33534d54
|
657 658 |
cache_limit_defers(); |
d76d1815f
|
659 |
return true; |
1da177e4c
|
660 661 662 663 664 665 |
} static void cache_revisit_request(struct cache_head *item) { struct cache_deferred_req *dreq; struct list_head pending; |
b67bfe0d4
|
666 |
struct hlist_node *tmp; |
1da177e4c
|
667 668 669 670 |
int hash = DFR_HASH(item); INIT_LIST_HEAD(&pending); spin_lock(&cache_defer_lock); |
cca5172a7
|
671 |
|
b67bfe0d4
|
672 |
hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash) |
111744927
|
673 674 675 |
if (dreq->item == item) { __unhash_deferred_req(dreq); list_add(&dreq->recent, &pending); |
1da177e4c
|
676 |
} |
111744927
|
677 |
|
1da177e4c
|
678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 |
spin_unlock(&cache_defer_lock); while (!list_empty(&pending)) { dreq = list_entry(pending.next, struct cache_deferred_req, recent); list_del_init(&dreq->recent); dreq->revisit(dreq, 0); } } void cache_clean_deferred(void *owner) { struct cache_deferred_req *dreq, *tmp; struct list_head pending; INIT_LIST_HEAD(&pending); spin_lock(&cache_defer_lock); |
cca5172a7
|
695 |
|
1da177e4c
|
696 697 |
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) { if (dreq->owner == owner) { |
6610f720e
|
698 |
__unhash_deferred_req(dreq); |
e95dffa43
|
699 |
list_add(&dreq->recent, &pending); |
1da177e4c
|
700 701 702 703 704 705 706 707 708 709 710 711 712 713 |
} } spin_unlock(&cache_defer_lock); while (!list_empty(&pending)) { dreq = list_entry(pending.next, struct cache_deferred_req, recent); list_del_init(&dreq->recent); dreq->revisit(dreq, 1); } } /* * communicate with user-space * |
6489a8f41
|
714 |
* We have a magic /proc file - /proc/net/rpc/<cachename>/channel. |
a490c681c
|
715 716 717 |
* On read, you get a full request, or block. * On write, an update request is processed. * Poll works if anything to read, and always allows write. |
1da177e4c
|
718 |
* |
cca5172a7
|
719 |
* Implemented by linked list of requests. Each open file has |
a490c681c
|
720 |
* a ->private that also exists in this list. New requests are added |
1da177e4c
|
721 722 723 724 725 726 727 |
* to the end and may wakeup and preceding readers. * New readers are added to the head. If, on read, an item is found with * CACHE_UPCALLING clear, we free it from the list. * */ static DEFINE_SPINLOCK(queue_lock); |
4a3e2f711
|
728 |
static DEFINE_MUTEX(queue_io_mutex); |
1da177e4c
|
729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 |
struct cache_queue { struct list_head list; int reader; /* if 0, then request */ }; struct cache_request { struct cache_queue q; struct cache_head *item; char * buf; int len; int readers; }; struct cache_reader { struct cache_queue q; int offset; /* if non-0, we have a refcnt on next request */ }; |
d94af6dea
|
745 746 747 748 749 750 751 752 753 754 755 |
/* Format the upcall for @crq into its page-sized buffer; returns the
 * number of bytes used, or -EAGAIN if formatting failed. */
static int cache_request(struct cache_detail *detail,
			       struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}
173912a6a
|
756 757 |
/* Deliver the next queued upcall request to a user-space reader.
 * The reader's queue position and partial-read offset live in the
 * struct cache_reader hanging off filp->private_data. */
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		/* lazily format the request on first read */
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}
da77005f0
|
834 835 836 837 |
/* Copy a user-space downcall into @kaddr, NUL-terminate it and hand
 * it to the cache's parser.  Returns bytes consumed or -errno. */
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

/* Fallback downcall path through a shared static buffer, serialised
 * by queue_io_mutex. */
static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}
1da177e4c
|
864 |
|
da77005f0
|
865 866 867 868 869 870 871 |
/* Fast downcall path: stage the write in a page of the file's own
 * mapping, falling back to the shared-buffer slow path when the
 * write won't fit in one page or no page is available. */
static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}
1da177e4c
|
888 |
|
173912a6a
|
889 890 891 |
/* Handle a user-space write (downcall) on the channel file; the
 * inode lock serialises concurrent writers. */
static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
ade994f4f
|
907 |
/* poll(): writes are always possible; reads become ready as soon as
 * any non-reader (i.e. request) entry follows us in the queue. */
static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* alway allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq= &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}
173912a6a
|
933 934 935 |
static int cache_ioctl(struct inode *ino, struct file *filp, unsigned int cmd, unsigned long arg, struct cache_detail *cd) |
1da177e4c
|
936 937 938 939 |
{ int len = 0; struct cache_reader *rp = filp->private_data; struct cache_queue *cq; |
1da177e4c
|
940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 |
if (cmd != FIONREAD || !rp) return -EINVAL; spin_lock(&queue_lock); /* only find the length remaining in current request, * or the length of the next request */ for (cq= &rp->q; &cq->list != &cd->queue; cq = list_entry(cq->list.next, struct cache_queue, list)) if (!cq->reader) { struct cache_request *cr = container_of(cq, struct cache_request, q); len = cr->len - rp->offset; break; } spin_unlock(&queue_lock); return put_user(len, (int __user *)arg); } |
173912a6a
|
961 962 |
static int cache_open(struct inode *inode, struct file *filp, struct cache_detail *cd) |
1da177e4c
|
963 964 |
{ struct cache_reader *rp = NULL; |
f7e86ab92
|
965 966 |
if (!cd || !try_module_get(cd->owner)) return -EACCES; |
1da177e4c
|
967 968 |
nonseekable_open(inode, filp); if (filp->f_mode & FMODE_READ) { |
1da177e4c
|
969 |
rp = kmalloc(sizeof(*rp), GFP_KERNEL); |
a7823c797
|
970 971 |
if (!rp) { module_put(cd->owner); |
1da177e4c
|
972 |
return -ENOMEM; |
a7823c797
|
973 |
} |
1da177e4c
|
974 975 976 977 978 979 980 981 982 983 |
rp->offset = 0; rp->q.reader = 1; atomic_inc(&cd->readers); spin_lock(&queue_lock); list_add(&rp->q.list, &cd->queue); spin_unlock(&queue_lock); } filp->private_data = rp; return 0; } |
173912a6a
|
984 985 |
static int cache_release(struct inode *inode, struct file *filp, struct cache_detail *cd) |
1da177e4c
|
986 987 |
{ struct cache_reader *rp = filp->private_data; |
1da177e4c
|
988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 |
if (rp) { spin_lock(&queue_lock); if (rp->offset) { struct cache_queue *cq; for (cq= &rp->q; &cq->list != &cd->queue; cq = list_entry(cq->list.next, struct cache_queue, list)) if (!cq->reader) { container_of(cq, struct cache_request, q) ->readers--; break; } rp->offset = 0; } list_del(&rp->q.list); spin_unlock(&queue_lock); filp->private_data = NULL; kfree(rp); |
c5b29f885
|
1007 |
cd->last_close = seconds_since_boot(); |
1da177e4c
|
1008 1009 |
atomic_dec(&cd->readers); } |
f7e86ab92
|
1010 |
module_put(cd->owner); |
1da177e4c
|
1011 1012 |
return 0; } |
f866a8194
|
1013 |
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch) |
1da177e4c
|
1014 |
{ |
f9e1aedc6
|
1015 1016 1017 1018 1019 |
struct cache_queue *cq, *tmp; struct cache_request *cr; struct list_head dequeued; INIT_LIST_HEAD(&dequeued); |
1da177e4c
|
1020 |
spin_lock(&queue_lock); |
f9e1aedc6
|
1021 |
list_for_each_entry_safe(cq, tmp, &detail->queue, list) |
1da177e4c
|
1022 |
if (!cq->reader) { |
f9e1aedc6
|
1023 |
cr = container_of(cq, struct cache_request, q); |
1da177e4c
|
1024 1025 |
if (cr->item != ch) continue; |
f9e1aedc6
|
1026 1027 1028 |
if (test_bit(CACHE_PENDING, &ch->flags)) /* Lost a race and it is pending again */ break; |
1da177e4c
|
1029 |
if (cr->readers != 0) |
4013edea9
|
1030 |
continue; |
f9e1aedc6
|
1031 |
list_move(&cr->q.list, &dequeued); |
1da177e4c
|
1032 1033 |
} spin_unlock(&queue_lock); |
f9e1aedc6
|
1034 1035 1036 1037 1038 1039 1040 |
while (!list_empty(&dequeued)) { cr = list_entry(dequeued.next, struct cache_request, q.list); list_del(&cr->q.list); cache_put(cr->item, detail); kfree(cr->buf); kfree(cr); } |
1da177e4c
|
1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 |
} /* * Support routines for text-based upcalls. * Fields are separated by spaces. * Fields are either mangled to quote space tab newline slosh with slosh * or a hexified with a leading \x * Record is terminated with newline. * */ void qword_add(char **bpp, int *lp, char *str) { char *bp = *bpp; int len = *lp; |
1b2e122d1
|
1056 |
int ret; |
1da177e4c
|
1057 1058 |
if (len < 0) return; |
41416f233
|
1059 1060 1061 1062 |
ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \t"); if (ret >= len) { bp += len; |
1b2e122d1
|
1063 |
len = -1; |
41416f233
|
1064 1065 |
} else { bp += ret; |
1b2e122d1
|
1066 |
len -= ret; |
1da177e4c
|
1067 1068 1069 1070 1071 1072 |
*bp++ = ' '; len--; } *bpp = bp; *lp = len; } |
24c3767e4
|
1073 |
EXPORT_SYMBOL_GPL(qword_add); |
1da177e4c
|
1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 |
void qword_addhex(char **bpp, int *lp, char *buf, int blen) { char *bp = *bpp; int len = *lp; if (len < 0) return; if (len > 2) { *bp++ = '\\'; *bp++ = 'x'; len -= 2; while (blen && len >= 2) { |
056785ea5
|
1087 |
bp = hex_byte_pack(bp, *buf++); |
1da177e4c
|
1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 |
len -= 2; blen--; } } if (blen || len<1) len = -1; else { *bp++ = ' '; len--; } *bpp = bp; *lp = len; } |
24c3767e4
|
1100 |
EXPORT_SYMBOL_GPL(qword_addhex); |
1da177e4c
|
1101 1102 1103 1104 1105 1106 |
static void warn_no_listener(struct cache_detail *detail) { if (detail->last_warn != detail->last_close) { detail->last_warn = detail->last_close; if (detail->warn_no_listener) |
2da8ca26c
|
1107 |
detail->warn_no_listener(detail, detail->last_close != 0); |
1da177e4c
|
1108 1109 |
} } |
064975245
|
1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 |
static bool cache_listeners_exist(struct cache_detail *detail) { if (atomic_read(&detail->readers)) return true; if (detail->last_close == 0) /* This cache was never opened */ return false; if (detail->last_close < seconds_since_boot() - 30) /* * We allow for the possibility that someone might * restart a userspace daemon without restarting the * server; but after 30 seconds, we give up. */ return false; return true; } |
1da177e4c
|
1126 |
/* |
bc74b4f5e
|
1127 1128 1129 |
* register an upcall request to user-space and queue it up for read() by the * upcall daemon. * |
1da177e4c
|
1130 1131 |
* Each request is at most one page long. */ |
21cd1254d
|
1132 |
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h) |
1da177e4c
|
1133 1134 1135 1136 |
{ char *buf; struct cache_request *crq; |
f9e1aedc6
|
1137 |
int ret = 0; |
1da177e4c
|
1138 |
|
2d4383383
|
1139 1140 |
if (!detail->cache_request) return -EINVAL; |
1da177e4c
|
1141 |
|
064975245
|
1142 1143 1144 |
if (!cache_listeners_exist(detail)) { warn_no_listener(detail); return -EINVAL; |
1da177e4c
|
1145 |
} |
013920eb5
|
1146 1147 1148 |
if (test_bit(CACHE_CLEANED, &h->flags)) /* Too late to make an upcall */ return -EAGAIN; |
1da177e4c
|
1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 |
buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return -EAGAIN; crq = kmalloc(sizeof (*crq), GFP_KERNEL); if (!crq) { kfree(buf); return -EAGAIN; } |
1da177e4c
|
1159 |
crq->q.reader = 0; |
1da177e4c
|
1160 |
crq->buf = buf; |
d94af6dea
|
1161 |
crq->len = 0; |
1da177e4c
|
1162 1163 |
crq->readers = 0; spin_lock(&queue_lock); |
a6ab1e812
|
1164 1165 |
if (test_bit(CACHE_PENDING, &h->flags)) { crq->item = cache_get(h); |
f9e1aedc6
|
1166 |
list_add_tail(&crq->q.list, &detail->queue); |
a6ab1e812
|
1167 |
} else |
f9e1aedc6
|
1168 1169 |
/* Lost a race, no longer PENDING, so don't enqueue */ ret = -EAGAIN; |
1da177e4c
|
1170 1171 |
spin_unlock(&queue_lock); wake_up(&queue_wait); |
f9e1aedc6
|
1172 1173 1174 1175 1176 |
if (ret == -EAGAIN) { kfree(buf); kfree(crq); } return ret; |
1da177e4c
|
1177 |
} |
bc74b4f5e
|
1178 |
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall); |
1da177e4c
|
1179 1180 1181 1182 1183 1184 1185 1186 |
/* * parse a message from user-space and pass it * to an appropriate cache * Messages are, like requests, separated into fields by * spaces and dequotes as \xHEXSTRING or embedded nn octal * |
cca5172a7
|
1187 |
* Message is |
1da177e4c
|
1188 1189 |
* reply cachename expiry key ... content.... * |
cca5172a7
|
1190 |
* key and content are both parsed by cache |
1da177e4c
|
1191 |
*/ |
1da177e4c
|
1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 |
/*
 * Parse one space-separated field from *bpp into @dest (dequoting
 * "\xHEX" or "\nnn" octal escapes), NUL-terminating the result and
 * advancing *bpp past trailing spaces.  Returns bytes copied or -1
 * on malformed input.  The '\n' comparisons were garbled to ' ' in
 * the scraped text and are restored here: a field legitimately ends
 * at space, newline, or end of string.
 */
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ')
		bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
1da177e4c
|
1249 1250 1251 |
/* |
6489a8f41
|
1252 |
* support /proc/net/rpc/$CACHENAME/content |
1da177e4c
|
1253 1254 1255 1256 |
* as a seqfile. * We call ->cache_show passing NULL for the item to * get a header, then pass each real item in the cache */ |
c8c081b70
|
1257 |
/*
 * seq_file start method for /proc/net/rpc/$CACHE/content.
 * The position encodes (hash bucket << 32 | entry index), offset by 1
 * so that pos 0 yields the header token.  Returns with hash_lock held
 * for reading (released in cache_seq_stop).
 */
void *cache_seq_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	/* Walk the recorded bucket to the recorded entry, if it survives. */
	hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;

	/* Entry gone: advance to the first non-empty later bucket. */
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(cd->hash_table[hash].first,
				struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_start);
1da177e4c
|
1286 |
|
c8c081b70
|
1287 |
/*
 * seq_file next method: step to the next entry in the current bucket,
 * or to the first entry of the next non-empty bucket, keeping *pos in
 * sync with the (bucket << 32 | index) + 1 encoding.
 */
void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		/* End of this bucket: move on. */
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(ch->cache_list.next,
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(cd->hash_table[hash].first,
				struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_next);
1da177e4c
|
1316 |
|
c8c081b70
|
1317 |
void cache_seq_stop(struct seq_file *m, void *p) |
9a429c498
|
1318 |
__releases(cd->hash_lock) |
1da177e4c
|
1319 |
{ |
9936f2ae3
|
1320 |
struct cache_detail *cd = m->private; |
1da177e4c
|
1321 1322 |
read_unlock(&cd->hash_lock); } |
c8c081b70
|
1323 |
EXPORT_SYMBOL_GPL(cache_seq_stop); |
1da177e4c
|
1324 1325 1326 1327 |
static int c_show(struct seq_file *m, void *p) { struct cache_head *cp = p; |
9936f2ae3
|
1328 |
struct cache_detail *cd = m->private; |
1da177e4c
|
1329 1330 1331 1332 1333 |
if (p == SEQ_START_TOKEN) return cd->cache_show(m, cd, NULL); ifdebug(CACHE) |
4013edea9
|
1334 1335 |
seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx ", |
c5b29f885
|
1336 |
convert_to_wallclock(cp->expiry_time), |
2c935bc57
|
1337 |
kref_read(&cp->ref), cp->flags); |
1da177e4c
|
1338 1339 1340 1341 |
cache_get(cp); if (cache_check(cd, cp, NULL)) /* cache_check does a cache_put on failure */ seq_printf(m, "# "); |
200724a70
|
1342 1343 1344 |
else { if (cache_is_expired(cd, cp)) seq_printf(m, "# "); |
1da177e4c
|
1345 |
cache_put(cp, cd); |
200724a70
|
1346 |
} |
1da177e4c
|
1347 1348 1349 |
return cd->cache_show(m, cd, cp); } |
56b3d975b
|
1350 |
static const struct seq_operations cache_content_op = { |
c8c081b70
|
1351 1352 1353 |
.start = cache_seq_start, .next = cache_seq_next, .stop = cache_seq_stop, |
1da177e4c
|
1354 1355 |
.show = c_show, }; |
173912a6a
|
1356 1357 |
static int content_open(struct inode *inode, struct file *file, struct cache_detail *cd) |
1da177e4c
|
1358 |
{ |
9936f2ae3
|
1359 1360 |
struct seq_file *seq; int err; |
1da177e4c
|
1361 |
|
f7e86ab92
|
1362 1363 |
if (!cd || !try_module_get(cd->owner)) return -EACCES; |
9936f2ae3
|
1364 1365 1366 |
err = seq_open(file, &cache_content_op); if (err) { |
a5990ea12
|
1367 |
module_put(cd->owner); |
9936f2ae3
|
1368 |
return err; |
a5990ea12
|
1369 |
} |
1da177e4c
|
1370 |
|
9936f2ae3
|
1371 1372 |
seq = file->private_data; seq->private = cd; |
ec9310351
|
1373 |
return 0; |
1da177e4c
|
1374 |
} |
1da177e4c
|
1375 |
|
f7e86ab92
|
1376 1377 1378 |
static int content_release(struct inode *inode, struct file *file, struct cache_detail *cd) { |
9936f2ae3
|
1379 |
int ret = seq_release(inode, file); |
f7e86ab92
|
1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 |
module_put(cd->owner); return ret; } static int open_flush(struct inode *inode, struct file *file, struct cache_detail *cd) { if (!cd || !try_module_get(cd->owner)) return -EACCES; return nonseekable_open(inode, file); } static int release_flush(struct inode *inode, struct file *file, struct cache_detail *cd) { module_put(cd->owner); return 0; } |
1da177e4c
|
1398 1399 |
/*
 * Read the "flush" file: report flush_time as a wallclock value.
 * The format string's trailing '\n' (garbled to a space in the
 * scraped text) is restored.
 */
static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
		       convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}

/*
 * Write the "flush" file: validate that a number (optionally newline
 * terminated — the '\n' comparison was garbled to ' ' in the scraped
 * text and is restored) was written, then flush the entire cache.
 */
static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * that ->flush_time, so they don't get flushed prematurely.
	 */
	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	*ppos += count;
	return count;
}
173912a6a
|
1451 1452 1453 |
static ssize_t cache_read_procfs(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { |
d9dda78ba
|
1454 |
struct cache_detail *cd = PDE_DATA(file_inode(filp)); |
173912a6a
|
1455 1456 1457 1458 1459 1460 1461 |
return cache_read(filp, buf, count, ppos, cd); } static ssize_t cache_write_procfs(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { |
d9dda78ba
|
1462 |
struct cache_detail *cd = PDE_DATA(file_inode(filp)); |
173912a6a
|
1463 1464 1465 |
return cache_write(filp, buf, count, ppos, cd); } |
ade994f4f
|
1466 |
static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait) |
173912a6a
|
1467 |
{ |
d9dda78ba
|
1468 |
struct cache_detail *cd = PDE_DATA(file_inode(filp)); |
173912a6a
|
1469 1470 1471 |
return cache_poll(filp, wait, cd); } |
d79b6f4de
|
1472 1473 |
static long cache_ioctl_procfs(struct file *filp, unsigned int cmd, unsigned long arg) |
173912a6a
|
1474 |
{ |
496ad9aa8
|
1475 |
struct inode *inode = file_inode(filp); |
d9dda78ba
|
1476 |
struct cache_detail *cd = PDE_DATA(inode); |
173912a6a
|
1477 |
|
a6f8dbc65
|
1478 |
return cache_ioctl(inode, filp, cmd, arg, cd); |
173912a6a
|
1479 1480 1481 1482 |
} static int cache_open_procfs(struct inode *inode, struct file *filp) { |
d9dda78ba
|
1483 |
struct cache_detail *cd = PDE_DATA(inode); |
173912a6a
|
1484 1485 1486 1487 1488 1489 |
return cache_open(inode, filp, cd); } static int cache_release_procfs(struct inode *inode, struct file *filp) { |
d9dda78ba
|
1490 |
struct cache_detail *cd = PDE_DATA(inode); |
173912a6a
|
1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 |
return cache_release(inode, filp, cd); } static const struct file_operations cache_file_operations_procfs = { .owner = THIS_MODULE, .llseek = no_llseek, .read = cache_read_procfs, .write = cache_write_procfs, .poll = cache_poll_procfs, |
d79b6f4de
|
1501 |
.unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */ |
173912a6a
|
1502 1503 |
.open = cache_open_procfs, .release = cache_release_procfs, |
1da177e4c
|
1504 |
}; |
173912a6a
|
1505 1506 1507 |
static int content_open_procfs(struct inode *inode, struct file *filp) { |
d9dda78ba
|
1508 |
struct cache_detail *cd = PDE_DATA(inode); |
173912a6a
|
1509 1510 1511 |
return content_open(inode, filp, cd); } |
f7e86ab92
|
1512 1513 |
static int content_release_procfs(struct inode *inode, struct file *filp) { |
d9dda78ba
|
1514 |
struct cache_detail *cd = PDE_DATA(inode); |
f7e86ab92
|
1515 1516 1517 |
return content_release(inode, filp, cd); } |
173912a6a
|
1518 1519 1520 1521 |
static const struct file_operations content_file_operations_procfs = { .open = content_open_procfs, .read = seq_read, .llseek = seq_lseek, |
f7e86ab92
|
1522 |
.release = content_release_procfs, |
173912a6a
|
1523 |
}; |
f7e86ab92
|
1524 1525 |
static int open_flush_procfs(struct inode *inode, struct file *filp) { |
d9dda78ba
|
1526 |
struct cache_detail *cd = PDE_DATA(inode); |
f7e86ab92
|
1527 1528 1529 1530 1531 1532 |
return open_flush(inode, filp, cd); } static int release_flush_procfs(struct inode *inode, struct file *filp) { |
d9dda78ba
|
1533 |
struct cache_detail *cd = PDE_DATA(inode); |
f7e86ab92
|
1534 1535 1536 |
return release_flush(inode, filp, cd); } |
173912a6a
|
1537 1538 1539 |
static ssize_t read_flush_procfs(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { |
d9dda78ba
|
1540 |
struct cache_detail *cd = PDE_DATA(file_inode(filp)); |
173912a6a
|
1541 1542 1543 1544 1545 1546 1547 1548 |
return read_flush(filp, buf, count, ppos, cd); } static ssize_t write_flush_procfs(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { |
d9dda78ba
|
1549 |
struct cache_detail *cd = PDE_DATA(file_inode(filp)); |
173912a6a
|
1550 1551 1552 1553 1554 |
return write_flush(filp, buf, count, ppos, cd); } static const struct file_operations cache_flush_operations_procfs = { |
f7e86ab92
|
1555 |
.open = open_flush_procfs, |
173912a6a
|
1556 1557 |
.read = read_flush_procfs, .write = write_flush_procfs, |
f7e86ab92
|
1558 |
.release = release_flush_procfs, |
6038f373a
|
1559 |
.llseek = no_llseek, |
1da177e4c
|
1560 |
}; |
173912a6a
|
1561 |
|
863d7d9c2
|
1562 |
static void remove_cache_proc_entries(struct cache_detail *cd) |
173912a6a
|
1563 |
{ |
863d7d9c2
|
1564 1565 1566 1567 |
if (cd->procfs) { proc_remove(cd->procfs); cd->procfs = NULL; } |
173912a6a
|
1568 1569 1570 |
} #ifdef CONFIG_PROC_FS |
593ce16b9
|
1571 |
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) |
173912a6a
|
1572 1573 |
{ struct proc_dir_entry *p; |
4f42d0d53
|
1574 |
struct sunrpc_net *sn; |
173912a6a
|
1575 |
|
4f42d0d53
|
1576 |
sn = net_generic(net, sunrpc_net_id); |
863d7d9c2
|
1577 1578 |
cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc); if (cd->procfs == NULL) |
173912a6a
|
1579 |
goto out_nomem; |
173912a6a
|
1580 |
|
d6444062f
|
1581 |
p = proc_create_data("flush", S_IFREG | 0600, |
863d7d9c2
|
1582 |
cd->procfs, &cache_flush_operations_procfs, cd); |
173912a6a
|
1583 1584 |
if (p == NULL) goto out_nomem; |
2d4383383
|
1585 |
if (cd->cache_request || cd->cache_parse) { |
d6444062f
|
1586 1587 |
p = proc_create_data("channel", S_IFREG | 0600, cd->procfs, &cache_file_operations_procfs, cd); |
173912a6a
|
1588 1589 1590 1591 |
if (p == NULL) goto out_nomem; } if (cd->cache_show) { |
d6444062f
|
1592 1593 |
p = proc_create_data("content", S_IFREG | 0400, cd->procfs, &content_file_operations_procfs, cd); |
173912a6a
|
1594 1595 1596 1597 1598 |
if (p == NULL) goto out_nomem; } return 0; out_nomem: |
863d7d9c2
|
1599 |
remove_cache_proc_entries(cd); |
173912a6a
|
1600 1601 1602 |
return -ENOMEM; } #else /* CONFIG_PROC_FS */ |
593ce16b9
|
1603 |
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) |
173912a6a
|
1604 1605 1606 1607 |
{ return 0; } #endif |
8eab945c5
|
1608 1609 |
void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

/*
 * Register a cache in a network namespace: start its periodic cleaning
 * and expose it under /proc/net/rpc.  On proc failure the detail is
 * torn down again so the caller sees an all-or-nothing result.
 */
int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);
593ce16b9
|
1630 |
|
d34971a65
|
1631 |
struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net) |
0a402d5a6
|
1632 1633 |
{ struct cache_detail *cd; |
129e5824c
|
1634 |
int i; |
0a402d5a6
|
1635 1636 1637 1638 |
cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL); if (cd == NULL) return ERR_PTR(-ENOMEM); |
6396bb221
|
1639 |
cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head), |
0a402d5a6
|
1640 1641 1642 1643 1644 |
GFP_KERNEL); if (cd->hash_table == NULL) { kfree(cd); return ERR_PTR(-ENOMEM); } |
129e5824c
|
1645 1646 1647 |
for (i = 0; i < cd->hash_size; i++) INIT_HLIST_HEAD(&cd->hash_table[i]); |
0a402d5a6
|
1648 1649 1650 1651 1652 1653 |
cd->net = net; return cd; } EXPORT_SYMBOL_GPL(cache_create_net); void cache_destroy_net(struct cache_detail *cd, struct net *net) |
593ce16b9
|
1654 |
{ |
0a402d5a6
|
1655 1656 |
kfree(cd->hash_table); kfree(cd); |
593ce16b9
|
1657 |
} |
0a402d5a6
|
1658 |
EXPORT_SYMBOL_GPL(cache_destroy_net); |
8854e82d9
|
1659 1660 1661 1662 |
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { |
496ad9aa8
|
1663 |
struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
8854e82d9
|
1664 1665 1666 1667 1668 1669 1670 |
return cache_read(filp, buf, count, ppos, cd); } static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { |
496ad9aa8
|
1671 |
struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
8854e82d9
|
1672 1673 1674 |
return cache_write(filp, buf, count, ppos, cd); } |
ade994f4f
|
1675 |
static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait) |
8854e82d9
|
1676 |
{ |
496ad9aa8
|
1677 |
struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
8854e82d9
|
1678 1679 1680 |
return cache_poll(filp, wait, cd); } |
9918ff26b
|
1681 |
static long cache_ioctl_pipefs(struct file *filp, |
8854e82d9
|
1682 1683 |
unsigned int cmd, unsigned long arg) { |
496ad9aa8
|
1684 |
struct inode *inode = file_inode(filp); |
8854e82d9
|
1685 |
struct cache_detail *cd = RPC_I(inode)->private; |
a6f8dbc65
|
1686 |
return cache_ioctl(inode, filp, cmd, arg, cd); |
8854e82d9
|
1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 |
} static int cache_open_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return cache_open(inode, filp, cd); } static int cache_release_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return cache_release(inode, filp, cd); } const struct file_operations cache_file_operations_pipefs = { .owner = THIS_MODULE, .llseek = no_llseek, .read = cache_read_pipefs, .write = cache_write_pipefs, .poll = cache_poll_pipefs, |
9918ff26b
|
1709 |
.unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */ |
8854e82d9
|
1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 |
.open = cache_open_pipefs, .release = cache_release_pipefs, }; static int content_open_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return content_open(inode, filp, cd); } |
f7e86ab92
|
1720 1721 1722 1723 1724 1725 |
static int content_release_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return content_release(inode, filp, cd); } |
8854e82d9
|
1726 1727 1728 1729 |
const struct file_operations content_file_operations_pipefs = { .open = content_open_pipefs, .read = seq_read, .llseek = seq_lseek, |
f7e86ab92
|
1730 |
.release = content_release_pipefs, |
8854e82d9
|
1731 |
}; |
f7e86ab92
|
1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 |
static int open_flush_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return open_flush(inode, filp, cd); } static int release_flush_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return release_flush(inode, filp, cd); } |
8854e82d9
|
1745 1746 1747 |
static ssize_t read_flush_pipefs(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { |
496ad9aa8
|
1748 |
struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
8854e82d9
|
1749 1750 1751 1752 1753 1754 1755 1756 |
return read_flush(filp, buf, count, ppos, cd); } static ssize_t write_flush_pipefs(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { |
496ad9aa8
|
1757 |
struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
8854e82d9
|
1758 1759 1760 1761 1762 |
return write_flush(filp, buf, count, ppos, cd); } const struct file_operations cache_flush_operations_pipefs = { |
f7e86ab92
|
1763 |
.open = open_flush_pipefs, |
8854e82d9
|
1764 1765 |
.read = read_flush_pipefs, .write = write_flush_pipefs, |
f7e86ab92
|
1766 |
.release = release_flush_pipefs, |
6038f373a
|
1767 |
.llseek = no_llseek, |
8854e82d9
|
1768 1769 1770 |
}; int sunrpc_cache_register_pipefs(struct dentry *parent, |
64f1426f3
|
1771 |
const char *name, umode_t umode, |
8854e82d9
|
1772 1773 |
struct cache_detail *cd) { |
a95e691f9
|
1774 1775 1776 |
struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd); if (IS_ERR(dir)) return PTR_ERR(dir); |
863d7d9c2
|
1777 |
cd->pipefs = dir; |
a95e691f9
|
1778 |
return 0; |
8854e82d9
|
1779 1780 1781 1782 1783 |
} EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs); void sunrpc_cache_unregister_pipefs(struct cache_detail *cd) { |
863d7d9c2
|
1784 1785 1786 1787 |
if (cd->pipefs) { rpc_remove_cache_dir(cd->pipefs); cd->pipefs = NULL; } |
8854e82d9
|
1788 1789 |
} EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs); |
2b477c00f
|
1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 |
void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h) { write_lock(&cd->hash_lock); if (!hlist_unhashed(&h->cache_list)){ hlist_del_init(&h->cache_list); cd->entries--; write_unlock(&cd->hash_lock); cache_put(h, cd); } else write_unlock(&cd->hash_lock); } EXPORT_SYMBOL_GPL(sunrpc_cache_unhash); |