Commit cbe5e6109538ddab57764a88d9f0c2accd0c7d48
Committed by Jens Axboe
1 parent: 779b3fe4c0
Exists in master and in 20 other branches
lru_cache: introduce lc_get_cumulative()
New helper to be able to consolidate more updates into a single transaction. Without this, we can only grab a single refcount on an updated element while preparing a transaction. lc_get_cumulative - like lc_get; also finds to-be-changed elements @lc: the lru cache to operate on @enr: the label to look up Unlike lc_get this also returns the element for @enr, if it is belonging to a pending transaction, so the return values are like for lc_get(), plus: pointer to an element already on the "to_be_changed" list. In this case, the cache was already marked %LC_DIRTY. Caller needs to make sure that the pending transaction is completed, before proceeding to actually use this element. Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com> Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com> Fixed up by Jens to export lc_get_cumulative(). Signed-off-by: Jens Axboe <axboe@kernel.dk>
Showing 2 changed files with 47 additions and 10 deletions. Side-by-side Diff:
include/linux/lru_cache.h
... | ... | @@ -256,6 +256,7 @@ |
256 | 256 | extern void lc_set(struct lru_cache *lc, unsigned int enr, int index); |
257 | 257 | extern void lc_del(struct lru_cache *lc, struct lc_element *element); |
258 | 258 | |
259 | +extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr); | |
259 | 260 | extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr); |
260 | 261 | extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr); |
261 | 262 | extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr); |
lib/lru_cache.c
... | ... | @@ -365,7 +365,13 @@ |
365 | 365 | return 0; |
366 | 366 | } |
367 | 367 | |
368 | -static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool may_change) | |
368 | +/* used as internal flags to __lc_get */ | |
369 | +enum { | |
370 | + LC_GET_MAY_CHANGE = 1, | |
371 | + LC_GET_MAY_USE_UNCOMMITTED = 2, | |
372 | +}; | |
373 | + | |
374 | +static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags) | |
369 | 375 | { |
370 | 376 | struct lc_element *e; |
371 | 377 | |
372 | 378 | |
373 | 379 | |
374 | 380 | |
... | ... | @@ -380,24 +386,33 @@ |
380 | 386 | * this enr is currently being pulled in already, |
381 | 387 | * and will be available once the pending transaction |
382 | 388 | * has been committed. */ |
383 | - if (e && e->lc_new_number == e->lc_number) { | |
389 | + if (e) { | |
390 | + if (e->lc_new_number != e->lc_number) { | |
391 | + /* It has been found above, but on the "to_be_changed" | |
392 | + * list, not yet committed. Don't pull it in twice, | |
393 | + * wait for the transaction, then try again... | |
394 | + */ | |
395 | + if (!(flags & LC_GET_MAY_USE_UNCOMMITTED)) | |
396 | + RETURN(NULL); | |
397 | + /* ... unless the caller is aware of the implications, | |
398 | + * probably preparing a cumulative transaction. */ | |
399 | + ++e->refcnt; | |
400 | + ++lc->hits; | |
401 | + RETURN(e); | |
402 | + } | |
403 | + /* else: lc_new_number == lc_number; a real hit. */ | |
384 | 404 | ++lc->hits; |
385 | 405 | if (e->refcnt++ == 0) |
386 | 406 | lc->used++; |
387 | 407 | list_move(&e->list, &lc->in_use); /* Not evictable... */ |
388 | 408 | RETURN(e); |
389 | 409 | } |
410 | + /* e == NULL */ | |
390 | 411 | |
391 | 412 | ++lc->misses; |
392 | - if (!may_change) | |
413 | + if (!(flags & LC_GET_MAY_CHANGE)) | |
393 | 414 | RETURN(NULL); |
394 | 415 | |
395 | - /* It has been found above, but on the "to_be_changed" list, not yet | |
396 | - * committed. Don't pull it in twice, wait for the transaction, then | |
397 | - * try again */ | |
398 | - if (e) | |
399 | - RETURN(NULL); | |
400 | - | |
401 | 416 | /* To avoid races with lc_try_lock(), first, mark us dirty |
402 | 417 | * (using test_and_set_bit, as it implies memory barriers), ... */ |
403 | 418 | test_and_set_bit(__LC_DIRTY, &lc->flags); |
404 | 419 | |
... | ... | @@ -477,10 +492,30 @@ |
477 | 492 | */ |
478 | 493 | struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr) |
479 | 494 | { |
480 | - return __lc_get(lc, enr, 1); | |
495 | + return __lc_get(lc, enr, LC_GET_MAY_CHANGE); | |
481 | 496 | } |
482 | 497 | |
483 | 498 | /** |
499 | + * lc_get_cumulative - like lc_get; also finds to-be-changed elements | |
500 | + * @lc: the lru cache to operate on | |
501 | + * @enr: the label to look up | |
502 | + * | |
503 | + * Unlike lc_get this also returns the element for @enr, if it is belonging to | |
504 | + * a pending transaction, so the return values are like for lc_get(), | |
505 | + * plus: | |
506 | + * | |
507 | + * pointer to an element already on the "to_be_changed" list. | |
508 | + * In this case, the cache was already marked %LC_DIRTY. | |
509 | + * | |
510 | + * Caller needs to make sure that the pending transaction is completed, | |
511 | + * before proceeding to actually use this element. | |
512 | + */ | |
513 | +struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr) | |
514 | +{ | |
515 | + return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED); | |
516 | +} | |
517 | + | |
518 | +/** | |
484 | 519 | * lc_try_get - get element by label, if present; do not change the active set |
485 | 520 | * @lc: the lru cache to operate on |
486 | 521 | * @enr: the label to look up |
... | ... | @@ -648,4 +683,5 @@ |
648 | 683 | EXPORT_SYMBOL(lc_seq_dump_details); |
649 | 684 | EXPORT_SYMBOL(lc_try_lock); |
650 | 685 | EXPORT_SYMBOL(lc_is_used); |
686 | +EXPORT_SYMBOL(lc_get_cumulative); |