Blame view
drivers/md/dm-cache-metadata.h
4.79 KB
c6b4fcbad
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 |
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_METADATA_H
#define DM_CACHE_METADATA_H

#include "dm-cache-block-types.h"
#include "dm-cache-policy-internal.h"

/*----------------------------------------------------------------*/

#define DM_CACHE_METADATA_BLOCK_SIZE 4096

/* FIXME: remove this restriction */
/*
 * The metadata device is currently limited in size.
 *
 * We have one block of index, which can hold 255 index entries.  Each
 * index entry contains allocation info about 16k metadata blocks.
 */
#define DM_CACHE_METADATA_MAX_SECTORS (255 * (1 << 14) * (DM_CACHE_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))

/*
 * A metadata device larger than 16GB triggers a warning.
 */
#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))

/*----------------------------------------------------------------*/

/*
 * Ext[234]-style compat feature flags.
 *
 * A new feature which old metadata will still be compatible with should
 * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
 *
 * A new feature that is not compatible with old code should define a
 * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
 * that flag.
 *
 * A new feature that is not compatible with old code accessing the
 * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
 * guard the relevant code with that flag.
 *
 * As these various flags are defined they should be added to the
 * following masks.
 */
#define DM_CACHE_FEATURE_COMPAT_SUPP	  0UL
#define DM_CACHE_FEATURE_COMPAT_RO_SUPP	  0UL
#define DM_CACHE_FEATURE_INCOMPAT_SUPP	  0UL

/*
 * Reopens or creates a new, empty metadata volume.
 * Returns an ERR_PTR on failure.
 */
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
						 sector_t data_block_size,
						 bool may_format_device,
						 size_t policy_hint_size);

void dm_cache_metadata_close(struct dm_cache_metadata *cmd);

/*
 * The metadata needs to know how many cache blocks there are.  We don't
 * care about the origin, assuming the core target is giving us valid
 * origin blocks to map to.
 */
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);

int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
				   sector_t discard_block_size,
				   dm_oblock_t new_nr_entries);

/*
 * Callback invoked once per discard-bitset entry when loading discards;
 * returning non-zero presumably aborts the walk - confirm in
 * dm-cache-metadata.c.
 */
typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
			       dm_oblock_t dblock, bool discarded);
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context);

/* Record the discard state of a single origin block. */
int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard);

int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);

/*
 * Callback invoked once per cache-block mapping when loading mappings;
 * hint is only meaningful when hint_valid is true.
 */
typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
			       dm_cblock_t cblock, bool dirty,
			       uint32_t hint, bool hint_valid);
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn,
			   void *context);

/* Record the dirty state of a single cache block. */
int dm_cache_set_dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty);

/*
 * Hit/miss counters persisted in the metadata across activations.
 */
struct dm_cache_statistics {
	uint32_t read_hits;
	uint32_t read_misses;
	uint32_t write_hits;
	uint32_t write_misses;
};

void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);

/*
 * Commit the current transaction.  clean_shutdown indicates whether this
 * commit is part of an orderly shutdown - NOTE(review): exact on-disk
 * effect of the flag lives in dm-cache-metadata.c.
 */
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);

int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
					   dm_block_t *result);

int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
				   dm_block_t *result);

void dm_cache_dump(struct dm_cache_metadata *cmd);

/*
 * The policy is invited to save a 32bit hint value for every cblock (eg,
 * for a hit count).  These are stored against the policy name.  If
 * policies are changed, then hints will be lost.  If the machine crashes,
 * hints will be lost.
 *
 * The hints are indexed by the cblock, but many policies will not
 * necessarily have a fast way of accessing efficiently via cblock.  So
 * rather than querying the policy for each cblock, we let it walk its data
 * structures and fill in the hints in whatever order it wishes.
 */
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);

/*
 * Query method.  Are all the blocks in the cache clean?
 */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);

/*----------------------------------------------------------------*/

#endif /* DM_CACHE_METADATA_H */