  /*
   * Copyright (C) 2011-2017 Red Hat, Inc.
   *
   * This file is released under the GPL.
   */
  
  #ifndef DM_BIO_PRISON_H
  #define DM_BIO_PRISON_H
  
  #include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
  #include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
  #include <linux/bio.h>
  #include <linux/rbtree.h>
  
  /*----------------------------------------------------------------*/
  
/*
 * Sometimes we can't deal with a bio straight away, so we put it in
 * prison where it can't cause any mischief.  Bios are put in a cell
 * identified by a key; multiple bios can be in the same cell.  When the
 * cell is subsequently unlocked the bios become available.
 */
  struct dm_bio_prison;

  /*
   * Keys define a range of blocks within either a virtual or physical
   * device.
   */
  struct dm_cell_key {
  	int virtual;
  	dm_thin_id dev;
  	dm_block_t block_begin, block_end;
  };
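
/*
 * Illustrative sketch, not part of the original interface: filling in a
 * key that covers a single block on a virtual (thin) device.  The helper
 * name is hypothetical; the real key-building code lives in the
 * individual targets.  block_end is exclusive, so block b on its own is
 * the range [b, b + 1).
 *
 *	static void example_build_virtual_key(dm_thin_id dev, dm_block_t b,
 *					      struct dm_cell_key *key)
 *	{
 *		key->virtual = 1;
 *		key->dev = dev;
 *		key->block_begin = b;
 *		key->block_end = b + 1;
 *	}
 */
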
/*
 * Treat this as opaque; it is only in the header so callers can manage
 * allocation themselves.
 */
  struct dm_bio_prison_cell {
  	struct list_head user_list;	/* for client use */
  	struct rb_node node;
  	struct dm_cell_key key;
  	struct bio *holder;
  	struct bio_list bios;
  };

struct dm_bio_prison *dm_bio_prison_create(void);
  void dm_bio_prison_destroy(struct dm_bio_prison *prison);
  
  /*
   * These two functions just wrap a mempool.  This is a transitory step:
   * Eventually all bio prison clients should manage their own cell memory.
   *
   * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
   * in interrupt context or passed GFP_NOWAIT.
   */
  struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
  						    gfp_t gfp);
  void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
  			     struct dm_bio_prison_cell *cell);
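
/*
 * Hedged sketch of the two allocation modes mentioned above: GFP_NOIO
 * may sleep but will not fail, while GFP_NOWAIT never sleeps but may
 * return NULL, which the caller must be prepared to handle.
 *
 *	cell = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *
 *	cell = dm_bio_prison_alloc_cell(prison, GFP_NOWAIT);
 *	if (!cell)
 *		return;		(e.g. defer the bio and retry later)
 */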

  /*
 * Creates or retrieves a cell that overlaps the given key.
 *
 * Returns 1 if a pre-existing cell was returned, zero if a new cell was
 * created using @cell_prealloc.
 */
  int dm_get_cell(struct dm_bio_prison *prison,
  		struct dm_cell_key *key,
  		struct dm_bio_prison_cell *cell_prealloc,
  		struct dm_bio_prison_cell **cell_result);
  
  /*
 * An atomic op that combines retrieving or creating a cell and adding a
 * bio to it.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
  int dm_bio_detain(struct dm_bio_prison *prison,
  		  struct dm_cell_key *key,
  		  struct bio *inmate,
  		  struct dm_bio_prison_cell *cell_prealloc,
  		  struct dm_bio_prison_cell **cell_result);
  
  void dm_cell_release(struct dm_bio_prison *prison,
  		     struct dm_bio_prison_cell *cell,
  		     struct bio_list *bios);
  void dm_cell_release_no_holder(struct dm_bio_prison *prison,
  			       struct dm_bio_prison_cell *cell,
  			       struct bio_list *inmates);
  void dm_cell_error(struct dm_bio_prison *prison,
  		   struct dm_bio_prison_cell *cell, blk_status_t error);
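
/*
 * Hedged end-to-end sketch using the hypothetical key helper from above;
 * dev and block stand in for whatever the caller is remapping.  A return
 * of 1 from dm_bio_detain() means the cell already existed and @bio has
 * joined it as an inmate, so the preallocated cell was not consumed and
 * must be freed; a return of 0 means @bio is the holder of a new cell.
 *
 *	struct dm_bio_prison_cell *prealloc, *cell;
 *	struct dm_cell_key key;
 *	struct bio_list bios;
 *
 *	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	example_build_virtual_key(dev, block, &key);
 *	if (dm_bio_detain(prison, &key, bio, prealloc, &cell)) {
 *		dm_bio_prison_free_cell(prison, prealloc);
 *		return;		(bio waits in the existing cell)
 *	}
 *
 *	(bio is the holder: do the work, then let everyone go)
 *
 *	bio_list_init(&bios);
 *	dm_cell_release(prison, cell, &bios);
 *	dm_bio_prison_free_cell(prison, cell);
 */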

/*
 * Visits the cell and then releases it.  Guarantees no new inmates are
 * inserted between the visit and the release.
 */
  void dm_cell_visit_release(struct dm_bio_prison *prison,
  			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
  			   void *context, struct dm_bio_prison_cell *cell);
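
/*
 * Hedged sketch of a visit function; the name is made up.  visit_fn runs
 * under the prison's internal lock, so it must not sleep.  This one just
 * steals the cell's bios so they can be processed once the cell has been
 * released:
 *
 *	static void example_steal_bios(void *context,
 *				       struct dm_bio_prison_cell *cell)
 *	{
 *		struct bio_list *bios = context;
 *
 *		bio_list_add(bios, cell->holder);
 *		bio_list_merge(bios, &cell->bios);
 *	}
 *
 *	dm_cell_visit_release(prison, example_steal_bios, &bios, cell);
 */
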
/*
 * Rather than always releasing the prisoners in a cell, the client may
 * want to promote one of them to be the new holder.  There is a race
 * here, though, between releasing an empty cell and other threads adding
 * new inmates, so this function makes the decision with its lock held.
 *
 * This function can have two outcomes:
 * i) An inmate is promoted to be the holder of the cell (return value of 0).
 * ii) The cell has no inmate for promotion and is released (return value of 1).
 */
  int dm_cell_promote_or_release(struct dm_bio_prison *prison,
  			       struct dm_bio_prison_cell *cell);
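
/*
 * Hedged usage sketch; process_holder() is a made-up stand-in for
 * whatever the client does with the current holder.  A return of 1 means
 * no inmate was left to promote and the cell has been released, ending
 * the loop:
 *
 *	do {
 *		process_holder(cell->holder);
 *	} while (!dm_cell_promote_or_release(prison, cell));
 */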
  /*----------------------------------------------------------------*/
  
  /*
   * We use the deferred set to keep track of pending reads to shared blocks.
   * We do this to ensure the new mapping caused by a write isn't performed
   * until these prior reads have completed.  Otherwise the insertion of the
   * new mapping could free the old block that the read bios are mapped to.
   */
  
  struct dm_deferred_set;
  struct dm_deferred_entry;
  
  struct dm_deferred_set *dm_deferred_set_create(void);
  void dm_deferred_set_destroy(struct dm_deferred_set *ds);
  
  struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
  void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
  int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
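
/*
 * Hedged sketch of the pattern described above, with made-up variable
 * names.  A read to a shared block bumps the current entry and its endio
 * path drops it again; a write that wants to remap the block queues its
 * work item, and a return of 0 from dm_deferred_set_add_work() means no
 * prior reads are in flight, so the remap may proceed immediately.
 *
 *	(read submission)
 *	entry = dm_deferred_entry_inc(ds);
 *
 *	(read completion)
 *	INIT_LIST_HEAD(&work);
 *	dm_deferred_entry_dec(entry, &work);
 *	(issue any work items that just became runnable)
 *
 *	(write path)
 *	if (!dm_deferred_set_add_work(ds, &mapping->list))
 *		(no reads in flight: perform the remap now)
 */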
  
  /*----------------------------------------------------------------*/
  
  #endif