Blame view

drivers/block/rbd.c 143 KB
  /*
     rbd.c -- Export ceph rados objects as a Linux block device
  
  
     based on drivers/block/osdblk.c:
  
     Copyright 2009 Red Hat, Inc.
  
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation.
  
     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
  
     You should have received a copy of the GNU General Public License
     along with this program; see the file COPYING.  If not, write to
     the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
22
     For usage instructions, please refer to:
602adf400   Yehuda Sadeh   rbd: introduce ra...
23

dfc5606dc   Yehuda Sadeh   rbd: replace the ...
24
                   Documentation/ABI/testing/sysfs-bus-rbd
602adf400   Yehuda Sadeh   rbd: introduce ra...
25
26
27
28
29
30
31
  
   */
  
  #include <linux/ceph/libceph.h>
  #include <linux/ceph/osd_client.h>
  #include <linux/ceph/mon_client.h>
  #include <linux/ceph/decode.h>
59c2be1e4   Yehuda Sadeh   rbd: use watch/no...
32
  #include <linux/parser.h>
30d1cff81   Alex Elder   rbd: use binary s...
33
  #include <linux/bsearch.h>
602adf400   Yehuda Sadeh   rbd: introduce ra...
34
35
36
37
  
  #include <linux/kernel.h>
  #include <linux/device.h>
  #include <linux/module.h>
7ad18afad   Christoph Hellwig   rbd: convert to b...
38
  #include <linux/blk-mq.h>
602adf400   Yehuda Sadeh   rbd: introduce ra...
39
40
  #include <linux/fs.h>
  #include <linux/blkdev.h>
1c2a9dfe2   Alex Elder   rbd: allocate ima...
41
  #include <linux/slab.h>
f8a22fc23   Ilya Dryomov   rbd: switch to id...
42
  #include <linux/idr.h>
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
43
  #include <linux/workqueue.h>
602adf400   Yehuda Sadeh   rbd: introduce ra...
44
45
  
  #include "rbd_types.h"
aafb230eb   Alex Elder   rbd: define rbd_a...
46
  #define RBD_DEBUG	/* Activate rbd_assert() calls */
593a9e7b3   Alex Elder   rbd: small changes
47
48
49
50
51
52
53
54
  /*
   * The basic unit of block I/O is a sector.  It is interpreted in a
   * number of contexts in Linux (blk, bio, genhd), but the default is
   * universally 512 bytes.  These symbols are just slightly more
   * meaningful than the bare numbers they represent.
   */
  #define	SECTOR_SHIFT	9
  #define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
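
  /*
   * Example (illustrative, not from the original source): a 1 GiB image
   * is 1 GiB >> SECTOR_SHIFT = 2097152 of these 512-byte sectors from
   * the block layer's point of view.
   */
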
  /*
   * Increment the given counter and return its updated value.
   * If the counter is already 0, it will not be incremented.
   * If the counter is already at its maximum value, -EINVAL is
   * returned without updating it.
   */
  static int atomic_inc_return_safe(atomic_t *v)
  {
  	unsigned int counter;
  
  	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
  	if (counter <= (unsigned int)INT_MAX)
  		return (int)counter;
  
  	atomic_dec(v);
  
  	return -EINVAL;
  }
  
  /* Decrement the counter.  Return the resulting value, or -EINVAL */
  static int atomic_dec_return_safe(atomic_t *v)
  {
  	int counter;
  
  	counter = atomic_dec_return(v);
  	if (counter >= 0)
  		return counter;
  
  	atomic_inc(v);
  
  	return -EINVAL;
  }
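
  /*
   * Usage sketch (illustrative, not from the original source): callers
   * treat a zero counter as "no longer usable", e.g.
   *
   *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0)
   *		... parent reference acquired ...
   *
   * A zero counter stays at zero and the call returns 0, so the
   * reference is refused once the count has already dropped to zero.
   */
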
  #define RBD_DRV_NAME "rbd"

  #define RBD_MINORS_PER_MAJOR		256
  #define RBD_SINGLE_MAJOR_PART_SHIFT	4

  #define RBD_MAX_PARENT_CHAIN_LEN	16
  #define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
  #define RBD_MAX_SNAP_NAME_LEN	\
  			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
  #define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
  
  #define RBD_SNAP_HEAD_NAME	"-"
  #define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */
  /* This allows a single page to hold an image name sent by OSD */
  #define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
  #define RBD_IMAGE_ID_LEN_MAX	64

  #define RBD_OBJ_PREFIX_LEN_MAX	64

  /* Feature bits */
  #define RBD_FEATURE_LAYERING	(1<<0)
  #define RBD_FEATURE_STRIPINGV2	(1<<1)
  #define RBD_FEATURES_ALL \
  	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
  
  /* Features supported by this (client software) implementation. */
  #define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

  /*
   * An RBD device name will be "rbd#", where the "rbd" comes from
   * RBD_DRV_NAME above, and # is a unique integer identifier.
   * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
   * enough to hold all possible device names.
   */
  #define DEV_NAME_LEN		32
  #define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
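
  /*
   * Worked example (illustrative): each byte of an int needs at most
   * ~2.5 decimal digits (log10(256) is about 2.4), so with 4-byte ints
   * (5 * 4) / 2 + 1 = 11 characters, enough for "-2147483648".
   */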
  
  /*
   * block device image metadata (in-memory version)
   */
  struct rbd_image_header {
  	/* These six fields never change for a given rbd image */
  	char *object_prefix;
  	__u8 obj_order;
  	__u8 crypt_type;
  	__u8 comp_type;
  	u64 stripe_unit;
  	u64 stripe_count;
  	u64 features;		/* Might be changeable someday? */

  	/* The remaining fields need to be updated occasionally */
  	u64 image_size;
  	struct ceph_snap_context *snapc;
  	char *snap_names;	/* format 1 only */
  	u64 *snap_sizes;	/* format 1 only */
  };
  /*
   * An rbd image specification.
   *
   * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
   * identify an image.  Each rbd_dev structure includes a pointer to
   * an rbd_spec structure that encapsulates this identity.
   *
   * Each of the id's in an rbd_spec has an associated name.  For a
   * user-mapped image, the names are supplied and the id's associated
   * with them are looked up.  For a layered image, a parent image is
   * defined by the tuple, and the names are looked up.
   *
   * An rbd_dev structure contains a parent_spec pointer which is
   * non-null if the image it represents is a child in a layered
   * image.  This pointer will refer to the rbd_spec structure used
   * by the parent rbd_dev for its own identity (i.e., the structure
   * is shared between the parent and child).
   *
   * Since these structures are populated once, during the discovery
   * phase of image construction, they are effectively immutable so
   * we make no effort to synchronize access to them.
   *
   * Note that code herein does not assume the image name is known (it
   * could be a null pointer).
   */
  struct rbd_spec {
  	u64		pool_id;
  	const char	*pool_name;

  	const char	*image_id;
  	const char	*image_name;
  
  	u64		snap_id;
  	const char	*snap_name;
  
  	struct kref	kref;
  };
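
  /*
   * Illustrative example (not from the original source): mapping pool
   * "rbd", image "foo" at its head would fill in pool_name, image_name
   * and snap_name ("-", i.e. RBD_SNAP_HEAD_NAME); the matching ids
   * (pool_id, image_id, snap_id == CEPH_NOSNAP) are looked up or filled
   * in while the image is probed.
   */
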
  /*
   * an instance of the client.  multiple devices may share an rbd client.
   */
  struct rbd_client {
  	struct ceph_client	*client;
  	struct kref		kref;
  	struct list_head	node;
  };
  struct rbd_img_request;
  typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
  
  #define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */
  
  struct rbd_obj_request;
  typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
  enum obj_request_type {
  	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
  };

  enum obj_operation_type {
  	OBJ_OP_WRITE,
  	OBJ_OP_READ,
  	OBJ_OP_DISCARD,
  };
  enum obj_req_flags {
  	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
  	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
  	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
  	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
  };
  struct rbd_obj_request {
  	const char		*object_name;
  	u64			offset;		/* object start byte */
  	u64			length;		/* bytes from offset */
  	unsigned long		flags;

  	/*
  	 * An object request associated with an image will have its
  	 * img_data flag set; a standalone object request will not.
  	 *
  	 * A standalone object request will have which == BAD_WHICH
  	 * and a null obj_request pointer.
  	 *
  	 * An object request initiated in support of a layered image
  	 * object (to check for its existence before a write) will
  	 * have which == BAD_WHICH and a non-null obj_request pointer.
  	 *
  	 * Finally, an object request for rbd image data will have
  	 * which != BAD_WHICH, and will have a non-null img_request
  	 * pointer.  The value of which will be in the range
  	 * 0..(img_request->obj_request_count-1).
  	 */
  	union {
  		struct rbd_obj_request	*obj_request;	/* STAT op */
  		struct {
  			struct rbd_img_request	*img_request;
  			u64			img_offset;
  			/* links for img_request->obj_requests list */
  			struct list_head	links;
  		};
  	};
  	u32			which;		/* posn image request list */
  
  	enum obj_request_type	type;
  	union {
  		struct bio	*bio_list;
  		struct {
  			struct page	**pages;
  			u32		page_count;
  		};
  	};
  	struct page		**copyup_pages;
  	u32			copyup_page_count;
  
  	struct ceph_osd_request	*osd_req;
  
  	u64			xferred;	/* bytes transferred */
  	int			result;
  
  	rbd_obj_callback_t	callback;
  	struct completion	completion;
  
  	struct kref		kref;
  };
  enum img_req_flags {
  	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
  	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
  	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
  	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
  };
  struct rbd_img_request {
  	struct rbd_device	*rbd_dev;
  	u64			offset;	/* starting image byte offset */
  	u64			length;	/* byte count from offset */
  	unsigned long		flags;
  	union {
  		u64			snap_id;	/* for reads */
  		struct ceph_snap_context *snapc;	/* for writes */
  	};
  	union {
  		struct request		*rq;		/* block request */
  		struct rbd_obj_request	*obj_request;	/* obj req initiator */
  	};
  	struct page		**copyup_pages;
  	u32			copyup_page_count;
  	spinlock_t		completion_lock;/* protects next_completion */
  	u32			next_completion;
  	rbd_img_callback_t	callback;
  	u64			xferred;/* aggregate bytes transferred */
  	int			result;	/* first nonzero obj_request result */
  
  	u32			obj_request_count;
  	struct list_head	obj_requests;	/* rbd_obj_request structs */
  
  	struct kref		kref;
  };
  
  #define for_each_obj_request(ireq, oreq) \
  	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
  #define for_each_obj_request_from(ireq, oreq) \
  	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
  #define for_each_obj_request_safe(ireq, oreq, n) \
  	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

  struct rbd_mapping {
  	u64                     size;
  	u64                     features;
  	bool			read_only;
  };
  /*
   * a single device
   */
  struct rbd_device {
  	int			dev_id;		/* blkdev unique id */
  
  	int			major;		/* blkdev assigned major */
  	int			minor;
  	struct gendisk		*disk;		/* blkdev's gendisk and rq */

  	u32			image_format;	/* Either 1 or 2 */
  	struct rbd_client	*rbd_client;
  
  	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
  	spinlock_t		lock;		/* queue, flags, open_count */
  
  	struct rbd_image_header	header;
  	unsigned long		flags;		/* possibly lock protected */
  	struct rbd_spec		*spec;
  	struct rbd_options	*opts;

  	struct ceph_object_id	header_oid;
  	struct ceph_object_locator header_oloc;

  	struct ceph_file_layout	layout;
  	struct ceph_osd_linger_request *watch_handle;

  	struct rbd_spec		*parent_spec;
  	u64			parent_overlap;
  	atomic_t		parent_ref;
  	struct rbd_device	*parent;

  	/* Block layer tags. */
  	struct blk_mq_tag_set	tag_set;
  	/* protects updating the header */
  	struct rw_semaphore     header_rwsem;
  
  	struct rbd_mapping	mapping;
  
  	struct list_head	node;

  	/* sysfs related */
  	struct device		dev;
  	unsigned long		open_count;	/* protected by lock */
  };
  /*
   * Flag bits for rbd_dev->flags.  If atomicity is required,
   * rbd_dev->lock is used to protect access.
   *
   * Currently, only the "removing" flag (which is coupled with the
   * "open_count" field) requires atomic access.
   */
  enum rbd_dev_flags {
  	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
  	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
  };
  static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

  static LIST_HEAD(rbd_dev_list);    /* devices */
  static DEFINE_SPINLOCK(rbd_dev_list_lock);
  static LIST_HEAD(rbd_client_list);		/* clients */
  static DEFINE_SPINLOCK(rbd_client_list_lock);

  /* Slab caches for frequently-allocated structures */
  static struct kmem_cache	*rbd_img_request_cache;
  static struct kmem_cache	*rbd_obj_request_cache;
  static struct kmem_cache	*rbd_segment_name_cache;

  static int rbd_major;
  static DEFINE_IDA(rbd_dev_id_ida);
  static struct workqueue_struct *rbd_wq;
  /*
   * Default to false for now, as single-major requires >= 0.75 version of
   * userspace rbd utility.
   */
  static bool single_major = false;
  module_param(single_major, bool, S_IRUGO);
  MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
  static int rbd_img_request_submit(struct rbd_img_request *img_request);
  static ssize_t rbd_add(struct bus_type *bus, const char *buf,
  		       size_t count);
  static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
  			  size_t count);
  static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
  				    size_t count);
  static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
  				       size_t count);
  static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
  static void rbd_spec_put(struct rbd_spec *spec);

  static int rbd_dev_id_to_minor(int dev_id)
  {
  	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
  }
  
  static int minor_to_rbd_dev_id(int minor)
  {
  	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
  }
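
  /*
   * Illustrative example (assumes single_major=true): with a shift of
   * RBD_SINGLE_MAJOR_PART_SHIFT (4), each device owns 16 minors for
   * itself and its partitions, so dev_id 3 maps to minor 48 and minors
   * 48..63 all map back to dev_id 3.
   */
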
  static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
  static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
  static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
  static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
  
  static struct attribute *rbd_bus_attrs[] = {
  	&bus_attr_add.attr,
  	&bus_attr_remove.attr,
  	&bus_attr_add_single_major.attr,
  	&bus_attr_remove_single_major.attr,
  	NULL,
  };
  
  static umode_t rbd_bus_is_visible(struct kobject *kobj,
  				  struct attribute *attr, int index)
  {
  	if (!single_major &&
  	    (attr == &bus_attr_add_single_major.attr ||
  	     attr == &bus_attr_remove_single_major.attr))
  		return 0;
  	return attr->mode;
  }
  
  static const struct attribute_group rbd_bus_group = {
  	.attrs = rbd_bus_attrs,
  	.is_visible = rbd_bus_is_visible,
  };
  __ATTRIBUTE_GROUPS(rbd_bus);
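
  /*
   * Note (illustrative): these attributes surface as /sys/bus/rbd/add
   * and /sys/bus/rbd/remove, plus /sys/bus/rbd/add_single_major and
   * /sys/bus/rbd/remove_single_major when the single_major module
   * parameter is set; see Documentation/ABI/testing/sysfs-bus-rbd.
   */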
  
  static struct bus_type rbd_bus_type = {
  	.name		= "rbd",
  	.bus_groups	= rbd_bus_groups,
  };
  
  static void rbd_root_dev_release(struct device *dev)
  {
  }
  
  static struct device rbd_root_dev = {
  	.init_name =    "rbd",
  	.release =      rbd_root_dev_release,
  };
  static __printf(2, 3)
  void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
  {
  	struct va_format vaf;
  	va_list args;
  
  	va_start(args, fmt);
  	vaf.fmt = fmt;
  	vaf.va = &args;
  
  	if (!rbd_dev)
  		printk(KERN_WARNING "%s: %pV
  ", RBD_DRV_NAME, &vaf);
  	else if (rbd_dev->disk)
  		printk(KERN_WARNING "%s: %s: %pV
  ",
  			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
  	else if (rbd_dev->spec && rbd_dev->spec->image_name)
  		printk(KERN_WARNING "%s: image %s: %pV
  ",
  			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
  	else if (rbd_dev->spec && rbd_dev->spec->image_id)
  		printk(KERN_WARNING "%s: id %s: %pV
  ",
  			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
  	else	/* punt */
  		printk(KERN_WARNING "%s: rbd_dev %p: %pV
  ",
  			RBD_DRV_NAME, rbd_dev, &vaf);
  	va_end(args);
  }
  #ifdef RBD_DEBUG
  #define rbd_assert(expr)						\
  		if (unlikely(!(expr))) {				\
  			printk(KERN_ERR "
  Assertion failure in %s() "	\
  						"at line %d:
  
  "	\
  					"\trbd_assert(%s);
  
  ",	\
  					__func__, __LINE__, #expr);	\
  			BUG();						\
  		}
  #else /* !RBD_DEBUG */
  #  define rbd_assert(expr)	((void) 0)
  #endif /* !RBD_DEBUG */

  static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
  static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
  static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
  static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

  static int rbd_dev_refresh(struct rbd_device *rbd_dev);
  static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
  static int rbd_dev_header_info(struct rbd_device *rbd_dev);
  static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
  static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
  					u64 snap_id);
  static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
  				u8 *order, u64 *snap_size);
  static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
  		u64 *snap_features);

  static int rbd_open(struct block_device *bdev, fmode_t mode)
  {
  	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
  	bool removing = false;

  	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
  		return -EROFS;
  	spin_lock_irq(&rbd_dev->lock);
  	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
  		removing = true;
  	else
  		rbd_dev->open_count++;
  	spin_unlock_irq(&rbd_dev->lock);
  	if (removing)
  		return -ENOENT;
  	(void) get_device(&rbd_dev->dev);

  	return 0;
  }
  static void rbd_release(struct gendisk *disk, fmode_t mode)
  {
  	struct rbd_device *rbd_dev = disk->private_data;
  	unsigned long open_count_before;
  	spin_lock_irq(&rbd_dev->lock);
  	open_count_before = rbd_dev->open_count--;
  	spin_unlock_irq(&rbd_dev->lock);
  	rbd_assert(open_count_before > 0);

  	put_device(&rbd_dev->dev);
  }
  static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
  {
  	int ret = 0;
  	int val;
  	bool ro;
  	bool ro_changed = false;

  	/* get_user() may sleep, so call it before taking rbd_dev->lock */
  	if (get_user(val, (int __user *)(arg)))
  		return -EFAULT;
  
  	ro = val ? true : false;
  	/* A mapped snapshot can't be made writable */
  	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
  		return -EROFS;
  	spin_lock_irq(&rbd_dev->lock);
  	/* prevent others from opening this device */
  	if (rbd_dev->open_count > 1) {
  		ret = -EBUSY;
  		goto out;
  	}
  	if (rbd_dev->mapping.read_only != ro) {
  		rbd_dev->mapping.read_only = ro;
  		ro_changed = true;
  	}
  out:
  	spin_unlock_irq(&rbd_dev->lock);
  	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
  	if (ret == 0 && ro_changed)
  		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
  
  	return ret;
  }
  
  static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
  			unsigned int cmd, unsigned long arg)
  {
  	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
  	int ret = 0;
  	switch (cmd) {
  	case BLKROSET:
  		ret = rbd_ioctl_set_ro(rbd_dev, arg);
  		break;
  	default:
  		ret = -ENOTTY;
  	}
  	return ret;
  }
  
  #ifdef CONFIG_COMPAT
  static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
  				unsigned int cmd, unsigned long arg)
  {
  	return rbd_ioctl(bdev, mode, cmd, arg);
  }
  #endif /* CONFIG_COMPAT */
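
  /*
   * Usage sketch (illustrative): BLKROSET is the ioctl issued by e.g.
   * "blockdev --setro /dev/rbd0", so rbd_ioctl_set_ro() is how a mapped
   * image is flipped between read-only and read-write from userspace.
   */
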
  static const struct block_device_operations rbd_bd_ops = {
  	.owner			= THIS_MODULE,
  	.open			= rbd_open,
  	.release		= rbd_release,
  	.ioctl			= rbd_ioctl,
  #ifdef CONFIG_COMPAT
  	.compat_ioctl		= rbd_compat_ioctl,
  #endif
  };
  
  /*
   * Initialize an rbd client instance.  Success or not, this function
   * consumes ceph_opts.  Caller holds client_mutex.
   */
  static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
  {
  	struct rbd_client *rbdc;
  	int ret = -ENOMEM;
  	dout("%s:
  ", __func__);
  	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
  	if (!rbdc)
  		goto out_opt;
  
  	kref_init(&rbdc->kref);
  	INIT_LIST_HEAD(&rbdc->node);
  	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
  	if (IS_ERR(rbdc->client))
  		goto out_rbdc;
  	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
  
  	ret = ceph_open_session(rbdc->client);
  	if (ret < 0)
  		goto out_client;

  	spin_lock(&rbd_client_list_lock);
  	list_add_tail(&rbdc->node, &rbd_client_list);
  	spin_unlock(&rbd_client_list_lock);

  	dout("%s: rbdc %p
  ", __func__, rbdc);

  	return rbdc;
  out_client:
  	ceph_destroy_client(rbdc->client);
  out_rbdc:
  	kfree(rbdc);
  out_opt:
  	if (ceph_opts)
  		ceph_destroy_options(ceph_opts);
  	dout("%s: error %d
  ", __func__, ret);
  	return ERR_PTR(ret);
  }
  static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
  {
  	kref_get(&rbdc->kref);
  
  	return rbdc;
  }
  /*
   * Find a ceph client with specific addr and configuration.  If
   * found, bump its reference count.
   */
  static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
  {
  	struct rbd_client *client_node;
  	bool found = false;

  	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
  		return NULL;
  	spin_lock(&rbd_client_list_lock);
  	list_for_each_entry(client_node, &rbd_client_list, node) {
  		if (!ceph_compare_options(ceph_opts, client_node->client)) {
  			__rbd_get_client(client_node);
  			found = true;
  			break;
  		}
  	}
  	spin_unlock(&rbd_client_list_lock);
  
  	return found ? client_node : NULL;
  }
  
  /*
   * (Per device) rbd map options
   */
  enum {
  	Opt_queue_depth,
  	Opt_last_int,
  	/* int args above */
  	Opt_last_string,
  	/* string args above */
  	Opt_read_only,
  	Opt_read_write,
  	Opt_err
  };
  static match_table_t rbd_opts_tokens = {
  	{Opt_queue_depth, "queue_depth=%d"},
  	/* int args above */
  	/* string args above */
  	{Opt_read_only, "read_only"},
  	{Opt_read_only, "ro"},		/* Alternate spelling */
  	{Opt_read_write, "read_write"},
  	{Opt_read_write, "rw"},		/* Alternate spelling */
  	{Opt_err, NULL}
  };
  struct rbd_options {
  	int	queue_depth;
  	bool	read_only;
  };
  #define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
  #define RBD_READ_ONLY_DEFAULT	false
  static int parse_rbd_opts_token(char *c, void *private)
  {
  	struct rbd_options *rbd_opts = private;
  	substring_t argstr[MAX_OPT_ARGS];
  	int token, intval, ret;
  	token = match_token(c, rbd_opts_tokens, argstr);
  	if (token < Opt_last_int) {
  		ret = match_int(&argstr[0], &intval);
  		if (ret < 0) {
  			pr_err("bad mount option arg (not int) at '%s'
  ", c);
  			return ret;
  		}
  		dout("got int token %d val %d
  ", token, intval);
  	} else if (token > Opt_last_int && token < Opt_last_string) {
  		dout("got string token %d val %s
  ", token, argstr[0].from);
  	} else {
  		dout("got token %d
  ", token);
  	}
  
  	switch (token) {
  	case Opt_queue_depth:
  		if (intval < 1) {
  			pr_err("queue_depth out of range
  ");
  			return -EINVAL;
  		}
  		rbd_opts->queue_depth = intval;
  		break;
  	case Opt_read_only:
  		rbd_opts->read_only = true;
  		break;
  	case Opt_read_write:
  		rbd_opts->read_only = false;
  		break;
  	default:
  		/* libceph prints "bad option" msg */
  		return -EINVAL;
  	}

  	return 0;
  }
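
  /*
   * Example (illustrative): the options field of an "rbd add" request,
   * e.g. "name=admin,queue_depth=128,ro", is first parsed by libceph;
   * tokens it does not recognize (queue_depth, ro, ...) are handed to
   * parse_rbd_opts_token() and matched against rbd_opts_tokens above.
   */
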
  static char* obj_op_name(enum obj_operation_type op_type)
  {
  	switch (op_type) {
  	case OBJ_OP_READ:
  		return "read";
  	case OBJ_OP_WRITE:
  		return "write";
  	case OBJ_OP_DISCARD:
  		return "discard";
  	default:
  		return "???";
  	}
  }
  /*
   * Get a ceph client with specific addr and configuration, if one does
   * not exist create it.  Either way, ceph_opts is consumed by this
   * function.
   */
  static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
  {
  	struct rbd_client *rbdc;

  	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
  	rbdc = rbd_client_find(ceph_opts);
  	if (rbdc)	/* using an existing client */
  		ceph_destroy_options(ceph_opts);
  	else
  		rbdc = rbd_client_create(ceph_opts);
  	mutex_unlock(&client_mutex);

  	return rbdc;
  }
  
  /*
   * Destroy ceph client
   *
   * Caller must not hold rbd_client_list_lock; it is taken here.
   */
  static void rbd_client_release(struct kref *kref)
  {
  	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
  	dout("%s: rbdc %p
  ", __func__, rbdc);
  	spin_lock(&rbd_client_list_lock);
  	list_del(&rbdc->node);
  	spin_unlock(&rbd_client_list_lock);
  
  	ceph_destroy_client(rbdc->client);
  	kfree(rbdc);
  }
  
  /*
   * Drop reference to ceph client node. If it's not referenced anymore, release
   * it.
   */
  static void rbd_put_client(struct rbd_client *rbdc)
  {
  	if (rbdc)
  		kref_put(&rbdc->kref, rbd_client_release);
  }
  static bool rbd_image_format_valid(u32 image_format)
  {
  	return image_format == 1 || image_format == 2;
  }
  static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
  {
  	size_t size;
  	u32 snap_count;
  
  	/* The header has to start with the magic rbd header text */
  	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
  		return false;
  	/* The bio layer requires at least sector-sized I/O */
  
  	if (ondisk->options.order < SECTOR_SHIFT)
  		return false;
  
  	/* If we use u64 in a few spots we may be able to loosen this */
  
  	if (ondisk->options.order > 8 * sizeof (int) - 1)
  		return false;
  	/*
  	 * The size of a snapshot header has to fit in a size_t, and
  	 * that limits the number of snapshots.
  	 */
  	snap_count = le32_to_cpu(ondisk->snap_count);
  	size = SIZE_MAX - sizeof (struct ceph_snap_context);
  	if (snap_count > size / sizeof (__le64))
  		return false;
  
  	/*
  	 * Not only that, but the size of the entire snapshot
  	 * header must also be representable in a size_t.
  	 */
  	size -= snap_count * sizeof (__le64);
  	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
  		return false;
  
  	return true;
  }
  /*
   * Fill an rbd image header with information from the given format 1
   * on-disk header.
   */
  static int rbd_header_from_disk(struct rbd_device *rbd_dev,
  				 struct rbd_image_header_ondisk *ondisk)
  {
  	struct rbd_image_header *header = &rbd_dev->header;
  	bool first_time = header->object_prefix == NULL;
  	struct ceph_snap_context *snapc;
  	char *object_prefix = NULL;
  	char *snap_names = NULL;
  	u64 *snap_sizes = NULL;
  	u32 snap_count;
  	size_t size;
  	int ret = -ENOMEM;
  	u32 i;

  	/* Allocate this now to avoid having to handle failure below */

  	if (first_time) {
  		size_t len;

  		len = strnlen(ondisk->object_prefix,
  				sizeof (ondisk->object_prefix));
  		object_prefix = kmalloc(len + 1, GFP_KERNEL);
  		if (!object_prefix)
  			return -ENOMEM;
  		memcpy(object_prefix, ondisk->object_prefix, len);
  		object_prefix[len] = '\0';
  	}

  	/* Allocate the snapshot context and fill it in */

  	snap_count = le32_to_cpu(ondisk->snap_count);
  	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
  	if (!snapc)
  		goto out_err;
  	snapc->seq = le64_to_cpu(ondisk->snap_seq);
  	if (snap_count) {
  		struct rbd_image_snap_ondisk *snaps;
  		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
  		/* We'll keep a copy of the snapshot names... */

  		if (snap_names_len > (u64)SIZE_MAX)
  			goto out_2big;
  		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
  		if (!snap_names)
  			goto out_err;
  		/* ...as well as the array of their sizes. */

  		size = snap_count * sizeof (*header->snap_sizes);
  		snap_sizes = kmalloc(size, GFP_KERNEL);
  		if (!snap_sizes)
  			goto out_err;

  		/*
  		 * Copy the names, and fill in each snapshot's id
  		 * and size.
  		 *
  		 * Note that rbd_dev_v1_header_info() guarantees the
  		 * ondisk buffer we're working with has
  		 * snap_names_len bytes beyond the end of the
  		 * snapshot id array, so this memcpy() is safe.
  		 */
  		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
  		snaps = ondisk->snaps;
  		for (i = 0; i < snap_count; i++) {
  			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
  			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
  		}
  	}

  	/* We won't fail any more, fill in the header */

  	if (first_time) {
  		header->object_prefix = object_prefix;
  		header->obj_order = ondisk->options.order;
  		header->crypt_type = ondisk->options.crypt_type;
  		header->comp_type = ondisk->options.comp_type;
  		/* The rest aren't used for format 1 images */
  		header->stripe_unit = 0;
  		header->stripe_count = 0;
  		header->features = 0;
  	} else {
  		ceph_put_snap_context(header->snapc);
  		kfree(header->snap_names);
  		kfree(header->snap_sizes);
  	}

  	/* The remaining fields always get updated (when we refresh) */

  	header->image_size = le64_to_cpu(ondisk->image_size);
  	header->snapc = snapc;
  	header->snap_names = snap_names;
  	header->snap_sizes = snap_sizes;

  	return 0;
  out_2big:
  	ret = -EIO;
  out_err:
  	kfree(snap_sizes);
  	kfree(snap_names);
  	ceph_put_snap_context(snapc);
  	kfree(object_prefix);

  	return ret;
  }
  static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
  {
  	const char *snap_name;
  
  	rbd_assert(which < rbd_dev->header.snapc->num_snaps);
  
  	/* Skip over names until we find the one we are looking for */
  
  	snap_name = rbd_dev->header.snap_names;
  	while (which--)
  		snap_name += strlen(snap_name) + 1;
  
  	return kstrdup(snap_name, GFP_KERNEL);
  }
  /*
   * Snapshot id comparison function for use with qsort()/bsearch().
   * Note that result is for snapshots in *descending* order.
   */
  static int snapid_compare_reverse(const void *s1, const void *s2)
  {
  	u64 snap_id1 = *(u64 *)s1;
  	u64 snap_id2 = *(u64 *)s2;
  
  	if (snap_id1 < snap_id2)
  		return 1;
  	return snap_id1 == snap_id2 ? 0 : -1;
  }
  
  /*
   * Search a snapshot context to see if the given snapshot id is
   * present.
   *
   * Returns the position of the snapshot id in the array if it's found,
   * or BAD_SNAP_INDEX otherwise.
   *
   * Note: The snapshot array is kept sorted (by the osd) in
   * reverse order, highest snapshot id first.
   */
  static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
  {
  	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
  	u64 *found;

  	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
  				sizeof (snap_id), snapid_compare_reverse);

  	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
  }
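
  /*
   * Example (illustrative): for a snapshot context whose snaps[] array
   * is { 12, 7, 3 } (newest first), looking up snap_id 7 returns index
   * 1, while snap_id 5 is not present and yields BAD_SNAP_INDEX.
   */
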
  static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
  					u64 snap_id)
  {
  	u32 which;
  	const char *snap_name;

  	which = rbd_dev_snap_index(rbd_dev, snap_id);
  	if (which == BAD_SNAP_INDEX)
  		return ERR_PTR(-ENOENT);

  	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
  	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
  }
  
  static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
  {
  	if (snap_id == CEPH_NOSNAP)
  		return RBD_SNAP_HEAD_NAME;
  	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
  	if (rbd_dev->image_format == 1)
  		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

  	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
  }
  static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
  				u64 *snap_size)
  {
  	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
  	if (snap_id == CEPH_NOSNAP) {
  		*snap_size = rbd_dev->header.image_size;
  	} else if (rbd_dev->image_format == 1) {
  		u32 which;

  		which = rbd_dev_snap_index(rbd_dev, snap_id);
  		if (which == BAD_SNAP_INDEX)
  			return -ENOENT;

  		*snap_size = rbd_dev->header.snap_sizes[which];
  	} else {
  		u64 size = 0;
  		int ret;
  
  		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
  		if (ret)
  			return ret;
  
  		*snap_size = size;
  	}
  	return 0;
  }
  static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
  			u64 *snap_features)
  {
  	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
  	if (snap_id == CEPH_NOSNAP) {
  		*snap_features = rbd_dev->header.features;
  	} else if (rbd_dev->image_format == 1) {
  		*snap_features = 0;	/* No features for format 1 */
602adf400   Yehuda Sadeh   rbd: introduce ra...
1073
  	} else {
2ad3d7167   Alex Elder   rbd: define rbd_s...
1074
1075
  		u64 features = 0;
  		int ret;
8b0241f85   Alex Elder   rbd: have snap_by...
1076

2ad3d7167   Alex Elder   rbd: define rbd_s...
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
  		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
  		if (ret)
  			return ret;
  
  		*snap_features = features;
  	}
  	return 0;
  }
  
  static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
  {
8f4b7d982   Alex Elder   rbd: don't look u...
1088
  	u64 snap_id = rbd_dev->spec->snap_id;
2ad3d7167   Alex Elder   rbd: define rbd_s...
1089
1090
1091
  	u64 size = 0;
  	u64 features = 0;
  	int ret;
2ad3d7167   Alex Elder   rbd: define rbd_s...
1092
1093
1094
1095
1096
1097
1098
1099
1100
  	ret = rbd_snap_size(rbd_dev, snap_id, &size);
  	if (ret)
  		return ret;
  	ret = rbd_snap_features(rbd_dev, snap_id, &features);
  	if (ret)
  		return ret;
  
  	rbd_dev->mapping.size = size;
  	rbd_dev->mapping.features = features;
8b0241f85   Alex Elder   rbd: have snap_by...
1101
  	return 0;
602adf400   Yehuda Sadeh   rbd: introduce ra...
1102
  }
d1cf57884   Alex Elder   rbd: set mapping ...
1103
1104
1105
1106
  static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
  {
  	rbd_dev->mapping.size = 0;
  	rbd_dev->mapping.features = 0;
200a6a8be   Alex Elder   rbd: don't destro...
1107
  }
7d5079aa8   Himangi Saraogi   rbd: use rbd_segm...
1108
1109
1110
1111
1112
1113
  static void rbd_segment_name_free(const char *name)
  {
  	/* The explicit cast here is needed to drop the const qualifier */
  
  	kmem_cache_free(rbd_segment_name_cache, (void *)name);
  }
98571b5aa   Alex Elder   rbd: small changes
1114
  static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
602adf400   Yehuda Sadeh   rbd: introduce ra...
1115
  {
65ccfe21d   Alex Elder   rbd: split up rbd...
1116
1117
1118
  	char *name;
  	u64 segment;
  	int ret;
3a96d5cd7   Josh Durgin   rbd: use the corr...
1119
  	char *name_format;
602adf400   Yehuda Sadeh   rbd: introduce ra...
1120

78c2a44aa   Alex Elder   rbd: allocate ima...
1121
  	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
65ccfe21d   Alex Elder   rbd: split up rbd...
1122
1123
1124
  	if (!name)
  		return NULL;
  	segment = offset >> rbd_dev->header.obj_order;
3a96d5cd7   Josh Durgin   rbd: use the corr...
1125
1126
1127
  	name_format = "%s.%012llx";
  	if (rbd_dev->image_format == 2)
  		name_format = "%s.%016llx";
2d0ebc5d5   Ilya Dryomov   libceph: rename M...
1128
  	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
65ccfe21d   Alex Elder   rbd: split up rbd...
1129
  			rbd_dev->header.object_prefix, segment);
2d0ebc5d5   Ilya Dryomov   libceph: rename M...
1130
  	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
65ccfe21d   Alex Elder   rbd: split up rbd...
1131
1132
1133
  		pr_err("error formatting segment name for #%llu (%d)
  ",
  			segment, ret);
7d5079aa8   Himangi Saraogi   rbd: use rbd_segm...
1134
  		rbd_segment_name_free(name);
65ccfe21d   Alex Elder   rbd: split up rbd...
1135
1136
  		name = NULL;
  	}
602adf400   Yehuda Sadeh   rbd: introduce ra...
1137

65ccfe21d   Alex Elder   rbd: split up rbd...
1138
1139
  	return name;
  }
602adf400   Yehuda Sadeh   rbd: introduce ra...
1140

65ccfe21d   Alex Elder   rbd: split up rbd...
1141
1142
1143
  static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
  {
  	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
602adf400   Yehuda Sadeh   rbd: introduce ra...
1144

65ccfe21d   Alex Elder   rbd: split up rbd...
1145
1146
1147
1148
1149
1150
1151
1152
1153
  	return offset & (segment_size - 1);
  }
  
  static u64 rbd_segment_length(struct rbd_device *rbd_dev,
  				u64 offset, u64 length)
  {
  	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
  
  	offset &= segment_size - 1;
aafb230eb   Alex Elder   rbd: define rbd_a...
1154
  	rbd_assert(length <= U64_MAX - offset);
65ccfe21d   Alex Elder   rbd: split up rbd...
1155
1156
1157
1158
  	if (offset + length > segment_size)
  		length = segment_size - offset;
  
  	return length;
602adf400   Yehuda Sadeh   rbd: introduce ra...
1159
1160
1161
  }
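  
  /*
   * Editor's illustrative sketch (not part of rbd.c): how an image
   * offset/length pair maps onto one backing object.  rbd's default
   * object order is 22 (4 MiB objects); the "rb.0.x" prefix below is a
   * made-up stand-in for rbd_dev->header.object_prefix.  Standalone
   * userspace arithmetic mirroring rbd_segment_name/offset/length:
   */
  #include <inttypes.h>
  #include <stdio.h>
  
  int main(void)
  {
  	unsigned int obj_order = 22;			/* 4 MiB objects */
  	uint64_t seg_size = 1ULL << obj_order;
  	uint64_t offset = 5ULL * 1024 * 1024;		/* 5 MiB into the image */
  	uint64_t length = 4ULL * 1024 * 1024;		/* 4 MiB request */
  
  	uint64_t segment = offset >> obj_order;		/* object #1 */
  	uint64_t seg_off = offset & (seg_size - 1);	/* 1 MiB into that object */
  	uint64_t seg_len = length;
  
  	if (seg_off + seg_len > seg_size)
  		seg_len = seg_size - seg_off;		/* clamp: 3 MiB here, the
  							 * rest goes to object #2 */
  
  	printf("rb.0.x.%012" PRIx64 " off=%" PRIu64 " len=%" PRIu64 "\n",
  	       segment, seg_off, seg_len);
  	return 0;
  }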
  
  /*
029bcbd8b   Josh Durgin   rbd: set blk_queu...
1162
1163
1164
1165
1166
1167
1168
1169
   * returns the size of an object in the image
   */
  static u64 rbd_obj_bytes(struct rbd_image_header *header)
  {
  	return 1 << header->obj_order;
  }
  
  /*
602adf400   Yehuda Sadeh   rbd: introduce ra...
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
   * bio helpers
   */
  
  static void bio_chain_put(struct bio *chain)
  {
  	struct bio *tmp;
  
  	while (chain) {
  		tmp = chain;
  		chain = chain->bi_next;
  		bio_put(tmp);
  	}
  }
  
  /*
   * zeros a bio chain, starting at a specific offset
   */
  static void zero_bio_chain(struct bio *chain, int start_ofs)
  {
7988613b0   Kent Overstreet   block: Convert bi...
1189
1190
  	struct bio_vec bv;
  	struct bvec_iter iter;
602adf400   Yehuda Sadeh   rbd: introduce ra...
1191
1192
  	unsigned long flags;
  	void *buf;
602adf400   Yehuda Sadeh   rbd: introduce ra...
1193
1194
1195
  	int pos = 0;
  
  	while (chain) {
7988613b0   Kent Overstreet   block: Convert bi...
1196
1197
  		bio_for_each_segment(bv, chain, iter) {
  			if (pos + bv.bv_len > start_ofs) {
602adf400   Yehuda Sadeh   rbd: introduce ra...
1198
  				int remainder = max(start_ofs - pos, 0);
7988613b0   Kent Overstreet   block: Convert bi...
1199
  				buf = bvec_kmap_irq(&bv, &flags);
602adf400   Yehuda Sadeh   rbd: introduce ra...
1200
  				memset(buf + remainder, 0,
7988613b0   Kent Overstreet   block: Convert bi...
1201
1202
  				       bv.bv_len - remainder);
  				flush_dcache_page(bv.bv_page);
85b5aaa62   Dan Carpenter   rbd: passing wron...
1203
  				bvec_kunmap_irq(buf, &flags);
602adf400   Yehuda Sadeh   rbd: introduce ra...
1204
  			}
7988613b0   Kent Overstreet   block: Convert bi...
1205
  			pos += bv.bv_len;
602adf400   Yehuda Sadeh   rbd: introduce ra...
1206
1207
1208
1209
1210
1211
1212
  		}
  
  		chain = chain->bi_next;
  	}
  }
  
  /*
b9434c5b4   Alex Elder   rbd: define zero_...
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
   * similar to zero_bio_chain(), zeros data defined by a page array,
   * starting at the given byte offset from the start of the array and
   * continuing up to the given end offset.  The pages array is
   * assumed to be big enough to hold all bytes up to the end.
   */
  static void zero_pages(struct page **pages, u64 offset, u64 end)
  {
  	struct page **page = &pages[offset >> PAGE_SHIFT];
  
  	rbd_assert(end > offset);
  	rbd_assert(end - offset <= (u64)SIZE_MAX);
  	while (offset < end) {
  		size_t page_offset;
  		size_t length;
  		unsigned long flags;
  		void *kaddr;
491205a8b   Geert Uytterhoeven   rbd: Use min_t() ...
1229
1230
  		page_offset = offset & ~PAGE_MASK;
  		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
b9434c5b4   Alex Elder   rbd: define zero_...
1231
1232
1233
  		local_irq_save(flags);
  		kaddr = kmap_atomic(*page);
  		memset(kaddr + page_offset, 0, length);
e21560541   Alex Elder   rbd: flush dcache...
1234
  		flush_dcache_page(*page);
b9434c5b4   Alex Elder   rbd: define zero_...
1235
1236
1237
1238
1239
1240
1241
1242
1243
  		kunmap_atomic(kaddr);
  		local_irq_restore(flags);
  
  		offset += length;
  		page++;
  	}
  }
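  
  /*
   * Editor's illustrative sketch (not part of rbd.c): the same
   * page-by-page split that zero_pages() performs, but over plain 4 KiB
   * userspace buffers (no kmap_atomic/irq handling).  EX_PAGE_SIZE and
   * zero_range() are made-up names for the example only:
   */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  
  #define EX_PAGE_SIZE	4096u
  
  static void zero_range(unsigned char **pages, uint64_t offset, uint64_t end)
  {
  	unsigned char **page = &pages[offset / EX_PAGE_SIZE];
  
  	while (offset < end) {
  		size_t page_offset = offset % EX_PAGE_SIZE;
  		size_t length = EX_PAGE_SIZE - page_offset;
  
  		if (length > end - offset)
  			length = end - offset;
  
  		memset(*page + page_offset, 0, length);
  		offset += length;
  		page++;
  	}
  }
  
  int main(void)
  {
  	static unsigned char p0[EX_PAGE_SIZE], p1[EX_PAGE_SIZE];
  	unsigned char *pages[] = { p0, p1 };
  
  	memset(p0, 0xff, sizeof(p0));
  	memset(p1, 0xff, sizeof(p1));
  	zero_range(pages, 3000, 5000);	/* clears p0[3000..4095], p1[0..903] */
  	printf("%d %d %d\n", p0[2999], p0[3000], p1[903]);	/* 255 0 0 */
  	return 0;
  }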
  
  /*
f7760dad2   Alex Elder   rbd: simplify rbd...
1244
1245
   * Clone a portion of a bio, starting at the given byte offset
   * and continuing for the number of bytes indicated.
602adf400   Yehuda Sadeh   rbd: introduce ra...
1246
   */
f7760dad2   Alex Elder   rbd: simplify rbd...
1247
1248
1249
1250
  static struct bio *bio_clone_range(struct bio *bio_src,
  					unsigned int offset,
  					unsigned int len,
  					gfp_t gfpmask)
602adf400   Yehuda Sadeh   rbd: introduce ra...
1251
  {
f7760dad2   Alex Elder   rbd: simplify rbd...
1252
  	struct bio *bio;
5341a6278   Kent Overstreet   rbd: Refactor bio...
1253
  	bio = bio_clone(bio_src, gfpmask);
f7760dad2   Alex Elder   rbd: simplify rbd...
1254
1255
  	if (!bio)
  		return NULL;	/* ENOMEM */
602adf400   Yehuda Sadeh   rbd: introduce ra...
1256

5341a6278   Kent Overstreet   rbd: Refactor bio...
1257
  	bio_advance(bio, offset);
4f024f379   Kent Overstreet   block: Abstract o...
1258
  	bio->bi_iter.bi_size = len;
f7760dad2   Alex Elder   rbd: simplify rbd...
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
  
  	return bio;
  }
  
  /*
   * Clone a portion of a bio chain, starting at the given byte offset
   * into the first bio in the source chain and continuing for the
   * number of bytes indicated.  The result is another bio chain of
   * exactly the given length, or a null pointer on error.
   *
   * The bio_src and offset parameters are both in-out.  On entry they
   * refer to the first source bio and the offset into that bio where
   * the start of data to be cloned is located.
   *
   * On return, bio_src is updated to refer to the bio in the source
   * chain that contains the first un-cloned byte, and *offset will
   * contain the offset of that byte within that bio.
   */
  static struct bio *bio_chain_clone_range(struct bio **bio_src,
  					unsigned int *offset,
  					unsigned int len,
  					gfp_t gfpmask)
  {
  	struct bio *bi = *bio_src;
  	unsigned int off = *offset;
  	struct bio *chain = NULL;
  	struct bio **end;
  
  	/* Build up a chain of clone bios up to the limit */
4f024f379   Kent Overstreet   block: Abstract o...
1288
  	if (!bi || off >= bi->bi_iter.bi_size || !len)
f7760dad2   Alex Elder   rbd: simplify rbd...
1289
  		return NULL;		/* Nothing to clone */
602adf400   Yehuda Sadeh   rbd: introduce ra...
1290

f7760dad2   Alex Elder   rbd: simplify rbd...
1291
1292
1293
1294
  	end = &chain;
  	while (len) {
  		unsigned int bi_size;
  		struct bio *bio;
f5400b7a0   Alex Elder   rbd: add a warnin...
1295
1296
  		if (!bi) {
  			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
f7760dad2   Alex Elder   rbd: simplify rbd...
1297
  			goto out_err;	/* EINVAL; ran out of bio's */
f5400b7a0   Alex Elder   rbd: add a warnin...
1298
  		}
4f024f379   Kent Overstreet   block: Abstract o...
1299
  		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
f7760dad2   Alex Elder   rbd: simplify rbd...
1300
1301
1302
1303
1304
1305
  		bio = bio_clone_range(bi, off, bi_size, gfpmask);
  		if (!bio)
  			goto out_err;	/* ENOMEM */
  
  		*end = bio;
  		end = &bio->bi_next;
602adf400   Yehuda Sadeh   rbd: introduce ra...
1306

f7760dad2   Alex Elder   rbd: simplify rbd...
1307
  		off += bi_size;
4f024f379   Kent Overstreet   block: Abstract o...
1308
  		if (off == bi->bi_iter.bi_size) {
f7760dad2   Alex Elder   rbd: simplify rbd...
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
  			bi = bi->bi_next;
  			off = 0;
  		}
  		len -= bi_size;
  	}
  	*bio_src = bi;
  	*offset = off;
  
  	return chain;
  out_err:
  	bio_chain_put(chain);
602adf400   Yehuda Sadeh   rbd: introduce ra...
1320

602adf400   Yehuda Sadeh   rbd: introduce ra...
1321
1322
  	return NULL;
  }
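  
  /*
   * Editor's illustrative sketch (not part of rbd.c): the in/out
   * contract described above, modelled in userspace with a plain linked
   * list of byte buffers instead of struct bio.  Each call consumes the
   * next 'len' bytes of the chain and leaves *src/*offset at the first
   * byte not yet consumed, so successive calls peel off consecutive
   * ranges.  struct buf and chain_consume() are made up for the example:
   */
  #include <stdio.h>
  
  struct buf {
  	const char *data;
  	unsigned int size;
  	struct buf *next;
  };
  
  /* Consume up to len bytes; returns the number of bytes consumed. */
  static unsigned int chain_consume(struct buf **src, unsigned int *offset,
  				  unsigned int len)
  {
  	unsigned int done = 0;
  
  	while (len && *src) {
  		unsigned int n = (*src)->size - *offset;
  
  		if (n > len)
  			n = len;
  		/* a real clone would reference data[*offset..*offset+n) here */
  		done += n;
  		*offset += n;
  		len -= n;
  		if (*offset == (*src)->size) {	/* advance to the next buffer */
  			*src = (*src)->next;
  			*offset = 0;
  		}
  	}
  	return done;
  }
  
  int main(void)
  {
  	struct buf b2 = { "world", 5, NULL };
  	struct buf b1 = { "hello ", 6, &b2 };
  	struct buf *src = &b1;
  	unsigned int off = 0;
  
  	printf("%u ", chain_consume(&src, &off, 4));	/* 4; now at b1[4] */
  	printf("%u\n", chain_consume(&src, &off, 7));	/* 7; chain exhausted */
  	return 0;
  }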
926f9b3f0   Alex Elder   rbd: define an rb...
1323
1324
1325
1326
1327
  /*
   * The default/initial value for all object request flags is 0.  For
   * each flag, once its value is set to 1 it is never reset to 0
   * again.
   */
57acbaa7f   Alex Elder   rbd: always check...
1328
  static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
926f9b3f0   Alex Elder   rbd: define an rb...
1329
  {
57acbaa7f   Alex Elder   rbd: always check...
1330
  	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
926f9b3f0   Alex Elder   rbd: define an rb...
1331
  		struct rbd_device *rbd_dev;
57acbaa7f   Alex Elder   rbd: always check...
1332
  		rbd_dev = obj_request->img_request->rbd_dev;
9584d5082   Ilya Dryomov   rbd: remove extra...
1333
  		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
926f9b3f0   Alex Elder   rbd: define an rb...
1334
1335
1336
  			obj_request);
  	}
  }
57acbaa7f   Alex Elder   rbd: always check...
1337
  static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
926f9b3f0   Alex Elder   rbd: define an rb...
1338
1339
  {
  	smp_mb();
57acbaa7f   Alex Elder   rbd: always check...
1340
  	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
926f9b3f0   Alex Elder   rbd: define an rb...
1341
  }
57acbaa7f   Alex Elder   rbd: always check...
1342
  static void obj_request_done_set(struct rbd_obj_request *obj_request)
6365d33a2   Alex Elder   rbd: add an objec...
1343
  {
57acbaa7f   Alex Elder   rbd: always check...
1344
1345
  	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
  		struct rbd_device *rbd_dev = NULL;
6365d33a2   Alex Elder   rbd: add an objec...
1346

57acbaa7f   Alex Elder   rbd: always check...
1347
1348
  		if (obj_request_img_data_test(obj_request))
  			rbd_dev = obj_request->img_request->rbd_dev;
9584d5082   Ilya Dryomov   rbd: remove extra...
1349
  		rbd_warn(rbd_dev, "obj_request %p already marked done",
6365d33a2   Alex Elder   rbd: add an objec...
1350
1351
1352
  			obj_request);
  	}
  }
57acbaa7f   Alex Elder   rbd: always check...
1353
  static bool obj_request_done_test(struct rbd_obj_request *obj_request)
6365d33a2   Alex Elder   rbd: add an objec...
1354
1355
  {
  	smp_mb();
57acbaa7f   Alex Elder   rbd: always check...
1356
  	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
6365d33a2   Alex Elder   rbd: add an objec...
1357
  }
5679c59f6   Alex Elder   rbd: add target o...
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
  /*
   * This sets the KNOWN flag after (possibly) setting the EXISTS
   * flag.  The latter is set based on the "exists" value provided.
   *
   * Note that for our purposes once an object exists it never goes
   * away again.  It's possible that the responses from two existence
   * checks are separated by the creation of the target object, and
   * the first ("doesn't exist") response arrives *after* the second
   * ("does exist").  In that case we ignore the second one.
   */
  static void obj_request_existence_set(struct rbd_obj_request *obj_request,
  				bool exists)
  {
  	if (exists)
  		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
  	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
  	smp_mb();
  }
  
  static bool obj_request_known_test(struct rbd_obj_request *obj_request)
  {
  	smp_mb();
  	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
  }
  
  static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
  {
  	smp_mb();
  	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
  }
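  
  /*
   * Editor's illustrative sketch (not part of rbd.c): the "sticky" flag
   * pattern used above, as userspace C11 atomics.  atomic_fetch_or()
   * plays the role of test_and_set_bit() and also reports whether the
   * bit was already set (the double-mark case rbd_warn() complains
   * about).  The EX_FLAG_* names are made up for the example:
   */
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>
  
  #define EX_FLAG_IMG_DATA	(1ul << 0)
  #define EX_FLAG_DONE		(1ul << 1)
  
  static bool sticky_set(atomic_ulong *flags, unsigned long bit)
  {
  	/* true if the bit was already set before this call */
  	return atomic_fetch_or(flags, bit) & bit;
  }
  
  static bool sticky_test(atomic_ulong *flags, unsigned long bit)
  {
  	return atomic_load(flags) & bit;
  }
  
  int main(void)
  {
  	atomic_ulong flags = 0;
  
  	printf("%d\n", sticky_set(&flags, EX_FLAG_DONE));	/* 0: first set */
  	printf("%d\n", sticky_set(&flags, EX_FLAG_DONE));	/* 1: already marked */
  	printf("%d\n", sticky_test(&flags, EX_FLAG_IMG_DATA));	/* 0: never set */
  	return 0;
  }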
9638556a2   Ilya Dryomov   rbd: handle paren...
1388
1389
1390
1391
1392
1393
1394
  static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
  {
  	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
  
  	return obj_request->img_offset <
  	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
  }
bf0d5f503   Alex Elder   rbd: new request ...
1395
1396
  static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
  {
37206ee5b   Alex Elder   rbd: normalize do...
1397
1398
1399
  	dout("%s: obj %p (was %d)
  ", __func__, obj_request,
  		atomic_read(&obj_request->kref.refcount));
bf0d5f503   Alex Elder   rbd: new request ...
1400
1401
1402
1403
1404
1405
1406
  	kref_get(&obj_request->kref);
  }
  
  static void rbd_obj_request_destroy(struct kref *kref);
  static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
  {
  	rbd_assert(obj_request != NULL);
37206ee5b   Alex Elder   rbd: normalize do...
1407
1408
1409
  	dout("%s: obj %p (was %d)
  ", __func__, obj_request,
  		atomic_read(&obj_request->kref.refcount));
bf0d5f503   Alex Elder   rbd: new request ...
1410
1411
  	kref_put(&obj_request->kref, rbd_obj_request_destroy);
  }
0f2d5be79   Alex Elder   rbd: use referenc...
1412
1413
1414
1415
1416
1417
1418
  static void rbd_img_request_get(struct rbd_img_request *img_request)
  {
  	dout("%s: img %p (was %d)
  ", __func__, img_request,
  	     atomic_read(&img_request->kref.refcount));
  	kref_get(&img_request->kref);
  }
e93f31523   Alex Elder   rbd: define paren...
1419
1420
  static bool img_request_child_test(struct rbd_img_request *img_request);
  static void rbd_parent_request_destroy(struct kref *kref);
bf0d5f503   Alex Elder   rbd: new request ...
1421
1422
1423
1424
  static void rbd_img_request_destroy(struct kref *kref);
  static void rbd_img_request_put(struct rbd_img_request *img_request)
  {
  	rbd_assert(img_request != NULL);
37206ee5b   Alex Elder   rbd: normalize do...
1425
1426
1427
  	dout("%s: img %p (was %d)
  ", __func__, img_request,
  		atomic_read(&img_request->kref.refcount));
e93f31523   Alex Elder   rbd: define paren...
1428
1429
1430
1431
  	if (img_request_child_test(img_request))
  		kref_put(&img_request->kref, rbd_parent_request_destroy);
  	else
  		kref_put(&img_request->kref, rbd_img_request_destroy);
bf0d5f503   Alex Elder   rbd: new request ...
1432
1433
1434
1435
1436
  }
  
  static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
  					struct rbd_obj_request *obj_request)
  {
25dcf954c   Alex Elder   rbd: decrement ob...
1437
  	rbd_assert(obj_request->img_request == NULL);
b155e86cf   Alex Elder   rbd: adjust image...
1438
  	/* Image request now owns object's original reference */
bf0d5f503   Alex Elder   rbd: new request ...
1439
  	obj_request->img_request = img_request;
25dcf954c   Alex Elder   rbd: decrement ob...
1440
  	obj_request->which = img_request->obj_request_count;
6365d33a2   Alex Elder   rbd: add an objec...
1441
1442
  	rbd_assert(!obj_request_img_data_test(obj_request));
  	obj_request_img_data_set(obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
1443
  	rbd_assert(obj_request->which != BAD_WHICH);
25dcf954c   Alex Elder   rbd: decrement ob...
1444
1445
  	img_request->obj_request_count++;
  	list_add_tail(&obj_request->links, &img_request->obj_requests);
37206ee5b   Alex Elder   rbd: normalize do...
1446
1447
1448
  	dout("%s: img %p obj %p w=%u
  ", __func__, img_request, obj_request,
  		obj_request->which);
bf0d5f503   Alex Elder   rbd: new request ...
1449
1450
1451
1452
1453
1454
  }
  
  static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
  					struct rbd_obj_request *obj_request)
  {
  	rbd_assert(obj_request->which != BAD_WHICH);
25dcf954c   Alex Elder   rbd: decrement ob...
1455

37206ee5b   Alex Elder   rbd: normalize do...
1456
1457
1458
  	dout("%s: img %p obj %p w=%u
  ", __func__, img_request, obj_request,
  		obj_request->which);
bf0d5f503   Alex Elder   rbd: new request ...
1459
  	list_del(&obj_request->links);
25dcf954c   Alex Elder   rbd: decrement ob...
1460
1461
1462
1463
  	rbd_assert(img_request->obj_request_count > 0);
  	img_request->obj_request_count--;
  	rbd_assert(obj_request->which == img_request->obj_request_count);
  	obj_request->which = BAD_WHICH;
6365d33a2   Alex Elder   rbd: add an objec...
1464
  	rbd_assert(obj_request_img_data_test(obj_request));
bf0d5f503   Alex Elder   rbd: new request ...
1465
  	rbd_assert(obj_request->img_request == img_request);
bf0d5f503   Alex Elder   rbd: new request ...
1466
  	obj_request->img_request = NULL;
25dcf954c   Alex Elder   rbd: decrement ob...
1467
  	obj_request->callback = NULL;
bf0d5f503   Alex Elder   rbd: new request ...
1468
1469
1470
1471
1472
1473
  	rbd_obj_request_put(obj_request);
  }
  
  static bool obj_request_type_valid(enum obj_request_type type)
  {
  	switch (type) {
9969ebc5a   Alex Elder   rbd: implement wa...
1474
  	case OBJ_REQUEST_NODATA:
bf0d5f503   Alex Elder   rbd: new request ...
1475
  	case OBJ_REQUEST_BIO:
788e2df3b   Alex Elder   rbd: implement sy...
1476
  	case OBJ_REQUEST_PAGES:
bf0d5f503   Alex Elder   rbd: new request ...
1477
1478
1479
1480
1481
  		return true;
  	default:
  		return false;
  	}
  }
bf0d5f503   Alex Elder   rbd: new request ...
1482
1483
1484
  static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
  				struct rbd_obj_request *obj_request)
  {
71c20a066   Ilya Dryomov   rbd: rbd_obj_requ...
1485
1486
  	dout("%s %p
  ", __func__, obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
1487
1488
  	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
  }
71c20a066   Ilya Dryomov   rbd: rbd_obj_requ...
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
  static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
  {
  	dout("%s %p
  ", __func__, obj_request);
  	ceph_osdc_cancel_request(obj_request->osd_req);
  }
  
  /*
   * Wait for an object request to complete.  If interrupted, cancel the
   * underlying osd request.
2894e1d76   Ilya Dryomov   rbd: timeout watc...
1499
1500
   *
   * @timeout: in jiffies, 0 means "wait forever"
71c20a066   Ilya Dryomov   rbd: rbd_obj_requ...
1501
   */
2894e1d76   Ilya Dryomov   rbd: timeout watc...
1502
1503
  static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
  				  unsigned long timeout)
71c20a066   Ilya Dryomov   rbd: rbd_obj_requ...
1504
  {
2894e1d76   Ilya Dryomov   rbd: timeout watc...
1505
  	long ret;
71c20a066   Ilya Dryomov   rbd: rbd_obj_requ...
1506
1507
1508
  
  	dout("%s %p
  ", __func__, obj_request);
2894e1d76   Ilya Dryomov   rbd: timeout watc...
1509
1510
1511
1512
1513
1514
  	ret = wait_for_completion_interruptible_timeout(
  					&obj_request->completion,
  					ceph_timeout_jiffies(timeout));
  	if (ret <= 0) {
  		if (ret == 0)
  			ret = -ETIMEDOUT;
71c20a066   Ilya Dryomov   rbd: rbd_obj_requ...
1515
  		rbd_obj_request_end(obj_request);
2894e1d76   Ilya Dryomov   rbd: timeout watc...
1516
1517
  	} else {
  		ret = 0;
71c20a066   Ilya Dryomov   rbd: rbd_obj_requ...
1518
  	}
2894e1d76   Ilya Dryomov   rbd: timeout watc...
1519
1520
1521
1522
1523
1524
1525
1526
1527
  	dout("%s %p ret %d
  ", __func__, obj_request, (int)ret);
  	return ret;
  }
  
  static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
  {
  	return __rbd_obj_request_wait(obj_request, 0);
  }
bf0d5f503   Alex Elder   rbd: new request ...
1528
1529
  static void rbd_img_request_complete(struct rbd_img_request *img_request)
  {
55f27e093   Alex Elder   rbd: record aggre...
1530

37206ee5b   Alex Elder   rbd: normalize do...
1531
1532
  	dout("%s: img %p
  ", __func__, img_request);
55f27e093   Alex Elder   rbd: record aggre...
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
  
  	/*
  	 * If no error occurred, compute the aggregate transfer
  	 * count for the image request.  We could instead use
  	 * atomic64_cmpxchg() to update it as each object request
  	 * completes; not clear offhand which way is better.
  	 */
  	if (!img_request->result) {
  		struct rbd_obj_request *obj_request;
  		u64 xferred = 0;
  
  		for_each_obj_request(img_request, obj_request)
  			xferred += obj_request->xferred;
  		img_request->xferred = xferred;
  	}
bf0d5f503   Alex Elder   rbd: new request ...
1548
1549
1550
1551
1552
  	if (img_request->callback)
  		img_request->callback(img_request);
  	else
  		rbd_img_request_put(img_request);
  }
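  
  /*
   * Editor's illustrative sketch (not part of rbd.c): the alternative
   * the comment above mentions -- instead of summing obj_request->xferred
   * once at image-request completion, each object completion could add
   * its own contribution to a shared 64-bit counter atomically.  A
   * userspace C11 analogue (atomic_fetch_add standing in for the
   * atomic64_* kernel primitives):
   */
  #include <inttypes.h>
  #include <stdatomic.h>
  #include <stdio.h>
  
  int main(void)
  {
  	uint64_t per_obj[] = { 4096, 4096, 1024 };	/* bytes per object request */
  	_Atomic uint64_t total = 0;
  	int i;
  
  	/* in the driver, each completion callback would do its own add */
  	for (i = 0; i < 3; i++)
  		atomic_fetch_add(&total, per_obj[i]);
  
  	printf("%" PRIu64 "\n", atomic_load(&total));	/* 9216 */
  	return 0;
  }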
0c425248e   Alex Elder   rbd: define image...
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
  /*
   * The default/initial value for all image request flags is 0.  Each
   * is conditionally set to 1 at image request initialization time
   * and currently never changes thereafter.
   */
  static void img_request_write_set(struct rbd_img_request *img_request)
  {
  	set_bit(IMG_REQ_WRITE, &img_request->flags);
  	smp_mb();
  }
  
  static bool img_request_write_test(struct rbd_img_request *img_request)
  {
  	smp_mb();
  	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
  }
90e98c522   Guangliang Zhao   rbd: initial disc...
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
  /*
   * Set the discard flag when the img_request is a discard request
   */
  static void img_request_discard_set(struct rbd_img_request *img_request)
  {
  	set_bit(IMG_REQ_DISCARD, &img_request->flags);
  	smp_mb();
  }
  
  static bool img_request_discard_test(struct rbd_img_request *img_request)
  {
  	smp_mb();
  	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
  }
9849e9863   Alex Elder   rbd: define image...
1583
1584
1585
1586
1587
  static void img_request_child_set(struct rbd_img_request *img_request)
  {
  	set_bit(IMG_REQ_CHILD, &img_request->flags);
  	smp_mb();
  }
e93f31523   Alex Elder   rbd: define paren...
1588
1589
1590
1591
1592
  static void img_request_child_clear(struct rbd_img_request *img_request)
  {
  	clear_bit(IMG_REQ_CHILD, &img_request->flags);
  	smp_mb();
  }
9849e9863   Alex Elder   rbd: define image...
1593
1594
1595
1596
1597
  static bool img_request_child_test(struct rbd_img_request *img_request)
  {
  	smp_mb();
  	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
  }
d0b2e9445   Alex Elder   rbd: define image...
1598
1599
1600
1601
1602
  static void img_request_layered_set(struct rbd_img_request *img_request)
  {
  	set_bit(IMG_REQ_LAYERED, &img_request->flags);
  	smp_mb();
  }
a2acd00e7   Alex Elder   rbd: reference co...
1603
1604
1605
1606
1607
  static void img_request_layered_clear(struct rbd_img_request *img_request)
  {
  	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
  	smp_mb();
  }
d0b2e9445   Alex Elder   rbd: define image...
1608
1609
1610
1611
1612
  static bool img_request_layered_test(struct rbd_img_request *img_request)
  {
  	smp_mb();
  	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
  }
3b434a2af   Josh Durgin   rbd: extract a me...
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
  static enum obj_operation_type
  rbd_img_request_op_type(struct rbd_img_request *img_request)
  {
  	if (img_request_write_test(img_request))
  		return OBJ_OP_WRITE;
  	else if (img_request_discard_test(img_request))
  		return OBJ_OP_DISCARD;
  	else
  		return OBJ_OP_READ;
  }
6e2a4505d   Alex Elder   rbd: don't zero-f...
1623
1624
1625
  static void
  rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
  {
b9434c5b4   Alex Elder   rbd: define zero_...
1626
1627
  	u64 xferred = obj_request->xferred;
  	u64 length = obj_request->length;
6e2a4505d   Alex Elder   rbd: don't zero-f...
1628
1629
1630
  	dout("%s: obj %p img %p result %d %llu/%llu
  ", __func__,
  		obj_request, obj_request->img_request, obj_request->result,
b9434c5b4   Alex Elder   rbd: define zero_...
1631
  		xferred, length);
6e2a4505d   Alex Elder   rbd: don't zero-f...
1632
  	/*
17c1cc1d9   Josh Durgin   rbd: fix I/O erro...
1633
1634
1635
1636
1637
1638
  	 * ENOENT means a hole in the image.  We zero-fill the entire
  	 * length of the request.  A short read also implies zero-fill
  	 * to the end of the request.  An error requires the whole
  	 * length of the request to be reported finished with an error
  	 * to the block layer.  In each case we update the xferred
  	 * count to indicate the whole request was satisfied.
6e2a4505d   Alex Elder   rbd: don't zero-f...
1639
  	 */
b9434c5b4   Alex Elder   rbd: define zero_...
1640
  	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
6e2a4505d   Alex Elder   rbd: don't zero-f...
1641
  	if (obj_request->result == -ENOENT) {
b9434c5b4   Alex Elder   rbd: define zero_...
1642
1643
1644
1645
  		if (obj_request->type == OBJ_REQUEST_BIO)
  			zero_bio_chain(obj_request->bio_list, 0);
  		else
  			zero_pages(obj_request->pages, 0, length);
6e2a4505d   Alex Elder   rbd: don't zero-f...
1646
  		obj_request->result = 0;
b9434c5b4   Alex Elder   rbd: define zero_...
1647
1648
1649
1650
1651
  	} else if (xferred < length && !obj_request->result) {
  		if (obj_request->type == OBJ_REQUEST_BIO)
  			zero_bio_chain(obj_request->bio_list, xferred);
  		else
  			zero_pages(obj_request->pages, xferred, length);
6e2a4505d   Alex Elder   rbd: don't zero-f...
1652
  	}
17c1cc1d9   Josh Durgin   rbd: fix I/O erro...
1653
  	obj_request->xferred = length;
6e2a4505d   Alex Elder   rbd: don't zero-f...
1654
1655
  	obj_request_done_set(obj_request);
  }
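  
  /*
   * Editor's illustrative sketch (not part of rbd.c): the read fix-up
   * rules described in the comment above, reduced to arithmetic on
   * (result, xferred, length).  struct read_fixup and fixup_read() are
   * made up; [zero_from, zero_to) is the range that would be
   * zero-filled in the bio chain or page array:
   */
  #include <errno.h>
  #include <inttypes.h>
  #include <stdio.h>
  
  struct read_fixup {
  	int result;		/* result reported up the stack */
  	uint64_t xferred;	/* always the full request length */
  	uint64_t zero_from;	/* [zero_from, zero_to) gets zero-filled */
  	uint64_t zero_to;
  };
  
  static struct read_fixup fixup_read(int result, uint64_t xferred, uint64_t length)
  {
  	struct read_fixup f = { result, length, 0, 0 };
  
  	if (result == -ENOENT) {		/* hole in the image */
  		f.result = 0;
  		f.zero_to = length;		/* zero the whole request */
  	} else if (!result && xferred < length) {
  		f.zero_from = xferred;		/* short read: zero the tail */
  		f.zero_to = length;
  	}					/* real errors: no zeroing */
  	return f;
  }
  
  int main(void)
  {
  	struct read_fixup f = fixup_read(0, 1024, 4096);	/* short read */
  
  	printf("result=%d xferred=%" PRIu64 " zero=[%" PRIu64 ",%" PRIu64 ")\n",
  	       f.result, f.xferred, f.zero_from, f.zero_to);
  	/* result=0 xferred=4096 zero=[1024,4096) */
  	return 0;
  }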
bf0d5f503   Alex Elder   rbd: new request ...
1656
1657
  static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
  {
37206ee5b   Alex Elder   rbd: normalize do...
1658
1659
1660
  	dout("%s: obj %p cb %p
  ", __func__, obj_request,
  		obj_request->callback);
bf0d5f503   Alex Elder   rbd: new request ...
1661
1662
  	if (obj_request->callback)
  		obj_request->callback(obj_request);
788e2df3b   Alex Elder   rbd: implement sy...
1663
1664
  	else
  		complete_all(&obj_request->completion);
bf0d5f503   Alex Elder   rbd: new request ...
1665
  }
c47f93715   Alex Elder   rbd: pass length,...
1666
  static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
bf0d5f503   Alex Elder   rbd: new request ...
1667
  {
57acbaa7f   Alex Elder   rbd: always check...
1668
  	struct rbd_img_request *img_request = NULL;
a9e8ba2cb   Alex Elder   rbd: enforce pare...
1669
  	struct rbd_device *rbd_dev = NULL;
57acbaa7f   Alex Elder   rbd: always check...
1670
1671
1672
1673
1674
  	bool layered = false;
  
  	if (obj_request_img_data_test(obj_request)) {
  		img_request = obj_request->img_request;
  		layered = img_request && img_request_layered_test(img_request);
a9e8ba2cb   Alex Elder   rbd: enforce pare...
1675
  		rbd_dev = img_request->rbd_dev;
57acbaa7f   Alex Elder   rbd: always check...
1676
  	}
8b3e1a569   Alex Elder   rbd: implement la...
1677
1678
1679
1680
1681
  
  	dout("%s: obj %p img %p result %d %llu/%llu
  ", __func__,
  		obj_request, img_request, obj_request->result,
  		obj_request->xferred, obj_request->length);
a9e8ba2cb   Alex Elder   rbd: enforce pare...
1682
1683
  	if (layered && obj_request->result == -ENOENT &&
  			obj_request->img_offset < rbd_dev->parent_overlap)
8b3e1a569   Alex Elder   rbd: implement la...
1684
1685
  		rbd_img_parent_read(obj_request);
  	else if (img_request)
6e2a4505d   Alex Elder   rbd: don't zero-f...
1686
1687
1688
  		rbd_img_obj_request_read_callback(obj_request);
  	else
  		obj_request_done_set(obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
1689
  }
c47f93715   Alex Elder   rbd: pass length,...
1690
  static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
bf0d5f503   Alex Elder   rbd: new request ...
1691
  {
1b83bef24   Sage Weil   libceph: update o...
1692
1693
1694
1695
  	dout("%s: obj %p result %d %llu
  ", __func__, obj_request,
  		obj_request->result, obj_request->length);
  	/*
8b3e1a569   Alex Elder   rbd: implement la...
1696
1697
  	 * There is no such thing as a successful short write.  Set
  	 * it to our originally-requested length.
1b83bef24   Sage Weil   libceph: update o...
1698
1699
  	 */
  	obj_request->xferred = obj_request->length;
077413082   Alex Elder   rbd: add barriers...
1700
  	obj_request_done_set(obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
1701
  }
90e98c522   Guangliang Zhao   rbd: initial disc...
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
  static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
  {
  	dout("%s: obj %p result %d %llu
  ", __func__, obj_request,
  		obj_request->result, obj_request->length);
  	/*
  	 * There is no such thing as a successful short discard.  Set
  	 * it to our originally-requested length.
  	 */
  	obj_request->xferred = obj_request->length;
d0265de7c   Josh Durgin   rbd: tolerate -EN...
1712
1713
1714
  	/* discarding a non-existent object is not a problem */
  	if (obj_request->result == -ENOENT)
  		obj_request->result = 0;
90e98c522   Guangliang Zhao   rbd: initial disc...
1715
1716
  	obj_request_done_set(obj_request);
  }
fbfab5396   Alex Elder   libceph: allow ST...
1717
1718
1719
1720
  /*
   * For a simple stat call there's nothing to do.  We'll do more if
   * this is part of a write sequence for a layered image.
   */
c47f93715   Alex Elder   rbd: pass length,...
1721
  static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
fbfab5396   Alex Elder   libceph: allow ST...
1722
  {
37206ee5b   Alex Elder   rbd: normalize do...
1723
1724
  	dout("%s: obj %p
  ", __func__, obj_request);
fbfab5396   Alex Elder   libceph: allow ST...
1725
1726
  	obj_request_done_set(obj_request);
  }
2761713d3   Ilya Dryomov   rbd: fix copyup c...
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
  static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
  {
  	dout("%s: obj %p
  ", __func__, obj_request);
  
  	if (obj_request_img_data_test(obj_request))
  		rbd_osd_copyup_callback(obj_request);
  	else
  		obj_request_done_set(obj_request);
  }
85e084feb   Ilya Dryomov   libceph: drop msg...
1737
  static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
bf0d5f503   Alex Elder   rbd: new request ...
1738
1739
  {
  	struct rbd_obj_request *obj_request = osd_req->r_priv;
bf0d5f503   Alex Elder   rbd: new request ...
1740
  	u16 opcode;
85e084feb   Ilya Dryomov   libceph: drop msg...
1741
1742
  	dout("%s: osd_req %p
  ", __func__, osd_req);
bf0d5f503   Alex Elder   rbd: new request ...
1743
  	rbd_assert(osd_req == obj_request->osd_req);
57acbaa7f   Alex Elder   rbd: always check...
1744
1745
1746
1747
1748
1749
  	if (obj_request_img_data_test(obj_request)) {
  		rbd_assert(obj_request->img_request);
  		rbd_assert(obj_request->which != BAD_WHICH);
  	} else {
  		rbd_assert(obj_request->which == BAD_WHICH);
  	}
bf0d5f503   Alex Elder   rbd: new request ...
1750

1b83bef24   Sage Weil   libceph: update o...
1751
1752
  	if (osd_req->r_result < 0)
  		obj_request->result = osd_req->r_result;
bf0d5f503   Alex Elder   rbd: new request ...
1753

c47f93715   Alex Elder   rbd: pass length,...
1754
1755
  	/*
  	 * We support a 64-bit length, but ultimately it has to be
7ad18afad   Christoph Hellwig   rbd: convert to b...
1756
1757
  	 * passed to the block layer, which just supports a 32-bit
  	 * length field.
c47f93715   Alex Elder   rbd: pass length,...
1758
  	 */
7665d85b7   Yan, Zheng   libceph: move r_r...
1759
  	obj_request->xferred = osd_req->r_ops[0].outdata_len;
8b3e1a569   Alex Elder   rbd: implement la...
1760
  	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
0ccd59266   Ilya Dryomov   rbd: prefix rbd w...
1761

79528734f   Alex Elder   libceph: keep sou...
1762
  	opcode = osd_req->r_ops[0].op;
bf0d5f503   Alex Elder   rbd: new request ...
1763
1764
  	switch (opcode) {
  	case CEPH_OSD_OP_READ:
c47f93715   Alex Elder   rbd: pass length,...
1765
  		rbd_osd_read_callback(obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
1766
  		break;
0ccd59266   Ilya Dryomov   rbd: prefix rbd w...
1767
  	case CEPH_OSD_OP_SETALLOCHINT:
e30b7577b   Ilya Dryomov   rbd: use writeful...
1768
1769
  		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
  			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
0ccd59266   Ilya Dryomov   rbd: prefix rbd w...
1770
  		/* fall through */
bf0d5f503   Alex Elder   rbd: new request ...
1771
  	case CEPH_OSD_OP_WRITE:
e30b7577b   Ilya Dryomov   rbd: use writeful...
1772
  	case CEPH_OSD_OP_WRITEFULL:
c47f93715   Alex Elder   rbd: pass length,...
1773
  		rbd_osd_write_callback(obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
1774
  		break;
fbfab5396   Alex Elder   libceph: allow ST...
1775
  	case CEPH_OSD_OP_STAT:
c47f93715   Alex Elder   rbd: pass length,...
1776
  		rbd_osd_stat_callback(obj_request);
fbfab5396   Alex Elder   libceph: allow ST...
1777
  		break;
90e98c522   Guangliang Zhao   rbd: initial disc...
1778
1779
1780
1781
1782
  	case CEPH_OSD_OP_DELETE:
  	case CEPH_OSD_OP_TRUNCATE:
  	case CEPH_OSD_OP_ZERO:
  		rbd_osd_discard_callback(obj_request);
  		break;
36be9a761   Alex Elder   rbd: implement sy...
1783
  	case CEPH_OSD_OP_CALL:
2761713d3   Ilya Dryomov   rbd: fix copyup c...
1784
1785
  		rbd_osd_call_callback(obj_request);
  		break;
bf0d5f503   Alex Elder   rbd: new request ...
1786
  	default:
9584d5082   Ilya Dryomov   rbd: remove extra...
1787
  		rbd_warn(NULL, "%s: unsupported op %hu",
bf0d5f503   Alex Elder   rbd: new request ...
1788
1789
1790
  			obj_request->object_name, (unsigned short) opcode);
  		break;
  	}
077413082   Alex Elder   rbd: add barriers...
1791
  	if (obj_request_done_test(obj_request))
bf0d5f503   Alex Elder   rbd: new request ...
1792
1793
  		rbd_obj_request_complete(obj_request);
  }
9d4df01f0   Alex Elder   rbd: define separ...
1794
  static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
430c28c3c   Alex Elder   rbd: define rbd_o...
1795
1796
  {
  	struct rbd_img_request *img_request = obj_request->img_request;
8c042b0df   Alex Elder   libceph: add data...
1797
  	struct ceph_osd_request *osd_req = obj_request->osd_req;
430c28c3c   Alex Elder   rbd: define rbd_o...
1798

bb873b539   Ilya Dryomov   libceph: switch t...
1799
1800
  	if (img_request)
  		osd_req->r_snapid = img_request->snap_id;
9d4df01f0   Alex Elder   rbd: define separ...
1801
1802
1803
1804
  }
  
  static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
  {
9d4df01f0   Alex Elder   rbd: define separ...
1805
  	struct ceph_osd_request *osd_req = obj_request->osd_req;
9d4df01f0   Alex Elder   rbd: define separ...
1806

bb873b539   Ilya Dryomov   libceph: switch t...
1807
1808
  	osd_req->r_mtime = CURRENT_TIME;
  	osd_req->r_data_offset = obj_request->offset;
430c28c3c   Alex Elder   rbd: define rbd_o...
1809
  }
0ccd59266   Ilya Dryomov   rbd: prefix rbd w...
1810
1811
1812
1813
1814
1815
  /*
   * Create an osd request.  A read request has one osd op (read).
   * A write request has either one (watch) or two (hint+write) osd ops.
   * (All rbd data writes are prefixed with an allocation hint op, but
   * technically osd watch is a write request, hence this distinction.)
   */
bf0d5f503   Alex Elder   rbd: new request ...
1816
1817
  static struct ceph_osd_request *rbd_osd_req_create(
  					struct rbd_device *rbd_dev,
6d2940c88   Guangliang Zhao   rbd: extend the o...
1818
  					enum obj_operation_type op_type,
deb236b30   Ilya Dryomov   rbd: num_ops para...
1819
  					unsigned int num_ops,
430c28c3c   Alex Elder   rbd: define rbd_o...
1820
  					struct rbd_obj_request *obj_request)
bf0d5f503   Alex Elder   rbd: new request ...
1821
  {
bf0d5f503   Alex Elder   rbd: new request ...
1822
1823
1824
  	struct ceph_snap_context *snapc = NULL;
  	struct ceph_osd_client *osdc;
  	struct ceph_osd_request *osd_req;
bf0d5f503   Alex Elder   rbd: new request ...
1825

90e98c522   Guangliang Zhao   rbd: initial disc...
1826
1827
  	if (obj_request_img_data_test(obj_request) &&
  		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
6365d33a2   Alex Elder   rbd: add an objec...
1828
  		struct rbd_img_request *img_request = obj_request->img_request;
90e98c522   Guangliang Zhao   rbd: initial disc...
1829
1830
1831
1832
1833
  		if (op_type == OBJ_OP_WRITE) {
  			rbd_assert(img_request_write_test(img_request));
  		} else {
  			rbd_assert(img_request_discard_test(img_request));
  		}
6d2940c88   Guangliang Zhao   rbd: extend the o...
1834
  		snapc = img_request->snapc;
bf0d5f503   Alex Elder   rbd: new request ...
1835
  	}
6d2940c88   Guangliang Zhao   rbd: extend the o...
1836
  	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
deb236b30   Ilya Dryomov   rbd: num_ops para...
1837
1838
  
  	/* Allocate and initialize the request, for the num_ops ops */
bf0d5f503   Alex Elder   rbd: new request ...
1839
1840
  
  	osdc = &rbd_dev->rbd_client->client->osdc;
deb236b30   Ilya Dryomov   rbd: num_ops para...
1841
  	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
2224d879c   David Disseldorp   rbd: use GFP_NOIO...
1842
  					  GFP_NOIO);
bf0d5f503   Alex Elder   rbd: new request ...
1843
  	if (!osd_req)
13d1ad16d   Ilya Dryomov   libceph: move mes...
1844
  		goto fail;
bf0d5f503   Alex Elder   rbd: new request ...
1845

90e98c522   Guangliang Zhao   rbd: initial disc...
1846
  	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
bf0d5f503   Alex Elder   rbd: new request ...
1847
  		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
430c28c3c   Alex Elder   rbd: define rbd_o...
1848
  	else
bf0d5f503   Alex Elder   rbd: new request ...
1849
  		osd_req->r_flags = CEPH_OSD_FLAG_READ;
bf0d5f503   Alex Elder   rbd: new request ...
1850
1851
1852
  
  	osd_req->r_callback = rbd_osd_req_callback;
  	osd_req->r_priv = obj_request;
7627151ea   Yan, Zheng   libceph: define n...
1853
  	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
d30291b98   Ilya Dryomov   libceph: variable...
1854
1855
1856
  	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
  			     obj_request->object_name))
  		goto fail;
bf0d5f503   Alex Elder   rbd: new request ...
1857

13d1ad16d   Ilya Dryomov   libceph: move mes...
1858
1859
  	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
  		goto fail;
bf0d5f503   Alex Elder   rbd: new request ...
1860
  	return osd_req;
13d1ad16d   Ilya Dryomov   libceph: move mes...
1861
1862
1863
1864
  
  fail:
  	ceph_osdc_put_request(osd_req);
  	return NULL;
bf0d5f503   Alex Elder   rbd: new request ...
1865
  }
0eefd470f   Alex Elder   rbd: issue a copy...
1866
  /*
d3246fb0d   Josh Durgin   rbd: use helpers ...
1867
1868
1869
1870
   * Create a copyup osd request based on the information in the object
   * request supplied.  A copyup request has two or three osd ops, a
   * copyup method call, potentially a hint op, and a write or truncate
   * or zero op.
0eefd470f   Alex Elder   rbd: issue a copy...
1871
1872
1873
1874
1875
1876
1877
1878
1879
   */
  static struct ceph_osd_request *
  rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
  {
  	struct rbd_img_request *img_request;
  	struct ceph_snap_context *snapc;
  	struct rbd_device *rbd_dev;
  	struct ceph_osd_client *osdc;
  	struct ceph_osd_request *osd_req;
d3246fb0d   Josh Durgin   rbd: use helpers ...
1880
  	int num_osd_ops = 3;
0eefd470f   Alex Elder   rbd: issue a copy...
1881
1882
1883
1884
  
  	rbd_assert(obj_request_img_data_test(obj_request));
  	img_request = obj_request->img_request;
  	rbd_assert(img_request);
d3246fb0d   Josh Durgin   rbd: use helpers ...
1885
1886
  	rbd_assert(img_request_write_test(img_request) ||
  			img_request_discard_test(img_request));
0eefd470f   Alex Elder   rbd: issue a copy...
1887

d3246fb0d   Josh Durgin   rbd: use helpers ...
1888
1889
1890
1891
  	if (img_request_discard_test(img_request))
  		num_osd_ops = 2;
  
  	/* Allocate and initialize the request, for all the ops */
0eefd470f   Alex Elder   rbd: issue a copy...
1892
1893
1894
1895
  
  	snapc = img_request->snapc;
  	rbd_dev = img_request->rbd_dev;
  	osdc = &rbd_dev->rbd_client->client->osdc;
d3246fb0d   Josh Durgin   rbd: use helpers ...
1896
  	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
2224d879c   David Disseldorp   rbd: use GFP_NOIO...
1897
  						false, GFP_NOIO);
0eefd470f   Alex Elder   rbd: issue a copy...
1898
  	if (!osd_req)
13d1ad16d   Ilya Dryomov   libceph: move mes...
1899
  		goto fail;
0eefd470f   Alex Elder   rbd: issue a copy...
1900
1901
1902
1903
  
  	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
  	osd_req->r_callback = rbd_osd_req_callback;
  	osd_req->r_priv = obj_request;
7627151ea   Yan, Zheng   libceph: define n...
1904
  	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
d30291b98   Ilya Dryomov   libceph: variable...
1905
1906
1907
  	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
  			     obj_request->object_name))
  		goto fail;
0eefd470f   Alex Elder   rbd: issue a copy...
1908

13d1ad16d   Ilya Dryomov   libceph: move mes...
1909
1910
  	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
  		goto fail;
0eefd470f   Alex Elder   rbd: issue a copy...
1911
  	return osd_req;
13d1ad16d   Ilya Dryomov   libceph: move mes...
1912
1913
1914
1915
  
  fail:
  	ceph_osdc_put_request(osd_req);
  	return NULL;
0eefd470f   Alex Elder   rbd: issue a copy...
1916
  }
bf0d5f503   Alex Elder   rbd: new request ...
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
  static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
  {
  	ceph_osdc_put_request(osd_req);
  }
  
  /* object_name is assumed to be a non-null pointer and NUL-terminated */
  
  static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
  						u64 offset, u64 length,
  						enum obj_request_type type)
  {
  	struct rbd_obj_request *obj_request;
  	size_t size;
  	char *name;
  
  	rbd_assert(obj_request_type_valid(type));
  
  	size = strlen(object_name) + 1;
5a60e8760   Ilya Dryomov   rbd: use GFP_NOIO...
1935
  	name = kmalloc(size, GFP_NOIO);
f907ad559   Alex Elder   rbd: allocate nam...
1936
  	if (!name)
bf0d5f503   Alex Elder   rbd: new request ...
1937
  		return NULL;
5a60e8760   Ilya Dryomov   rbd: use GFP_NOIO...
1938
  	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
f907ad559   Alex Elder   rbd: allocate nam...
1939
1940
1941
1942
  	if (!obj_request) {
  		kfree(name);
  		return NULL;
  	}
bf0d5f503   Alex Elder   rbd: new request ...
1943
1944
1945
  	obj_request->object_name = memcpy(name, object_name, size);
  	obj_request->offset = offset;
  	obj_request->length = length;
926f9b3f0   Alex Elder   rbd: define an rb...
1946
  	obj_request->flags = 0;
bf0d5f503   Alex Elder   rbd: new request ...
1947
1948
1949
  	obj_request->which = BAD_WHICH;
  	obj_request->type = type;
  	INIT_LIST_HEAD(&obj_request->links);
788e2df3b   Alex Elder   rbd: implement sy...
1950
  	init_completion(&obj_request->completion);
bf0d5f503   Alex Elder   rbd: new request ...
1951
  	kref_init(&obj_request->kref);
37206ee5b   Alex Elder   rbd: normalize do...
1952
1953
1954
  	dout("%s: \"%s\" %llu/%llu %d -> obj %p
  ", __func__, object_name,
  		offset, length, (int)type, obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
1955
1956
1957
1958
1959
1960
1961
1962
  	return obj_request;
  }
  
  static void rbd_obj_request_destroy(struct kref *kref)
  {
  	struct rbd_obj_request *obj_request;
  
  	obj_request = container_of(kref, struct rbd_obj_request, kref);
37206ee5b   Alex Elder   rbd: normalize do...
1963
1964
  	dout("%s: obj %p
  ", __func__, obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
1965
1966
1967
1968
1969
1970
1971
1972
  	rbd_assert(obj_request->img_request == NULL);
  	rbd_assert(obj_request->which == BAD_WHICH);
  
  	if (obj_request->osd_req)
  		rbd_osd_req_destroy(obj_request->osd_req);
  
  	rbd_assert(obj_request_type_valid(obj_request->type));
  	switch (obj_request->type) {
9969ebc5a   Alex Elder   rbd: implement wa...
1973
1974
  	case OBJ_REQUEST_NODATA:
  		break;		/* Nothing to do */
bf0d5f503   Alex Elder   rbd: new request ...
1975
1976
1977
1978
  	case OBJ_REQUEST_BIO:
  		if (obj_request->bio_list)
  			bio_chain_put(obj_request->bio_list);
  		break;
788e2df3b   Alex Elder   rbd: implement sy...
1979
1980
1981
1982
1983
  	case OBJ_REQUEST_PAGES:
  		if (obj_request->pages)
  			ceph_release_page_vector(obj_request->pages,
  						obj_request->page_count);
  		break;
bf0d5f503   Alex Elder   rbd: new request ...
1984
  	}
f907ad559   Alex Elder   rbd: allocate nam...
1985
  	kfree(obj_request->object_name);
868311b1e   Alex Elder   rbd: allocate obj...
1986
1987
  	obj_request->object_name = NULL;
  	kmem_cache_free(rbd_obj_request_cache, obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
1988
  }
fb65d2284   Alex Elder   rbd: define rbd_d...
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
  /* It's OK to call this for a device with no parent */
  
  static void rbd_spec_put(struct rbd_spec *spec);
  static void rbd_dev_unparent(struct rbd_device *rbd_dev)
  {
  	rbd_dev_remove_parent(rbd_dev);
  	rbd_spec_put(rbd_dev->parent_spec);
  	rbd_dev->parent_spec = NULL;
  	rbd_dev->parent_overlap = 0;
  }
bf0d5f503   Alex Elder   rbd: new request ...
1999
  /*
a2acd00e7   Alex Elder   rbd: reference co...
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
   * Parent image reference counting is used to determine when an
   * image's parent fields can be safely torn down--after there are no
   * more in-flight requests to the parent image.  When the last
   * reference is dropped, cleaning them up is safe.
   */
  static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
  {
  	int counter;
  
  	if (!rbd_dev->parent_spec)
  		return;
  
  	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
  	if (counter > 0)
  		return;
  
  	/* Last reference; clean up parent data structures */
  
  	if (!counter)
  		rbd_dev_unparent(rbd_dev);
  	else
9584d5082   Ilya Dryomov   rbd: remove extra...
2021
  		rbd_warn(rbd_dev, "parent reference underflow");
a2acd00e7   Alex Elder   rbd: reference co...
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
  }
  
  /*
   * If an image has a non-zero parent overlap, get a reference to its
   * parent.
   *
   * Returns true if the rbd device has a parent with a non-zero
   * overlap and a reference for it was successfully taken, or
   * false otherwise.
   */
  static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
  {
ae43e9d05   Ilya Dryomov   rbd: fix rbd_dev_...
2034
  	int counter = 0;
a2acd00e7   Alex Elder   rbd: reference co...
2035
2036
2037
  
  	if (!rbd_dev->parent_spec)
  		return false;
ae43e9d05   Ilya Dryomov   rbd: fix rbd_dev_...
2038
2039
2040
2041
  	down_read(&rbd_dev->header_rwsem);
  	if (rbd_dev->parent_overlap)
  		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
  	up_read(&rbd_dev->header_rwsem);
a2acd00e7   Alex Elder   rbd: reference co...
2042
2043
  
  	if (counter < 0)
9584d5082   Ilya Dryomov   rbd: remove extra...
2044
  		rbd_warn(rbd_dev, "parent reference overflow");
a2acd00e7   Alex Elder   rbd: reference co...
2045

ae43e9d05   Ilya Dryomov   rbd: fix rbd_dev_...
2046
  	return counter > 0;
a2acd00e7   Alex Elder   rbd: reference co...
2047
  }
bf0d5f503   Alex Elder   rbd: new request ...
2048
2049
2050
2051
2052
  /*
   * Caller is responsible for filling in the list of object requests
   * that comprises the image request, and the Linux request pointer
   * (if there is one).
   */
cc344fa1b   Alex Elder   rbd: eliminate sp...
2053
2054
  static struct rbd_img_request *rbd_img_request_create(
  					struct rbd_device *rbd_dev,
bf0d5f503   Alex Elder   rbd: new request ...
2055
  					u64 offset, u64 length,
6d2940c88   Guangliang Zhao   rbd: extend the o...
2056
  					enum obj_operation_type op_type,
4e752f0ab   Josh Durgin   rbd: access snaps...
2057
  					struct ceph_snap_context *snapc)
bf0d5f503   Alex Elder   rbd: new request ...
2058
2059
  {
  	struct rbd_img_request *img_request;
bf0d5f503   Alex Elder   rbd: new request ...
2060

7a716aac0   Ilya Dryomov   rbd: allocate img...
2061
  	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
bf0d5f503   Alex Elder   rbd: new request ...
2062
2063
  	if (!img_request)
  		return NULL;
bf0d5f503   Alex Elder   rbd: new request ...
2064
2065
2066
2067
  	img_request->rq = NULL;
  	img_request->rbd_dev = rbd_dev;
  	img_request->offset = offset;
  	img_request->length = length;
0c425248e   Alex Elder   rbd: define image...
2068
  	img_request->flags = 0;
90e98c522   Guangliang Zhao   rbd: initial disc...
2069
2070
2071
2072
  	if (op_type == OBJ_OP_DISCARD) {
  		img_request_discard_set(img_request);
  		img_request->snapc = snapc;
  	} else if (op_type == OBJ_OP_WRITE) {
0c425248e   Alex Elder   rbd: define image...
2073
  		img_request_write_set(img_request);
4e752f0ab   Josh Durgin   rbd: access snaps...
2074
  		img_request->snapc = snapc;
0c425248e   Alex Elder   rbd: define image...
2075
  	} else {
bf0d5f503   Alex Elder   rbd: new request ...
2076
  		img_request->snap_id = rbd_dev->spec->snap_id;
0c425248e   Alex Elder   rbd: define image...
2077
  	}
a2acd00e7   Alex Elder   rbd: reference co...
2078
  	if (rbd_dev_parent_get(rbd_dev))
d0b2e9445   Alex Elder   rbd: define image...
2079
  		img_request_layered_set(img_request);
bf0d5f503   Alex Elder   rbd: new request ...
2080
2081
2082
  	spin_lock_init(&img_request->completion_lock);
  	img_request->next_completion = 0;
  	img_request->callback = NULL;
a5a337d43   Alex Elder   rbd: record overa...
2083
  	img_request->result = 0;
bf0d5f503   Alex Elder   rbd: new request ...
2084
2085
2086
  	img_request->obj_request_count = 0;
  	INIT_LIST_HEAD(&img_request->obj_requests);
  	kref_init(&img_request->kref);
37206ee5b   Alex Elder   rbd: normalize do...
2087
2088
  	dout("%s: rbd_dev %p %s %llu/%llu -> img %p
  ", __func__, rbd_dev,
6d2940c88   Guangliang Zhao   rbd: extend the o...
2089
  		obj_op_name(op_type), offset, length, img_request);
37206ee5b   Alex Elder   rbd: normalize do...
2090

bf0d5f503   Alex Elder   rbd: new request ...
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
  	return img_request;
  }
  
  static void rbd_img_request_destroy(struct kref *kref)
  {
  	struct rbd_img_request *img_request;
  	struct rbd_obj_request *obj_request;
  	struct rbd_obj_request *next_obj_request;
  
  	img_request = container_of(kref, struct rbd_img_request, kref);
37206ee5b   Alex Elder   rbd: normalize do...
2101
2102
  	dout("%s: img %p
  ", __func__, img_request);
bf0d5f503   Alex Elder   rbd: new request ...
2103
2104
  	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
  		rbd_img_obj_request_del(img_request, obj_request);
25dcf954c   Alex Elder   rbd: decrement ob...
2105
  	rbd_assert(img_request->obj_request_count == 0);
bf0d5f503   Alex Elder   rbd: new request ...
2106

a2acd00e7   Alex Elder   rbd: reference co...
2107
2108
2109
2110
  	if (img_request_layered_test(img_request)) {
  		img_request_layered_clear(img_request);
  		rbd_dev_parent_put(img_request->rbd_dev);
  	}
bef95455a   Josh Durgin   rbd: fix snapshot...
2111
2112
  	if (img_request_write_test(img_request) ||
  		img_request_discard_test(img_request))
812164f8c   Alex Elder   ceph: use ceph_cr...
2113
  		ceph_put_snap_context(img_request->snapc);
bf0d5f503   Alex Elder   rbd: new request ...
2114

1c2a9dfe2   Alex Elder   rbd: allocate ima...
2115
  	kmem_cache_free(rbd_img_request_cache, img_request);
bf0d5f503   Alex Elder   rbd: new request ...
2116
  }
e93f31523   Alex Elder   rbd: define paren...
2117
2118
2119
2120
2121
2122
2123
2124
2125
  static struct rbd_img_request *rbd_parent_request_create(
  					struct rbd_obj_request *obj_request,
  					u64 img_offset, u64 length)
  {
  	struct rbd_img_request *parent_request;
  	struct rbd_device *rbd_dev;
  
  	rbd_assert(obj_request->img_request);
  	rbd_dev = obj_request->img_request->rbd_dev;
4e752f0ab   Josh Durgin   rbd: access snaps...
2126
  	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
6d2940c88   Guangliang Zhao   rbd: extend the o...
2127
  						length, OBJ_OP_READ, NULL);
e93f31523   Alex Elder   rbd: define paren...
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
  	if (!parent_request)
  		return NULL;
  
  	img_request_child_set(parent_request);
  	rbd_obj_request_get(obj_request);
  	parent_request->obj_request = obj_request;
  
  	return parent_request;
  }
  
  static void rbd_parent_request_destroy(struct kref *kref)
  {
  	struct rbd_img_request *parent_request;
  	struct rbd_obj_request *orig_request;
  
  	parent_request = container_of(kref, struct rbd_img_request, kref);
  	orig_request = parent_request->obj_request;
  
  	parent_request->obj_request = NULL;
  	rbd_obj_request_put(orig_request);
  	img_request_child_clear(parent_request);
  
  	rbd_img_request_destroy(kref);
  }
1217857fb   Alex Elder   rbd: encapsulate ...
2152
2153
  static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
  {
6365d33a2   Alex Elder   rbd: add an objec...
2154
  	struct rbd_img_request *img_request;
1217857fb   Alex Elder   rbd: encapsulate ...
2155
2156
  	unsigned int xferred;
  	int result;
8b3e1a569   Alex Elder   rbd: implement la...
2157
  	bool more;
1217857fb   Alex Elder   rbd: encapsulate ...
2158

6365d33a2   Alex Elder   rbd: add an objec...
2159
2160
  	rbd_assert(obj_request_img_data_test(obj_request));
  	img_request = obj_request->img_request;
1217857fb   Alex Elder   rbd: encapsulate ...
2161
2162
2163
2164
2165
  	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
  	xferred = (unsigned int)obj_request->xferred;
  	result = obj_request->result;
  	if (result) {
  		struct rbd_device *rbd_dev = img_request->rbd_dev;
6d2940c88   Guangliang Zhao   rbd: extend the o...
2166
  		enum obj_operation_type op_type;
90e98c522   Guangliang Zhao   rbd: initial disc...
2167
2168
2169
2170
2171
2172
  		if (img_request_discard_test(img_request))
  			op_type = OBJ_OP_DISCARD;
  		else if (img_request_write_test(img_request))
  			op_type = OBJ_OP_WRITE;
  		else
  			op_type = OBJ_OP_READ;
1217857fb   Alex Elder   rbd: encapsulate ...
2173

9584d5082   Ilya Dryomov   rbd: remove extra...
2174
  		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
6d2940c88   Guangliang Zhao   rbd: extend the o...
2175
2176
  			obj_op_name(op_type), obj_request->length,
  			obj_request->img_offset, obj_request->offset);
9584d5082   Ilya Dryomov   rbd: remove extra...
2177
  		rbd_warn(rbd_dev, "  result %d xferred %x",
1217857fb   Alex Elder   rbd: encapsulate ...
2178
2179
2180
  			result, xferred);
  		if (!img_request->result)
  			img_request->result = result;
082a75dad   Ilya Dryomov   rbd: end I/O the ...
2181
2182
2183
2184
2185
  		/*
  		 * Need to end I/O on the entire obj_request worth of
  		 * bytes in case of error.
  		 */
  		xferred = obj_request->length;
1217857fb   Alex Elder   rbd: encapsulate ...
2186
  	}
f1a4739f3   Alex Elder   rbd: support page...
2187
2188
2189
2190
2191
2192
  	/* Image object requests don't own their page array */
  
  	if (obj_request->type == OBJ_REQUEST_PAGES) {
  		obj_request->pages = NULL;
  		obj_request->page_count = 0;
  	}
8b3e1a569   Alex Elder   rbd: implement la...
2193
2194
2195
2196
2197
  	if (img_request_child_test(img_request)) {
  		rbd_assert(img_request->obj_request != NULL);
  		more = obj_request->which < img_request->obj_request_count - 1;
  	} else {
  		rbd_assert(img_request->rq != NULL);
7ad18afad   Christoph Hellwig   rbd: convert to b...
2198
2199
2200
2201
  
  		more = blk_update_request(img_request->rq, result, xferred);
  		if (!more)
  			__blk_mq_end_request(img_request->rq, result);
8b3e1a569   Alex Elder   rbd: implement la...
2202
2203
2204
  	}
  
  	return more;
1217857fb   Alex Elder   rbd: encapsulate ...
2205
  }
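  /*
   * Completion sketch for the function above: for a request that came from
   * the block layer, the per-object transfer count feeds blk_update_request(),
   * and on error xferred is forced to the full object request length so the
   * whole span covered by this object is ended rather than left hanging.
   * Child (parent read) image requests have no struct request, so "more" is
   * derived from the object's position within the image request instead.
   */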
2169238dd   Alex Elder   rbd: rearrange so...
2206
2207
2208
2209
2210
  static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
  {
  	struct rbd_img_request *img_request;
  	u32 which = obj_request->which;
  	bool more = true;
6365d33a2   Alex Elder   rbd: add an objec...
2211
  	rbd_assert(obj_request_img_data_test(obj_request));
2169238dd   Alex Elder   rbd: rearrange so...
2212
2213
2214
2215
2216
  	img_request = obj_request->img_request;
  
  	dout("%s: img %p obj %p
  ", __func__, img_request, obj_request);
  	rbd_assert(img_request != NULL);
2169238dd   Alex Elder   rbd: rearrange so...
2217
2218
2219
  	rbd_assert(img_request->obj_request_count > 0);
  	rbd_assert(which != BAD_WHICH);
  	rbd_assert(which < img_request->obj_request_count);
2169238dd   Alex Elder   rbd: rearrange so...
2220
2221
2222
2223
2224
2225
  
  	spin_lock_irq(&img_request->completion_lock);
  	if (which != img_request->next_completion)
  		goto out;
  
  	for_each_obj_request_from(img_request, obj_request) {
2169238dd   Alex Elder   rbd: rearrange so...
2226
2227
2228
2229
2230
  		rbd_assert(more);
  		rbd_assert(which < img_request->obj_request_count);
  
  		if (!obj_request_done_test(obj_request))
  			break;
1217857fb   Alex Elder   rbd: encapsulate ...
2231
  		more = rbd_img_obj_end_request(obj_request);
2169238dd   Alex Elder   rbd: rearrange so...
2232
2233
2234
2235
2236
2237
2238
  		which++;
  	}
  
  	rbd_assert(more ^ (which == img_request->obj_request_count));
  	img_request->next_completion = which;
  out:
  	spin_unlock_irq(&img_request->completion_lock);
0f2d5be79   Alex Elder   rbd: use referenc...
2239
  	rbd_img_request_put(img_request);
2169238dd   Alex Elder   rbd: rearrange so...
2240
2241
2242
2243
  
  	if (!more)
  		rbd_img_request_complete(img_request);
  }
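  /*
   * Object requests may complete in any order, but completion is only
   * propagated in order: next_completion is the index of the first object
   * request not yet ended, and the loop above walks forward from it only
   * while consecutive requests are marked done.
   */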
f1a4739f3   Alex Elder   rbd: support page...
2244
  /*
3b434a2af   Josh Durgin   rbd: extract a me...
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
   * Add individual osd ops to the given ceph_osd_request and prepare
   * them for submission. num_ops is the current number of
   * osd operations already added to the object request.
   */
  static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
  				struct ceph_osd_request *osd_request,
  				enum obj_operation_type op_type,
  				unsigned int num_ops)
  {
  	struct rbd_img_request *img_request = obj_request->img_request;
  	struct rbd_device *rbd_dev = img_request->rbd_dev;
  	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
  	u64 offset = obj_request->offset;
  	u64 length = obj_request->length;
  	u64 img_end;
  	u16 opcode;
  
  	if (op_type == OBJ_OP_DISCARD) {
d3246fb0d   Josh Durgin   rbd: use helpers ...
2263
2264
2265
  		if (!offset && length == object_size &&
  		    (!img_request_layered_test(img_request) ||
  		     !obj_request_overlaps_parent(obj_request))) {
3b434a2af   Josh Durgin   rbd: extract a me...
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
  			opcode = CEPH_OSD_OP_DELETE;
  		} else if ((offset + length == object_size)) {
  			opcode = CEPH_OSD_OP_TRUNCATE;
  		} else {
  			down_read(&rbd_dev->header_rwsem);
  			img_end = rbd_dev->header.image_size;
  			up_read(&rbd_dev->header_rwsem);
  
  			if (obj_request->img_offset + length == img_end)
  				opcode = CEPH_OSD_OP_TRUNCATE;
  			else
  				opcode = CEPH_OSD_OP_ZERO;
  		}
  	} else if (op_type == OBJ_OP_WRITE) {
e30b7577b   Ilya Dryomov   rbd: use writeful...
2280
2281
2282
2283
  		if (!offset && length == object_size)
  			opcode = CEPH_OSD_OP_WRITEFULL;
  		else
  			opcode = CEPH_OSD_OP_WRITE;
3b434a2af   Josh Durgin   rbd: extract a me...
2284
2285
2286
2287
2288
2289
  		osd_req_op_alloc_hint_init(osd_request, num_ops,
  					object_size, object_size);
  		num_ops++;
  	} else {
  		opcode = CEPH_OSD_OP_READ;
  	}
7e868b6ef   Ilya Dryomov   rbd: don't treat ...
2290
  	if (opcode == CEPH_OSD_OP_DELETE)
144cba149   Yan, Zheng   libceph: allow se...
2291
  		osd_req_op_init(osd_request, num_ops, opcode, 0);
7e868b6ef   Ilya Dryomov   rbd: don't treat ...
2292
2293
2294
  	else
  		osd_req_op_extent_init(osd_request, num_ops, opcode,
  				       offset, length, 0, 0);
3b434a2af   Josh Durgin   rbd: extract a me...
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
  	if (obj_request->type == OBJ_REQUEST_BIO)
  		osd_req_op_extent_osd_data_bio(osd_request, num_ops,
  					obj_request->bio_list, length);
  	else if (obj_request->type == OBJ_REQUEST_PAGES)
  		osd_req_op_extent_osd_data_pages(osd_request, num_ops,
  					obj_request->pages, length,
  					offset & ~PAGE_MASK, false, false);
  
  	/* Discards are also writes */
  	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
  		rbd_osd_req_format_write(obj_request);
  	else
  		rbd_osd_req_format_read(obj_request);
  }
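  /*
   * A worked example of the opcode selection above, assuming a 4 MiB object
   * size: discarding a whole object maps to DELETE (unless parent data would
   * show through), discarding from some offset to the end of the object (or
   * to the end of the image) maps to TRUNCATE, and any interior range maps
   * to ZERO.  Writes that cover a whole object use WRITEFULL instead of
   * WRITE.
   */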
  
  /*
f1a4739f3   Alex Elder   rbd: support page...
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
   * Split up an image request into one or more object requests, each
   * to a different object.  The "type" parameter indicates whether
   * "data_desc" is the pointer to the head of a list of bio
   * structures, or the base of a page array.  In either case this
   * function assumes data_desc describes memory sufficient to hold
   * all data described by the image request.
   */
  static int rbd_img_request_fill(struct rbd_img_request *img_request,
  					enum obj_request_type type,
  					void *data_desc)
bf0d5f503   Alex Elder   rbd: new request ...
2321
2322
2323
2324
  {
  	struct rbd_device *rbd_dev = img_request->rbd_dev;
  	struct rbd_obj_request *obj_request = NULL;
  	struct rbd_obj_request *next_obj_request;
a158073c4   Jingoo Han   block: rbd: use N...
2325
  	struct bio *bio_list = NULL;
f1a4739f3   Alex Elder   rbd: support page...
2326
  	unsigned int bio_offset = 0;
a158073c4   Jingoo Han   block: rbd: use N...
2327
  	struct page **pages = NULL;
6d2940c88   Guangliang Zhao   rbd: extend the o...
2328
  	enum obj_operation_type op_type;
7da22d296   Alex Elder   rbd: record image...
2329
  	u64 img_offset;
bf0d5f503   Alex Elder   rbd: new request ...
2330
  	u64 resid;
bf0d5f503   Alex Elder   rbd: new request ...
2331

f1a4739f3   Alex Elder   rbd: support page...
2332
2333
2334
  	dout("%s: img %p type %d data_desc %p
  ", __func__, img_request,
  		(int)type, data_desc);
37206ee5b   Alex Elder   rbd: normalize do...
2335

7da22d296   Alex Elder   rbd: record image...
2336
  	img_offset = img_request->offset;
bf0d5f503   Alex Elder   rbd: new request ...
2337
  	resid = img_request->length;
4dda41d3d   Alex Elder   rbd: ignore zero-...
2338
  	rbd_assert(resid > 0);
3b434a2af   Josh Durgin   rbd: extract a me...
2339
  	op_type = rbd_img_request_op_type(img_request);
f1a4739f3   Alex Elder   rbd: support page...
2340
2341
2342
  
  	if (type == OBJ_REQUEST_BIO) {
  		bio_list = data_desc;
4f024f379   Kent Overstreet   block: Abstract o...
2343
2344
  		rbd_assert(img_offset ==
  			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
90e98c522   Guangliang Zhao   rbd: initial disc...
2345
  	} else if (type == OBJ_REQUEST_PAGES) {
f1a4739f3   Alex Elder   rbd: support page...
2346
2347
  		pages = data_desc;
  	}
bf0d5f503   Alex Elder   rbd: new request ...
2348
  	while (resid) {
2fa123201   Alex Elder   rbd: don't set da...
2349
  		struct ceph_osd_request *osd_req;
bf0d5f503   Alex Elder   rbd: new request ...
2350
  		const char *object_name;
bf0d5f503   Alex Elder   rbd: new request ...
2351
2352
  		u64 offset;
  		u64 length;
7da22d296   Alex Elder   rbd: record image...
2353
  		object_name = rbd_segment_name(rbd_dev, img_offset);
bf0d5f503   Alex Elder   rbd: new request ...
2354
2355
  		if (!object_name)
  			goto out_unwind;
7da22d296   Alex Elder   rbd: record image...
2356
2357
  		offset = rbd_segment_offset(rbd_dev, img_offset);
  		length = rbd_segment_length(rbd_dev, img_offset, resid);
bf0d5f503   Alex Elder   rbd: new request ...
2358
  		obj_request = rbd_obj_request_create(object_name,
f1a4739f3   Alex Elder   rbd: support page...
2359
  						offset, length, type);
78c2a44aa   Alex Elder   rbd: allocate ima...
2360
2361
  		/* object request has its own copy of the object name */
  		rbd_segment_name_free(object_name);
bf0d5f503   Alex Elder   rbd: new request ...
2362
2363
  		if (!obj_request)
  			goto out_unwind;
62054da65   Ilya Dryomov   rbd: remove out_p...
2364

03507db63   Josh Durgin   rbd: fix buffer s...
2365
2366
2367
2368
2369
  		/*
  		 * set obj_request->img_request before creating the
  		 * osd_request so that it gets the right snapc
  		 */
  		rbd_img_obj_request_add(img_request, obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
2370

f1a4739f3   Alex Elder   rbd: support page...
2371
2372
2373
2374
2375
2376
2377
2378
2379
  		if (type == OBJ_REQUEST_BIO) {
  			unsigned int clone_size;
  
  			rbd_assert(length <= (u64)UINT_MAX);
  			clone_size = (unsigned int)length;
  			obj_request->bio_list =
  					bio_chain_clone_range(&bio_list,
  								&bio_offset,
  								clone_size,
2224d879c   David Disseldorp   rbd: use GFP_NOIO...
2380
  								GFP_NOIO);
f1a4739f3   Alex Elder   rbd: support page...
2381
  			if (!obj_request->bio_list)
62054da65   Ilya Dryomov   rbd: remove out_p...
2382
  				goto out_unwind;
90e98c522   Guangliang Zhao   rbd: initial disc...
2383
  		} else if (type == OBJ_REQUEST_PAGES) {
f1a4739f3   Alex Elder   rbd: support page...
2384
2385
2386
2387
2388
2389
2390
2391
2392
  			unsigned int page_count;
  
  			obj_request->pages = pages;
  			page_count = (u32)calc_pages_for(offset, length);
  			obj_request->page_count = page_count;
  			if ((offset + length) & ~PAGE_MASK)
  				page_count--;	/* more on last page */
  			pages += page_count;
  		}
bf0d5f503   Alex Elder   rbd: new request ...
2393

6d2940c88   Guangliang Zhao   rbd: extend the o...
2394
2395
2396
  		osd_req = rbd_osd_req_create(rbd_dev, op_type,
  					(op_type == OBJ_OP_WRITE) ? 2 : 1,
  					obj_request);
2fa123201   Alex Elder   rbd: don't set da...
2397
  		if (!osd_req)
62054da65   Ilya Dryomov   rbd: remove out_p...
2398
  			goto out_unwind;
3b434a2af   Josh Durgin   rbd: extract a me...
2399

2fa123201   Alex Elder   rbd: don't set da...
2400
  		obj_request->osd_req = osd_req;
2169238dd   Alex Elder   rbd: rearrange so...
2401
  		obj_request->callback = rbd_img_obj_callback;
3b434a2af   Josh Durgin   rbd: extract a me...
2402
  		obj_request->img_offset = img_offset;
9d4df01f0   Alex Elder   rbd: define separ...
2403

3b434a2af   Josh Durgin   rbd: extract a me...
2404
  		rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
430c28c3c   Alex Elder   rbd: define rbd_o...
2405

3b434a2af   Josh Durgin   rbd: extract a me...
2406
  		rbd_img_request_get(img_request);
bf0d5f503   Alex Elder   rbd: new request ...
2407

7da22d296   Alex Elder   rbd: record image...
2408
  		img_offset += length;
bf0d5f503   Alex Elder   rbd: new request ...
2409
2410
2411
2412
  		resid -= length;
  	}
  
  	return 0;
bf0d5f503   Alex Elder   rbd: new request ...
2413
2414
  out_unwind:
  	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
42dd037c0   Ilya Dryomov   rbd: fix error pa...
2415
  		rbd_img_obj_request_del(img_request, obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
2416
2417
2418
  
  	return -ENOMEM;
  }
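  /*
   * The loop above splits the image extent at object-size boundaries using
   * rbd_segment_name/offset/length().  For example (illustrative numbers
   * only), with 4 MiB objects a 6 MiB request starting at image offset
   * 3 MiB becomes three object requests: 1 MiB at offset 3 MiB in the first
   * object, a full 4 MiB in the next, and 1 MiB at offset 0 in the last.
   */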
3d7efd18d   Alex Elder   rbd: implement fu...
2419
  static void
2761713d3   Ilya Dryomov   rbd: fix copyup c...
2420
  rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
0eefd470f   Alex Elder   rbd: issue a copy...
2421
2422
2423
  {
  	struct rbd_img_request *img_request;
  	struct rbd_device *rbd_dev;
ebda6408f   Alex Elder   rbd: fix parent r...
2424
  	struct page **pages;
0eefd470f   Alex Elder   rbd: issue a copy...
2425
  	u32 page_count;
2761713d3   Ilya Dryomov   rbd: fix copyup c...
2426
2427
  	dout("%s: obj %p
  ", __func__, obj_request);
d3246fb0d   Josh Durgin   rbd: use helpers ...
2428
2429
  	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
  		obj_request->type == OBJ_REQUEST_NODATA);
0eefd470f   Alex Elder   rbd: issue a copy...
2430
2431
2432
2433
2434
2435
  	rbd_assert(obj_request_img_data_test(obj_request));
  	img_request = obj_request->img_request;
  	rbd_assert(img_request);
  
  	rbd_dev = img_request->rbd_dev;
  	rbd_assert(rbd_dev);
0eefd470f   Alex Elder   rbd: issue a copy...
2436

ebda6408f   Alex Elder   rbd: fix parent r...
2437
2438
  	pages = obj_request->copyup_pages;
  	rbd_assert(pages != NULL);
0eefd470f   Alex Elder   rbd: issue a copy...
2439
  	obj_request->copyup_pages = NULL;
ebda6408f   Alex Elder   rbd: fix parent r...
2440
2441
2442
2443
  	page_count = obj_request->copyup_page_count;
  	rbd_assert(page_count);
  	obj_request->copyup_page_count = 0;
  	ceph_release_page_vector(pages, page_count);
0eefd470f   Alex Elder   rbd: issue a copy...
2444
2445
2446
2447
2448
2449
2450
2451
2452
  
  	/*
  	 * We want the transfer count to reflect the size of the
  	 * original write request.  There is no such thing as a
  	 * successful short write, so if the request was successful
  	 * we can just set it to the originally-requested length.
  	 */
  	if (!obj_request->result)
  		obj_request->xferred = obj_request->length;
2761713d3   Ilya Dryomov   rbd: fix copyup c...
2453
  	obj_request_done_set(obj_request);
0eefd470f   Alex Elder   rbd: issue a copy...
2454
2455
2456
  }
  
  static void
3d7efd18d   Alex Elder   rbd: implement fu...
2457
2458
2459
  rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
  {
  	struct rbd_obj_request *orig_request;
0eefd470f   Alex Elder   rbd: issue a copy...
2460
2461
2462
  	struct ceph_osd_request *osd_req;
  	struct ceph_osd_client *osdc;
  	struct rbd_device *rbd_dev;
3d7efd18d   Alex Elder   rbd: implement fu...
2463
  	struct page **pages;
d3246fb0d   Josh Durgin   rbd: use helpers ...
2464
  	enum obj_operation_type op_type;
ebda6408f   Alex Elder   rbd: fix parent r...
2465
  	u32 page_count;
bbea1c1a3   Alex Elder   rbd: re-submit wr...
2466
  	int img_result;
ebda6408f   Alex Elder   rbd: fix parent r...
2467
  	u64 parent_length;
3d7efd18d   Alex Elder   rbd: implement fu...
2468
2469
2470
2471
2472
2473
2474
2475
  
  	rbd_assert(img_request_child_test(img_request));
  
  	/* First get what we need from the image request */
  
  	pages = img_request->copyup_pages;
  	rbd_assert(pages != NULL);
  	img_request->copyup_pages = NULL;
ebda6408f   Alex Elder   rbd: fix parent r...
2476
2477
2478
  	page_count = img_request->copyup_page_count;
  	rbd_assert(page_count);
  	img_request->copyup_page_count = 0;
3d7efd18d   Alex Elder   rbd: implement fu...
2479
2480
2481
  
  	orig_request = img_request->obj_request;
  	rbd_assert(orig_request != NULL);
b91f09f17   Alex Elder   rbd: support read...
2482
  	rbd_assert(obj_request_type_valid(orig_request->type));
bbea1c1a3   Alex Elder   rbd: re-submit wr...
2483
  	img_result = img_request->result;
ebda6408f   Alex Elder   rbd: fix parent r...
2484
2485
  	parent_length = img_request->length;
  	rbd_assert(parent_length == img_request->xferred);
91c6febb3   Alex Elder   rbd: fix an incor...
2486
  	rbd_img_request_put(img_request);
3d7efd18d   Alex Elder   rbd: implement fu...
2487

91c6febb3   Alex Elder   rbd: fix an incor...
2488
2489
  	rbd_assert(orig_request->img_request);
  	rbd_dev = orig_request->img_request->rbd_dev;
0eefd470f   Alex Elder   rbd: issue a copy...
2490
  	rbd_assert(rbd_dev);
0eefd470f   Alex Elder   rbd: issue a copy...
2491

bbea1c1a3   Alex Elder   rbd: re-submit wr...
2492
2493
2494
2495
2496
2497
2498
  	/*
  	 * If the overlap has become 0 (most likely because the
  	 * image has been flattened) we need to free the pages
  	 * and re-submit the original write request.
  	 */
  	if (!rbd_dev->parent_overlap) {
  		struct ceph_osd_client *osdc;
3d7efd18d   Alex Elder   rbd: implement fu...
2499

bbea1c1a3   Alex Elder   rbd: re-submit wr...
2500
2501
2502
2503
2504
2505
  		ceph_release_page_vector(pages, page_count);
  		osdc = &rbd_dev->rbd_client->client->osdc;
  		img_result = rbd_obj_request_submit(osdc, orig_request);
  		if (!img_result)
  			return;
  	}
0eefd470f   Alex Elder   rbd: issue a copy...
2506

bbea1c1a3   Alex Elder   rbd: re-submit wr...
2507
  	if (img_result)
0eefd470f   Alex Elder   rbd: issue a copy...
2508
  		goto out_err;
0eefd470f   Alex Elder   rbd: issue a copy...
2509

8785b1d48   Alex Elder   rbd: don't releas...
2510
2511
  	/*
  	 * The original osd request is of no use to us any more.
0ccd59266   Ilya Dryomov   rbd: prefix rbd w...
2512
  	 * We need a new one that can hold the three ops in a copyup
8785b1d48   Alex Elder   rbd: don't releas...
2513
2514
2515
  	 * request.  Allocate the new copyup osd request for the
  	 * original request, and release the old one.
  	 */
bbea1c1a3   Alex Elder   rbd: re-submit wr...
2516
  	img_result = -ENOMEM;
0eefd470f   Alex Elder   rbd: issue a copy...
2517
2518
2519
  	osd_req = rbd_osd_req_create_copyup(orig_request);
  	if (!osd_req)
  		goto out_err;
8785b1d48   Alex Elder   rbd: don't releas...
2520
  	rbd_osd_req_destroy(orig_request->osd_req);
0eefd470f   Alex Elder   rbd: issue a copy...
2521
2522
  	orig_request->osd_req = osd_req;
  	orig_request->copyup_pages = pages;
ebda6408f   Alex Elder   rbd: fix parent r...
2523
  	orig_request->copyup_page_count = page_count;
3d7efd18d   Alex Elder   rbd: implement fu...
2524

0eefd470f   Alex Elder   rbd: issue a copy...
2525
  	/* Initialize the copyup op */
3d7efd18d   Alex Elder   rbd: implement fu...
2526

0eefd470f   Alex Elder   rbd: issue a copy...
2527
  	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
ebda6408f   Alex Elder   rbd: fix parent r...
2528
  	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
0eefd470f   Alex Elder   rbd: issue a copy...
2529
  						false, false);
3d7efd18d   Alex Elder   rbd: implement fu...
2530

d3246fb0d   Josh Durgin   rbd: use helpers ...
2531
  	/* Add the other op(s) */
0eefd470f   Alex Elder   rbd: issue a copy...
2532

d3246fb0d   Josh Durgin   rbd: use helpers ...
2533
2534
  	op_type = rbd_img_request_op_type(orig_request->img_request);
  	rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
0eefd470f   Alex Elder   rbd: issue a copy...
2535
2536
  
  	/* All set, send it off. */
0eefd470f   Alex Elder   rbd: issue a copy...
2537
  	osdc = &rbd_dev->rbd_client->client->osdc;
bbea1c1a3   Alex Elder   rbd: re-submit wr...
2538
2539
  	img_result = rbd_obj_request_submit(osdc, orig_request);
  	if (!img_result)
0eefd470f   Alex Elder   rbd: issue a copy...
2540
2541
2542
  		return;
  out_err:
  	/* Record the error code and complete the request */
bbea1c1a3   Alex Elder   rbd: re-submit wr...
2543
  	orig_request->result = img_result;
0eefd470f   Alex Elder   rbd: issue a copy...
2544
2545
2546
  	orig_request->xferred = 0;
  	obj_request_done_set(orig_request);
  	rbd_obj_request_complete(orig_request);
3d7efd18d   Alex Elder   rbd: implement fu...
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
  }
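  /*
   * The copyup request built above places the "rbd" class "copyup" call,
   * carrying the parent data, at op index 0; rbd_img_obj_request_fill() is
   * then invoked with num_ops == 1 so the original write or discard op(s)
   * follow it in the same OSD request.
   */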
  
  /*
   * Read from the parent image the range of data that covers the
   * entire target of the given object request.  This is used for
   * satisfying a layered image write request when the target of an
   * object request from the image request does not exist.
   *
   * A page array big enough to hold the returned data is allocated
   * and supplied to rbd_img_request_fill() as the "data descriptor."
   * When the read completes, this page array will be transferred to
   * the original object request for the copyup operation.
   *
   * If an error occurs, record it as the result of the original
   * object request and mark it done so it gets completed.
   */
  static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
  {
  	struct rbd_img_request *img_request = NULL;
  	struct rbd_img_request *parent_request = NULL;
  	struct rbd_device *rbd_dev;
  	u64 img_offset;
  	u64 length;
  	struct page **pages = NULL;
  	u32 page_count;
  	int result;
  
  	rbd_assert(obj_request_img_data_test(obj_request));
b91f09f17   Alex Elder   rbd: support read...
2575
  	rbd_assert(obj_request_type_valid(obj_request->type));
3d7efd18d   Alex Elder   rbd: implement fu...
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
  
  	img_request = obj_request->img_request;
  	rbd_assert(img_request != NULL);
  	rbd_dev = img_request->rbd_dev;
  	rbd_assert(rbd_dev->parent != NULL);
  
  	/*
  	 * Determine the byte range covered by the object in the
  	 * child image to which the original request was to be sent.
  	 */
  	img_offset = obj_request->img_offset - obj_request->offset;
  	length = (u64)1 << rbd_dev->header.obj_order;
  
  	/*
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
  	 * There is no defined parent data beyond the parent
  	 * overlap, so limit what we read at that boundary if
  	 * necessary.
  	 */
  	if (img_offset + length > rbd_dev->parent_overlap) {
  		rbd_assert(img_offset < rbd_dev->parent_overlap);
  		length = rbd_dev->parent_overlap - img_offset;
  	}
  
  	/*
3d7efd18d   Alex Elder   rbd: implement fu...
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
  	 * Allocate a page array big enough to receive the data read
  	 * from the parent.
  	 */
  	page_count = (u32)calc_pages_for(0, length);
  	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
  	if (IS_ERR(pages)) {
  		result = PTR_ERR(pages);
  		pages = NULL;
  		goto out_err;
  	}
  
  	result = -ENOMEM;
e93f31523   Alex Elder   rbd: define paren...
2612
2613
  	parent_request = rbd_parent_request_create(obj_request,
  						img_offset, length);
3d7efd18d   Alex Elder   rbd: implement fu...
2614
2615
  	if (!parent_request)
  		goto out_err;
3d7efd18d   Alex Elder   rbd: implement fu...
2616
2617
2618
2619
2620
  
  	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
  	if (result)
  		goto out_err;
  	parent_request->copyup_pages = pages;
ebda6408f   Alex Elder   rbd: fix parent r...
2621
  	parent_request->copyup_page_count = page_count;
3d7efd18d   Alex Elder   rbd: implement fu...
2622
2623
2624
2625
2626
2627
2628
  
  	parent_request->callback = rbd_img_obj_parent_read_full_callback;
  	result = rbd_img_request_submit(parent_request);
  	if (!result)
  		return 0;
  
  	parent_request->copyup_pages = NULL;
ebda6408f   Alex Elder   rbd: fix parent r...
2629
  	parent_request->copyup_page_count = 0;
3d7efd18d   Alex Elder   rbd: implement fu...
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
  	parent_request->obj_request = NULL;
  	rbd_obj_request_put(obj_request);
  out_err:
  	if (pages)
  		ceph_release_page_vector(pages, page_count);
  	if (parent_request)
  		rbd_img_request_put(parent_request);
  	obj_request->result = result;
  	obj_request->xferred = 0;
  	obj_request_done_set(obj_request);
  
  	return result;
  }
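  /*
   * The parent read above covers at most one object (1 << obj_order bytes,
   * clamped to the parent overlap), so with the default 4 MiB objects and
   * 4 KiB pages the copyup page vector holds at most 1024 pages.
   */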
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2643
2644
  static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
  {
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2645
  	struct rbd_obj_request *orig_request;
638f5abed   Alex Elder   rbd: re-submit fl...
2646
  	struct rbd_device *rbd_dev;
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
  	int result;
  
  	rbd_assert(!obj_request_img_data_test(obj_request));
  
  	/*
  	 * All we need from the object request is the original
  	 * request and the result of the STAT op.  Grab those, then
  	 * we're done with the request.
  	 */
  	orig_request = obj_request->obj_request;
  	obj_request->obj_request = NULL;
912c317d4   Alex Elder   rbd: drop origina...
2658
  	rbd_obj_request_put(orig_request);
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
  	rbd_assert(orig_request);
  	rbd_assert(orig_request->img_request);
  
  	result = obj_request->result;
  	obj_request->result = 0;
  
  	dout("%s: obj %p for obj %p result %d %llu/%llu
  ", __func__,
  		obj_request, orig_request, result,
  		obj_request->xferred, obj_request->length);
  	rbd_obj_request_put(obj_request);
638f5abed   Alex Elder   rbd: re-submit fl...
2670
2671
2672
2673
2674
2675
2676
2677
  	/*
  	 * If the overlap has become 0 (most likely because the
  	 * image has been flattened) we need to re-submit the
  	 * original write request.
  	 */
  	rbd_dev = orig_request->img_request->rbd_dev;
  	if (!rbd_dev->parent_overlap) {
  		struct ceph_osd_client *osdc;
638f5abed   Alex Elder   rbd: re-submit fl...
2678
2679
2680
2681
2682
  		osdc = &rbd_dev->rbd_client->client->osdc;
  		result = rbd_obj_request_submit(osdc, orig_request);
  		if (!result)
  			return;
  	}
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
  
  	/*
  	 * Our only purpose here is to determine whether the object
  	 * exists, and we don't want to treat the non-existence as
  	 * an error.  If something else comes back, transfer the
  	 * error to the original request and complete it now.
  	 */
  	if (!result) {
  		obj_request_existence_set(orig_request, true);
  	} else if (result == -ENOENT) {
  		obj_request_existence_set(orig_request, false);
  	} else if (result) {
  		orig_request->result = result;
3d7efd18d   Alex Elder   rbd: implement fu...
2696
  		goto out;
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2697
2698
2699
2700
2701
2702
  	}
  
  	/*
  	 * Resubmit the original request now that we have recorded
  	 * whether the target object exists.
  	 */
b454e36d2   Alex Elder   rbd: encapsulate ...
2703
  	orig_request->result = rbd_img_obj_request_submit(orig_request);
3d7efd18d   Alex Elder   rbd: implement fu...
2704
  out:
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2705
2706
  	if (orig_request->result)
  		rbd_obj_request_complete(orig_request);
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
  }
  
  static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
  {
  	struct rbd_obj_request *stat_request;
  	struct rbd_device *rbd_dev;
  	struct ceph_osd_client *osdc;
  	struct page **pages = NULL;
  	u32 page_count;
  	size_t size;
  	int ret;
  
  	/*
  	 * The response data for a STAT call consists of:
  	 *     le64 length;
  	 *     struct {
  	 *         le32 tv_sec;
  	 *         le32 tv_nsec;
  	 *     } mtime;
  	 */
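  	/* i.e. 8 + 4 + 4 = 16 bytes, which always fits in a single page */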
  	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
  	page_count = (u32)calc_pages_for(0, size);
  	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
  	if (IS_ERR(pages))
  		return PTR_ERR(pages);
  
  	ret = -ENOMEM;
  	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
  							OBJ_REQUEST_PAGES);
  	if (!stat_request)
  		goto out;
  
  	rbd_obj_request_get(obj_request);
  	stat_request->obj_request = obj_request;
  	stat_request->pages = pages;
  	stat_request->page_count = page_count;
  
  	rbd_assert(obj_request->img_request);
  	rbd_dev = obj_request->img_request->rbd_dev;
6d2940c88   Guangliang Zhao   rbd: extend the o...
2746
  	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
deb236b30   Ilya Dryomov   rbd: num_ops para...
2747
  						   stat_request);
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2748
2749
2750
  	if (!stat_request->osd_req)
  		goto out;
  	stat_request->callback = rbd_img_obj_exists_callback;
144cba149   Yan, Zheng   libceph: allow se...
2751
  	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2752
2753
  	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
  					false, false);
9d4df01f0   Alex Elder   rbd: define separ...
2754
  	rbd_osd_req_format_read(stat_request);
c5b5ef6c5   Alex Elder   rbd: issue stat r...
2755
2756
2757
2758
2759
2760
2761
2762
2763
  
  	osdc = &rbd_dev->rbd_client->client->osdc;
  	ret = rbd_obj_request_submit(osdc, stat_request);
  out:
  	if (ret)
  		rbd_obj_request_put(obj_request);
  
  	return ret;
  }
70d045f66   Ilya Dryomov   rbd: add img_obj_...
2764
  static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
b454e36d2   Alex Elder   rbd: encapsulate ...
2765
2766
  {
  	struct rbd_img_request *img_request;
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2767
  	struct rbd_device *rbd_dev;
b454e36d2   Alex Elder   rbd: encapsulate ...
2768
2769
2770
2771
2772
  
  	rbd_assert(obj_request_img_data_test(obj_request));
  
  	img_request = obj_request->img_request;
  	rbd_assert(img_request);
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2773
  	rbd_dev = img_request->rbd_dev;
b454e36d2   Alex Elder   rbd: encapsulate ...
2774

70d045f66   Ilya Dryomov   rbd: add img_obj_...
2775
  	/* Reads */
1c220881e   Josh Durgin   rbd: make discard...
2776
2777
  	if (!img_request_write_test(img_request) &&
  	    !img_request_discard_test(img_request))
70d045f66   Ilya Dryomov   rbd: add img_obj_...
2778
2779
2780
2781
2782
  		return true;
  
  	/* Non-layered writes */
  	if (!img_request_layered_test(img_request))
  		return true;
b454e36d2   Alex Elder   rbd: encapsulate ...
2783
  	/*
70d045f66   Ilya Dryomov   rbd: add img_obj_...
2784
2785
  	 * Layered writes outside of the parent overlap range don't
  	 * share any data with the parent.
b454e36d2   Alex Elder   rbd: encapsulate ...
2786
  	 */
70d045f66   Ilya Dryomov   rbd: add img_obj_...
2787
2788
  	if (!obj_request_overlaps_parent(obj_request))
  		return true;
b454e36d2   Alex Elder   rbd: encapsulate ...
2789

70d045f66   Ilya Dryomov   rbd: add img_obj_...
2790
  	/*
c622d2261   Guangliang Zhao   rbd: skip the cop...
2791
2792
2793
2794
2795
2796
2797
2798
  	 * Entire-object layered writes - we will overwrite whatever
  	 * parent data there is anyway.
  	 */
  	if (!obj_request->offset &&
  	    obj_request->length == rbd_obj_bytes(&rbd_dev->header))
  		return true;
  
  	/*
70d045f66   Ilya Dryomov   rbd: add img_obj_...
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
  	 * If the object is known to already exist, its parent data has
  	 * already been copied.
  	 */
  	if (obj_request_known_test(obj_request) &&
  	    obj_request_exists_test(obj_request))
  		return true;
  
  	return false;
  }
  
  static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
  {
  	if (img_obj_request_simple(obj_request)) {
b454e36d2   Alex Elder   rbd: encapsulate ...
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
  		struct rbd_device *rbd_dev;
  		struct ceph_osd_client *osdc;
  
  		rbd_dev = obj_request->img_request->rbd_dev;
  		osdc = &rbd_dev->rbd_client->client->osdc;
  
  		return rbd_obj_request_submit(osdc, obj_request);
  	}
  
  	/*
3d7efd18d   Alex Elder   rbd: implement fu...
2822
2823
2824
2825
  	 * It's a layered write.  The target object might exist but
  	 * we may not know that yet.  If we know it doesn't exist,
  	 * start by reading the data for the full target object from
  	 * the parent so we can use it for a copyup to the target.
b454e36d2   Alex Elder   rbd: encapsulate ...
2826
  	 */
70d045f66   Ilya Dryomov   rbd: add img_obj_...
2827
  	if (obj_request_known_test(obj_request))
3d7efd18d   Alex Elder   rbd: implement fu...
2828
2829
2830
  		return rbd_img_obj_parent_read_full(obj_request);
  
  	/* We don't know whether the target exists.  Go find out. */
b454e36d2   Alex Elder   rbd: encapsulate ...
2831
2832
2833
  
  	return rbd_img_obj_exists_submit(obj_request);
  }
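  /*
   * Submission decision summary: "simple" object requests (reads,
   * non-layered writes, layered writes beyond the parent overlap,
   * whole-object writes, or writes to objects known to exist) go straight
   * to the OSD.  A layered write to an object known not to exist triggers
   * a full parent read followed by a copyup; if existence is unknown, a
   * STAT is issued first and the request is resubmitted from its callback.
   */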
bf0d5f503   Alex Elder   rbd: new request ...
2834
2835
  static int rbd_img_request_submit(struct rbd_img_request *img_request)
  {
bf0d5f503   Alex Elder   rbd: new request ...
2836
  	struct rbd_obj_request *obj_request;
46faeed4a   Alex Elder   rbd: do a safe li...
2837
  	struct rbd_obj_request *next_obj_request;
663ae2cc0   Ilya Dryomov   rbd: get/put img_...
2838
  	int ret = 0;
bf0d5f503   Alex Elder   rbd: new request ...
2839

37206ee5b   Alex Elder   rbd: normalize do...
2840
2841
  	dout("%s: img %p
  ", __func__, img_request);
bf0d5f503   Alex Elder   rbd: new request ...
2842

663ae2cc0   Ilya Dryomov   rbd: get/put img_...
2843
2844
  	rbd_img_request_get(img_request);
  	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
b454e36d2   Alex Elder   rbd: encapsulate ...
2845
  		ret = rbd_img_obj_request_submit(obj_request);
bf0d5f503   Alex Elder   rbd: new request ...
2846
  		if (ret)
663ae2cc0   Ilya Dryomov   rbd: get/put img_...
2847
  			goto out_put_ireq;
bf0d5f503   Alex Elder   rbd: new request ...
2848
  	}
663ae2cc0   Ilya Dryomov   rbd: get/put img_...
2849
2850
2851
  out_put_ireq:
  	rbd_img_request_put(img_request);
  	return ret;
bf0d5f503   Alex Elder   rbd: new request ...
2852
  }
8b3e1a569   Alex Elder   rbd: implement la...
2853
2854
2855
2856
  
  static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
  {
  	struct rbd_obj_request *obj_request;
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2857
2858
  	struct rbd_device *rbd_dev;
  	u64 obj_end;
02c74fbad   Alex Elder   rbd: re-submit re...
2859
2860
  	u64 img_xferred;
  	int img_result;
8b3e1a569   Alex Elder   rbd: implement la...
2861
2862
  
  	rbd_assert(img_request_child_test(img_request));
02c74fbad   Alex Elder   rbd: re-submit re...
2863
  	/* First get what we need from the image request and release it */
8b3e1a569   Alex Elder   rbd: implement la...
2864
  	obj_request = img_request->obj_request;
02c74fbad   Alex Elder   rbd: re-submit re...
2865
2866
2867
2868
2869
2870
2871
2872
2873
  	img_xferred = img_request->xferred;
  	img_result = img_request->result;
  	rbd_img_request_put(img_request);
  
  	/*
  	 * If the overlap has become 0 (most likely because the
  	 * image has been flattened) we need to re-submit the
  	 * original request.
  	 */
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2874
2875
  	rbd_assert(obj_request);
  	rbd_assert(obj_request->img_request);
02c74fbad   Alex Elder   rbd: re-submit re...
2876
2877
2878
2879
2880
2881
2882
2883
2884
  	rbd_dev = obj_request->img_request->rbd_dev;
  	if (!rbd_dev->parent_overlap) {
  		struct ceph_osd_client *osdc;
  
  		osdc = &rbd_dev->rbd_client->client->osdc;
  		img_result = rbd_obj_request_submit(osdc, obj_request);
  		if (!img_result)
  			return;
  	}
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2885

02c74fbad   Alex Elder   rbd: re-submit re...
2886
  	obj_request->result = img_result;
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
  	if (obj_request->result)
  		goto out;
  
  	/*
  	 * We need to zero anything beyond the parent overlap
  	 * boundary.  Since rbd_img_obj_request_read_callback()
  	 * will zero anything beyond the end of a short read, an
  	 * easy way to do this is to pretend the data from the
  	 * parent came up short--ending at the overlap boundary.
  	 */
  	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
  	obj_end = obj_request->img_offset + obj_request->length;
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2899
2900
2901
2902
2903
2904
  	if (obj_end > rbd_dev->parent_overlap) {
  		u64 xferred = 0;
  
  		if (obj_request->img_offset < rbd_dev->parent_overlap)
  			xferred = rbd_dev->parent_overlap -
  					obj_request->img_offset;
8b3e1a569   Alex Elder   rbd: implement la...
2905

02c74fbad   Alex Elder   rbd: re-submit re...
2906
  		obj_request->xferred = min(img_xferred, xferred);
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2907
  	} else {
02c74fbad   Alex Elder   rbd: re-submit re...
2908
  		obj_request->xferred = img_xferred;
a9e8ba2cb   Alex Elder   rbd: enforce pare...
2909
2910
  	}
  out:
8b3e1a569   Alex Elder   rbd: implement la...
2911
2912
2913
2914
2915
2916
  	rbd_img_obj_request_read_callback(obj_request);
  	rbd_obj_request_complete(obj_request);
  }
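  /*
   * Example of the clamping above (illustrative numbers): with a 10 MiB
   * parent overlap, a child read mapped at image offset 9 MiB for 4 MiB
   * reports at most 1 MiB transferred, and
   * rbd_img_obj_request_read_callback() zero-fills the remainder as if the
   * parent read had simply come up short.
   */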
  
  static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
  {
8b3e1a569   Alex Elder   rbd: implement la...
2917
2918
2919
2920
2921
2922
  	struct rbd_img_request *img_request;
  	int result;
  
  	rbd_assert(obj_request_img_data_test(obj_request));
  	rbd_assert(obj_request->img_request != NULL);
  	rbd_assert(obj_request->result == (s32) -ENOENT);
5b2ab72d3   Alex Elder   rbd: support read...
2923
  	rbd_assert(obj_request_type_valid(obj_request->type));
8b3e1a569   Alex Elder   rbd: implement la...
2924

8b3e1a569   Alex Elder   rbd: implement la...
2925
  	/* rbd_read_finish(obj_request, obj_request->length); */
e93f31523   Alex Elder   rbd: define paren...
2926
  	img_request = rbd_parent_request_create(obj_request,
8b3e1a569   Alex Elder   rbd: implement la...
2927
  						obj_request->img_offset,
e93f31523   Alex Elder   rbd: define paren...
2928
  						obj_request->length);
8b3e1a569   Alex Elder   rbd: implement la...
2929
2930
2931
  	result = -ENOMEM;
  	if (!img_request)
  		goto out_err;
5b2ab72d3   Alex Elder   rbd: support read...
2932
2933
2934
2935
2936
2937
  	if (obj_request->type == OBJ_REQUEST_BIO)
  		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
  						obj_request->bio_list);
  	else
  		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
  						obj_request->pages);
8b3e1a569   Alex Elder   rbd: implement la...
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
  	if (result)
  		goto out_err;
  
  	img_request->callback = rbd_img_parent_read_callback;
  	result = rbd_img_request_submit(img_request);
  	if (result)
  		goto out_err;
  
  	return;
  out_err:
  	if (img_request)
  		rbd_img_request_put(img_request);
  	obj_request->result = result;
  	obj_request->xferred = 0;
  	obj_request_done_set(obj_request);
  }
bf0d5f503   Alex Elder   rbd: new request ...
2954

922dab613   Ilya Dryomov   libceph, rbd: cep...
2955
2956
  static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
  static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);
b8d70035b   Alex Elder   rbd: use new code...
2957

922dab613   Ilya Dryomov   libceph, rbd: cep...
2958
2959
  static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
  			 u64 notifier_id, void *data, size_t data_len)
b8d70035b   Alex Elder   rbd: use new code...
2960
  {
922dab613   Ilya Dryomov   libceph, rbd: cep...
2961
2962
  	struct rbd_device *rbd_dev = arg;
  	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
e627db085   Alex Elder   rbd: revalidate o...
2963
  	int ret;
b8d70035b   Alex Elder   rbd: use new code...
2964

922dab613   Ilya Dryomov   libceph, rbd: cep...
2965
2966
2967
  	dout("%s rbd_dev %p cookie %llu notify_id %llu
  ", __func__, rbd_dev,
  	     cookie, notify_id);
52bb1f9be   Ilya Dryomov   rbd: harden rbd_d...
2968
2969
2970
2971
2972
2973
2974
  
  	/*
  	 * Until adequate refresh error handling is in place, there is
  	 * not much we can do here, except warn.
  	 *
  	 * See http://tracker.ceph.com/issues/5040
  	 */
e627db085   Alex Elder   rbd: revalidate o...
2975
2976
  	ret = rbd_dev_refresh(rbd_dev);
  	if (ret)
9584d5082   Ilya Dryomov   rbd: remove extra...
2977
  		rbd_warn(rbd_dev, "refresh failed: %d", ret);
b8d70035b   Alex Elder   rbd: use new code...
2978

922dab613   Ilya Dryomov   libceph, rbd: cep...
2979
2980
2981
  	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
  				   &rbd_dev->header_oloc, notify_id, cookie,
  				   NULL, 0);
52bb1f9be   Ilya Dryomov   rbd: harden rbd_d...
2982
  	if (ret)
9584d5082   Ilya Dryomov   rbd: remove extra...
2983
  		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
b8d70035b   Alex Elder   rbd: use new code...
2984
  }
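  /*
   * A notify on the header object means some client changed the image
   * (resize, snapshot, flatten, ...), so the callback above refreshes the
   * local view of the header and then acknowledges the notification so the
   * notifier is not left waiting for a timeout.
   */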
922dab613   Ilya Dryomov   libceph, rbd: cep...
2985
  static void rbd_watch_errcb(void *arg, u64 cookie, int err)
bb040aa03   Ilya Dryomov   rbd: add rbd_obj_...
2986
  {
922dab613   Ilya Dryomov   libceph, rbd: cep...
2987
  	struct rbd_device *rbd_dev = arg;
bb040aa03   Ilya Dryomov   rbd: add rbd_obj_...
2988
  	int ret;
922dab613   Ilya Dryomov   libceph, rbd: cep...
2989
  	rbd_warn(rbd_dev, "encountered watch error: %d", err);
bb040aa03   Ilya Dryomov   rbd: add rbd_obj_...
2990

922dab613   Ilya Dryomov   libceph, rbd: cep...
2991
  	__rbd_dev_header_unwatch_sync(rbd_dev);
bb040aa03   Ilya Dryomov   rbd: add rbd_obj_...
2992

922dab613   Ilya Dryomov   libceph, rbd: cep...
2993
  	ret = rbd_dev_header_watch_sync(rbd_dev);
bb040aa03   Ilya Dryomov   rbd: add rbd_obj_...
2994
  	if (ret) {
922dab613   Ilya Dryomov   libceph, rbd: cep...
2995
2996
  		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
  		return;
bb040aa03   Ilya Dryomov   rbd: add rbd_obj_...
2997
  	}
922dab613   Ilya Dryomov   libceph, rbd: cep...
2998
2999
3000
  	ret = rbd_dev_refresh(rbd_dev);
  	if (ret)
  		rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret);
bb040aa03   Ilya Dryomov   rbd: add rbd_obj_...
3001
3002
3003
  }
  
  /*
b30a01f2a   Ilya Dryomov   rbd: fix osd_requ...
3004
   * Initiate a watch request, synchronously.
9969ebc5a   Alex Elder   rbd: implement wa...
3005
   */
b30a01f2a   Ilya Dryomov   rbd: fix osd_requ...
3006
  static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
9969ebc5a   Alex Elder   rbd: implement wa...
3007
3008
  {
  	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
922dab613   Ilya Dryomov   libceph, rbd: cep...
3009
  	struct ceph_osd_linger_request *handle;
9969ebc5a   Alex Elder   rbd: implement wa...
3010

922dab613   Ilya Dryomov   libceph, rbd: cep...
3011
  	rbd_assert(!rbd_dev->watch_handle);
9969ebc5a   Alex Elder   rbd: implement wa...
3012

922dab613   Ilya Dryomov   libceph, rbd: cep...
3013
3014
3015
3016
3017
  	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
  				 &rbd_dev->header_oloc, rbd_watch_cb,
  				 rbd_watch_errcb, rbd_dev);
  	if (IS_ERR(handle))
  		return PTR_ERR(handle);
8eb875653   Alex Elder   rbd: don't drop w...
3018

922dab613   Ilya Dryomov   libceph, rbd: cep...
3019
  	rbd_dev->watch_handle = handle;
b30a01f2a   Ilya Dryomov   rbd: fix osd_requ...
3020
  	return 0;
b30a01f2a   Ilya Dryomov   rbd: fix osd_requ...
3021
  }
c525f0360   Ilya Dryomov   rbd: rbd_dev_head...
3022
  static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
b30a01f2a   Ilya Dryomov   rbd: fix osd_requ...
3023
  {
922dab613   Ilya Dryomov   libceph, rbd: cep...
3024
3025
  	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
  	int ret;
b30a01f2a   Ilya Dryomov   rbd: fix osd_requ...
3026

922dab613   Ilya Dryomov   libceph, rbd: cep...
3027
3028
  	if (!rbd_dev->watch_handle)
  		return;
b30a01f2a   Ilya Dryomov   rbd: fix osd_requ...
3029

922dab613   Ilya Dryomov   libceph, rbd: cep...
3030
3031
3032
  	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
  	if (ret)
  		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
76756a51e   Ilya Dryomov   rbd: use rbd_obj_...
3033

922dab613   Ilya Dryomov   libceph, rbd: cep...
3034
  	rbd_dev->watch_handle = NULL;
c525f0360   Ilya Dryomov   rbd: rbd_dev_head...
3035
3036
3037
3038
3039
3040
3041
3042
  }
  
  /*
   * Tear down a watch request, synchronously.
   */
  static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
  {
  	__rbd_dev_header_unwatch_sync(rbd_dev);
811c66887   Ilya Dryomov   rbd: fix rbd map ...
3043
3044
3045
3046
  
  	dout("%s flushing notifies
  ", __func__);
  	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
fca270653   Ilya Dryomov   rbd: introduce rb...
3047
  }
36be9a761   Alex Elder   rbd: implement sy...
3048
  /*
f40eb349e   Alex Elder   rbd: use rbd_obj_...
3049
3050
   * Synchronous osd object method call.  Returns the number of bytes
   * returned in the inbound buffer, or a negative error code.
36be9a761   Alex Elder   rbd: implement sy...
3051
3052
3053
3054
3055
   */
  static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
  			     const char *object_name,
  			     const char *class_name,
  			     const char *method_name,
4157976b2   Alex Elder   rbd: void data po...
3056
  			     const void *outbound,
36be9a761   Alex Elder   rbd: implement sy...
3057
  			     size_t outbound_size,
4157976b2   Alex Elder   rbd: void data po...
3058
  			     void *inbound,
e2a58ee55   Alex Elder   rbd: drop rbd_obj...
3059
  			     size_t inbound_size)
36be9a761   Alex Elder   rbd: implement sy...
3060
  {
2169238dd   Alex Elder   rbd: rearrange so...
3061
  	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
36be9a761   Alex Elder   rbd: implement sy...
3062
  	struct rbd_obj_request *obj_request;
36be9a761   Alex Elder   rbd: implement sy...
3063
3064
3065
3066
3067
  	struct page **pages;
  	u32 page_count;
  	int ret;
  
  	/*
6010a451c   Alex Elder   rbd: define inbou...
3068
3069
3070
3071
3072
  	 * Method calls are ultimately read operations.  The result
  	 * should be placed into the inbound buffer provided.  They
  	 * also supply outbound data--parameters for the object
  	 * method.  Currently if this is present it will be a
  	 * snapshot id.
36be9a761   Alex Elder   rbd: implement sy...
3073
  	 */
57385b51c   Alex Elder   rbd: have rbd_obj...
3074
  	page_count = (u32)calc_pages_for(0, inbound_size);
36be9a761   Alex Elder   rbd: implement sy...
3075
3076
3077
3078
3079
  	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
  	if (IS_ERR(pages))
  		return PTR_ERR(pages);
  
  	ret = -ENOMEM;
6010a451c   Alex Elder   rbd: define inbou...
3080
  	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
36be9a761   Alex Elder   rbd: implement sy...
3081
3082
3083
3084
3085
3086
  							OBJ_REQUEST_PAGES);
  	if (!obj_request)
  		goto out;
  
  	obj_request->pages = pages;
  	obj_request->page_count = page_count;
6d2940c88   Guangliang Zhao   rbd: extend the o...
3087
  	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
deb236b30   Ilya Dryomov   rbd: num_ops para...
3088
  						  obj_request);
36be9a761   Alex Elder   rbd: implement sy...
3089
3090
  	if (!obj_request->osd_req)
  		goto out;
c99d2d4ab   Alex Elder   libceph: specify ...
3091
  	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
04017e29b   Alex Elder   libceph: make met...
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
  					class_name, method_name);
  	if (outbound_size) {
  		struct ceph_pagelist *pagelist;
  
  		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
  		if (!pagelist)
  			goto out;
  
  		ceph_pagelist_init(pagelist);
  		ceph_pagelist_append(pagelist, outbound, outbound_size);
  		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
  						pagelist);
  	}
a4ce40a9a   Alex Elder   libceph: combine ...
3105
3106
  	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
  					obj_request->pages, inbound_size,
44cd188d4   Alex Elder   rbd: separate ini...
3107
  					0, false, false);
9d4df01f0   Alex Elder   rbd: define separ...
3108
  	rbd_osd_req_format_read(obj_request);
430c28c3c   Alex Elder   rbd: define rbd_o...
3109

36be9a761   Alex Elder   rbd: implement sy...
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
  	ret = rbd_obj_request_submit(osdc, obj_request);
  	if (ret)
  		goto out;
  	ret = rbd_obj_request_wait(obj_request);
  	if (ret)
  		goto out;
  
  	ret = obj_request->result;
  	if (ret < 0)
  		goto out;
57385b51c   Alex Elder   rbd: have rbd_obj...
3120
3121
3122
  
  	rbd_assert(obj_request->xferred < (u64)INT_MAX);
  	ret = (int)obj_request->xferred;
903bb32e8   Alex Elder   libceph: drop ret...
3123
  	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
36be9a761   Alex Elder   rbd: implement sy...
3124
3125
3126
3127
3128
3129
3130
3131
  out:
  	if (obj_request)
  		rbd_obj_request_put(obj_request);
  	else
  		ceph_release_page_vector(pages, page_count);
  
  	return ret;
  }
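  /*
   * Illustrative use of the helper above (the object, class method and
   * buffer size shown are examples only, not taken from this file):
   * fetching an image's id might look roughly like
   *
   *	char id_buf[64];
   *
   *	ret = rbd_obj_method_sync(rbd_dev, object_name, "rbd", "get_id",
   *				  NULL, 0, id_buf, sizeof(id_buf));
   *
   * where a negative return is an error and a non-negative return is the
   * number of bytes placed in id_buf.
   */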
7ad18afad   Christoph Hellwig   rbd: convert to b...
3132
  static void rbd_queue_workfn(struct work_struct *work)
bf0d5f503   Alex Elder   rbd: new request ...
3133
  {
7ad18afad   Christoph Hellwig   rbd: convert to b...
3134
3135
  	struct request *rq = blk_mq_rq_from_pdu(work);
  	struct rbd_device *rbd_dev = rq->q->queuedata;
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3136
  	struct rbd_img_request *img_request;
4e752f0ab   Josh Durgin   rbd: access snaps...
3137
  	struct ceph_snap_context *snapc = NULL;
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3138
3139
  	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
  	u64 length = blk_rq_bytes(rq);
6d2940c88   Guangliang Zhao   rbd: extend the o...
3140
  	enum obj_operation_type op_type;
4e752f0ab   Josh Durgin   rbd: access snaps...
3141
  	u64 mapping_size;
bf0d5f503   Alex Elder   rbd: new request ...
3142
  	int result;
7ad18afad   Christoph Hellwig   rbd: convert to b...
3143
3144
3145
3146
3147
3148
3149
  	if (rq->cmd_type != REQ_TYPE_FS) {
  		dout("%s: non-fs request type %d
  ", __func__,
  			(int) rq->cmd_type);
  		result = -EIO;
  		goto err;
  	}
c2df40dfb   Mike Christie   drivers: use req ...
3150
  	if (req_op(rq) == REQ_OP_DISCARD)
90e98c522   Guangliang Zhao   rbd: initial disc...
3151
  		op_type = OBJ_OP_DISCARD;
c2df40dfb   Mike Christie   drivers: use req ...
3152
  	else if (req_op(rq) == REQ_OP_WRITE)
6d2940c88   Guangliang Zhao   rbd: extend the o...
3153
3154
3155
  		op_type = OBJ_OP_WRITE;
  	else
  		op_type = OBJ_OP_READ;
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3156
  	/* Ignore/skip any zero-length requests */
bf0d5f503   Alex Elder   rbd: new request ...
3157

bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3158
3159
3160
3161
3162
3163
  	if (!length) {
  		dout("%s: zero-length request
  ", __func__);
  		result = 0;
  		goto err_rq;
  	}
bf0d5f503   Alex Elder   rbd: new request ...
3164

6d2940c88   Guangliang Zhao   rbd: extend the o...
3165
  	/* Only reads are allowed to a read-only device */
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3166

6d2940c88   Guangliang Zhao   rbd: extend the o...
3167
  	if (op_type != OBJ_OP_READ) {
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3168
3169
3170
  		if (rbd_dev->mapping.read_only) {
  			result = -EROFS;
  			goto err_rq;
4dda41d3d   Alex Elder   rbd: ignore zero-...
3171
  		}
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3172
3173
  		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
  	}
4dda41d3d   Alex Elder   rbd: ignore zero-...
3174

bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
  	/*
  	 * Quit early if the mapped snapshot no longer exists.  It's
  	 * still possible the snapshot will have disappeared by the
  	 * time our request arrives at the osd, but there's no sense in
  	 * sending it if we already know.
  	 */
  	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
  		dout("request for non-existent snapshot");
  		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
  		result = -ENXIO;
  		goto err_rq;
  	}
4dda41d3d   Alex Elder   rbd: ignore zero-...
3187

bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3188
3189
3190
3191
3192
3193
  	if (offset && length > U64_MAX - offset + 1) {
  		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
  			 length);
  		result = -EINVAL;
  		goto err_rq;	/* Shouldn't happen */
  	}
4dda41d3d   Alex Elder   rbd: ignore zero-...
3194

7ad18afad   Christoph Hellwig   rbd: convert to b...
3195
  	blk_mq_start_request(rq);
4e752f0ab   Josh Durgin   rbd: access snaps...
3196
3197
  	down_read(&rbd_dev->header_rwsem);
  	mapping_size = rbd_dev->mapping.size;
6d2940c88   Guangliang Zhao   rbd: extend the o...
3198
  	if (op_type != OBJ_OP_READ) {
4e752f0ab   Josh Durgin   rbd: access snaps...
3199
3200
3201
3202
3203
3204
  		snapc = rbd_dev->header.snapc;
  		ceph_get_snap_context(snapc);
  	}
  	up_read(&rbd_dev->header_rwsem);
  
  	if (offset + length > mapping_size) {
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3205
  		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4e752f0ab   Josh Durgin   rbd: access snaps...
3206
  			 length, mapping_size);
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3207
3208
3209
  		result = -EIO;
  		goto err_rq;
  	}
bf0d5f503   Alex Elder   rbd: new request ...
3210

6d2940c88   Guangliang Zhao   rbd: extend the o...
3211
  	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
4e752f0ab   Josh Durgin   rbd: access snaps...
3212
  					     snapc);
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3213
3214
3215
3216
3217
  	if (!img_request) {
  		result = -ENOMEM;
  		goto err_rq;
  	}
  	img_request->rq = rq;
70b16db86   Ilya Dryomov   rbd: don't put sn...
3218
  	snapc = NULL; /* img_request consumes a ref */
bf0d5f503   Alex Elder   rbd: new request ...
3219

90e98c522   Guangliang Zhao   rbd: initial disc...
3220
3221
3222
3223
3224
3225
  	if (op_type == OBJ_OP_DISCARD)
  		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
  					      NULL);
  	else
  		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
  					      rq->bio);
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3226
3227
  	if (result)
  		goto err_img_request;
bf0d5f503   Alex Elder   rbd: new request ...
3228

bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3229
3230
3231
  	result = rbd_img_request_submit(img_request);
  	if (result)
  		goto err_img_request;
bf0d5f503   Alex Elder   rbd: new request ...
3232

bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3233
  	return;
bf0d5f503   Alex Elder   rbd: new request ...
3234

bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3235
3236
3237
3238
3239
  err_img_request:
  	rbd_img_request_put(img_request);
  err_rq:
  	if (result)
  		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
6d2940c88   Guangliang Zhao   rbd: extend the o...
3240
  			 obj_op_name(op_type), length, offset, result);
e96a650a8   SF Markus Elfring   ceph, rbd: delete...
3241
  	ceph_put_snap_context(snapc);
7ad18afad   Christoph Hellwig   rbd: convert to b...
3242
3243
  err:
  	blk_mq_end_request(rq, result);
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3244
  }
bf0d5f503   Alex Elder   rbd: new request ...
3245

7ad18afad   Christoph Hellwig   rbd: convert to b...
3246
3247
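  /*
   * blk-mq ->queue_rq() callback.  All real work is deferred to
   * rbd_queue_workfn() via the rbd_wq workqueue, so this path never
   * blocks in the submission context.
   */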
  static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
  		const struct blk_mq_queue_data *bd)
bc1ecc65a   Ilya Dryomov   rbd: rework rbd_r...
3248
  {
7ad18afad   Christoph Hellwig   rbd: convert to b...
3249
3250
  	struct request *rq = bd->rq;
  	struct work_struct *work = blk_mq_rq_to_pdu(rq);
bf0d5f503   Alex Elder   rbd: new request ...
3251

7ad18afad   Christoph Hellwig   rbd: convert to b...
3252
3253
  	queue_work(rbd_wq, work);
  	return BLK_MQ_RQ_QUEUE_OK;
bf0d5f503   Alex Elder   rbd: new request ...
3254
  }
602adf400   Yehuda Sadeh   rbd: introduce ra...
3255
3256
3257
3258
3259
3260
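  /*
   * Tear down the gendisk and its request queue.  The blk-mq tag set
   * is freed only if the disk was actually added (GENHD_FL_UP).
   */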
  static void rbd_free_disk(struct rbd_device *rbd_dev)
  {
  	struct gendisk *disk = rbd_dev->disk;
  
  	if (!disk)
  		return;
a0cab9243   Alex Elder   rbd: avoid droppi...
3261
3262
  	rbd_dev->disk = NULL;
  	if (disk->flags & GENHD_FL_UP) {
602adf400   Yehuda Sadeh   rbd: introduce ra...
3263
  		del_gendisk(disk);
a0cab9243   Alex Elder   rbd: avoid droppi...
3264
3265
  		if (disk->queue)
  			blk_cleanup_queue(disk->queue);
7ad18afad   Christoph Hellwig   rbd: convert to b...
3266
  		blk_mq_free_tag_set(&rbd_dev->tag_set);
a0cab9243   Alex Elder   rbd: avoid droppi...
3267
  	}
602adf400   Yehuda Sadeh   rbd: introduce ra...
3268
3269
  	put_disk(disk);
  }
788e2df3b   Alex Elder   rbd: implement sy...
3270
3271
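  /*
   * Synchronously read up to @length bytes starting at @offset of the
   * named object into @buf.  Returns the number of bytes read on
   * success, or a negative errno.
   */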
  static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
  				const char *object_name,
7097f8df6   Alex Elder   rbd: get rid of s...
3272
  				u64 offset, u64 length, void *buf)
788e2df3b   Alex Elder   rbd: implement sy...
3273
3274
  
  {
2169238dd   Alex Elder   rbd: rearrange so...
3275
  	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
788e2df3b   Alex Elder   rbd: implement sy...
3276
  	struct rbd_obj_request *obj_request;
788e2df3b   Alex Elder   rbd: implement sy...
3277
3278
  	struct page **pages = NULL;
  	u32 page_count;
1ceae7ef0   Alex Elder   rbd: prevent byte...
3279
  	size_t size;
788e2df3b   Alex Elder   rbd: implement sy...
3280
3281
3282
3283
3284
  	int ret;
  
  	page_count = (u32) calc_pages_for(offset, length);
  	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
  	if (IS_ERR(pages))
a8d420562   Jan Kara   rbd: Fix error re...
3285
  		return PTR_ERR(pages);
788e2df3b   Alex Elder   rbd: implement sy...
3286
3287
3288
  
  	ret = -ENOMEM;
  	obj_request = rbd_obj_request_create(object_name, offset, length,
36be9a761   Alex Elder   rbd: implement sy...
3289
  							OBJ_REQUEST_PAGES);
788e2df3b   Alex Elder   rbd: implement sy...
3290
3291
3292
3293
3294
  	if (!obj_request)
  		goto out;
  
  	obj_request->pages = pages;
  	obj_request->page_count = page_count;
6d2940c88   Guangliang Zhao   rbd: extend the o...
3295
  	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
deb236b30   Ilya Dryomov   rbd: num_ops para...
3296
  						  obj_request);
788e2df3b   Alex Elder   rbd: implement sy...
3297
3298
  	if (!obj_request->osd_req)
  		goto out;
c99d2d4ab   Alex Elder   libceph: specify ...
3299
3300
  	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
  					offset, length, 0, 0);
406e2c9f9   Alex Elder   libceph: kill off...
3301
  	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
a4ce40a9a   Alex Elder   libceph: combine ...
3302
  					obj_request->pages,
44cd188d4   Alex Elder   rbd: separate ini...
3303
3304
3305
  					obj_request->length,
  					obj_request->offset & ~PAGE_MASK,
  					false, false);
9d4df01f0   Alex Elder   rbd: define separ...
3306
  	rbd_osd_req_format_read(obj_request);
430c28c3c   Alex Elder   rbd: define rbd_o...
3307

788e2df3b   Alex Elder   rbd: implement sy...
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
  	ret = rbd_obj_request_submit(osdc, obj_request);
  	if (ret)
  		goto out;
  	ret = rbd_obj_request_wait(obj_request);
  	if (ret)
  		goto out;
  
  	ret = obj_request->result;
  	if (ret < 0)
  		goto out;
1ceae7ef0   Alex Elder   rbd: prevent byte...
3318
3319
3320
  
  	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
  	size = (size_t) obj_request->xferred;
903bb32e8   Alex Elder   libceph: drop ret...
3321
  	ceph_copy_from_page_vector(pages, buf, 0, size);
7097f8df6   Alex Elder   rbd: get rid of s...
3322
3323
  	rbd_assert(size <= (size_t)INT_MAX);
  	ret = (int)size;
788e2df3b   Alex Elder   rbd: implement sy...
3324
3325
3326
3327
3328
3329
3330
3331
  out:
  	if (obj_request)
  		rbd_obj_request_put(obj_request);
  	else
  		ceph_release_page_vector(pages, page_count);
  
  	return ret;
  }
602adf400   Yehuda Sadeh   rbd: introduce ra...
3332
  /*
662518b12   Alex Elder   rbd: update in-co...
3333
3334
3335
   * Read the complete header for the given rbd device.  On successful
   * return, the rbd_dev->header field will contain up-to-date
   * information about the image.
602adf400   Yehuda Sadeh   rbd: introduce ra...
3336
   */
99a41ebce   Alex Elder   rbd: get rid of t...
3337
  static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
602adf400   Yehuda Sadeh   rbd: introduce ra...
3338
  {
4156d9984   Alex Elder   rbd: separate rea...
3339
  	struct rbd_image_header_ondisk *ondisk = NULL;
50f7c4c96   Xi Wang   rbd: fix integer ...
3340
  	u32 snap_count = 0;
4156d9984   Alex Elder   rbd: separate rea...
3341
3342
3343
  	u64 names_size = 0;
  	u32 want_count;
  	int ret;
602adf400   Yehuda Sadeh   rbd: introduce ra...
3344

00f1f36ff   Alex Elder   rbd: do some refa...
3345
  	/*
4156d9984   Alex Elder   rbd: separate rea...
3346
3347
3348
3349
3350
  	 * The complete header will include an array of its 64-bit
  	 * snapshot ids, followed by the names of those snapshots as
  	 * a contiguous block of NUL-terminated strings.  Note that
  	 * the number of snapshots could change by the time we read
  	 * it in, in which case we re-read it.
00f1f36ff   Alex Elder   rbd: do some refa...
3351
  	 */
4156d9984   Alex Elder   rbd: separate rea...
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
  	do {
  		size_t size;
  
  		kfree(ondisk);
  
  		size = sizeof (*ondisk);
  		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
  		size += names_size;
  		ondisk = kmalloc(size, GFP_KERNEL);
  		if (!ondisk)
662518b12   Alex Elder   rbd: update in-co...
3362
  			return -ENOMEM;
4156d9984   Alex Elder   rbd: separate rea...
3363

c41d13a31   Ilya Dryomov   rbd: use header_o...
3364
  		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
7097f8df6   Alex Elder   rbd: get rid of s...
3365
  				       0, size, ondisk);
4156d9984   Alex Elder   rbd: separate rea...
3366
  		if (ret < 0)
662518b12   Alex Elder   rbd: update in-co...
3367
  			goto out;
c0cd10db4   Alex Elder   rbd: use rbd_warn...
3368
  		if ((size_t)ret < size) {
4156d9984   Alex Elder   rbd: separate rea...
3369
  			ret = -ENXIO;
06ecc6cbf   Alex Elder   rbd: define and u...
3370
3371
  			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
  				size, ret);
662518b12   Alex Elder   rbd: update in-co...
3372
  			goto out;
4156d9984   Alex Elder   rbd: separate rea...
3373
3374
3375
  		}
  		if (!rbd_dev_ondisk_valid(ondisk)) {
  			ret = -ENXIO;
06ecc6cbf   Alex Elder   rbd: define and u...
3376
  			rbd_warn(rbd_dev, "invalid header");
662518b12   Alex Elder   rbd: update in-co...
3377
  			goto out;
81e759fbf   Josh Durgin   rbd: return an er...
3378
  		}
602adf400   Yehuda Sadeh   rbd: introduce ra...
3379

4156d9984   Alex Elder   rbd: separate rea...
3380
3381
3382
3383
  		names_size = le64_to_cpu(ondisk->snap_names_len);
  		want_count = snap_count;
  		snap_count = le32_to_cpu(ondisk->snap_count);
  	} while (snap_count != want_count);
00f1f36ff   Alex Elder   rbd: do some refa...
3384

662518b12   Alex Elder   rbd: update in-co...
3385
3386
  	ret = rbd_header_from_disk(rbd_dev, ondisk);
  out:
4156d9984   Alex Elder   rbd: separate rea...
3387
3388
3389
  	kfree(ondisk);
  
  	return ret;
602adf400   Yehuda Sadeh   rbd: introduce ra...
3390
  }
15228ede7   Alex Elder   rbd: clear EXISTS...
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
  /*
   * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
   * has disappeared from the (just updated) snapshot context.
   */
  static void rbd_exists_validate(struct rbd_device *rbd_dev)
  {
  	u64 snap_id;
  
  	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
  		return;
  
  	snap_id = rbd_dev->spec->snap_id;
  	if (snap_id == CEPH_NOSNAP)
  		return;
  
  	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
  		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
  }
9875201e1   Josh Durgin   rbd: fix use-afte...
3409
3410
3411
  static void rbd_dev_update_size(struct rbd_device *rbd_dev)
  {
  	sector_t size;
9875201e1   Josh Durgin   rbd: fix use-afte...
3412
3413
  
  	/*
811c66887   Ilya Dryomov   rbd: fix rbd map ...
3414
3415
3416
  	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
  	 * try to update its size.  If REMOVING is set, updating size
  	 * is just useless work since the device can't be opened.
9875201e1   Josh Durgin   rbd: fix use-afte...
3417
  	 */
811c66887   Ilya Dryomov   rbd: fix rbd map ...
3418
3419
  	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
  	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
9875201e1   Josh Durgin   rbd: fix use-afte...
3420
3421
3422
3423
3424
3425
  		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
  		dout("setting size to %llu sectors", (unsigned long long)size);
  		set_capacity(rbd_dev->disk, size);
  		revalidate_disk(rbd_dev->disk);
  	}
  }
cc4a38bdd   Alex Elder   rbd: more version...
3426
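  /*
   * Re-read the image header under header_rwsem, revalidate the parent
   * info and the mapped snapshot's EXISTS flag, and resize the block
   * device if the mapping size has changed.
   */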
  static int rbd_dev_refresh(struct rbd_device *rbd_dev)
1fe5e9932   Alex Elder   rbd: create rbd_r...
3427
  {
e627db085   Alex Elder   rbd: revalidate o...
3428
  	u64 mapping_size;
1fe5e9932   Alex Elder   rbd: create rbd_r...
3429
  	int ret;
cfbf6377b   Alex Elder   rbd: use rwsem to...
3430
  	down_write(&rbd_dev->header_rwsem);
3b5cf2a2f   Alex Elder   rbd: clean up a f...
3431
  	mapping_size = rbd_dev->mapping.size;
a720ae090   Ilya Dryomov   rbd: introduce rb...
3432
3433
  
  	ret = rbd_dev_header_info(rbd_dev);
52bb1f9be   Ilya Dryomov   rbd: harden rbd_d...
3434
  	if (ret)
73e39e4db   Ilya Dryomov   rbd: fix error pa...
3435
  		goto out;
15228ede7   Alex Elder   rbd: clear EXISTS...
3436

e8f59b595   Ilya Dryomov   rbd: do not read ...
3437
3438
3439
3440
3441
3442
3443
  	/*
  	 * If there is a parent, see if it has disappeared due to the
  	 * mapped image getting flattened.
  	 */
  	if (rbd_dev->parent) {
  		ret = rbd_dev_v2_parent_info(rbd_dev);
  		if (ret)
73e39e4db   Ilya Dryomov   rbd: fix error pa...
3444
  			goto out;
e8f59b595   Ilya Dryomov   rbd: do not read ...
3445
  	}
5ff1108cc   Ilya Dryomov   rbd: update mappi...
3446
  	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
73e39e4db   Ilya Dryomov   rbd: fix error pa...
3447
  		rbd_dev->mapping.size = rbd_dev->header.image_size;
5ff1108cc   Ilya Dryomov   rbd: update mappi...
3448
3449
3450
3451
  	} else {
  		/* validate mapped snapshot's EXISTS flag */
  		rbd_exists_validate(rbd_dev);
  	}
15228ede7   Alex Elder   rbd: clear EXISTS...
3452

73e39e4db   Ilya Dryomov   rbd: fix error pa...
3453
  out:
cfbf6377b   Alex Elder   rbd: use rwsem to...
3454
  	up_write(&rbd_dev->header_rwsem);
73e39e4db   Ilya Dryomov   rbd: fix error pa...
3455
  	if (!ret && mapping_size != rbd_dev->mapping.size)
9875201e1   Josh Durgin   rbd: fix use-afte...
3456
  		rbd_dev_update_size(rbd_dev);
1fe5e9932   Alex Elder   rbd: create rbd_r...
3457

73e39e4db   Ilya Dryomov   rbd: fix error pa...
3458
  	return ret;
1fe5e9932   Alex Elder   rbd: create rbd_r...
3459
  }
7ad18afad   Christoph Hellwig   rbd: convert to b...
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
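  /*
   * Each blk-mq request embeds a work_struct in its PDU (see the
   * tag_set.cmd_size assignment in rbd_init_disk()).  Initialize it
   * here so rbd_queue_rq() can simply hand the request off to the
   * workqueue.
   */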
  static int rbd_init_request(void *data, struct request *rq,
  		unsigned int hctx_idx, unsigned int request_idx,
  		unsigned int numa_node)
  {
  	struct work_struct *work = blk_mq_rq_to_pdu(rq);
  
  	INIT_WORK(work, rbd_queue_workfn);
  	return 0;
  }
  
  static struct blk_mq_ops rbd_mq_ops = {
  	.queue_rq	= rbd_queue_rq,
  	.map_queue	= blk_mq_map_queue,
  	.init_request	= rbd_init_request,
  };
602adf400   Yehuda Sadeh   rbd: introduce ra...
3475
3476
3477
3478
  static int rbd_init_disk(struct rbd_device *rbd_dev)
  {
  	struct gendisk *disk;
  	struct request_queue *q;
593a9e7b3   Alex Elder   rbd: small changes
3479
  	u64 segment_size;
7ad18afad   Christoph Hellwig   rbd: convert to b...
3480
  	int err;
602adf400   Yehuda Sadeh   rbd: introduce ra...
3481

602adf400   Yehuda Sadeh   rbd: introduce ra...
3482
  	/* create gendisk info */
7e513d436   Ilya Dryomov   rbd: enable exten...
3483
3484
3485
  	disk = alloc_disk(single_major ?
  			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
  			  RBD_MINORS_PER_MAJOR);
602adf400   Yehuda Sadeh   rbd: introduce ra...
3486
  	if (!disk)
1fcdb8aa1   Alex Elder   rbd: simplify rbd...
3487
  		return -ENOMEM;
602adf400   Yehuda Sadeh   rbd: introduce ra...
3488

f0f8cef5a   Alex Elder   rbd: a few simple...
3489
  	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
de71a2970   Alex Elder   rbd: rename rbd_d...
3490
  		 rbd_dev->dev_id);
602adf400   Yehuda Sadeh   rbd: introduce ra...
3491
  	disk->major = rbd_dev->major;
dd82fff1e   Ilya Dryomov   rbd: add 'minor' ...
3492
  	disk->first_minor = rbd_dev->minor;
7e513d436   Ilya Dryomov   rbd: enable exten...
3493
3494
  	if (single_major)
  		disk->flags |= GENHD_FL_EXT_DEVT;
602adf400   Yehuda Sadeh   rbd: introduce ra...
3495
3496
  	disk->fops = &rbd_bd_ops;
  	disk->private_data = rbd_dev;
7ad18afad   Christoph Hellwig   rbd: convert to b...
3497
3498
  	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
  	rbd_dev->tag_set.ops = &rbd_mq_ops;
b55841807   Ilya Dryomov   rbd: queue_depth ...
3499
  	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
7ad18afad   Christoph Hellwig   rbd: convert to b...
3500
  	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
b55841807   Ilya Dryomov   rbd: queue_depth ...
3501
  	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
7ad18afad   Christoph Hellwig   rbd: convert to b...
3502
3503
3504
3505
3506
  	rbd_dev->tag_set.nr_hw_queues = 1;
  	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
  
  	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
  	if (err)
602adf400   Yehuda Sadeh   rbd: introduce ra...
3507
  		goto out_disk;
029bcbd8b   Josh Durgin   rbd: set blk_queu...
3508

7ad18afad   Christoph Hellwig   rbd: convert to b...
3509
3510
3511
3512
3513
  	q = blk_mq_init_queue(&rbd_dev->tag_set);
  	if (IS_ERR(q)) {
  		err = PTR_ERR(q);
  		goto out_tag_set;
  	}
d8a2c89c8   Ilya Dryomov   rbd: mark block q...
3514
3515
  	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
  	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
593a9e7b3   Alex Elder   rbd: small changes
3516

029bcbd8b   Josh Durgin   rbd: set blk_queu...
3517
  	/* set io sizes to object size */
593a9e7b3   Alex Elder   rbd: small changes
3518
3519
  	segment_size = rbd_obj_bytes(&rbd_dev->header);
  	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
0d9fde4fc   Ilya Dryomov   rbd: set max_sect...
3520
  	q->limits.max_sectors = queue_max_hw_sectors(q);
d3834fefc   Ilya Dryomov   rbd: bump queue_m...
3521
  	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
593a9e7b3   Alex Elder   rbd: small changes
3522
3523
3524
  	blk_queue_max_segment_size(q, segment_size);
  	blk_queue_io_min(q, segment_size);
  	blk_queue_io_opt(q, segment_size);
029bcbd8b   Josh Durgin   rbd: set blk_queu...
3525

90e98c522   Guangliang Zhao   rbd: initial disc...
3526
3527
3528
3529
  	/* enable the discard support */
  	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
  	q->limits.discard_granularity = segment_size;
  	q->limits.discard_alignment = segment_size;
2bb4cd5cc   Jens Axboe   block: have drive...
3530
  	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
b76f82398   Josh Durgin   rbd: set the rema...
3531
  	q->limits.discard_zeroes_data = 1;
90e98c522   Guangliang Zhao   rbd: initial disc...
3532

bae818ee1   Ronny Hegewald   rbd: require stab...
3533
3534
  	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
  		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
602adf400   Yehuda Sadeh   rbd: introduce ra...
3535
3536
3537
3538
3539
  	disk->queue = q;
  
  	q->queuedata = rbd_dev;
  
  	rbd_dev->disk = disk;
602adf400   Yehuda Sadeh   rbd: introduce ra...
3540

602adf400   Yehuda Sadeh   rbd: introduce ra...
3541
  	return 0;
7ad18afad   Christoph Hellwig   rbd: convert to b...
3542
3543
  out_tag_set:
  	blk_mq_free_tag_set(&rbd_dev->tag_set);
602adf400   Yehuda Sadeh   rbd: introduce ra...
3544
3545
  out_disk:
  	put_disk(disk);
7ad18afad   Christoph Hellwig   rbd: convert to b...
3546
  	return err;
602adf400   Yehuda Sadeh   rbd: introduce ra...
3547
  }
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3548
3549
3550
  /*
    sysfs
  */
593a9e7b3   Alex Elder   rbd: small changes
3551
3552
3553
3554
  static struct rbd_device *dev_to_rbd_dev(struct device *dev)
  {
  	return container_of(dev, struct rbd_device, dev);
  }
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3555
3556
3557
  static ssize_t rbd_size_show(struct device *dev,
  			     struct device_attribute *attr, char *buf)
  {
593a9e7b3   Alex Elder   rbd: small changes
3558
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
a51aa0c04   Josh Durgin   rbd: expose the c...
3559

fc71d8330   Alex Elder   rbd: fix up some ...
3560
3561
3562
  	return sprintf(buf, "%llu
  ",
  		(unsigned long long)rbd_dev->mapping.size);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3563
  }
34b131849   Alex Elder   rbd: add an rbd f...
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
  /*
   * Note this shows the features for whatever's mapped, which is not
   * necessarily the base image.
   */
  static ssize_t rbd_features_show(struct device *dev,
  			     struct device_attribute *attr, char *buf)
  {
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
  
  	return sprintf(buf, "0x%016llx
  ",
fc71d8330   Alex Elder   rbd: fix up some ...
3575
  			(unsigned long long)rbd_dev->mapping.features);
34b131849   Alex Elder   rbd: add an rbd f...
3576
  }
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3577
3578
3579
  static ssize_t rbd_major_show(struct device *dev,
  			      struct device_attribute *attr, char *buf)
  {
593a9e7b3   Alex Elder   rbd: small changes
3580
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf400   Yehuda Sadeh   rbd: introduce ra...
3581

fc71d8330   Alex Elder   rbd: fix up some ...
3582
3583
3584
3585
3586
3587
  	if (rbd_dev->major)
  		return sprintf(buf, "%d
  ", rbd_dev->major);
  
  	return sprintf(buf, "(none)
  ");
dd82fff1e   Ilya Dryomov   rbd: add 'minor' ...
3588
3589
3590
3591
3592
3593
  }
  
  static ssize_t rbd_minor_show(struct device *dev,
  			      struct device_attribute *attr, char *buf)
  {
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
fc71d8330   Alex Elder   rbd: fix up some ...
3594

dd82fff1e   Ilya Dryomov   rbd: add 'minor' ...
3595
3596
  	return sprintf(buf, "%d
  ", rbd_dev->minor);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3597
3598
3599
3600
  }
  
  static ssize_t rbd_client_id_show(struct device *dev,
  				  struct device_attribute *attr, char *buf)
602adf400   Yehuda Sadeh   rbd: introduce ra...
3601
  {
593a9e7b3   Alex Elder   rbd: small changes
3602
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3603

1dbb43991   Alex Elder   rbd: do not dupli...
3604
3605
3606
  	return sprintf(buf, "client%lld
  ",
  			ceph_client_id(rbd_dev->rbd_client->client));
602adf400   Yehuda Sadeh   rbd: introduce ra...
3607
  }
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3608
3609
  static ssize_t rbd_pool_show(struct device *dev,
  			     struct device_attribute *attr, char *buf)
602adf400   Yehuda Sadeh   rbd: introduce ra...
3610
  {
593a9e7b3   Alex Elder   rbd: small changes
3611
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3612

0d7dbfce9   Alex Elder   rbd: define image...
3613
3614
  	return sprintf(buf, "%s
  ", rbd_dev->spec->pool_name);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3615
  }
9bb2f334b   Alex Elder   rbd: create pool_...
3616
3617
3618
3619
  static ssize_t rbd_pool_id_show(struct device *dev,
  			     struct device_attribute *attr, char *buf)
  {
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
0d7dbfce9   Alex Elder   rbd: define image...
3620
3621
  	return sprintf(buf, "%llu
  ",
fc71d8330   Alex Elder   rbd: fix up some ...
3622
  			(unsigned long long) rbd_dev->spec->pool_id);
9bb2f334b   Alex Elder   rbd: create pool_...
3623
  }
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3624
3625
3626
  static ssize_t rbd_name_show(struct device *dev,
  			     struct device_attribute *attr, char *buf)
  {
593a9e7b3   Alex Elder   rbd: small changes
3627
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3628

a92ffdf8a   Alex Elder   rbd: allow null i...
3629
3630
3631
3632
3633
3634
  	if (rbd_dev->spec->image_name)
  		return sprintf(buf, "%s
  ", rbd_dev->spec->image_name);
  
  	return sprintf(buf, "(unknown)
  ");
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3635
  }
589d30e0b   Alex Elder   rbd: define rbd_d...
3636
3637
3638
3639
  static ssize_t rbd_image_id_show(struct device *dev,
  			     struct device_attribute *attr, char *buf)
  {
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
0d7dbfce9   Alex Elder   rbd: define image...
3640
3641
  	return sprintf(buf, "%s
  ", rbd_dev->spec->image_id);
589d30e0b   Alex Elder   rbd: define rbd_d...
3642
  }
34b131849   Alex Elder   rbd: add an rbd f...
3643
3644
3645
3646
  /*
   * Shows the name of the currently-mapped snapshot (or
   * RBD_SNAP_HEAD_NAME for the base image).
   */
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3647
3648
3649
3650
  static ssize_t rbd_snap_show(struct device *dev,
  			     struct device_attribute *attr,
  			     char *buf)
  {
593a9e7b3   Alex Elder   rbd: small changes
3651
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3652

0d7dbfce9   Alex Elder   rbd: define image...
3653
3654
  	return sprintf(buf, "%s
  ", rbd_dev->spec->snap_name);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3655
  }
86b00e0da   Alex Elder   rbd: get parent s...
3656
  /*
ff96128fb   Ilya Dryomov   rbd: show the ent...
3657
3658
3659
   * For a v2 image, shows the chain of parent images, separated by empty
   * lines.  For v1 images or if there is no parent, shows "(no parent
   * image)".
86b00e0da   Alex Elder   rbd: get parent s...
3660
3661
   */
  static ssize_t rbd_parent_show(struct device *dev,
ff96128fb   Ilya Dryomov   rbd: show the ent...
3662
3663
  			       struct device_attribute *attr,
  			       char *buf)
86b00e0da   Alex Elder   rbd: get parent s...
3664
3665
  {
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
ff96128fb   Ilya Dryomov   rbd: show the ent...
3666
  	ssize_t count = 0;
86b00e0da   Alex Elder   rbd: get parent s...
3667

ff96128fb   Ilya Dryomov   rbd: show the ent...
3668
  	if (!rbd_dev->parent)
86b00e0da   Alex Elder   rbd: get parent s...
3669
3670
  		return sprintf(buf, "(no parent image)
  ");
ff96128fb   Ilya Dryomov   rbd: show the ent...
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
  	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
  		struct rbd_spec *spec = rbd_dev->parent_spec;
  
  		count += sprintf(&buf[count], "%s"
  			    "pool_id %llu
  pool_name %s
  "
  			    "image_id %s
  image_name %s
  "
  			    "snap_id %llu
  snap_name %s
  "
  			    "overlap %llu
  ",
  			    !count ? "" : "
  ", /* first? */
  			    spec->pool_id, spec->pool_name,
  			    spec->image_id, spec->image_name ?: "(unknown)",
  			    spec->snap_id, spec->snap_name,
  			    rbd_dev->parent_overlap);
  	}
  
  	return count;
86b00e0da   Alex Elder   rbd: get parent s...
3695
  }
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3696
3697
3698
3699
3700
  static ssize_t rbd_image_refresh(struct device *dev,
  				 struct device_attribute *attr,
  				 const char *buf,
  				 size_t size)
  {
593a9e7b3   Alex Elder   rbd: small changes
3701
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
b813623ab   Alex Elder   rbd: return obj v...
3702
  	int ret;
602adf400   Yehuda Sadeh   rbd: introduce ra...
3703

cc4a38bdd   Alex Elder   rbd: more version...
3704
  	ret = rbd_dev_refresh(rbd_dev);
e627db085   Alex Elder   rbd: revalidate o...
3705
  	if (ret)
52bb1f9be   Ilya Dryomov   rbd: harden rbd_d...
3706
  		return ret;
b813623ab   Alex Elder   rbd: return obj v...
3707

52bb1f9be   Ilya Dryomov   rbd: harden rbd_d...
3708
  	return size;
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3709
  }
602adf400   Yehuda Sadeh   rbd: introduce ra...
3710

dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3711
  static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
34b131849   Alex Elder   rbd: add an rbd f...
3712
  static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3713
  static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
dd82fff1e   Ilya Dryomov   rbd: add 'minor' ...
3714
  static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3715
3716
  static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
  static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
9bb2f334b   Alex Elder   rbd: create pool_...
3717
  static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3718
  static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
589d30e0b   Alex Elder   rbd: define rbd_d...
3719
  static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3720
3721
  static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
  static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
86b00e0da   Alex Elder   rbd: get parent s...
3722
  static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3723
3724
3725
  
  static struct attribute *rbd_attrs[] = {
  	&dev_attr_size.attr,
34b131849   Alex Elder   rbd: add an rbd f...
3726
  	&dev_attr_features.attr,
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3727
  	&dev_attr_major.attr,
dd82fff1e   Ilya Dryomov   rbd: add 'minor' ...
3728
  	&dev_attr_minor.attr,
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3729
3730
  	&dev_attr_client_id.attr,
  	&dev_attr_pool.attr,
9bb2f334b   Alex Elder   rbd: create pool_...
3731
  	&dev_attr_pool_id.attr,
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3732
  	&dev_attr_name.attr,
589d30e0b   Alex Elder   rbd: define rbd_d...
3733
  	&dev_attr_image_id.attr,
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3734
  	&dev_attr_current_snap.attr,
86b00e0da   Alex Elder   rbd: get parent s...
3735
  	&dev_attr_parent.attr,
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3736
  	&dev_attr_refresh.attr,
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
  	NULL
  };
  
  static struct attribute_group rbd_attr_group = {
  	.attrs = rbd_attrs,
  };
  
  static const struct attribute_group *rbd_attr_groups[] = {
  	&rbd_attr_group,
  	NULL
  };
6cac4695f   Ilya Dryomov   rbd: set device_t...
3748
  static void rbd_dev_release(struct device *dev);
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3749
3750
3751
3752
  
  static struct device_type rbd_device_type = {
  	.name		= "rbd",
  	.groups		= rbd_attr_groups,
6cac4695f   Ilya Dryomov   rbd: set device_t...
3753
  	.release	= rbd_dev_release,
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3754
  };
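  /*
   * Illustrative usage (paths are examples): once an image is mapped,
   * its attributes appear under /sys/bus/rbd/devices/<id>/, e.g.
   *
   *   $ cat /sys/bus/rbd/devices/0/size
   *   $ echo 1 > /sys/bus/rbd/devices/0/refresh	(triggers rbd_dev_refresh())
   */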
8b8fb99c5   Alex Elder   rbd: add referenc...
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
  static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
  {
  	kref_get(&spec->kref);
  
  	return spec;
  }
  
  static void rbd_spec_free(struct kref *kref);
  static void rbd_spec_put(struct rbd_spec *spec)
  {
  	if (spec)
  		kref_put(&spec->kref, rbd_spec_free);
  }
  
  static struct rbd_spec *rbd_spec_alloc(void)
  {
  	struct rbd_spec *spec;
  
  	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
  	if (!spec)
  		return NULL;
040775997   Ilya Dryomov   rbd: split rbd_de...
3776
3777
3778
  
  	spec->pool_id = CEPH_NOPOOL;
  	spec->snap_id = CEPH_NOSNAP;
8b8fb99c5   Alex Elder   rbd: add referenc...
3779
  	kref_init(&spec->kref);
8b8fb99c5   Alex Elder   rbd: add referenc...
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
  	return spec;
  }
  
  static void rbd_spec_free(struct kref *kref)
  {
  	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
  
  	kfree(spec->pool_name);
  	kfree(spec->image_id);
  	kfree(spec->image_name);
  	kfree(spec->snap_name);
  	kfree(spec);
  }
dd5ac32d4   Ilya Dryomov   rbd: don't free r...
3793
3794
3795
3796
  static void rbd_dev_release(struct device *dev)
  {
  	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
  	bool need_put = !!rbd_dev->opts;
c41d13a31   Ilya Dryomov   rbd: use header_o...
3797
  	ceph_oid_destroy(&rbd_dev->header_oid);
dd5ac32d4   Ilya Dryomov   rbd: don't free r...
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
  	rbd_put_client(rbd_dev->rbd_client);
  	rbd_spec_put(rbd_dev->spec);
  	kfree(rbd_dev->opts);
  	kfree(rbd_dev);
  
  	/*
  	 * This is racy, but way better than putting module outside of
  	 * the release callback.  The race window is pretty small, so
  	 * doing something similar to dm (dm-builtin.c) is overkill.
  	 */
  	if (need_put)
  		module_put(THIS_MODULE);
  }
cc344fa1b   Alex Elder   rbd: eliminate sp...
3811
  static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
d147543d7   Ilya Dryomov   rbd: store rbd_op...
3812
3813
  					 struct rbd_spec *spec,
  					 struct rbd_options *opts)
c53d58933   Alex Elder   rbd: define rbd_d...
3814
3815
3816
3817
3818
3819
3820
3821
  {
  	struct rbd_device *rbd_dev;
  
  	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
  	if (!rbd_dev)
  		return NULL;
  
  	spin_lock_init(&rbd_dev->lock);
6d292906f   Alex Elder   rbd: define flags...
3822
  	rbd_dev->flags = 0;
a2acd00e7   Alex Elder   rbd: reference co...
3823
  	atomic_set(&rbd_dev->parent_ref, 0);
c53d58933   Alex Elder   rbd: define rbd_d...
3824
  	INIT_LIST_HEAD(&rbd_dev->node);
c53d58933   Alex Elder   rbd: define rbd_d...
3825
  	init_rwsem(&rbd_dev->header_rwsem);
c41d13a31   Ilya Dryomov   rbd: use header_o...
3826
  	ceph_oid_init(&rbd_dev->header_oid);
922dab613   Ilya Dryomov   libceph, rbd: cep...
3827
  	ceph_oloc_init(&rbd_dev->header_oloc);
c41d13a31   Ilya Dryomov   rbd: use header_o...
3828

dd5ac32d4   Ilya Dryomov   rbd: don't free r...
3829
3830
3831
  	rbd_dev->dev.bus = &rbd_bus_type;
  	rbd_dev->dev.type = &rbd_device_type;
  	rbd_dev->dev.parent = &rbd_root_dev;
dd5ac32d4   Ilya Dryomov   rbd: don't free r...
3832
  	device_initialize(&rbd_dev->dev);
c53d58933   Alex Elder   rbd: define rbd_d...
3833
  	rbd_dev->rbd_client = rbdc;
d147543d7   Ilya Dryomov   rbd: store rbd_op...
3834
3835
  	rbd_dev->spec = spec;
  	rbd_dev->opts = opts;
c53d58933   Alex Elder   rbd: define rbd_d...
3836

0903e875c   Alex Elder   rbd: use a common...
3837
  	/* Initialize the layout used for all rbd requests */
7627151ea   Yan, Zheng   libceph: define n...
3838
3839
3840
3841
  	rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
  	rbd_dev->layout.stripe_count = 1;
  	rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
  	rbd_dev->layout.pool_id = spec->pool_id;
30c156d99   Yan, Zheng   libceph: rados po...
3842
  	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
0903e875c   Alex Elder   rbd: use a common...
3843

dd5ac32d4   Ilya Dryomov   rbd: don't free r...
3844
3845
3846
3847
3848
3849
3850
  	/*
  	 * If this is a mapping rbd_dev (as opposed to a parent one),
  	 * pin our module.  We have a ref from do_rbd_add(), so use
  	 * __module_get().
  	 */
  	if (rbd_dev->opts)
  		__module_get(THIS_MODULE);
c53d58933   Alex Elder   rbd: define rbd_d...
3851
3852
3853
3854
3855
  	return rbd_dev;
  }
  
  static void rbd_dev_destroy(struct rbd_device *rbd_dev)
  {
dd5ac32d4   Ilya Dryomov   rbd: don't free r...
3856
3857
  	if (rbd_dev)
  		put_device(&rbd_dev->dev);
c53d58933   Alex Elder   rbd: define rbd_d...
3858
  }
dfc5606dc   Yehuda Sadeh   rbd: replace the ...
3859
  /*
9d475de5d   Alex Elder   rbd: add code to ...
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
   * Get the size and object order for an image snapshot, or if
   * snap_id is CEPH_NOSNAP, gets this information for the base
   * image.
   */
  static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
  				u8 *order, u64 *snap_size)
  {
  	__le64 snapid = cpu_to_le64(snap_id);
  	int ret;
  	struct {
  		u8 order;
  		__le64 size;
  	} __attribute__ ((packed)) size_buf = { 0 };
c41d13a31   Ilya Dryomov   rbd: use header_o...
3873
  	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
9d475de5d   Alex Elder   rbd: add code to ...
3874
  				"rbd", "get_size",
4157976b2   Alex Elder   rbd: void data po...
3875
  				&snapid, sizeof (snapid),
e2a58ee55   Alex Elder   rbd: drop rbd_obj...
3876
  				&size_buf, sizeof (size_buf));
36be9a761   Alex Elder   rbd: implement sy...
3877
3878
  	dout("%s: rbd_obj_method_sync returned %d
  ", __func__, ret);
9d475de5d   Alex Elder   rbd: add code to ...
3879
3880
  	if (ret < 0)
  		return ret;
57385b51c   Alex Elder   rbd: have rbd_obj...
3881
3882
  	if (ret < sizeof (size_buf))
  		return -ERANGE;
9d475de5d   Alex Elder   rbd: add code to ...
3883

c35455791   Josh Durgin   rbd: fix null der...
3884
  	if (order) {
c86f86e9e   Alex Elder   rbd: make snap_si...
3885
  		*order = size_buf.order;
c35455791   Josh Durgin   rbd: fix null der...
3886
3887
  		dout("  order %u", (unsigned int)*order);
  	}
9d475de5d   Alex Elder   rbd: add code to ...
3888
  	*snap_size = le64_to_cpu(size_buf.size);
c35455791   Josh Durgin   rbd: fix null der...
3889
3890
3891
  	dout("  snap_id 0x%016llx snap_size = %llu
  ",
  		(unsigned long long)snap_id,
57385b51c   Alex Elder   rbd: have rbd_obj...
3892
  		(unsigned long long)*snap_size);
9d475de5d   Alex Elder   rbd: add code to ...
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
  
  	return 0;
  }
  
  static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
  {
  	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
  					&rbd_dev->header.obj_order,
  					&rbd_dev->header.image_size);
  }
1e1301998   Alex Elder   rbd: get the obje...
3903
3904
3905
3906
3907
3908
3909
3910
3911
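  /* Fetch the prefix used to name the image's data objects. */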
  static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
  {
  	void *reply_buf;
  	int ret;
  	void *p;
  
  	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
  	if (!reply_buf)
  		return -ENOMEM;
c41d13a31   Ilya Dryomov   rbd: use header_o...
3912
  	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4157976b2   Alex Elder   rbd: void data po...
3913
  				"rbd", "get_object_prefix", NULL, 0,
e2a58ee55   Alex Elder   rbd: drop rbd_obj...
3914
  				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
36be9a761   Alex Elder   rbd: implement sy...
3915
3916
  	dout("%s: rbd_obj_method_sync returned %d
  ", __func__, ret);
1e1301998   Alex Elder   rbd: get the obje...
3917
3918
3919
3920
3921
  	if (ret < 0)
  		goto out;
  
  	p = reply_buf;
  	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
57385b51c   Alex Elder   rbd: have rbd_obj...
3922
3923
  						p + ret, NULL, GFP_NOIO);
  	ret = 0;
1e1301998   Alex Elder   rbd: get the obje...
3924
3925
3926
3927
3928
3929
3930
3931
  
  	if (IS_ERR(rbd_dev->header.object_prefix)) {
  		ret = PTR_ERR(rbd_dev->header.object_prefix);
  		rbd_dev->header.object_prefix = NULL;
  	} else {
  		dout("  object_prefix = %s
  ", rbd_dev->header.object_prefix);
  	}
1e1301998   Alex Elder   rbd: get the obje...
3932
3933
3934
3935
3936
  out:
  	kfree(reply_buf);
  
  	return ret;
  }
b1b5402aa   Alex Elder   rbd: get image fe...
3937
3938
3939
3940
3941
3942
3943
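  /*
   * Fetch the feature bits for the given snapshot via "get_features".
   * Mapping is refused if the image uses any incompatible feature that
   * is not in RBD_FEATURES_SUPPORTED.
   */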
  static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
  		u64 *snap_features)
  {
  	__le64 snapid = cpu_to_le64(snap_id);
  	struct {
  		__le64 features;
  		__le64 incompat;
4157976b2   Alex Elder   rbd: void data po...
3944
  	} __attribute__ ((packed)) features_buf = { 0 };
d3767f0fa   Ilya Dryomov   rbd: report unsup...
3945
  	u64 unsup;
b1b5402aa   Alex Elder   rbd: get image fe...
3946
  	int ret;
c41d13a31   Ilya Dryomov   rbd: use header_o...
3947
  	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
b1b5402aa   Alex Elder   rbd: get image fe...
3948
  				"rbd", "get_features",
4157976b2   Alex Elder   rbd: void data po...
3949
  				&snapid, sizeof (snapid),
e2a58ee55   Alex Elder   rbd: drop rbd_obj...
3950
  				&features_buf, sizeof (features_buf));
36be9a761   Alex Elder   rbd: implement sy...
3951
3952
  	dout("%s: rbd_obj_method_sync returned %d
  ", __func__, ret);
b1b5402aa   Alex Elder   rbd: get image fe...
3953
3954
  	if (ret < 0)
  		return ret;
57385b51c   Alex Elder   rbd: have rbd_obj...
3955
3956
  	if (ret < sizeof (features_buf))
  		return -ERANGE;
d889140c4   Alex Elder   rbd: implement fe...
3957

d3767f0fa   Ilya Dryomov   rbd: report unsup...
3958
3959
3960
3961
  	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
  	if (unsup) {
  		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
  			 unsup);
b8f5c6edc   Alex Elder   rbd: don't use EN...
3962
  		return -ENXIO;
d3767f0fa   Ilya Dryomov   rbd: report unsup...
3963
  	}
d889140c4   Alex Elder   rbd: implement fe...
3964

b1b5402aa   Alex Elder   rbd: get image fe...
3965
3966
3967
3968
  	*snap_features = le64_to_cpu(features_buf.features);
  
  	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx
  ",
57385b51c   Alex Elder   rbd: have rbd_obj...
3969
3970
3971
  		(unsigned long long)snap_id,
  		(unsigned long long)*snap_features,
  		(unsigned long long)le64_to_cpu(features_buf.incompat));
b1b5402aa   Alex Elder   rbd: get image fe...
3972
3973
3974
3975
3976
3977
3978
3979
3980
  
  	return 0;
  }
  
  static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
  {
  	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
  						&rbd_dev->header.features);
  }
86b00e0da   Alex Elder   rbd: get parent s...
3981
3982
3983
3984
3985
3986
3987
3988
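  /*
   * Call the "get_parent" class method and decode the reply: parent
   * pool_id, image_id, snap_id and the overlap with the parent.  A
   * pool_id of CEPH_NOPOOL means there is no parent (possibly because
   * the clone has been flattened).
   */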
  static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
  {
  	struct rbd_spec *parent_spec;
  	size_t size;
  	void *reply_buf = NULL;
  	__le64 snapid;
  	void *p;
  	void *end;
642a25375   Alex Elder   rbd: get parent i...
3989
  	u64 pool_id;
86b00e0da   Alex Elder   rbd: get parent s...
3990
  	char *image_id;
3b5cf2a2f   Alex Elder   rbd: clean up a f...
3991
  	u64 snap_id;
86b00e0da   Alex Elder   rbd: get parent s...
3992
  	u64 overlap;
86b00e0da   Alex Elder   rbd: get parent s...
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
  	int ret;
  
  	parent_spec = rbd_spec_alloc();
  	if (!parent_spec)
  		return -ENOMEM;
  
  	size = sizeof (__le64) +				/* pool_id */
  		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
  		sizeof (__le64) +				/* snap_id */
  		sizeof (__le64);				/* overlap */
  	reply_buf = kmalloc(size, GFP_KERNEL);
  	if (!reply_buf) {
  		ret = -ENOMEM;
  		goto out_err;
  	}
4d9b67cdd   Ilya Dryomov   rbd: take snap_id...
4008
  	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
c41d13a31   Ilya Dryomov   rbd: use header_o...
4009
  	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
86b00e0da   Alex Elder   rbd: get parent s...
4010
  				"rbd", "get_parent",
4157976b2   Alex Elder   rbd: void data po...
4011
  				&snapid, sizeof (snapid),
e2a58ee55   Alex Elder   rbd: drop rbd_obj...
4012
  				reply_buf, size);
36be9a761   Alex Elder   rbd: implement sy...
4013
4014
  	dout("%s: rbd_obj_method_sync returned %d
  ", __func__, ret);
86b00e0da   Alex Elder   rbd: get parent s...
4015
4016
  	if (ret < 0)
  		goto out_err;
86b00e0da   Alex Elder   rbd: get parent s...
4017
  	p = reply_buf;
57385b51c   Alex Elder   rbd: have rbd_obj...
4018
4019
  	end = reply_buf + ret;
  	ret = -ERANGE;
642a25375   Alex Elder   rbd: get parent i...
4020
  	ceph_decode_64_safe(&p, end, pool_id, out_err);
392a9dad7   Alex Elder   rbd: detect when ...
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
  	if (pool_id == CEPH_NOPOOL) {
  		/*
  		 * Either the parent never existed, or we have a
  		 * record of it but the image got flattened, so it no
  		 * longer has a parent.  When the parent of a
  		 * layered image disappears we immediately set the
  		 * overlap to 0.  The effect of this is that all new
  		 * requests will be treated as if the image had no
  		 * parent.
  		 */
  		if (rbd_dev->parent_overlap) {
  			rbd_dev->parent_overlap = 0;
392a9dad7   Alex Elder   rbd: detect when ...
4033
4034
4035
4036
4037
  			rbd_dev_parent_put(rbd_dev);
  			pr_info("%s: clone image has been flattened
  ",
  				rbd_dev->disk->disk_name);
  		}
86b00e0da   Alex Elder   rbd: get parent s...
4038
  		goto out;	/* No parent?  No problem. */
392a9dad7   Alex Elder   rbd: detect when ...
4039
  	}
86b00e0da   Alex Elder   rbd: get parent s...
4040

0903e875c   Alex Elder   rbd: use a common...
4041
4042
4043
  	/* The ceph file layout needs to fit pool id in 32 bits */
  
  	ret = -EIO;
642a25375   Alex Elder   rbd: get parent i...
4044
  	if (pool_id > (u64)U32_MAX) {
9584d5082   Ilya Dryomov   rbd: remove extra...
4045
  		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
642a25375   Alex Elder   rbd: get parent i...
4046
  			(unsigned long long)pool_id, U32_MAX);
57385b51c   Alex Elder   rbd: have rbd_obj...
4047
  		goto out_err;
c0cd10db4   Alex Elder   rbd: use rbd_warn...
4048
  	}
0903e875c   Alex Elder   rbd: use a common...
4049

979ed480a   Alex Elder   rbd: kill rbd_spe...
4050
  	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
86b00e0da   Alex Elder   rbd: get parent s...
4051
4052
4053
4054
  	if (IS_ERR(image_id)) {
  		ret = PTR_ERR(image_id);
  		goto out_err;
  	}
3b5cf2a2f   Alex Elder   rbd: clean up a f...
4055
  	ceph_decode_64_safe(&p, end, snap_id, out_err);
86b00e0da   Alex Elder   rbd: get parent s...
4056
  	ceph_decode_64_safe(&p, end, overlap, out_err);
3b5cf2a2f   Alex Elder   rbd: clean up a f...
4057
4058
4059
4060
4061
4062
4063
4064
4065
  	/*
  	 * The parent won't change (except when the clone is
  	 * flattened, which is already handled).  So we only need to
  	 * record the parent spec if we have not already done so.
  	 */
  	if (!rbd_dev->parent_spec) {
  		parent_spec->pool_id = pool_id;
  		parent_spec->image_id = image_id;
  		parent_spec->snap_id = snap_id;
70cf49cfc   Alex Elder   rbd: ignore zero-...
4066
4067
  		rbd_dev->parent_spec = parent_spec;
  		parent_spec = NULL;	/* rbd_dev now owns this */
fbba11b3b   Ilya Dryomov   rbd: do not leak ...
4068
4069
  	} else {
  		kfree(image_id);
3b5cf2a2f   Alex Elder   rbd: clean up a f...
4070
4071
4072
  	}
  
  	/*
cf32bd9c8   Ilya Dryomov   rbd: do not treat...
4073
4074
  	 * We always update the parent overlap.  If it's zero we issue
  	 * a warning, as we will proceed as if there was no parent.
3b5cf2a2f   Alex Elder   rbd: clean up a f...
4075
  	 */
3b5cf2a2f   Alex Elder   rbd: clean up a f...
4076
  	if (!overlap) {
3b5cf2a2f   Alex Elder   rbd: clean up a f...
4077
  		if (parent_spec) {
cf32bd9c8   Ilya Dryomov   rbd: do not treat...
4078
4079
4080
4081
  			/* refresh, careful to warn just once */
  			if (rbd_dev->parent_overlap)
  				rbd_warn(rbd_dev,
  				    "clone now standalone (overlap became 0)");
3b5cf2a2f   Alex Elder   rbd: clean up a f...
4082
  		} else {
cf32bd9c8   Ilya Dryomov   rbd: do not treat...
4083
4084
  			/* initial probe */
  			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
3b5cf2a2f   Alex Elder   rbd: clean up a f...
4085
  		}
70cf49cfc   Alex Elder   rbd: ignore zero-...
4086
  	}
cf32bd9c8   Ilya Dryomov   rbd: do not treat...
4087
  	rbd_dev->parent_overlap = overlap;
86b00e0da   Alex Elder   rbd: get parent s...
4088
4089
4090
4091
4092
4093
4094
4095
  out:
  	ret = 0;
  out_err:
  	kfree(reply_buf);
  	rbd_spec_put(parent_spec);
  
  	return ret;
  }
cc070d59b   Alex Elder   rbd: get and chec...
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
  static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
  {
  	struct {
  		__le64 stripe_unit;
  		__le64 stripe_count;
  	} __attribute__ ((packed)) striping_info_buf = { 0 };
  	size_t size = sizeof (striping_info_buf);
  	void *p;
  	u64 obj_size;
  	u64 stripe_unit;
  	u64 stripe_count;
  	int ret;
c41d13a31   Ilya Dryomov   rbd: use header_o...
4108
  	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
cc070d59b   Alex Elder   rbd: get and chec...
4109
  				"rbd", "get_stripe_unit_count", NULL, 0,
e2a58ee55   Alex Elder   rbd: drop rbd_obj...
4110
  				(char *)&striping_info_buf, size);
cc070d59b   Alex Elder   rbd: get and chec...
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
  	dout("%s: rbd_obj_method_sync returned %d
  ", __func__, ret);
  	if (ret < 0)
  		return ret;
  	if (ret < size)
  		return -ERANGE;
  
  	/*
  	 * We don't actually support the "fancy striping" feature
  	 * (STRIPINGV2) yet, but if the striping sizes are the
  	 * defaults the behavior is the same as before.  So find
  	 * out, and only fail if the image has non-default values.
  	 */
  	ret = -EINVAL;
  	obj_size = (u64)1 << rbd_dev->header.obj_order;
  	p = &striping_info_buf;
  	stripe_unit = ceph_decode_64(&p);
  	if (stripe_unit != obj_size) {
  		rbd_warn(rbd_dev, "unsupported stripe unit "
  				"(got %llu want %llu)",
  				stripe_unit, obj_size);
  		return -EINVAL;
  	}
  	stripe_count = ceph_decode_64(&p);
  	if (stripe_count != 1) {
  		rbd_warn(rbd_dev, "unsupported stripe count "
  				"(got %llu want 1)", stripe_count);
  		return -EINVAL;
  	}
500d0c0fb   Alex Elder   rbd: move stripe_...
4140
4141
  	rbd_dev->header.stripe_unit = stripe_unit;
  	rbd_dev->header.stripe_count = stripe_count;
cc070d59b   Alex Elder   rbd: get and chec...
4142
4143
4144
  
  	return 0;
  }
9e15b77d9   Alex Elder   rbd: get addition...
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
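  /*
   * Look up the image name for spec->image_id in the RBD_DIRECTORY
   * object via "dir_get_name".  Returns NULL on failure; the caller
   * tolerates a missing name.
   */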
  static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
  {
  	size_t image_id_size;
  	char *image_id;
  	void *p;
  	void *end;
  	size_t size;
  	void *reply_buf = NULL;
  	size_t len = 0;
  	char *image_name = NULL;
  	int ret;
  
  	rbd_assert(!rbd_dev->spec->image_name);
69e7a02f6   Alex Elder   rbd: kill rbd_spe...
4158
4159
  	len = strlen(rbd_dev->spec->image_id);
  	image_id_size = sizeof (__le32) + len;
9e15b77d9   Alex Elder   rbd: get addition...
4160
4161
4162
4163
4164
  	image_id = kmalloc(image_id_size, GFP_KERNEL);
  	if (!image_id)
  		return NULL;
  
  	p = image_id;
4157976b2   Alex Elder   rbd: void data po...
4165
  	end = image_id + image_id_size;
57385b51c   Alex Elder   rbd: have rbd_obj...
4166
  	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
9e15b77d9   Alex Elder   rbd: get addition...
4167
4168
4169
4170
4171
  
  	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
  	reply_buf = kmalloc(size, GFP_KERNEL);
  	if (!reply_buf)
  		goto out;
36be9a761   Alex Elder   rbd: implement sy...
4172
  	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
9e15b77d9   Alex Elder   rbd: get addition...
4173
4174
  				"rbd", "dir_get_name",
  				image_id, image_id_size,
e2a58ee55   Alex Elder   rbd: drop rbd_obj...
4175
  				reply_buf, size);
9e15b77d9   Alex Elder   rbd: get addition...
4176
4177
4178
  	if (ret < 0)
  		goto out;
  	p = reply_buf;
f40eb349e   Alex Elder   rbd: use rbd_obj_...
4179
  	end = reply_buf + ret;
9e15b77d9   Alex Elder   rbd: get addition...
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
  	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
  	if (IS_ERR(image_name))
  		image_name = NULL;
  	else
  		dout("%s: name is %s len is %zd
  ", __func__, image_name, len);
  out:
  	kfree(reply_buf);
  	kfree(image_id);
  
  	return image_name;
  }
2ad3d7167   Alex Elder   rbd: define rbd_s...
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
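  /*
   * Format 1 keeps snapshot names as consecutive NUL-terminated strings
   * in the on-disk header, so they can be walked in parallel with
   * snapc->snaps[].  Format 2 (below) must fetch each name from the OSD.
   */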
  static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
  {
  	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
  	const char *snap_name;
  	u32 which = 0;
  
  	/* Skip over names until we find the one we are looking for */
  
  	snap_name = rbd_dev->header.snap_names;
  	while (which < snapc->num_snaps) {
  		if (!strcmp(name, snap_name))
  			return snapc->snaps[which];
  		snap_name += strlen(snap_name) + 1;
  		which++;
  	}
  	return CEPH_NOSNAP;
  }
  
  static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
  {
  	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
  	u32 which;
  	bool found = false;
  	u64 snap_id;
  
  	for (which = 0; !found && which < snapc->num_snaps; which++) {
  		const char *snap_name;
  
  		snap_id = snapc->snaps[which];
  		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
efadc98aa   Josh Durgin   rbd: ignore unmap...
4222
4223
4224
4225
4226
4227
4228
  		if (IS_ERR(snap_name)) {
  			/* ignore no-longer existing snapshots */
  			if (PTR_ERR(snap_name) == -ENOENT)
  				continue;
  			else
  				break;
  		}
2ad3d7167   Alex Elder   rbd: define rbd_s...
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
  		found = !strcmp(name, snap_name);
  		kfree(snap_name);
  	}
  	return found ? snap_id : CEPH_NOSNAP;
  }
  
  /*
   * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
   * no snapshot by that name is found, or if an error occurs.
   */
  static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
  {
  	if (rbd_dev->image_format == 1)
  		return rbd_v1_snap_id_by_name(rbd_dev, name);
  
  	return rbd_v2_snap_id_by_name(rbd_dev, name);
  }
9e15b77d9   Alex Elder   rbd: get addition...
4246
  /*
040775997   Ilya Dryomov   rbd: split rbd_de...
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
   * An image being mapped will have everything but the snap id.
   */
  static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
  {
  	struct rbd_spec *spec = rbd_dev->spec;
  
  	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
  	rbd_assert(spec->image_id && spec->image_name);
  	rbd_assert(spec->snap_name);
  
  	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
  		u64 snap_id;
  
  		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
  		if (snap_id == CEPH_NOSNAP)
  			return -ENOENT;
  
  		spec->snap_id = snap_id;
  	} else {
  		spec->snap_id = CEPH_NOSNAP;
  	}
  
  	return 0;
  }
  
  /*
   * A parent image will have all ids but none of the names.
e1d4213f0   Alex Elder   rbd: set snapshot...
4274
   *
040775997   Ilya Dryomov   rbd: split rbd_de...
4275
4276
   * All names in an rbd spec are dynamically allocated.  It's OK if we
   * can't figure out the name for an image id.
9e15b77d9   Alex Elder   rbd: get addition...
4277
   */
040775997   Ilya Dryomov   rbd: split rbd_de...
4278
  static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
9e15b77d9   Alex Elder   rbd: get addition...
4279
  {
2e9f7f1c0   Alex Elder   rbd: refactor rbd...
4280
4281
4282
4283
4284
  	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
  	struct rbd_spec *spec = rbd_dev->spec;
  	const char *pool_name;
  	const char *image_name;
  	const char *snap_name;
9e15b77d9   Alex Elder   rbd: get addition...
4285
  	int ret;
040775997   Ilya Dryomov   rbd: split rbd_de...
4286
4287
4288
  	rbd_assert(spec->pool_id != CEPH_NOPOOL);
  	rbd_assert(spec->image_id);
  	rbd_assert(spec->snap_id != CEPH_NOSNAP);
9e15b77d9   Alex Elder   rbd: get addition...
4289

2e9f7f1c0   Alex Elder   rbd: refactor rbd...
4290
  	/* Get the pool name; we have to make our own copy of this */
9e15b77d9   Alex Elder   rbd: get addition...
4291

2e9f7f1c0   Alex Elder   rbd: refactor rbd...
4292
4293
4294
  	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
  	if (!pool_name) {
  		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
935dc89f3   Alex Elder   rbd: add warnings...
4295
4296
  		return -EIO;
  	}
2e9f7f1c0   Alex Elder   rbd: refactor rbd...
4297
4298
  	pool_name = kstrdup(pool_name, GFP_KERNEL);
  	if (!pool_name)
9e15b77d9   Alex Elder   rbd: get addition...
4299
4300
4301
  		return -ENOMEM;
  
  	/* Fetch the image name; tolerate failure here */
2e9f7f1c0   Alex Elder   rbd: refactor rbd...
4302
4303
  	image_name = rbd_dev_image_name(rbd_dev);
  	if (!image_name)
06ecc6cbf   Alex Elder   rbd: define and u...
4304
  		rbd_warn(rbd_dev, "unable to get image name");
9e15b77d9   Alex Elder   rbd: get addition...
4305

040775997   Ilya Dryomov   rbd: split rbd_de...
4306
  	/* Fetch the snapshot name */
9e15b77d9   Alex Elder   rbd: get addition...
4307

2e9f7f1c0   Alex Elder   rbd: refactor rbd...
4308
  	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
da6a6b639   Josh Durgin   rbd: fix error ha...
4309
4310
  	if (IS_ERR(snap_name)) {
  		ret = PTR_ERR(snap_name);
9e15b77d9   Alex Elder   rbd: get addition...
4311
  		goto out_err;
2e9f7f1c0   Alex Elder   rbd: refactor rbd...
4312
4313
4314
4315
4316
  	}
  
  	spec->pool_name = pool_name;
  	spec->image_name = image_name;
  	spec->snap_name = snap_name;
9e15b77d9   Alex Elder   rbd: get addition...
4317
4318
  
  	return 0;
040775997   Ilya Dryomov   rbd: split rbd_de...
4319

9e15b77d9   Alex Elder   rbd: get addition...
4320
  out_err:
2e9f7f1c0   Alex Elder   rbd: refactor rbd...
4321
4322
  	kfree(image_name);
  	kfree(pool_name);
9e15b77d9   Alex Elder   rbd: get addition...
4323
4324
  	return ret;
  }
cc4a38bdd   Alex Elder   rbd: more version...
4325
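  /*
   * Fetch the image's snapshot context via the "get_snapcontext" class
   * method and replace rbd_dev->header.snapc with the decoded result.
   */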
  static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
35d489f94   Alex Elder   rbd: get the snap...
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
  {
  	size_t size;
  	int ret;
  	void *reply_buf;
  	void *p;
  	void *end;
  	u64 seq;
  	u32 snap_count;
  	struct ceph_snap_context *snapc;
  	u32 i;
  
  	/*
  	 * We'll need room for the seq value (maximum snapshot id),
  	 * snapshot count, and array of that many snapshot ids.
  	 * For now we have a fixed upper limit on the number we're
  	 * prepared to receive.
  	 */
  	size = sizeof (__le64) + sizeof (__le32) +
  			RBD_MAX_SNAP_COUNT * sizeof (__le64);
  	reply_buf = kzalloc(size, GFP_KERNEL);
  	if (!reply_buf)
  		return -ENOMEM;
c41d13a31   Ilya Dryomov   rbd: use header_o...
4348
  	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4157976b2   Alex Elder   rbd: void data po...
4349
  				"rbd", "get_snapcontext", NULL, 0,
e2a58ee55   Alex Elder   rbd: drop rbd_obj...
4350
  				reply_buf, size);
36be9a761   Alex Elder   rbd: implement sy...
4351
4352
  	dout("%s: rbd_obj_method_sync returned %d
  ", __func__, ret);
35d489f94   Alex Elder   rbd: get the snap...
4353
4354
  	if (ret < 0)
  		goto out;
35d489f94   Alex Elder   rbd: get the snap...
4355
  	p = reply_buf;
57385b51c   Alex Elder   rbd: have rbd_obj...
4356
4357
  	end = reply_buf + ret;
  	ret = -ERANGE;
35d489f94   Alex Elder   rbd: get the snap...
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
  	ceph_decode_64_safe(&p, end, seq, out);
  	ceph_decode_32_safe(&p, end, snap_count, out);
  
  	/*
  	 * Make sure the reported number of snapshot ids wouldn't go
  	 * beyond the end of our buffer.  But before checking that,
  	 * make sure the computed size of the snapshot context we
  	 * allocate is representable in a size_t.
  	 */
  	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
  				 / sizeof (u64)) {
  		ret = -EINVAL;
  		goto out;
  	}
  	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
  		goto out;
  	ret = 0;

  	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
  	if (!snapc) {
  		ret = -ENOMEM;
  		goto out;
  	}
  	snapc->seq = seq;
  	for (i = 0; i < snap_count; i++)
  		snapc->snaps[i] = ceph_decode_64(&p);
  	ceph_put_snap_context(rbd_dev->header.snapc);
  	rbd_dev->header.snapc = snapc;
  
  	dout("  snap context seq = %llu, snap_count = %u
  ",
  		(unsigned long long)seq, (unsigned int)snap_count);
  out:
  	kfree(reply_buf);
  	return ret;
  }
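
/*
 * Illustration only (editorial note, not driver code): the
 * "get_snapcontext" reply decoded above is laid out as
 *
 *	__le64	seq;			maximum snapshot id
 *	__le32	snap_count;		number of snapshot ids
 *	__le64	snaps[snap_count];	the snapshot ids themselves
 *
 * so the size check before ceph_has_room() simply verifies that
 * snap_count ids actually fit in the reply buffer, which was sized
 * for at most RBD_MAX_SNAP_COUNT of them.
 */
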
  static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
  					u64 snap_id)
  {
  	size_t size;
  	void *reply_buf;
  	__le64 snapid;
  	int ret;
  	void *p;
  	void *end;
  	char *snap_name;
  
  	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
  	reply_buf = kmalloc(size, GFP_KERNEL);
  	if (!reply_buf)
  		return ERR_PTR(-ENOMEM);
  	snapid = cpu_to_le64(snap_id);
  	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
  				"rbd", "get_snapshot_name",
  				&snapid, sizeof (snapid),
  				reply_buf, size);
  	dout("%s: rbd_obj_method_sync returned %d
  ", __func__, ret);
  	if (ret < 0) {
  		snap_name = ERR_PTR(ret);
  		goto out;
  	}
  
  	p = reply_buf;
  	end = reply_buf + ret;
  	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
  	if (IS_ERR(snap_name))
  		goto out;

  	dout("  snap_id 0x%016llx snap_name = %s
  ",
  		(unsigned long long)snap_id, snap_name);
  out:
  	kfree(reply_buf);
  	return snap_name;
  }
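
/*
 * Illustration only: the "get_snapshot_name" reply parsed above is a
 * ceph-encoded string, i.e. a __le32 length followed by that many
 * bytes.  A snapshot named "snap1" would come back as the length 5
 * followed by the characters 's' 'n' 'a' 'p' '1', which
 * ceph_extract_encoded_string() copies into a NUL-terminated,
 * kmalloc'd buffer (the example name is made up).
 */
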
  static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
  {
  	bool first_time = rbd_dev->header.object_prefix == NULL;
  	int ret;

  	ret = rbd_dev_v2_image_size(rbd_dev);
  	if (ret)
  		return ret;

  	if (first_time) {
  		ret = rbd_dev_v2_header_onetime(rbd_dev);
  		if (ret)
  			return ret;
  	}
  	ret = rbd_dev_v2_snap_context(rbd_dev);
  	if (ret && first_time) {
  		kfree(rbd_dev->header.object_prefix);
  		rbd_dev->header.object_prefix = NULL;
  	}
  
  	return ret;
  }
  static int rbd_dev_header_info(struct rbd_device *rbd_dev)
  {
  	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
  
  	if (rbd_dev->image_format == 1)
  		return rbd_dev_v1_header_info(rbd_dev);
  
  	return rbd_dev_v2_header_info(rbd_dev);
  }
  /*
   * Get a unique rbd identifier for the given new rbd_dev, and add
   * the rbd_dev to the global list.
   */
  static int rbd_dev_id_get(struct rbd_device *rbd_dev)
  {
  	int new_dev_id;
  	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
  				    0, minor_to_rbd_dev_id(1 << MINORBITS),
  				    GFP_KERNEL);
  	if (new_dev_id < 0)
  		return new_dev_id;
  
  	rbd_dev->dev_id = new_dev_id;
  
  	spin_lock(&rbd_dev_list_lock);
  	list_add_tail(&rbd_dev->node, &rbd_dev_list);
  	spin_unlock(&rbd_dev_list_lock);

  	dout("rbd_dev %p given dev id %d
  ", rbd_dev, rbd_dev->dev_id);
  
  	return 0;
  }

  /*
   * Remove an rbd_dev from the global list, and record that its
   * identifier is no longer in use.
   */
  static void rbd_dev_id_put(struct rbd_device *rbd_dev)
  {
  	spin_lock(&rbd_dev_list_lock);
  	list_del_init(&rbd_dev->node);
  	spin_unlock(&rbd_dev_list_lock);

  	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
  
  	dout("rbd_dev %p released dev id %d
  ", rbd_dev, rbd_dev->dev_id);
  }
  /*
   * Skips over white space at *buf, and updates *buf to point to the
   * first found non-space character (if any). Returns the length of
   * the token (string of non-white space characters) found.  Note
   * that *buf must be terminated with '\0'.
   */
  static inline size_t next_token(const char **buf)
  {
          /*
          * These are the characters that produce nonzero for
          * isspace() in the "C" and "POSIX" locales.
          */
        const char *spaces = " \f\n\r\t\v";
  
          *buf += strspn(*buf, spaces);	/* Find start of token */
  
  	return strcspn(*buf, spaces);   /* Return token length */
  }
  
  /*
   * Finds the next token in *buf, dynamically allocates a buffer big
   * enough to hold a copy of it, and copies the token into the new
   * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
   * that a duplicate buffer is created even for a zero-length token.
   *
   * Returns a pointer to the newly-allocated duplicate, or a null
   * pointer if memory for the duplicate was not available.  If
   * the lenp argument is a non-null pointer, the length of the token
   * (not including the '\0') is returned in *lenp.
   *
   * If successful, the *buf pointer will be updated to point beyond
   * the end of the found token.
   *
   * Note: uses GFP_KERNEL for allocation.
   */
  static inline char *dup_token(const char **buf, size_t *lenp)
  {
  	char *dup;
  	size_t len;
  
  	len = next_token(buf);
  	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
  	if (!dup)
  		return NULL;
  	*(dup + len) = '\0';
  	*buf += len;
  
  	if (lenp)
  		*lenp = len;
  
  	return dup;
  }
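
/*
 * Illustration only: with *buf pointing at "  rbd foo -" (a made-up
 * example), next_token() advances *buf past the two leading spaces
 * and returns 3, the length of "rbd"; dup_token() additionally copies
 * those three characters into a freshly allocated, NUL-terminated
 * string and leaves *buf just past "rbd", ready for the next call.
 */
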
  
  /*
   * Parse the options provided for an "rbd add" (i.e., rbd image
   * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
   * and the data written is passed here via a NUL-terminated buffer.
   * Returns 0 if successful or an error code otherwise.
   *
   * The information extracted from these options is recorded in
   * the other parameters which return dynamically-allocated
   * structures:
   *  ceph_opts
   *      The address of a pointer that will refer to a ceph options
   *      structure.  Caller must release the returned pointer using
   *      ceph_destroy_options() when it is no longer needed.
   *  rbd_opts
   *	Address of an rbd options pointer.  Fully initialized by
   *	this function; caller must release with kfree().
   *  spec
   *	Address of an rbd image specification pointer.  Fully
   *	initialized by this function based on parsed options.
   *	Caller must release with rbd_spec_put().
   *
   * The options passed take this form:
   *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
   * where:
   *  <mon_addrs>
   *      A comma-separated list of one or more monitor addresses.
   *      A monitor address is an ip address, optionally followed
   *      by a port number (separated by a colon).
   *        I.e.:  ip1[:port1][,ip2[:port2]...]
   *  <options>
   *      A comma-separated list of ceph and/or rbd options.
   *  <pool_name>
   *      The name of the rados pool containing the rbd image.
   *  <image_name>
   *      The name of the image in that pool to map.
   *  <snap_id>
   *      An optional snapshot id.  If provided, the mapping will
   *      present data from the image at the time that snapshot was
   *      created.  The image head is used if no snapshot id is
   *      provided.  Snapshot mappings are always read-only.
   */
  static int rbd_add_parse_args(const char *buf,
  				struct ceph_options **ceph_opts,
  				struct rbd_options **opts,
  				struct rbd_spec **rbd_spec)
  {
  	size_t len;
  	char *options;
  	const char *mon_addrs;
  	char *snap_name;
  	size_t mon_addrs_size;
  	struct rbd_spec *spec = NULL;
  	struct rbd_options *rbd_opts = NULL;
  	struct ceph_options *copts;
  	int ret;
  
  	/* The first four tokens are required */
  	len = next_token(&buf);
  	if (!len) {
  		rbd_warn(NULL, "no monitor address(es) provided");
  		return -EINVAL;
  	}
  	mon_addrs = buf;
  	mon_addrs_size = len + 1;
  	buf += len;

  	ret = -EINVAL;
  	options = dup_token(&buf, NULL);
  	if (!options)
  		return -ENOMEM;
  	if (!*options) {
  		rbd_warn(NULL, "no options provided");
  		goto out_err;
  	}

  	spec = rbd_spec_alloc();
  	if (!spec)
  		goto out_mem;
  
  	spec->pool_name = dup_token(&buf, NULL);
  	if (!spec->pool_name)
  		goto out_mem;
  	if (!*spec->pool_name) {
  		rbd_warn(NULL, "no pool name provided");
  		goto out_err;
  	}

  	spec->image_name = dup_token(&buf, NULL);
  	if (!spec->image_name)
  		goto out_mem;
  	if (!*spec->image_name) {
  		rbd_warn(NULL, "no image name provided");
  		goto out_err;
  	}

  	/*
  	 * Snapshot name is optional; default is to use "-"
  	 * (indicating the head/no snapshot).
  	 */
  	len = next_token(&buf);
  	if (!len) {
  		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
  		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
  	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
  		ret = -ENAMETOOLONG;
  		goto out_err;
  	}
  	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
  	if (!snap_name)
  		goto out_mem;
  	*(snap_name + len) = '\0';
  	spec->snap_name = snap_name;

  	/* Initialize all rbd options to the defaults */

  	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
  	if (!rbd_opts)
  		goto out_mem;
  
  	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
  	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;

  	copts = ceph_parse_options(options, mon_addrs,
  					mon_addrs + mon_addrs_size - 1,
  					parse_rbd_opts_token, rbd_opts);
  	if (IS_ERR(copts)) {
  		ret = PTR_ERR(copts);
  		goto out_err;
  	}
  	kfree(options);
  
  	*ceph_opts = copts;
  	*opts = rbd_opts;
  	*rbd_spec = spec;

  	return 0;
  out_mem:
  	ret = -ENOMEM;
  out_err:
  	kfree(rbd_opts);
  	rbd_spec_put(spec);
  	kfree(options);

  	return ret;
  }
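
/*
 * Illustration only: a buffer handed to rbd_add_parse_args() might be
 *
 *	1.2.3.4:6789 name=admin,secret=<key> rbd myimage -
 *
 * which splits into mon_addrs "1.2.3.4:6789", the option string
 * "name=admin,secret=<key>" (handed to ceph_parse_options() and
 * parse_rbd_opts_token()), pool "rbd", image "myimage", and the
 * default "-" snapshot token meaning "map the image head".  The
 * address, credentials and names are made-up examples.
 */
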
  /*
   * Return pool id (>= 0) or a negative error code.
   */
  static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
  {
  	struct ceph_options *opts = rbdc->client->options;
  	u64 newest_epoch;
  	int tries = 0;
  	int ret;
  
  again:
  	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
  	if (ret == -ENOENT && tries++ < 1) {
  		ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
  					    &newest_epoch);
  		if (ret < 0)
  			return ret;
  
  		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
  			ceph_osdc_maybe_request_map(&rbdc->client->osdc);
  			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
  						     newest_epoch,
  						     opts->mount_timeout);
  			goto again;
  		} else {
  			/* the osdmap we have is new enough */
  			return -ENOENT;
  		}
  	}
  
  	return ret;
  }
  
  /*
   * An rbd format 2 image has a unique identifier, distinct from the
   * name given to it by the user.  Internally, that identifier is
   * what's used to specify the names of objects related to the image.
   *
   * A special "rbd id" object is used to map an rbd image name to its
   * id.  If that object doesn't exist, then there is no v2 rbd image
   * with the supplied name.
   *
   * This function will record the given rbd_dev's image_id field if
   * it can be determined, and in that case will return 0.  If any
   * errors occur a negative errno will be returned and the rbd_dev's
   * image_id field will be unchanged (and should be NULL).
   */
  static int rbd_dev_image_id(struct rbd_device *rbd_dev)
  {
  	int ret;
  	size_t size;
  	char *object_name;
  	void *response;
  	char *image_id;

  	/*
  	 * When probing a parent image, the image id is already
  	 * known (and the image name likely is not).  There's no
  	 * need to fetch the image id again in this case.  We
  	 * do still need to set the image format though.
  	 */
  	if (rbd_dev->spec->image_id) {
  		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
  		return 0;
  	}
  
  	/*
  	 * First, see if the format 2 image id file exists, and if
  	 * so, get the image's persistent id from it.
  	 */
  	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
  	object_name = kmalloc(size, GFP_NOIO);
  	if (!object_name)
  		return -ENOMEM;
  	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
  	dout("rbd id object name is %s
  ", object_name);
  
  	/* Response will be an encoded string, which includes a length */
  
  	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
  	response = kzalloc(size, GFP_NOIO);
  	if (!response) {
  		ret = -ENOMEM;
  		goto out;
  	}
  	/* If it doesn't exist we'll assume it's a format 1 image */
  	ret = rbd_obj_method_sync(rbd_dev, object_name,
  				"rbd", "get_id", NULL, 0,
  				response, RBD_IMAGE_ID_LEN_MAX);
  	dout("%s: rbd_obj_method_sync returned %d
  ", __func__, ret);
  	if (ret == -ENOENT) {
  		image_id = kstrdup("", GFP_KERNEL);
  		ret = image_id ? 0 : -ENOMEM;
  		if (!ret)
  			rbd_dev->image_format = 1;
  	} else if (ret >= 0) {
  		void *p = response;
  
  		image_id = ceph_extract_encoded_string(&p, p + ret,
  						NULL, GFP_NOIO);
  		ret = PTR_ERR_OR_ZERO(image_id);
  		if (!ret)
  			rbd_dev->image_format = 2;
  	}
  
  	if (!ret) {
  		rbd_dev->spec->image_id = image_id;
  		dout("image_id is %s
  ", image_id);
  	}
  out:
  	kfree(response);
  	kfree(object_name);
  
  	return ret;
  }
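
/*
 * Illustration only: for a format 2 image named "myimage" the call
 * above invokes the "get_id" method on the object named
 * RBD_ID_PREFIX "myimage" and records the returned id string (say
 * "1018e2b5763c", a made-up value) in spec->image_id.  A format 1
 * image has no such object, so the -ENOENT branch stores an empty
 * image id and sets image_format to 1 instead.
 */
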
  /*
   * Undo whatever state changes are made by v1 or v2 header info
   * call.
   */
  static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
  {
  	struct rbd_image_header	*header;
  	rbd_dev_parent_put(rbd_dev);
  
  	/* Free dynamic fields from the header, then zero it out */
  
  	header = &rbd_dev->header;
  	ceph_put_snap_context(header->snapc);
  	kfree(header->snap_sizes);
  	kfree(header->snap_names);
  	kfree(header->object_prefix);
  	memset(header, 0, sizeof (*header));
  }
  static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
  {
  	int ret;

  	ret = rbd_dev_v2_object_prefix(rbd_dev);
  	if (ret)
  		goto out_err;

  	/*
	 * Get and check the features for the image.  Currently the
  	 * features are assumed to never change.
  	 */
  	ret = rbd_dev_v2_features(rbd_dev);
  	if (ret)
  		goto out_err;

  	/* If the image supports fancy striping, get its parameters */
  
  	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
  		ret = rbd_dev_v2_striping_info(rbd_dev);
  		if (ret < 0)
  			goto out_err;
  	}
  	/* No support for crypto and compression type format 2 images */

  	return 0;
  out_err:
  	rbd_dev->header.features = 0;
  	kfree(rbd_dev->header.object_prefix);
  	rbd_dev->header.object_prefix = NULL;
  
  	return ret;
  }
  /*
   * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
   * rbd_dev_image_probe() recursion depth, which means it's also the
   * length of the already discovered part of the parent chain.
   */
  static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
  {
  	struct rbd_device *parent = NULL;
  	int ret;
  
  	if (!rbd_dev->parent_spec)
  		return 0;

  	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
  		pr_info("parent chain is too long (%d)
  ", depth);
  		ret = -EINVAL;
  		goto out_err;
  	}
  	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
  				NULL);
  	if (!parent) {
  		ret = -ENOMEM;
  		goto out_err;
  	}
  
  	/*
  	 * Images related by parent/child relationships always share
  	 * rbd_client and spec/parent_spec, so bump their refcounts.
  	 */
  	__rbd_get_client(rbd_dev->rbd_client);
  	rbd_spec_get(rbd_dev->parent_spec);

  	ret = rbd_dev_image_probe(parent, depth);
  	if (ret < 0)
  		goto out_err;

  	rbd_dev->parent = parent;
  	atomic_set(&rbd_dev->parent_ref, 1);
  	return 0;

  out_err:
  	rbd_dev_unparent(rbd_dev);
  	rbd_dev_destroy(parent);
  	return ret;
  }
  /*
   * rbd_dev->header_rwsem must be locked for write and will be unlocked
   * upon return.
   */
  static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
  {
  	int ret;

  	/* Get an id and fill in device name. */
  
  	ret = rbd_dev_id_get(rbd_dev);
  	if (ret)
  		goto err_out_unlock;

  	BUILD_BUG_ON(DEV_NAME_LEN
  			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
  	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
  	/* Record our major and minor device numbers. */

  	if (!single_major) {
  		ret = register_blkdev(0, rbd_dev->name);
  		if (ret < 0)
  			goto err_out_id;
  
  		rbd_dev->major = ret;
  		rbd_dev->minor = 0;
  	} else {
  		rbd_dev->major = rbd_major;
  		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
  	}
  
  	/* Set up the blkdev mapping. */
  
  	ret = rbd_init_disk(rbd_dev);
  	if (ret)
  		goto err_out_blkdev;
  	ret = rbd_dev_mapping_set(rbd_dev);
  	if (ret)
  		goto err_out_disk;

  	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
  	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

  	dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
  	ret = device_add(&rbd_dev->dev);
  	if (ret)
  		goto err_out_mapping;

  	/* Everything's ready.  Announce the disk to the world. */
  	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
  	up_write(&rbd_dev->header_rwsem);

  	add_disk(rbd_dev->disk);
  	pr_info("%s: added with size 0x%llx
  ", rbd_dev->disk->disk_name,
  		(unsigned long long) rbd_dev->mapping.size);
  
  	return ret;

  err_out_mapping:
  	rbd_dev_mapping_clear(rbd_dev);
  err_out_disk:
  	rbd_free_disk(rbd_dev);
  err_out_blkdev:
  	if (!single_major)
  		unregister_blkdev(rbd_dev->major, rbd_dev->name);
  err_out_id:
  	rbd_dev_id_put(rbd_dev);
  err_out_unlock:
  	up_write(&rbd_dev->header_rwsem);
  	return ret;
  }
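
/*
 * Illustration only: a successful setup for dev_id 1 produces a disk
 * named "rbd1".  Without single_major the device gets its own
 * dynamically registered major and minor 0; with single_major every
 * mapping shares rbd_major and the minor comes from
 * rbd_dev_id_to_minor().  The dev_id value is only an example.
 */
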
  static int rbd_dev_header_name(struct rbd_device *rbd_dev)
  {
  	struct rbd_spec *spec = rbd_dev->spec;
  	int ret;
  
  	/* Record the header object name for this rbd image. */
  
  	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
  	rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
  	if (rbd_dev->image_format == 1)
  		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
  				       spec->image_name, RBD_SUFFIX);
  	else
  		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
  				       RBD_HEADER_PREFIX, spec->image_id);

  	return ret;
  }
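
/*
 * Illustration only, assuming the usual prefix/suffix definitions in
 * rbd_types.h: a format 1 image named "foo" gets the header object
 * "foo" RBD_SUFFIX (i.e. "foo.rbd"), while a format 2 image whose
 * image_id is "1018e2b5763c" (made up) gets
 * RBD_HEADER_PREFIX "1018e2b5763c" (i.e. "rbd_header.1018e2b5763c").
 */
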
  static void rbd_dev_image_release(struct rbd_device *rbd_dev)
  {
  	rbd_dev_unprobe(rbd_dev);
  	rbd_dev->image_format = 0;
  	kfree(rbd_dev->spec->image_id);
  	rbd_dev->spec->image_id = NULL;
  	rbd_dev_destroy(rbd_dev);
  }
  /*
   * Probe for the existence of the header object for the given rbd
   * device.  If this image is the one being mapped (i.e., not a
   * parent), initiate a watch on its header object before using that
   * object to get detailed information about the rbd image.
   */
  static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
  {
  	int ret;
  
  	/*
  	 * Get the id from the image id object.  Unless there's an
  	 * error, rbd_dev->spec->image_id will be filled in with
  	 * a dynamically-allocated string, and rbd_dev->image_format
  	 * will be set to either 1 or 2.
  	 */
  	ret = rbd_dev_image_id(rbd_dev);
  	if (ret)
  		return ret;

  	ret = rbd_dev_header_name(rbd_dev);
  	if (ret)
  		goto err_out_format;
  	if (!depth) {
  		ret = rbd_dev_header_watch_sync(rbd_dev);
  		if (ret) {
  			if (ret == -ENOENT)
  				pr_info("image %s/%s does not exist
  ",
  					rbd_dev->spec->pool_name,
  					rbd_dev->spec->image_name);
  			goto err_out_format;
  		}
  	}

  	ret = rbd_dev_header_info(rbd_dev);
  	if (ret)
  		goto err_out_watch;

  	/*
  	 * If this image is the one being mapped, we have pool name and
  	 * id, image name and id, and snap name - need to fill snap id.
  	 * Otherwise this is a parent image, identified by pool, image
  	 * and snap ids - need to fill in names for those ids.
  	 */
  	if (!depth)
  		ret = rbd_spec_fill_snap_id(rbd_dev);
  	else
  		ret = rbd_spec_fill_names(rbd_dev);
  	if (ret) {
  		if (ret == -ENOENT)
  			pr_info("snap %s/%s@%s does not exist
  ",
  				rbd_dev->spec->pool_name,
  				rbd_dev->spec->image_name,
  				rbd_dev->spec->snap_name);
  		goto err_out_probe;
  	}

  	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
  		ret = rbd_dev_v2_parent_info(rbd_dev);
  		if (ret)
  			goto err_out_probe;
  
  		/*
  		 * Need to warn users if this image is the one being
  		 * mapped and has a parent.
  		 */
  		if (!depth && rbd_dev->parent_spec)
  			rbd_warn(rbd_dev,
  				 "WARNING: kernel layering is EXPERIMENTAL!");
  	}
  	ret = rbd_dev_probe_parent(rbd_dev, depth);
  	if (ret)
  		goto err_out_probe;
  
  	dout("discovered format %u image, header name is %s
  ",
  		rbd_dev->image_format, rbd_dev->header_oid.name);
  	return 0;

  err_out_probe:
  	rbd_dev_unprobe(rbd_dev);
  err_out_watch:
  	if (!depth)
  		rbd_dev_header_unwatch_sync(rbd_dev);
  err_out_format:
  	rbd_dev->image_format = 0;
  	kfree(rbd_dev->spec->image_id);
  	rbd_dev->spec->image_id = NULL;
  	return ret;
  }
  static ssize_t do_rbd_add(struct bus_type *bus,
  			  const char *buf,
  			  size_t count)
  {
  	struct rbd_device *rbd_dev = NULL;
  	struct ceph_options *ceph_opts = NULL;
  	struct rbd_options *rbd_opts = NULL;
  	struct rbd_spec *spec = NULL;
  	struct rbd_client *rbdc;
  	bool read_only;
  	int rc;
  
  	if (!try_module_get(THIS_MODULE))
  		return -ENODEV;
  	/* parse add command */
  	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
  	if (rc < 0)
  		goto out;

  	rbdc = rbd_get_client(ceph_opts);
  	if (IS_ERR(rbdc)) {
  		rc = PTR_ERR(rbdc);
  		goto err_out_args;
  	}

  	/* pick the pool */
  	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
  	if (rc < 0) {
  		if (rc == -ENOENT)
  			pr_info("pool %s does not exist
  ", spec->pool_name);
  		goto err_out_client;
  	}
  	spec->pool_id = (u64)rc;

  	/* The ceph file layout needs to fit pool id in 32 bits */
  	if (spec->pool_id > (u64)U32_MAX) {
  		rbd_warn(NULL, "pool id too large (%llu > %u)",
  				(unsigned long long)spec->pool_id, U32_MAX);
  		rc = -EIO;
  		goto err_out_client;
  	}
  	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
  	if (!rbd_dev) {
  		rc = -ENOMEM;
  		goto err_out_client;
  	}
  	rbdc = NULL;		/* rbd_dev now owns this */
  	spec = NULL;		/* rbd_dev now owns this */
  	rbd_opts = NULL;	/* rbd_dev now owns this */

  	down_write(&rbd_dev->header_rwsem);
  	rc = rbd_dev_image_probe(rbd_dev, 0);
  	if (rc < 0)
  		goto err_out_rbd_dev;

  	/* If we are mapping a snapshot it must be marked read-only */
  	read_only = rbd_dev->opts->read_only;
  	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
  		read_only = true;
  	rbd_dev->mapping.read_only = read_only;
  	rc = rbd_dev_device_setup(rbd_dev);
  	if (rc) {
  		/*
  		 * rbd_dev_header_unwatch_sync() can't be moved into
  		 * rbd_dev_image_release() without refactoring, see
  		 * commit 1f3ef78861ac.
  		 */
  		rbd_dev_header_unwatch_sync(rbd_dev);
  		rbd_dev_image_release(rbd_dev);
  		goto out;
  	}
  	rc = count;
  out:
  	module_put(THIS_MODULE);
  	return rc;

  err_out_rbd_dev:
  	up_write(&rbd_dev->header_rwsem);
  	rbd_dev_destroy(rbd_dev);
  err_out_client:
  	rbd_put_client(rbdc);
  err_out_args:
  	rbd_spec_put(spec);
  	kfree(rbd_opts);
  	goto out;
  }
  static ssize_t rbd_add(struct bus_type *bus,
  		       const char *buf,
  		       size_t count)
  {
  	if (single_major)
  		return -EINVAL;
  
  	return do_rbd_add(bus, buf, count);
  }
  
  static ssize_t rbd_add_single_major(struct bus_type *bus,
  				    const char *buf,
  				    size_t count)
  {
  	return do_rbd_add(bus, buf, count);
  }
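
/*
 * Illustration only: from userspace a mapping request is a single
 * write to the bus attribute backed by these handlers, e.g.
 *
 *	# echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage -" \
 *		> /sys/bus/rbd/add
 *
 * (or to add_single_major when the driver runs in single_major mode).
 * The address, credentials and names are made-up examples.
 */
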
  static void rbd_dev_device_release(struct rbd_device *rbd_dev)
  {
  	rbd_free_disk(rbd_dev);
  	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
  	device_del(&rbd_dev->dev);
  	rbd_dev_mapping_clear(rbd_dev);
  	if (!single_major)
  		unregister_blkdev(rbd_dev->major, rbd_dev->name);
  	rbd_dev_id_put(rbd_dev);
  }
  static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
  {
  	while (rbd_dev->parent) {
  		struct rbd_device *first = rbd_dev;
  		struct rbd_device *second = first->parent;
  		struct rbd_device *third;
  
  		/*
  		 * Follow to the parent with no grandparent and
  		 * remove it.
  		 */
  		while (second && (third = second->parent)) {
  			first = second;
  			second = third;
  		}
  		rbd_assert(second);
  		rbd_dev_image_release(second);
  		first->parent = NULL;
  		first->parent_overlap = 0;
  
  		rbd_assert(first->parent_spec);
  		rbd_spec_put(first->parent_spec);
  		first->parent_spec = NULL;
  	}
  }
  static ssize_t do_rbd_remove(struct bus_type *bus,
  			     const char *buf,
  			     size_t count)
  {
  	struct rbd_device *rbd_dev = NULL;
  	struct list_head *tmp;
  	int dev_id;
  	unsigned long ul;
  	bool already = false;
  	int ret;

  	ret = kstrtoul(buf, 10, &ul);
  	if (ret)
  		return ret;
  
  	/* convert to int; abort if we lost anything in the conversion */
  	dev_id = (int)ul;
  	if (dev_id != ul)
  		return -EINVAL;
  	ret = -ENOENT;
  	spin_lock(&rbd_dev_list_lock);
  	list_for_each(tmp, &rbd_dev_list) {
  		rbd_dev = list_entry(tmp, struct rbd_device, node);
  		if (rbd_dev->dev_id == dev_id) {
  			ret = 0;
  			break;
  		}
  	}
  	if (!ret) {
  		spin_lock_irq(&rbd_dev->lock);
  		if (rbd_dev->open_count)
  			ret = -EBUSY;
  		else
  			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
  							&rbd_dev->flags);
  		spin_unlock_irq(&rbd_dev->lock);
  	}
  	spin_unlock(&rbd_dev_list_lock);
  	if (ret < 0 || already)
  		return ret;

  	rbd_dev_header_unwatch_sync(rbd_dev);

  	/*
  	 * Don't free anything from rbd_dev->disk until after all
  	 * notifies are completely processed. Otherwise
  	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
  	 * in a potential use after free of rbd_dev->disk or rbd_dev.
  	 */
  	rbd_dev_device_release(rbd_dev);
  	rbd_dev_image_release(rbd_dev);

  	return count;
  }
  static ssize_t rbd_remove(struct bus_type *bus,
  			  const char *buf,
  			  size_t count)
  {
  	if (single_major)
  		return -EINVAL;
  
  	return do_rbd_remove(bus, buf, count);
  }
  
  static ssize_t rbd_remove_single_major(struct bus_type *bus,
  				       const char *buf,
  				       size_t count)
  {
  	return do_rbd_remove(bus, buf, count);
  }
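
/*
 * Illustration only: unmapping device id 1 is the corresponding write
 *
 *	# echo 1 > /sys/bus/rbd/remove
 *
 * (or remove_single_major in single_major mode).  As implemented in
 * do_rbd_remove() above, the write fails with -ENOENT if no mapping
 * has that id and with -EBUSY while the block device is still open.
 */
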
  /*
   * create control files in sysfs
   * /sys/bus/rbd/...
   */
  static int rbd_sysfs_init(void)
  {
  	int ret;

  	ret = device_register(&rbd_root_dev);
  	if (ret < 0)
  		return ret;

  	ret = bus_register(&rbd_bus_type);
  	if (ret < 0)
  		device_unregister(&rbd_root_dev);

  	return ret;
  }
  
  static void rbd_sysfs_cleanup(void)
  {
  	bus_unregister(&rbd_bus_type);
  	device_unregister(&rbd_root_dev);
  }
  static int rbd_slab_init(void)
  {
  	rbd_assert(!rbd_img_request_cache);
  	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
  	if (!rbd_img_request_cache)
  		return -ENOMEM;
  
  	rbd_assert(!rbd_obj_request_cache);
  	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
  	if (!rbd_obj_request_cache)
  		goto out_err;
  
  	rbd_assert(!rbd_segment_name_cache);
  	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
  					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
  	if (rbd_segment_name_cache)
  		return 0;
  out_err:
  	kmem_cache_destroy(rbd_obj_request_cache);
  	rbd_obj_request_cache = NULL;

  	kmem_cache_destroy(rbd_img_request_cache);
  	rbd_img_request_cache = NULL;
  	return -ENOMEM;
  }
  
  static void rbd_slab_exit(void)
  {
  	rbd_assert(rbd_segment_name_cache);
  	kmem_cache_destroy(rbd_segment_name_cache);
  	rbd_segment_name_cache = NULL;
  	rbd_assert(rbd_obj_request_cache);
  	kmem_cache_destroy(rbd_obj_request_cache);
  	rbd_obj_request_cache = NULL;
  	rbd_assert(rbd_img_request_cache);
  	kmem_cache_destroy(rbd_img_request_cache);
  	rbd_img_request_cache = NULL;
  }
  static int __init rbd_init(void)
  {
  	int rc;
  	if (!libceph_compatible(NULL)) {
  		rbd_warn(NULL, "libceph incompatibility (quitting)");
  		return -EINVAL;
  	}

  	rc = rbd_slab_init();
  	if (rc)
  		return rc;

  	/*
  	 * The number of active work items is limited by the number of
  	 * rbd devices * queue depth, so leave @max_active at default.
  	 */
  	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
  	if (!rbd_wq) {
  		rc = -ENOMEM;
  		goto err_out_slab;
  	}
  	if (single_major) {
  		rbd_major = register_blkdev(0, RBD_DRV_NAME);
  		if (rbd_major < 0) {
  			rc = rbd_major;
  			goto err_out_wq;
  		}
  	}
  	rc = rbd_sysfs_init();
  	if (rc)
  		goto err_out_blkdev;
  
  	if (single_major)
  		pr_info("loaded (major %d)
  ", rbd_major);
  	else
  		pr_info("loaded
  ");

  	return 0;
  err_out_blkdev:
  	if (single_major)
  		unregister_blkdev(rbd_major, RBD_DRV_NAME);
  err_out_wq:
  	destroy_workqueue(rbd_wq);
  err_out_slab:
  	rbd_slab_exit();
  	return rc;
  }
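
/*
 * Illustration only: "modprobe rbd" uses a dynamically allocated major
 * per mapped image, while "modprobe rbd single_major=Y" registers one
 * shared major up front and routes mapping requests through the
 * *_single_major attributes.  This assumes the single_major module
 * parameter declared earlier in this file.
 */
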
  static void __exit rbd_exit(void)
  {
  	ida_destroy(&rbd_dev_id_ida);
  	rbd_sysfs_cleanup();
  	if (single_major)
  		unregister_blkdev(rbd_major, RBD_DRV_NAME);
  	destroy_workqueue(rbd_wq);
  	rbd_slab_exit();
  }
  
  module_init(rbd_init);
  module_exit(rbd_exit);
  MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
  MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
  MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
  /* following authorship retained from original osdblk.c */
  MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
  MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
  MODULE_LICENSE("GPL");