drivers/lightnvm/rrpc.c

  /*
   * Copyright (C) 2015 IT University of Copenhagen
   * Initial release: Matias Bjorling <m@bjorling.me>
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License version
   * 2 as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful, but
   * WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * General Public License for more details.
   *
   * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
   */
  
  #include "rrpc.h"
  
  static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
  static DECLARE_RWSEM(rrpc_lock);
  
  static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
  				struct nvm_rq *rqd, unsigned long flags);
  
  #define rrpc_for_each_lun(rrpc, rlun, i) \
  		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
  			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
  
  static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
  {
  	struct rrpc_block *rblk = a->rblk;
  	unsigned int pg_offset;
  
  	lockdep_assert_held(&rrpc->rev_lock);
  
  	if (a->addr == ADDR_EMPTY || !rblk)
  		return;
  
  	spin_lock(&rblk->lock);
  	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
  	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
  	rblk->nr_invalid_pages++;
  
  	spin_unlock(&rblk->lock);
  
  	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
  }
  
  static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
  							unsigned int len)
  {
  	sector_t i;
  
  	spin_lock(&rrpc->rev_lock);
  	for (i = slba; i < slba + len; i++) {
  		struct rrpc_addr *gp = &rrpc->trans_map[i];
  
  		rrpc_page_invalidate(rrpc, gp);
  		gp->rblk = NULL;
  	}
  	spin_unlock(&rrpc->rev_lock);
  }
  
  static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
  					sector_t laddr, unsigned int pages)
  {
  	struct nvm_rq *rqd;
  	struct rrpc_inflight_rq *inf;
  
  	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
  	if (!rqd)
  		return ERR_PTR(-ENOMEM);
  
  	inf = rrpc_get_inflight_rq(rqd);
  	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
  		mempool_free(rqd, rrpc->rq_pool);
  		return NULL;
  	}
  
  	return rqd;
  }
  
  static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
  {
  	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
  
  	rrpc_unlock_laddr(rrpc, inf);
  
  	mempool_free(rqd, rrpc->rq_pool);
  }
  
  static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
  {
  	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
  	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
  	struct nvm_rq *rqd;
  	while (1) {
  		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
  		if (rqd)
  			break;
  		schedule();
  	}
  
  	if (IS_ERR(rqd)) {
  		pr_err("rrpc: unable to acquire inflight IO
  ");
  		bio_io_error(bio);
  		return;
  	}
  
  	rrpc_invalidate_range(rrpc, slba, len);
  	rrpc_inflight_laddr_release(rrpc, rqd);
  }
  
  static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
  	return (rblk->next_page == rrpc->dev->sec_per_blk);
  }
  /* Calculate relative addr for the given block, considering instantiated LUNs */
  static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
  	struct nvm_block *blk = rblk->parent;
  	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
  
  	return lun_blk * rrpc->dev->sec_per_blk;
  }
  
  /* Calculate global addr for the given block */
  static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
  	struct nvm_block *blk = rblk->parent;
  	return blk->id * rrpc->dev->sec_per_blk;
  }
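/* Translate a linear device address into its generic geometry (sector, page,
 * block, LUN, channel) by peeling one dimension off at a time.
 */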
  static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
  							struct ppa_addr r)
  {
  	struct ppa_addr l;
  	int secs, pgs, blks, luns;
  	sector_t ppa = r.ppa;
  
  	l.ppa = 0;
  
  	div_u64_rem(ppa, dev->sec_per_pg, &secs);
  	l.g.sec = secs;
  
  	sector_div(ppa, dev->sec_per_pg);
  	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
  	l.g.pg = pgs;
  
  	sector_div(ppa, dev->pgs_per_blk);
  	div_u64_rem(ppa, dev->blks_per_lun, &blks);
  	l.g.blk = blks;
  
  	sector_div(ppa, dev->blks_per_lun);
  	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
  	l.g.lun = luns;
  
  	sector_div(ppa, dev->luns_per_chnl);
  	l.g.ch = ppa;
  
  	return l;
  }
  static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
  {
  	struct ppa_addr paddr;
  
  	paddr.ppa = addr;
  	return linear_to_generic_addr(dev, paddr);
  }
  
  /* requires lun->lock taken */
  static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
  						struct rrpc_block **cur_rblk)
  {
  	struct rrpc *rrpc = rlun->rrpc;
  	if (*cur_rblk) {
  		spin_lock(&(*cur_rblk)->lock);
  		WARN_ON(!block_is_full(rrpc, *cur_rblk));
  		spin_unlock(&(*cur_rblk)->lock);
  	}
  	*cur_rblk = new_rblk;
  }
  
  static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
  							unsigned long flags)
  {
  	struct nvm_block *blk;
  	struct rrpc_block *rblk;
  	blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
  	if (!blk) {
  		pr_err("nvm: rrpc: cannot get new block from media manager
  ");
  		return NULL;
  	}

  	rblk = rrpc_get_rblk(rlun, blk->id);
  	blk->priv = rblk;
  	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
  	rblk->next_page = 0;
  	rblk->nr_invalid_pages = 0;
  	atomic_set(&rblk->data_cmnt_size, 0);
  
  	return rblk;
  }
  
  static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
  	nvm_put_blk(rrpc->dev, rblk->parent);
  }
  static void rrpc_put_blks(struct rrpc *rrpc)
  {
  	struct rrpc_lun *rlun;
  	int i;
  
  	for (i = 0; i < rrpc->nr_luns; i++) {
  		rlun = &rrpc->luns[i];
  		if (rlun->cur)
  			rrpc_put_blk(rrpc, rlun->cur);
  		if (rlun->gc_cur)
  			rrpc_put_blk(rrpc, rlun->gc_cur);
  	}
  }
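/* Pick the next LUN in round-robin order; the atomic counter keeps this
 * lock-free.
 */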
  static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
  {
  	int next = atomic_inc_return(&rrpc->next_lun);
  
  	return &rrpc->luns[next % rrpc->nr_luns];
  }
  
  static void rrpc_gc_kick(struct rrpc *rrpc)
  {
  	struct rrpc_lun *rlun;
  	unsigned int i;
  
  	for (i = 0; i < rrpc->nr_luns; i++) {
  		rlun = &rrpc->luns[i];
  		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
  	}
  }
  
  /*
   * timed GC every interval.
   */
  static void rrpc_gc_timer(unsigned long data)
  {
  	struct rrpc *rrpc = (struct rrpc *)data;
  
  	rrpc_gc_kick(rrpc);
  	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
  }
  
  static void rrpc_end_sync_bio(struct bio *bio)
  {
  	struct completion *waiting = bio->bi_private;
  
  	if (bio->bi_error)
  		pr_err("nvm: gc request failed (%u).
  ", bio->bi_error);
  
  	complete(waiting);
  }
  
  /*
   * rrpc_move_valid_pages -- migrate live data off the block
   * @rrpc: the 'rrpc' structure
   * @block: the block from which to migrate live pages
   *
   * Description:
   *   GC algorithms may call this function to migrate remaining live
   *   pages off the block prior to erasing it. This function blocks
   *   further execution until the operation is complete.
   */
  static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
  	struct request_queue *q = rrpc->dev->q;
  	struct rrpc_rev_addr *rev;
  	struct nvm_rq *rqd;
  	struct bio *bio;
  	struct page *page;
  	int slot;
  	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
  	u64 phys_addr;
  	DECLARE_COMPLETION_ONSTACK(wait);
  	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
  		return 0;
  
  	bio = bio_alloc(GFP_NOIO, 1);
  	if (!bio) {
  		pr_err("nvm: could not alloc bio to gc
  ");
  		return -ENOMEM;
  	}
  
  	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
  	if (!page) {
  		bio_put(bio);
  		return -ENOMEM;
  	}
  
  	while ((slot = find_first_zero_bit(rblk->invalid_pages,
  					    nr_sec_per_blk)) < nr_sec_per_blk) {
  
  		/* Lock laddr */
  		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
  
  try:
  		spin_lock(&rrpc->rev_lock);
  		/* Get logical address from physical to logical table */
  		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
  		/* already updated by previous regular write */
  		if (rev->addr == ADDR_EMPTY) {
  			spin_unlock(&rrpc->rev_lock);
  			continue;
  		}
  
  		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
  		if (IS_ERR_OR_NULL(rqd)) {
  			spin_unlock(&rrpc->rev_lock);
  			schedule();
  			goto try;
  		}
  
  		spin_unlock(&rrpc->rev_lock);
  
  		/* Perform read to do GC */
  		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
  		bio_set_op_attrs(bio,  REQ_OP_READ, 0);
  		bio->bi_private = &wait;
  		bio->bi_end_io = rrpc_end_sync_bio;
  
  		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
  		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
  
  		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
  			pr_err("rrpc: gc read failed.
  ");
  			rrpc_inflight_laddr_release(rrpc, rqd);
  			goto finished;
  		}
  		wait_for_completion_io(&wait);
  		if (bio->bi_error) {
  			rrpc_inflight_laddr_release(rrpc, rqd);
  			goto finished;
  		}
  
  		bio_reset(bio);
  		reinit_completion(&wait);
  
  		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
  		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
  		bio->bi_private = &wait;
  		bio->bi_end_io = rrpc_end_sync_bio;
  
  		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
  
  		/* turn the command around and write the data back to a new
  		 * address
  		 */
  		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
  			pr_err("rrpc: gc write failed.
  ");
  			rrpc_inflight_laddr_release(rrpc, rqd);
  			goto finished;
  		}
  		wait_for_completion_io(&wait);
  
  		rrpc_inflight_laddr_release(rrpc, rqd);
  		if (bio->bi_error)
  			goto finished;
  
  		bio_reset(bio);
  	}
  
  finished:
  	mempool_free(page, rrpc->page_pool);
  	bio_put(bio);
  	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
  		pr_err("nvm: failed to garbage collect block
  ");
  		return -EIO;
  	}
  
  	return 0;
  }
  
  static void rrpc_block_gc(struct work_struct *work)
  {
  	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
  									ws_gc);
  	struct rrpc *rrpc = gcb->rrpc;
  	struct rrpc_block *rblk = gcb->rblk;
  	struct rrpc_lun *rlun = rblk->rlun;
  	struct nvm_dev *dev = rrpc->dev;
  	mempool_free(gcb, rrpc->gcb_pool);
  	pr_debug("nvm: block '%lu' being reclaimed
  ", rblk->parent->id);
  
  	if (rrpc_move_valid_pages(rrpc, rblk))
  		goto put_back;
  
  	if (nvm_erase_blk(dev, rblk->parent))
  		goto put_back;

  	rrpc_put_blk(rrpc, rblk);
  
  	return;
  
  put_back:
  	spin_lock(&rlun->lock);
  	list_add_tail(&rblk->prio, &rlun->prio_list);
  	spin_unlock(&rlun->lock);
  }
  
/* The block with the highest number of invalid pages will be at the
 * beginning of the list.
   */
  static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
  							struct rrpc_block *rb)
  {
  	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
  		return ra;
  
  	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
  }
  
/* Linearly find the block with the highest number of invalid pages.
 * Requires lun->lock to be held.
   */
  static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
  {
  	struct list_head *prio_list = &rlun->prio_list;
  	struct rrpc_block *rblock, *max;
  
  	BUG_ON(list_empty(prio_list));
  
  	max = list_first_entry(prio_list, struct rrpc_block, prio);
  	list_for_each_entry(rblock, prio_list, prio)
  		max = rblock_max_invalid(max, rblock);
  
  	return max;
  }
  
  static void rrpc_lun_gc(struct work_struct *work)
  {
  	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
  	struct rrpc *rrpc = rlun->rrpc;
  	struct nvm_lun *lun = rlun->parent;
  	struct rrpc_block_gc *gcb;
  	unsigned int nr_blocks_need;
  
  	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
  
  	if (nr_blocks_need < rrpc->nr_luns)
  		nr_blocks_need = rrpc->nr_luns;
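	/* Queue GC for the blocks with the most invalid pages until this LUN
	 * can be expected to have nr_blocks_need free blocks again.
	 */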
  	spin_lock(&rlun->lock);
  	while (nr_blocks_need > lun->nr_free_blocks &&
  					!list_empty(&rlun->prio_list)) {
  		struct rrpc_block *rblock = block_prio_find_max(rlun);
  		struct nvm_block *block = rblock->parent;
  
  		if (!rblock->nr_invalid_pages)
  			break;
  		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
  		if (!gcb)
  			break;
  		list_del_init(&rblock->prio);
  
  		BUG_ON(!block_is_full(rrpc, rblock));
  
  		pr_debug("rrpc: selected block '%lu' for GC
  ", block->id);
  		gcb->rrpc = rrpc;
  		gcb->rblk = rblock;
  		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
  
  		queue_work(rrpc->kgc_wq, &gcb->ws_gc);
  
  		nr_blocks_need--;
  	}
  	spin_unlock(&rlun->lock);
  
  	/* TODO: Hint that request queue can be started again */
  }
  
  static void rrpc_gc_queue(struct work_struct *work)
  {
  	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
  									ws_gc);
  	struct rrpc *rrpc = gcb->rrpc;
  	struct rrpc_block *rblk = gcb->rblk;
  	struct rrpc_lun *rlun = rblk->rlun;
  
  	spin_lock(&rlun->lock);
  	list_add_tail(&rblk->prio, &rlun->prio_list);
  	spin_unlock(&rlun->lock);
  
  	mempool_free(gcb, rrpc->gcb_pool);
  	pr_debug("nvm: block '%lu' is full, allow GC (sched)
  ",
  							rblk->parent->id);
  }
  
  static const struct block_device_operations rrpc_fops = {
  	.owner		= THIS_MODULE,
  };
  
  static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
  {
  	unsigned int i;
  	struct rrpc_lun *rlun, *max_free;
  
  	if (!is_gc)
  		return get_next_lun(rrpc);
  
	/* During GC we don't care about the round-robin order; instead we
	 * want to maintain evenness between the block luns.
  	 */
  	max_free = &rrpc->luns[0];
	/* Prevent the GC-ing lun from devouring pages of a lun with
	 * few free blocks. We don't take the lock as we only need an
  	 * estimate.
  	 */
  	rrpc_for_each_lun(rrpc, rlun, i) {
  		if (rlun->parent->nr_free_blocks >
  					max_free->parent->nr_free_blocks)
  			max_free = rlun;
  	}
  
  	return max_free;
  }
  
  static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
  					struct rrpc_block *rblk, u64 paddr)
  {
  	struct rrpc_addr *gp;
  	struct rrpc_rev_addr *rev;
  	BUG_ON(laddr >= rrpc->nr_sects);
  
  	gp = &rrpc->trans_map[laddr];
  	spin_lock(&rrpc->rev_lock);
  	if (gp->rblk)
  		rrpc_page_invalidate(rrpc, gp);
  
  	gp->addr = paddr;
  	gp->rblk = rblk;
  
  	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
  	rev->addr = laddr;
  	spin_unlock(&rrpc->rev_lock);
  
  	return gp;
  }
  static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
  	u64 addr = ADDR_EMPTY;
  
  	spin_lock(&rblk->lock);
  	if (block_is_full(rrpc, rblk))
  		goto out;
  
  	addr = block_to_addr(rrpc, rblk) + rblk->next_page;
  
  	rblk->next_page++;
  out:
  	spin_unlock(&rblk->lock);
  	return addr;
  }
/* Map a logical address to a physical page. The mapping implements a
 * round-robin approach and allocates a page from the next available lun.
   *
   * Returns rrpc_addr with the physical address and block. Returns NULL if no
   * blocks in the next rlun are available.
   */
  static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
  								int is_gc)
  {
  	struct rrpc_lun *rlun;
  	struct rrpc_block *rblk, **cur_rblk;
  	struct nvm_lun *lun;
  	u64 paddr;
  	int gc_force = 0;
  
  	rlun = rrpc_get_lun_rr(rrpc, is_gc);
  	lun = rlun->parent;
  
  	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
  		return NULL;
  	/*
	 * Page allocation steps:
	 * 1. Try to allocate a new page from the current rblk.
	 * 2a. If that succeeds, map it in and return.
	 * 2b. If it fails, first try to allocate a new block from the media
	 *     manager, then retry step 1. Retry until the normal block pool
	 *     is exhausted.
	 * 3. If exhausted, and the garbage collector is requesting the block,
	 *    switch to the reserved block and retry step 1.
	 *    If this also fails, or the request does not come from GC, report
	 *    that no block could be retrieved and let the caller handle
	 *    further processing.
  	 */

  	spin_lock(&rlun->lock);
  	cur_rblk = &rlun->cur;
  	rblk = rlun->cur;
  retry:
  	paddr = rrpc_alloc_addr(rrpc, rblk);
  	if (paddr != ADDR_EMPTY)
  		goto done;

  	if (!list_empty(&rlun->wblk_list)) {
  new_blk:
  		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
  									prio);
  		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
  		list_del(&rblk->prio);
  		goto retry;
  	}
  	spin_unlock(&rlun->lock);
  
  	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
  	if (rblk) {
  		spin_lock(&rlun->lock);
  		list_add_tail(&rblk->prio, &rlun->wblk_list);
  		/*
		 * Another thread might already have added a new block;
		 * make sure that block is used instead of the one just
		 * added.
  		 */
  		goto new_blk;
  	}
  
  	if (unlikely(is_gc) && !gc_force) {
  		/* retry from emergency gc block */
  		cur_rblk = &rlun->gc_cur;
  		rblk = rlun->gc_cur;
  		gc_force = 1;
  		spin_lock(&rlun->lock);
  		goto retry;
  	}
  	pr_err("rrpc: failed to allocate new block
  ");
  	return NULL;
  done:
  	spin_unlock(&rlun->lock);
  	return rrpc_update_map(rrpc, laddr, rblk, paddr);
  }
  
  static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
  	struct rrpc_block_gc *gcb;
  
  	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
  	if (!gcb) {
  		pr_err("rrpc: unable to queue block for gc.");
  		return;
  	}
  
  	gcb->rrpc = rrpc;
  	gcb->rblk = rblk;
  
  	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
  	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
  }
  
  static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
  						sector_t laddr, uint8_t npages)
  {
  	struct rrpc_addr *p;
  	struct rrpc_block *rblk;
  	struct nvm_lun *lun;
  	int cmnt_size, i;
  
  	for (i = 0; i < npages; i++) {
  		p = &rrpc->trans_map[laddr + i];
  		rblk = p->rblk;
  		lun = rblk->parent->lun;
  
  		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
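		/* Once every sector in the block has been written, queue the
		 * block so it becomes a garbage collection candidate.
		 */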
  		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
  			rrpc_run_gc(rrpc, rblk);
  	}
  }
  static void rrpc_end_io(struct nvm_rq *rqd)
  {
  	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
  	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
  	uint8_t npages = rqd->nr_ppas;
  	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
  
  	if (bio_data_dir(rqd->bio) == WRITE)
  		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
  	bio_put(rqd->bio);
  	if (rrqd->flags & NVM_IOTYPE_GC)
  		return;
  
  	rrpc_unlock_rq(rrpc, rqd);
  
  	if (npages > 1)
  		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
  
  	mempool_free(rqd, rrpc->rq_pool);
  }
  
  static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
  			struct nvm_rq *rqd, unsigned long flags, int npages)
  {
  	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
  	struct rrpc_addr *gp;
  	sector_t laddr = rrpc_get_laddr(bio);
  	int is_gc = flags & NVM_IOTYPE_GC;
  	int i;
  
  	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
  		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
  		return NVM_IO_REQUEUE;
  	}
  
  	for (i = 0; i < npages; i++) {
  		/* We assume that mapping occurs at 4KB granularity */
  		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
  		gp = &rrpc->trans_map[laddr + i];
  
  		if (gp->rblk) {
  			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
  								gp->addr);
  		} else {
  			BUG_ON(is_gc);
  			rrpc_unlock_laddr(rrpc, r);
  			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
  							rqd->dma_ppa_list);
  			return NVM_IO_DONE;
  		}
  	}
  
  	rqd->opcode = NVM_OP_HBREAD;
  
  	return NVM_IO_OK;
  }
  
  static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
  							unsigned long flags)
  {
  	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
  	int is_gc = flags & NVM_IOTYPE_GC;
  	sector_t laddr = rrpc_get_laddr(bio);
  	struct rrpc_addr *gp;
  
  	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
  		return NVM_IO_REQUEUE;
  	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
  	gp = &rrpc->trans_map[laddr];
  
  	if (gp->rblk) {
  		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
  	} else {
  		BUG_ON(is_gc);
  		rrpc_unlock_rq(rrpc, rqd);
  		return NVM_IO_DONE;
  	}
  
  	rqd->opcode = NVM_OP_HBREAD;
  	rrqd->addr = gp;
  
  	return NVM_IO_OK;
  }
  
  static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
  			struct nvm_rq *rqd, unsigned long flags, int npages)
  {
  	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
  	struct rrpc_addr *p;
  	sector_t laddr = rrpc_get_laddr(bio);
  	int is_gc = flags & NVM_IOTYPE_GC;
  	int i;
  
  	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
  		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
  		return NVM_IO_REQUEUE;
  	}
  
  	for (i = 0; i < npages; i++) {
  		/* We assume that mapping occurs at 4KB granularity */
  		p = rrpc_map_page(rrpc, laddr + i, is_gc);
  		if (!p) {
  			BUG_ON(is_gc);
  			rrpc_unlock_laddr(rrpc, r);
  			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
  							rqd->dma_ppa_list);
  			rrpc_gc_kick(rrpc);
  			return NVM_IO_REQUEUE;
  		}
  
  		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
  								p->addr);
  	}
  
  	rqd->opcode = NVM_OP_HBWRITE;
  
  	return NVM_IO_OK;
  }
  
  static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
  				struct nvm_rq *rqd, unsigned long flags)
  {
  	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
  	struct rrpc_addr *p;
  	int is_gc = flags & NVM_IOTYPE_GC;
  	sector_t laddr = rrpc_get_laddr(bio);
  
  	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
  		return NVM_IO_REQUEUE;
  
  	p = rrpc_map_page(rrpc, laddr, is_gc);
  	if (!p) {
  		BUG_ON(is_gc);
  		rrpc_unlock_rq(rrpc, rqd);
  		rrpc_gc_kick(rrpc);
  		return NVM_IO_REQUEUE;
  	}
  
  	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
  	rqd->opcode = NVM_OP_HBWRITE;
  	rrqd->addr = p;
  
  	return NVM_IO_OK;
  }
  
  static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
  			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
  {
  	if (npages > 1) {
  		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
  							&rqd->dma_ppa_list);
  		if (!rqd->ppa_list) {
  			pr_err("rrpc: not able to allocate ppa list
  ");
  			return NVM_IO_ERR;
  		}
  		if (bio_op(bio) == REQ_OP_WRITE)
  			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
  									npages);
  
  		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
  	}
  	if (bio_op(bio) == REQ_OP_WRITE)
  		return rrpc_write_rq(rrpc, bio, rqd, flags);
  
  	return rrpc_read_rq(rrpc, bio, rqd, flags);
  }
  
  static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
  				struct nvm_rq *rqd, unsigned long flags)
  {
  	int err;
  	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
  	uint8_t nr_pages = rrpc_get_pages(bio);
  	int bio_size = bio_sectors(bio) << 9;
  
  	if (bio_size < rrpc->dev->sec_size)
  		return NVM_IO_ERR;
  	else if (bio_size > rrpc->dev->max_rq_size)
  		return NVM_IO_ERR;
  
  	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
  	if (err)
  		return err;
  
  	bio_get(bio);
  	rqd->bio = bio;
  	rqd->ins = &rrpc->instance;
  	rqd->nr_ppas = nr_pages;
  	rrq->flags = flags;
  
  	err = nvm_submit_io(rrpc->dev, rqd);
  	if (err) {
  		pr_err("rrpc: I/O submission failed: %d
  ", err);
  		bio_put(bio);
  		if (!(flags & NVM_IOTYPE_GC)) {
  			rrpc_unlock_rq(rrpc, rqd);
  			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
  		}
  		return NVM_IO_ERR;
  	}
  
  	return NVM_IO_OK;
  }
  static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
  {
  	struct rrpc *rrpc = q->queuedata;
  	struct nvm_rq *rqd;
  	int err;
  	if (bio_op(bio) == REQ_OP_DISCARD) {
  		rrpc_discard(rrpc, bio);
  		return BLK_QC_T_NONE;
  	}
  
  	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
  	if (!rqd) {
  		pr_err_ratelimited("rrpc: not able to queue bio.");
  		bio_io_error(bio);
  		return BLK_QC_T_NONE;
  	}
  	memset(rqd, 0, sizeof(struct nvm_rq));
  
  	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
  	switch (err) {
  	case NVM_IO_OK:
  		return BLK_QC_T_NONE;
  	case NVM_IO_ERR:
  		bio_io_error(bio);
  		break;
  	case NVM_IO_DONE:
  		bio_endio(bio);
  		break;
  	case NVM_IO_REQUEUE:
  		spin_lock(&rrpc->bio_lock);
  		bio_list_add(&rrpc->requeue_bios, bio);
  		spin_unlock(&rrpc->bio_lock);
  		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
  		break;
  	}
  
  	mempool_free(rqd, rrpc->rq_pool);
  	return BLK_QC_T_NONE;
  }
  
  static void rrpc_requeue(struct work_struct *work)
  {
  	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
  	struct bio_list bios;
  	struct bio *bio;
  
  	bio_list_init(&bios);
  
  	spin_lock(&rrpc->bio_lock);
  	bio_list_merge(&bios, &rrpc->requeue_bios);
  	bio_list_init(&rrpc->requeue_bios);
  	spin_unlock(&rrpc->bio_lock);
  
  	while ((bio = bio_list_pop(&bios)))
  		rrpc_make_rq(rrpc->disk->queue, bio);
  }
  
  static void rrpc_gc_free(struct rrpc *rrpc)
  {
  	if (rrpc->krqd_wq)
  		destroy_workqueue(rrpc->krqd_wq);
  
  	if (rrpc->kgc_wq)
  		destroy_workqueue(rrpc->kgc_wq);
  }
  
  static int rrpc_gc_init(struct rrpc *rrpc)
  {
  	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
  								rrpc->nr_luns);
  	if (!rrpc->krqd_wq)
  		return -ENOMEM;
  
  	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
  	if (!rrpc->kgc_wq)
  		return -ENOMEM;
  
  	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
  
  	return 0;
  }
  
  static void rrpc_map_free(struct rrpc *rrpc)
  {
  	vfree(rrpc->rev_trans_map);
  	vfree(rrpc->trans_map);
  }
  
  static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
  {
  	struct rrpc *rrpc = (struct rrpc *)private;
  	struct nvm_dev *dev = rrpc->dev;
  	struct rrpc_addr *addr = rrpc->trans_map + slba;
  	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
  	u64 elba = slba + nlb;
  	u64 i;
  	if (unlikely(elba > dev->total_secs)) {
  		pr_err("nvm: L2P data from device is out of bounds!
  ");
  		return -EINVAL;
  	}
  
  	for (i = 0; i < nlb; i++) {
  		u64 pba = le64_to_cpu(entries[i]);
  		unsigned int mod;
  		/* LNVM treats address-spaces as silos, LBA and PBA are
  		 * equally large and zero-indexed.
  		 */
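		/* A pba of U64_MAX appears to denote an unmapped entry, so it
		 * is exempt from the bounds check below (assumption).
		 */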
  		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
  			pr_err("nvm: L2P data entry is out of bounds!
  ");
  			return -EINVAL;
  		}
  
		/* Address zero is special: the first page on a disk is
		 * protected, as it often holds internal device boot
		 * information.
  		 */
  		if (!pba)
  			continue;
  		div_u64_rem(pba, rrpc->nr_sects, &mod);
  		addr[i].addr = pba;
  		raddr[mod].addr = slba + i;
  	}
  
  	return 0;
  }
  
  static int rrpc_map_init(struct rrpc *rrpc)
  {
  	struct nvm_dev *dev = rrpc->dev;
  	sector_t i;
  	int ret;
  	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
  	if (!rrpc->trans_map)
  		return -ENOMEM;
  
  	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
  							* rrpc->nr_sects);
  	if (!rrpc->rev_trans_map)
  		return -ENOMEM;
  	for (i = 0; i < rrpc->nr_sects; i++) {
  		struct rrpc_addr *p = &rrpc->trans_map[i];
  		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
  
  		p->addr = ADDR_EMPTY;
  		r->addr = ADDR_EMPTY;
  	}
  
  	if (!dev->ops->get_l2p_tbl)
  		return 0;
  
  	/* Bring up the mapping table from device */
  	ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
  					rrpc_l2p_update, rrpc);
  	if (ret) {
  		pr_err("nvm: rrpc: could not read L2P table.
  ");
  		return -EINVAL;
  	}
  
  	return 0;
  }
  /* Minimum pages needed within a lun */
  #define PAGE_POOL_SIZE 16
  #define ADDR_POOL_SIZE 64
  
  static int rrpc_core_init(struct rrpc *rrpc)
  {
  	down_write(&rrpc_lock);
  	if (!rrpc_gcb_cache) {
  		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
  				sizeof(struct rrpc_block_gc), 0, 0, NULL);
  		if (!rrpc_gcb_cache) {
  			up_write(&rrpc_lock);
  			return -ENOMEM;
  		}
  
  		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
  				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
  				0, 0, NULL);
  		if (!rrpc_rq_cache) {
  			kmem_cache_destroy(rrpc_gcb_cache);
  			up_write(&rrpc_lock);
  			return -ENOMEM;
  		}
  	}
  	up_write(&rrpc_lock);
  
  	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
  	if (!rrpc->page_pool)
  		return -ENOMEM;
  
  	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
  								rrpc_gcb_cache);
  	if (!rrpc->gcb_pool)
  		return -ENOMEM;
  
  	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
  	if (!rrpc->rq_pool)
  		return -ENOMEM;
  
  	spin_lock_init(&rrpc->inflights.lock);
  	INIT_LIST_HEAD(&rrpc->inflights.reqs);
  
  	return 0;
  }
  
  static void rrpc_core_free(struct rrpc *rrpc)
  {
  	mempool_destroy(rrpc->page_pool);
  	mempool_destroy(rrpc->gcb_pool);
  	mempool_destroy(rrpc->rq_pool);
  }
  
  static void rrpc_luns_free(struct rrpc *rrpc)
  {
  	struct nvm_dev *dev = rrpc->dev;
  	struct nvm_lun *lun;
  	struct rrpc_lun *rlun;
  	int i;
  
  	if (!rrpc->luns)
  		return;
  
  	for (i = 0; i < rrpc->nr_luns; i++) {
  		rlun = &rrpc->luns[i];
  		lun = rlun->parent;
  		if (!lun)
  			break;
  		dev->mt->release_lun(dev, lun->id);
  		vfree(rlun->blocks);
  	}
  	kfree(rrpc->luns);
  }
  
  static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
  {
  	struct nvm_dev *dev = rrpc->dev;
  	struct rrpc_lun *rlun;
  	int i, j, ret = -EINVAL;

  	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
  		pr_err("rrpc: number of pages per block too high.");
  		return -EINVAL;
  	}
  	spin_lock_init(&rrpc->rev_lock);
  
  	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
  								GFP_KERNEL);
  	if (!rrpc->luns)
  		return -ENOMEM;
  
  	/* 1:1 mapping */
  	for (i = 0; i < rrpc->nr_luns; i++) {
  		int lunid = lun_begin + i;
  		struct nvm_lun *lun;

  		if (dev->mt->reserve_lun(dev, lunid)) {
  			pr_err("rrpc: lun %u is already allocated
  ", lunid);
  			goto err;
  		}

  		lun = dev->mt->get_lun(dev, lunid);
  		if (!lun)
  			goto err;

  		rlun = &rrpc->luns[i];
  		rlun->parent = lun;
  		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
  						rrpc->dev->blks_per_lun);
  		if (!rlun->blocks) {
  			ret = -ENOMEM;
  			goto err;
  		}
  
  		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
  			struct rrpc_block *rblk = &rlun->blocks[j];
  			struct nvm_block *blk = &lun->blocks[j];
  
  			rblk->parent = blk;
  			rblk->rlun = rlun;
  			INIT_LIST_HEAD(&rblk->prio);
  			spin_lock_init(&rblk->lock);
  		}
  
  		rlun->rrpc = rrpc;
  		INIT_LIST_HEAD(&rlun->prio_list);
  		INIT_LIST_HEAD(&rlun->wblk_list);
  
  		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
  		spin_lock_init(&rlun->lock);
  	}
  
  	return 0;
  err:
  	return ret;
  }
  /* returns 0 on success and stores the beginning address in *begin */
  static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
  {
  	struct nvm_dev *dev = rrpc->dev;
  	struct nvmm_type *mt = dev->mt;
  	sector_t size = rrpc->nr_sects * dev->sec_size;
  	int ret;
  
  	size >>= 9;
  	ret = mt->get_area(dev, begin, size);
  	if (!ret)
  		*begin >>= (ilog2(dev->sec_size) - 9);
  
  	return ret;
  }
  
  static void rrpc_area_free(struct rrpc *rrpc)
  {
  	struct nvm_dev *dev = rrpc->dev;
  	struct nvmm_type *mt = dev->mt;
  	sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);

  	mt->put_area(dev, begin);
  }
  static void rrpc_free(struct rrpc *rrpc)
  {
  	rrpc_gc_free(rrpc);
  	rrpc_map_free(rrpc);
  	rrpc_core_free(rrpc);
  	rrpc_luns_free(rrpc);
  	rrpc_area_free(rrpc);
  
  	kfree(rrpc);
  }
  
  static void rrpc_exit(void *private)
  {
  	struct rrpc *rrpc = private;
  
  	del_timer(&rrpc->gc_timer);
  
  	flush_workqueue(rrpc->krqd_wq);
  	flush_workqueue(rrpc->kgc_wq);
  
  	rrpc_free(rrpc);
  }
  
  static sector_t rrpc_capacity(void *private)
  {
  	struct rrpc *rrpc = private;
  	struct nvm_dev *dev = rrpc->dev;
  	sector_t reserved, provisioned;
  
  	/* cur, gc, and two emergency blocks for each lun */
  	reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
  	provisioned = rrpc->nr_sects - reserved;

  	if (reserved > rrpc->nr_sects) {
  		pr_err("rrpc: not enough space available to expose storage.
  ");
  		return 0;
  	}
  
  	sector_div(provisioned, 10);
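	/* Expose 90% of the provisioned sectors, converted to 512-byte
	 * sectors for the block layer via NR_PHY_IN_LOG; the held-back 10%
	 * is presumably over-provisioning headroom.
	 */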
  	return provisioned * 9 * NR_PHY_IN_LOG;
  }
  
  /*
 * Looks up the logical address from the reverse translation map and checks
 * whether the mapping is still valid by comparing the page's physical
 * address with the one stored in the forward map. Valid pages are re-linked
 * to the block; stale pages are marked invalid.
   */
  static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
  	struct nvm_dev *dev = rrpc->dev;
  	int offset;
  	struct rrpc_addr *laddr;
  	u64 bpaddr, paddr, pladdr;

  	bpaddr = block_to_rel_addr(rrpc, rblk);
  	for (offset = 0; offset < dev->sec_per_blk; offset++) {
  		paddr = bpaddr + offset;
  
  		pladdr = rrpc->rev_trans_map[paddr].addr;
  		if (pladdr == ADDR_EMPTY)
  			continue;
  
  		laddr = &rrpc->trans_map[pladdr];
  
  		if (paddr == laddr->addr) {
  			laddr->rblk = rblk;
  		} else {
  			set_bit(offset, rblk->invalid_pages);
  			rblk->nr_invalid_pages++;
  		}
  	}
  }
  
  static int rrpc_blocks_init(struct rrpc *rrpc)
  {
  	struct rrpc_lun *rlun;
  	struct rrpc_block *rblk;
  	int lun_iter, blk_iter;
  
  	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
  		rlun = &rrpc->luns[lun_iter];
  
  		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
  								blk_iter++) {
  			rblk = &rlun->blocks[blk_iter];
  			rrpc_block_map_update(rrpc, rblk);
  		}
  	}
  
  	return 0;
  }
  
  static int rrpc_luns_configure(struct rrpc *rrpc)
  {
  	struct rrpc_lun *rlun;
  	struct rrpc_block *rblk;
  	int i;
  
  	for (i = 0; i < rrpc->nr_luns; i++) {
  		rlun = &rrpc->luns[i];
  
  		rblk = rrpc_get_blk(rrpc, rlun, 0);
  		if (!rblk)
  			goto err;
  		rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
  
  		/* Emergency gc block */
  		rblk = rrpc_get_blk(rrpc, rlun, 1);
  		if (!rblk)
  			goto err;
  		rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
  	}
  
  	return 0;
  err:
  	rrpc_put_blks(rrpc);
  	return -EINVAL;
  }
  
  static struct nvm_tgt_type tt_rrpc;
  
  static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
  						int lun_begin, int lun_end)
  {
  	struct request_queue *bqueue = dev->q;
  	struct request_queue *tqueue = tdisk->queue;
  	struct rrpc *rrpc;
  	sector_t soffset;
  	int ret;
  
  	if (!(dev->identity.dom & NVM_RSP_L2P)) {
  		pr_err("nvm: rrpc: device does not support l2p (%x)
  ",
  							dev->identity.dom);
  		return ERR_PTR(-EINVAL);
  	}
  
  	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
  	if (!rrpc)
  		return ERR_PTR(-ENOMEM);
  
  	rrpc->instance.tt = &tt_rrpc;
  	rrpc->dev = dev;
  	rrpc->disk = tdisk;
  
  	bio_list_init(&rrpc->requeue_bios);
  	spin_lock_init(&rrpc->bio_lock);
  	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
  
  	rrpc->nr_luns = lun_end - lun_begin + 1;
  	rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
  	rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
  
  	/* simple round-robin strategy */
  	atomic_set(&rrpc->next_lun, -1);
  	ret = rrpc_area_init(rrpc, &soffset);
  	if (ret < 0) {
  		pr_err("nvm: rrpc: could not initialize area
  ");
  		return ERR_PTR(ret);
  	}
  	rrpc->soffset = soffset;
  	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
  	if (ret) {
  		pr_err("nvm: rrpc: could not initialize luns
  ");
  		goto err;
  	}
  
  	rrpc->poffset = dev->sec_per_lun * lun_begin;
  	rrpc->lun_offset = lun_begin;
  
  	ret = rrpc_core_init(rrpc);
  	if (ret) {
  		pr_err("nvm: rrpc: could not initialize core
  ");
  		goto err;
  	}
  
  	ret = rrpc_map_init(rrpc);
  	if (ret) {
  		pr_err("nvm: rrpc: could not initialize maps
  ");
  		goto err;
  	}
  
  	ret = rrpc_blocks_init(rrpc);
  	if (ret) {
  		pr_err("nvm: rrpc: could not initialize state for blocks
  ");
  		goto err;
  	}
  
  	ret = rrpc_luns_configure(rrpc);
  	if (ret) {
  		pr_err("nvm: rrpc: not enough blocks available in LUNs.
  ");
  		goto err;
  	}
  
  	ret = rrpc_gc_init(rrpc);
  	if (ret) {
  		pr_err("nvm: rrpc: could not initialize gc
  ");
  		goto err;
  	}
  
  	/* inherit the size from the underlying device */
  	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
  	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
  
  	pr_info("nvm: rrpc initialized with %u luns and %llu pages.
  ",
  			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
  
  	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
  
  	return rrpc;
  err:
  	rrpc_free(rrpc);
  	return ERR_PTR(ret);
  }
  
  /* round robin, page-based FTL, and cost-based GC */
  static struct nvm_tgt_type tt_rrpc = {
  	.name		= "rrpc",
  	.version	= {1, 0, 0},
  
  	.make_rq	= rrpc_make_rq,
  	.capacity	= rrpc_capacity,
  	.end_io		= rrpc_end_io,
  
  	.init		= rrpc_init,
  	.exit		= rrpc_exit,
  };
  
  static int __init rrpc_module_init(void)
  {
  	return nvm_register_tgt_type(&tt_rrpc);
  }
  
  static void rrpc_module_exit(void)
  {
  	nvm_unregister_tgt_type(&tt_rrpc);
  }
  
  module_init(rrpc_module_init);
  module_exit(rrpc_module_exit);
  MODULE_LICENSE("GPL v2");
  MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");