Blame view
drivers/nvdimm/pmem.c
16.7 KB
2025cf9e1 treewide: Replace... |
1 |
// SPDX-License-Identifier: GPL-2.0-only |
9e853f231 drivers/block/pme... |
2 3 4 |
/* * Persistent Memory Driver * |
9f53f9fa4 libnvdimm, pmem: ... |
5 |
* Copyright (c) 2014-2015, Intel Corporation. |
9e853f231 drivers/block/pme... |
6 7 |
* Copyright (c) 2015, Christoph Hellwig <hch@lst.de>. * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>. |
9e853f231 drivers/block/pme... |
8 |
*/ |
9e853f231 drivers/block/pme... |
9 10 11 12 |
#include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/init.h> #include <linux/platform_device.h> |
c953cc987 libnvdimm, pmem: ... |
13 |
#include <linux/set_memory.h> |
9e853f231 drivers/block/pme... |
14 15 |
#include <linux/module.h> #include <linux/moduleparam.h> |
b95f5f439 libnvdimm: conver... |
16 |
#include <linux/badblocks.h> |
9476df7d8 mm: introduce fin... |
17 |
#include <linux/memremap.h> |
32ab0a3f5 libnvdimm, pmem: ... |
18 |
#include <linux/vmalloc.h> |
713897038 mm, zone_device: ... |
19 |
#include <linux/blk-mq.h> |
34c0fd540 mm, dax, pmem: in... |
20 |
#include <linux/pfn_t.h> |
9e853f231 drivers/block/pme... |
21 |
#include <linux/slab.h> |
0aed55af8 x86, uaccess: int... |
22 |
#include <linux/uio.h> |
c1d6e828a pmem: add dax_ope... |
23 |
#include <linux/dax.h> |
9f53f9fa4 libnvdimm, pmem: ... |
24 |
#include <linux/nd.h> |
23c47d2ad bdi: introduce BD... |
25 |
#include <linux/backing-dev.h> |
e0cf615d7 asm-generic: don'... |
26 27 |
#include <linux/mm.h> #include <asm/cacheflush.h> |
f295e53b6 libnvdimm, pmem: ... |
28 |
#include "pmem.h" |
32ab0a3f5 libnvdimm, pmem: ... |
29 |
#include "pfn.h" |
9f53f9fa4 libnvdimm, pmem: ... |
30 |
#include "nd.h" |
9e853f231 drivers/block/pme... |
31 |
|
/* Resolve the 'struct device' associated with a pmem namespace. */
static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

/* Walk up from the namespace device to its parent nd_region. */
static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}
9e853f231 drivers/block/pme... |
45 |
|
/*
 * Drop the HWPoison / MCE-unmapped state for pages in a range that has
 * just been successfully cleared of media errors, so the kernel linear
 * mapping for those pages becomes usable again.
 */
static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}
/*
 * Ask the nvdimm bus to clear media errors ("poison") in the given
 * device range, then update badblocks accounting and CPU cache state
 * to match.  Returns BLK_STS_OK only if the full length was cleared.
 */
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	/* translate the device offset to a logical sector number */
	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	/* only update badblocks if at least one full sector was cleared */
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s ",
				(unsigned long long) sector,
				cleared, cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		/* poke 'badblocks' sysfs watchers, if any */
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	/* drop stale CPU cachelines covering the (possibly) rewritten media */
	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}
bd697a80c pmem: reduce kmap... |
98 99 100 |
static void write_pmem(void *pmem_addr, struct page *page, unsigned int off, unsigned int len) { |
98cc093cb block, THP: make ... |
101 102 103 104 105 |
unsigned int chunk; void *mem; while (len) { mem = kmap_atomic(page); |
9dc6488e8 libnvdimm/pmem: f... |
106 |
chunk = min_t(unsigned int, len, PAGE_SIZE - off); |
98cc093cb block, THP: make ... |
107 108 109 110 111 |
memcpy_flushcache(pmem_addr, mem + off, chunk); kunmap_atomic(mem); len -= chunk; off = 0; page++; |
9dc6488e8 libnvdimm/pmem: f... |
112 |
pmem_addr += chunk; |
98cc093cb block, THP: make ... |
113 |
} |
bd697a80c pmem: reduce kmap... |
114 |
} |
4e4cbee93 block: switch bio... |
115 |
static blk_status_t read_pmem(struct page *page, unsigned int off, |
bd697a80c pmem: reduce kmap... |
116 117 |
void *pmem_addr, unsigned int len) { |
98cc093cb block, THP: make ... |
118 |
unsigned int chunk; |
60622d682 x86/asm/memcpy_mc... |
119 |
unsigned long rem; |
98cc093cb block, THP: make ... |
120 121 122 123 |
void *mem; while (len) { mem = kmap_atomic(page); |
9dc6488e8 libnvdimm/pmem: f... |
124 |
chunk = min_t(unsigned int, len, PAGE_SIZE - off); |
ec6347bb4 x86, powerpc: Ren... |
125 |
rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk); |
98cc093cb block, THP: make ... |
126 |
kunmap_atomic(mem); |
60622d682 x86/asm/memcpy_mc... |
127 |
if (rem) |
98cc093cb block, THP: make ... |
128 129 130 131 |
return BLK_STS_IOERR; len -= chunk; off = 0; page++; |
9dc6488e8 libnvdimm/pmem: f... |
132 |
pmem_addr += chunk; |
98cc093cb block, THP: make ... |
133 |
} |
4e4cbee93 block: switch bio... |
134 |
return BLK_STS_OK; |
bd697a80c pmem: reduce kmap... |
135 |
} |
/*
 * Read 'len' bytes at 'sector' into 'page' + 'page_off'.  Fails fast
 * with BLK_STS_IOERR if the range intersects known bad blocks.
 */
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

/*
 * Write 'len' bytes from 'page' + 'page_off' to 'sector', clearing any
 * known poison in the target range as a side effect.
 */
static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	/*
	 * Note that we write the data both before and after
	 * clearing poison.  The write before clear poison
	 * handles situations where the latest written data is
	 * preserved and the clear poison operation simply marks
	 * the address range as valid without changing the data.
	 * In this case application software can assume that an
	 * interrupted write will either return the new good
	 * data or an error.
	 *
	 * However, if pmem_clear_poison() leaves the data in an
	 * indeterminate state we need to perform the write
	 * after clear poison.
	 */
	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len);
		write_pmem(pmem_addr, page, page_off, len);
	}

	return rc;
}
/*
 * Block-layer entry point: service a bio synchronously, segment by
 * segment, honoring REQ_PREFLUSH before and REQ_FUA after the data
 * transfer via nvdimm_flush().
 */
static blk_qc_t pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	/* flush prior writes before servicing a preflush request */
	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		/* first failing segment fails the whole bio */
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	/* force-unit-access: persist this bio's data before completion */
	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

/*
 * ->rw_page() entry point: synchronously transfer one (possibly THP)
 * page.  Returns 0 on success, negative errno on failure.
 */
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	blk_status_t rc;

	if (op_is_write(op))
		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
	else
		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
/*
 * Translate a page offset into a kernel virtual address and pfn for
 * DAX access.  Returns the number of contiguous known-good pages at
 * that offset, or -EIO if the requested range intersects bad blocks.
 */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/* both output pointers are optional */
	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

/* block_device_operations for the pmem gendisk */
static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		pmem_submit_bio,
	.rw_page =		pmem_rw_page,
};
f605a263e dax, pmem: Add a ... |
276 277 278 279 280 281 282 283 284 |
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, size_t nr_pages) { struct pmem_device *pmem = dax_get_private(dax_dev); return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0, PFN_PHYS(pgoff) >> SECTOR_SHIFT, PAGE_SIZE)); } |
/* DAX ->direct_access(): thin wrapper over __pmem_direct_access(). */
static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}
/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	/* copy user data to pmem with cacheline flushing for durability */
	return _copy_from_iter_flushcache(addr, bytes, i);
}
/* Copy from pmem to the iov using the machine-check-safe copy routine. */
static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_mc_to_iter(addr, bytes, i);
}
/* dax_operations backing the pmem dax_device */
static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
	.zero_page_range = pmem_dax_zero_page_range,
};
/* sysfs attribute groups attached to the gendisk device */
static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};
/*
 * dev_pagemap ->cleanup(): the pagemap's refcount is the request
 * queue's q_usage_counter, so recover the queue and tear it down.
 */
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_cleanup_queue(q);
}
/* devm action shim: same teardown as the pagemap cleanup callback */
static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}
/*
 * dev_pagemap ->kill(): start freezing the request queue so no new I/O
 * references are taken while the pagemap is being torn down.
 */
static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}
/* devm action: unwind the dax_device and gendisk created at attach */
static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	/* stop dax access first, then remove and drop the disk */
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}
/* dev_pagemap callbacks for fsdax-mode namespaces */
static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.kill			= pmem_pagemap_kill,
	.cleanup		= pmem_pagemap_cleanup,
};
/*
 * Probe-time setup: map the namespace (via devm_memremap_pages() for
 * page-mapped modes, plain devm_memremap() otherwise), create the
 * request queue, gendisk and dax_device, and populate badblocks.
 * All resources are devm-managed; error paths simply return.
 */
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	/* fua < 0 means the region cannot reliably flush to media */
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes ");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR ", res);
		return -EBUSY;
	}

	q = blk_alloc_queue(dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	/* tie pagemap lifetime to the queue's usage counter */
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		/* pfn namespace: struct pages live in the pmem itself */
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		/* raw namespace with page mapping requested */
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		/* no struct pages: plain memremap, queue torn down by devm */
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		bb_range.start =  res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* only advertise DAX when pages are mapped */
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (IS_ERR(dax_dev)) {
		/* disk is not yet devm-managed here; drop it manually */
		put_disk(disk);
		return PTR_ERR(dax_dev);
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;
	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk, NULL);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	/* cache the sysfs dirent so badblocks changes can be notified */
	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled ");

	return 0;
}
9e853f231 drivers/block/pme... |
492 |
|
/*
 * Driver probe: dispatch to btt attach or disk attach depending on the
 * claimed device type, probing for btt/pfn/dax info blocks on raw
 * namespaces.  A 0 return from the nd_*_probe() helpers means a claim
 * was registered and this probe should yield with -ENXIO.
 */
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here, there is no
	 * info reserve block or we found a valid info reserve block
	 * but failed to initialize the pfn superblock.
	 *
	 * For the first case consider namespace as a raw pmem namespace
	 * and attach a disk.
	 *
	 * For the latter, consider this a success and advance the namespace
	 * seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}
/*
 * Driver remove: detach the btt if one is attached, otherwise drop the
 * badblocks sysfs dirent, then issue a final region flush.
 */
static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);

	return 0;
}
/* Flush the region on shutdown so pending writes reach the media. */
static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}
/*
 * Bus notification handler: on NVDIMM_REVALIDATE_POISON, recompute the
 * badblocks list for the namespace backing this device (btt, pfn, or
 * raw), accounting for pfn metadata padding, and notify sysfs watchers.
 */
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		/* btt has no badblocks sysfs dirent to notify */
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			/* skip the pfn metadata and start padding */
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);

/* nd bus driver claiming io and pmem namespace devices */
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");