drivers/dax/device.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"

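/*
 * Reject VMAs that device-dax cannot service: dead devices, private
 * mappings, and mappings whose bounds violate the device alignment.
 */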
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dev_dax->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}
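
/*
 * Walk the device's ranges and translate a page offset into the backing
 * physical address; returns -1 if the offset plus the requested size does
 * not fall entirely within one range.
 */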
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct dev_dax_range *dax_range = &dev_dax->ranges[i];
		struct range *range = &dax_range->range;
		unsigned long long pgoff_end;
		phys_addr_t phys;

		pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
		if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
			continue;
		phys = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
		if (phys + size - 1 <= range->end)
			return phys;
		break;
	}
	return -1;
}
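
/* handle a single-page (PTE) fault by inserting the backing pfn */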
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	if (dev_dax->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dev_dax->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dev_dax->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}
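
/*
 * Handle a PMD-sized fault; when the device alignment is smaller than
 * PMD_SIZE, fall back so the core mm retries with PTEs.
 */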
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	if (dev_dax->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dev_dax->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dev_dax->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dev_dax->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
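
/* PUD-sized faults are only possible on architectures with PUD THP support */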
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	if (dev_dax->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dev_dax->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dev_dax->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dev_dax->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
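
/*
 * Common fault entry: dispatch on fault granularity, then associate the
 * backing pages with the file mapping so they can be found at unmap time.
 */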
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma,
				vmf->address & ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}
static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;

	if (!IS_ALIGNED(addr, dev_dax->align))
		return -EINVAL;
	return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;

	return dev_dax->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};
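
/* set up a device-dax mapping: validate the VMA and install dax_vm_ops */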
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;

	if (!dev_dax || addr)
		goto out;

	align = dev_dax->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
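
/* device-dax pages are never dirtied or invalidated through the page cache */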
static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
};
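
/* wire the per-open inode up to the dax_device's shared inode and mapping */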
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->f_sb_err = file_sample_sb_err(filp);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}
static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}
static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};
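
/* devm teardown actions registered by dev_dax_probe() */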
static void dev_dax_cdev_del(void *cdev)
{
	cdev_del(cdev);
}

static void dev_dax_kill(void *dev_dax)
{
	kill_dev_dax(dev_dax);
}
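
/*
 * Bind a dev_dax instance: reserve and map its ranges, then register the
 * character device. All setup is devm-managed, so remove needs no unwind.
 */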
int dev_dax_probe(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct device *dev = &dev_dax->dev;
	struct dev_pagemap *pgmap;
	struct inode *inode;
	struct cdev *cdev;
	void *addr;
	int rc, i;

	pgmap = dev_dax->pgmap;
	if (dev_WARN_ONCE(dev, pgmap && dev_dax->nr_range > 1,
			"static pgmap / multi-range device conflict\n"))
		return -EINVAL;

	if (!pgmap) {
		pgmap = devm_kzalloc(dev, sizeof(*pgmap) + sizeof(struct range)
				* (dev_dax->nr_range - 1), GFP_KERNEL);
		if (!pgmap)
			return -ENOMEM;
		pgmap->nr_range = dev_dax->nr_range;
	}

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct range *range = &dev_dax->ranges[i].range;

		if (!devm_request_mem_region(dev, range->start,
					range_len(range), dev_name(dev))) {
			dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve range\n",
					i, range->start, range->end);
			return -EBUSY;
		}
		/* don't update the range for static pgmap */
		if (!dev_dax->pgmap)
			pgmap->ranges[i] = *range;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	addr = devm_memremap_pages(dev, pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	if (dev->class) {
		/* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
		cdev->owner = dev->parent->driver->owner;
	} else
		cdev->owner = dev->driver->owner;
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
	if (rc)
		return rc;

	run_dax(dax_dev);
	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);
static int dev_dax_remove(struct dev_dax *dev_dax)
{
	/* all probe actions are unwound by devm */
	return 0;
}

static struct dax_device_driver device_dax_driver = {
	.probe = dev_dax_probe,
	.remove = dev_dax_remove,
	.match_always = 1,
};

static int __init dax_init(void)
{
	return dax_driver_register(&device_dax_driver);
}

static void __exit dax_exit(void)
{
	dax_driver_unregister(&device_dax_driver);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);