Blame view
drivers/char/mspec.c
7.1 KB
25763b3c8
|
1 |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2006 Silicon Graphics, Inc.  All rights
 * reserved.
 */

/*
 * SN Platform Special Memory (mspec) Support
 *
 * This driver exports the SN special memory (mspec) facility to user
 * processes.
 * There are two types of memory made available thru this driver:
 * uncached and cached.
 *
 * Uncached are used for memory write combining feature of the ia64
 * cpu.
 *
 * Cached are used for areas of memory that are used as cached addresses
 * on our partition and used as uncached addresses from other partitions.
 * Due to a design constraint of the SN2 Shub, you can not have processors
 * on the same FSB perform both a cached and uncached reference to the
 * same cache line.  These special memory cached regions prevent the
 * kernel from ever dropping in a TLB entry and therefore prevent the
 * processor from ever speculating a cache line from this page.
 */
17a3b0504
|
26 27 28 29 30 31 32 33 |
#include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/miscdevice.h> #include <linux/spinlock.h> #include <linux/mm.h> |
4e950f6f0
|
34 |
#include <linux/fs.h> |
17a3b0504
|
35 36 37 38 |
#include <linux/vmalloc.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/numa.h> |
f7d88d24c
|
39 |
#include <linux/refcount.h> |
17a3b0504
|
40 |
#include <asm/page.h> |
60063497a
|
41 |
#include <linux/atomic.h> |
17a3b0504
|
42 43 |
#include <asm/tlbflush.h> #include <asm/uncached.h> |
17a3b0504
|
44 |
|
17a3b0504
|
45 46 47 48 49 50 51 52 |
/* Identifier strings used in the driver's log messages (see mspec_init). */
#define CACHED_ID	"Cached,"
#define UNCACHED_ID	"Uncached"
#define REVISION	"4.0"
#define MSPEC_BASENAME	"mspec"

/*
 * Page types allocated by the device.
 */
enum mspec_page_type {
	MSPEC_CACHED = 2,	/* cached on this partition, uncached elsewhere */
	MSPEC_UNCACHED		/* mapped with pgprot_noncached() */
};
17a3b0504
|
57 58 59 60 |
/*
 * One of these structures is allocated when an mspec region is mmaped. The
 * structure is pointed to by the vma->vm_private_data field in the vma struct.
 * This structure is used to record the addresses of the mspec pages.
 * This structure is shared by all vma's that are split off from the
 * original vma when split_vma()'s are done.
 *
 * The refcnt is incremented atomically because mm->mmap_lock does not
 * protect in fork case where multiple tasks share the vma_data.
 */
struct vma_data {
	refcount_t refcnt;	/* Number of vmas sharing the data. */
	spinlock_t lock;	/* Serialize access to this structure. */
	int count;		/* Number of pages allocated. */
	enum mspec_page_type type; /* Type of pages allocated. */
	unsigned long vm_start;	/* Original (unsplit) base. */
	unsigned long vm_end;	/* Original (unsplit) end. */
	unsigned long maddr[];	/* Array of MSPEC addresses; one slot per
				 * page of the original mapping, sized in
				 * mspec_mmap(). Zero means "not allocated". */
};
17a3b0504
|
76 77 78 79 |
/*
 * mspec_open
 *
 * Called when a device mapping is created by a means other than mmap
 * (via fork, munmap, etc.).  Increments the reference count on the
 * underlying mspec data so it is not freed prematurely.
 */
static void
mspec_open(struct vm_area_struct *vma)
{
	struct vma_data *vdata;

	vdata = vma->vm_private_data;
	refcount_inc(&vdata->refcnt);
}

/*
 * mspec_close
 *
 * Called when unmapping a device mapping. Frees all mspec pages
 * belonging to all the vma's sharing this vma_data structure.
 */
static void
mspec_close(struct vm_area_struct *vma)
{
	struct vma_data *vdata;
	int index, last_index;
	unsigned long my_page;

	vdata = vma->vm_private_data;

	/* Only the last vma sharing this vma_data tears it down. */
	if (!refcount_dec_and_test(&vdata->refcnt))
		return;

	/*
	 * Walk every page slot of the original (unsplit) mapping; a zero
	 * slot means no page was ever faulted in at that offset.
	 */
	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
	for (index = 0; index < last_index; index++) {
		if (vdata->maddr[index] == 0)
			continue;
		/*
		 * Clear the page before sticking it back
		 * into the pool.
		 */
		my_page = vdata->maddr[index];
		vdata->maddr[index] = 0;
		memset((char *)my_page, 0, PAGE_SIZE);
		uncached_free_page(my_page, 1);
	}

	/* vdata was allocated with kvzalloc() in mspec_mmap(). */
	kvfree(vdata);
}
17a3b0504
|
126 |
/*
 * mspec_fault
 *
 * Creates a mspec page and maps it to user space.
 */
static vm_fault_t
mspec_fault(struct vm_fault *vmf)
{
	unsigned long paddr, maddr;
	unsigned long pfn;
	pgoff_t index = vmf->pgoff;
	struct vma_data *vdata = vmf->vma->vm_private_data;

	maddr = (volatile unsigned long) vdata->maddr[index];
	if (maddr == 0) {
		/*
		 * Allocate outside the spinlock, then recheck the slot
		 * under the lock: if another task raced us and filled it
		 * first, free our page and use the winner's.
		 */
		maddr = uncached_alloc_page(numa_node_id(), 1);
		if (maddr == 0)
			return VM_FAULT_OOM;

		spin_lock(&vdata->lock);
		if (vdata->maddr[index] == 0) {
			vdata->count++;
			vdata->maddr[index] = maddr;
		} else {
			uncached_free_page(maddr, 1);
			maddr = vdata->maddr[index];
		}
		spin_unlock(&vdata->lock);
	}

	/* Strip the ia64 uncached identity-mapping offset to get the
	 * physical address, then insert the raw pfn (VM_PFNMAP vma). */
	paddr = maddr & ~__IA64_UNCACHED_OFFSET;
	pfn = paddr >> PAGE_SHIFT;

	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
f0f37e2f7
|
159 |
static const struct vm_operations_struct mspec_vm_ops = {
	.open = mspec_open,
	.close = mspec_close,
	.fault = mspec_fault,
};

/*
 * mspec_mmap
 *
 * Called when mmapping the device.  Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 *
 * Returns 0 on success; -EINVAL for a non-zero offset or a non-shared
 * mapping, -EPERM for a non-writable mapping, -ENOMEM on allocation failure.
 */
static int
mspec_mmap(struct file *file, struct vm_area_struct *vma,
					enum mspec_page_type type)
{
	struct vma_data *vdata;
	int pages, vdata_size;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_WRITE) == 0)
		return -EPERM;

	/* One maddr[] slot per page of the mapping. */
	pages = vma_pages(vma);
	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
	vdata = kvzalloc(vdata_size, GFP_KERNEL);
	if (!vdata)
		return -ENOMEM;

	vdata->vm_start = vma->vm_start;
	vdata->vm_end = vma->vm_end;
	vdata->type = type;
	spin_lock_init(&vdata->lock);
	refcount_set(&vdata->refcnt, 1);
	vma->vm_private_data = vdata;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	if (vdata->type == MSPEC_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &mspec_vm_ops;

	return 0;
}

/* mmap handler for /dev/mspec_cached. */
static int
cached_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_CACHED);
}

/* mmap handler for /dev/mspec_uncached. */
static int
uncached_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_UNCACHED);
}
2b8693c06
|
218 |
static const struct file_operations cached_fops = { |
17a3b0504
|
219 |
.owner = THIS_MODULE, |
6038f373a
|
220 221 |
.mmap = cached_mmap, .llseek = noop_llseek, |
17a3b0504
|
222 223 224 225 226 227 228 |
}; static struct miscdevice cached_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "mspec_cached", .fops = &cached_fops }; |
2b8693c06
|
229 |
static const struct file_operations uncached_fops = { |
17a3b0504
|
230 |
.owner = THIS_MODULE, |
6038f373a
|
231 232 |
.mmap = uncached_mmap, .llseek = noop_llseek, |
17a3b0504
|
233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 |
}; static struct miscdevice uncached_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "mspec_uncached", .fops = &uncached_fops }; /* * mspec_init * * Called at boot time to initialize the mspec facility. */ static int __init mspec_init(void) { int ret; |
17a3b0504
|
250 |
|
17a3b0504
|
251 252 253 254 255 |
ret = misc_register(&cached_miscdev); if (ret) { printk(KERN_ERR "%s: failed to register device %i ", CACHED_ID, ret); |
0fef2532d
|
256 |
return ret; |
17a3b0504
|
257 258 259 260 261 262 263 |
} ret = misc_register(&uncached_miscdev); if (ret) { printk(KERN_ERR "%s: failed to register device %i ", UNCACHED_ID, ret); misc_deregister(&cached_miscdev); |
0fef2532d
|
264 |
return ret; |
17a3b0504
|
265 |
} |
0fef2532d
|
266 267 268 |
printk(KERN_INFO "%s %s initialized devices: %s %s ", MSPEC_BASENAME, REVISION, CACHED_ID, UNCACHED_ID); |
17a3b0504
|
269 270 |
return 0; |
17a3b0504
|
271 272 273 274 275 |
} static void __exit mspec_exit(void) { |
17a3b0504
|
276 277 |
misc_deregister(&uncached_miscdev); misc_deregister(&cached_miscdev); |
17a3b0504
|
278 279 280 281 282 283 284 285 |
} module_init(mspec_init); module_exit(mspec_exit); MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>"); MODULE_DESCRIPTION("Driver for SGI SN special memory operations"); MODULE_LICENSE("GPL"); |