arch/x86/kernel/sys_x86_64.c

  #include <linux/errno.h>
  #include <linux/sched.h>
  #include <linux/syscalls.h>
  #include <linux/mm.h>
  #include <linux/fs.h>
  #include <linux/smp.h>
  #include <linux/sem.h>
  #include <linux/msg.h>
  #include <linux/shm.h>
  #include <linux/stat.h>
  #include <linux/mman.h>
  #include <linux/file.h>
  #include <linux/utsname.h>
  #include <linux/personality.h>
  #include <linux/random.h>
  #include <linux/uaccess.h>
  #include <linux/elf.h>

  #include <asm/ia32.h>
  #include <asm/syscalls.h>

  /*
   * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
   *
   * @flags denotes the allocation direction - bottomup or topdown -
   * or vDSO; see call sites below.
   */
  unsigned long align_addr(unsigned long addr, struct file *filp,
  			 enum align_flags flags)
  {
  	unsigned long tmp_addr;
  
  	/* handle 32- and 64-bit case with a single conditional */
  	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
  		return addr;
  
  	if (!(current->flags & PF_RANDOMIZE))
  		return addr;
  
  	if (!((flags & ALIGN_VDSO) || filp))
  		return addr;
  
  	tmp_addr = addr;
  
  	/*
  	 * We need an address which is <= the original one only
  	 * when in topdown direction.
  	 */
  	if (!(flags & ALIGN_TOPDOWN))
  		tmp_addr += va_align.mask;
  
  	tmp_addr &= ~va_align.mask;
  
  	return tmp_addr;
  }
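
The net effect of the masking above is an ordinary round-up (bottom-up case) or round-down (top-down case) to a va_align.mask boundary. A minimal userspace sketch of that arithmetic, using an assumed mask value purely for illustration (the real value is configured per-CPU elsewhere in the kernel):

	/* illustration only, not kernel code */
	#include <stdio.h>

	int main(void)
	{
		unsigned long mask = 0x7fffUL;          /* assumed: 32 KiB granularity - 1 */
		unsigned long addr = 0x7f1234567000UL;  /* page-aligned candidate address  */

		unsigned long bottomup = (addr + mask) & ~mask; /* rounds up   */
		unsigned long topdown  = addr & ~mask;          /* rounds down */

		printf("candidate %#lx -> bottomup %#lx, topdown %#lx\n",
		       addr, bottomup, topdown);
		return 0;
	}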
  
  static int __init control_va_addr_alignment(char *str)
  {
  	/* guard against enabling this on other CPU families */
  	if (va_align.flags < 0)
  		return 1;
  
  	if (*str == 0)
  		return 1;
  
  	if (*str == '=')
  		str++;
  
  	if (!strcmp(str, "32"))
  		va_align.flags = ALIGN_VA_32;
  	else if (!strcmp(str, "64"))
  		va_align.flags = ALIGN_VA_64;
  	else if (!strcmp(str, "off"))
  		va_align.flags = 0;
  	else if (!strcmp(str, "on"))
  		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
  	else
  		return 0;
  
  	return 1;
  }
  __setup("align_va_addr", control_va_addr_alignment);
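
Registered with __setup(), this becomes a kernel boot command-line parameter. Going by the parser above (which skips a leading '='), the accepted forms are:

	align_va_addr=on	# align both 32-bit and 64-bit mmap()s
	align_va_addr=64	# align 64-bit mmap()s only
	align_va_addr=32	# align 32-bit mmap()s only
	align_va_addr=off	# disable the extra alignment
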
  SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
  		unsigned long, prot, unsigned long, flags,
  		unsigned long, fd, unsigned long, off)
  {
  	long error;
  	error = -EINVAL;
  	if (off & ~PAGE_MASK)
  		goto out;
  	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
  out:
  	return error;
  }
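
The syscall only validates that the byte offset is page-aligned and then hands the page-based offset to the generic sys_mmap_pgoff(). A small userspace illustration of the visible effect (the file path is an assumption for the example):

	/* illustration only, not kernel code */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		int fd = open("/etc/hostname", O_RDONLY);   /* assumed readable file */
		if (fd < 0)
			return 1;

		/* page-aligned offset: converted to pgoff via off >> PAGE_SHIFT */
		void *ok = mmap(NULL, page, PROT_READ, MAP_PRIVATE, fd, 0);

		/* unaligned offset: rejected by the off & ~PAGE_MASK check */
		void *bad = mmap(NULL, page, PROT_READ, MAP_PRIVATE, fd, 1);
		if (bad == MAP_FAILED)
			perror("unaligned offset");         /* expected: EINVAL */

		printf("aligned mapping at %p\n", ok);
		close(fd);
		return 0;
	}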
  
  static void find_start_end(unsigned long flags, unsigned long *begin,
  			   unsigned long *end)
  {
  	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
  		unsigned long new_begin;
  		/* This is usually used to map code in the small code
  		   model, so it needs to be in the first 31 bits.  Limit
  		   it to that.  This means we need to move the
  		   unmapped base down for this case.  This can give
  		   conflicts with the heap, but we assume that glibc
  		   malloc knows how to fall back to mmap.  Give it 1GB
  		   of playground for now. -AK */
  		*begin = 0x40000000;
  		*end = 0x80000000;
  		if (current->flags & PF_RANDOMIZE) {
  			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
  			if (new_begin)
  				*begin = new_begin;
  		}
  	} else {
  		*begin = TASK_UNMAPPED_BASE;
  		*end = TASK_SIZE;
  	}
  }
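
For a 64-bit task passing MAP_32BIT, the search window is therefore [0x40000000, 0x80000000), with the base shifted by up to 32 MiB (0x02000000) when randomization is enabled. A userspace illustration (the exact address depends on randomization):

	/* illustration only, not kernel code */
	#define _GNU_SOURCE                     /* for MAP_32BIT */
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		/* expected somewhere in 0x40000000..0x7fffffff per the window above */
		printf("MAP_32BIT mapping at %p\n", p);
		munmap(p, 4096);
		return 0;
	}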
  
  unsigned long
  arch_get_unmapped_area(struct file *filp, unsigned long addr,
  		unsigned long len, unsigned long pgoff, unsigned long flags)
  {
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
  	unsigned long start_addr;
  	unsigned long begin, end;

  	if (flags & MAP_FIXED)
  		return addr;
  	find_start_end(flags, &begin, &end);
  
  	if (len > end)
  		return -ENOMEM;
  
  	if (addr) {
  		addr = PAGE_ALIGN(addr);
  		vma = find_vma(mm, addr);
  		if (end - len >= addr &&
  		    (!vma || addr + len <= vma->vm_start))
  			return addr;
  	}
  	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
  	    && len <= mm->cached_hole_size) {
  		mm->cached_hole_size = 0;
  		mm->free_area_cache = begin;
  	}
  	addr = mm->free_area_cache;
  	if (addr < begin)
  		addr = begin;
  	start_addr = addr;
  
  full_search:
  
  	addr = align_addr(addr, filp, 0);
  	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (end - len < addr) {
  			/*
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
  			if (start_addr != begin) {
  				start_addr = addr = begin;
  				mm->cached_hole_size = 0;
  				goto full_search;
  			}
  			return -ENOMEM;
  		}
  		if (!vma || addr + len <= vma->vm_start) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
  			mm->free_area_cache = addr + len;
  			return addr;
  		}
  		if (addr + mm->cached_hole_size < vma->vm_start)
  			mm->cached_hole_size = vma->vm_start - addr;

  		addr = vma->vm_end;
  		addr = align_addr(addr, filp, 0);
  	}
  }
  
  unsigned long
  arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			  const unsigned long len, const unsigned long pgoff,
  			  const unsigned long flags)
  {
  	struct vm_area_struct *vma;
  	struct mm_struct *mm = current->mm;
  	unsigned long addr = addr0;
  
  	/* requested length too big for entire address space */
  	if (len > TASK_SIZE)
  		return -ENOMEM;
  
  	if (flags & MAP_FIXED)
  		return addr;
  
  	/* for MAP_32BIT mappings we force the legacy mmap base */
  	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
  		goto bottomup;
  
  	/* requesting a specific address */
  	if (addr) {
  		addr = PAGE_ALIGN(addr);
  		vma = find_vma(mm, addr);
  		if (TASK_SIZE - len >= addr &&
  				(!vma || addr + len <= vma->vm_start))
  			return addr;
  	}
  
  	/* check if free_area_cache is useful for us */
  	if (len <= mm->cached_hole_size) {
  		mm->cached_hole_size = 0;
  		mm->free_area_cache = mm->mmap_base;
  	}
  
  	/* either no address requested or can't fit in requested address hole */
  	addr = mm->free_area_cache;
  
  	/* make sure it can fit in the remaining address space */
  	if (addr > len) {
  		unsigned long tmp_addr = align_addr(addr - len, filp,
  						    ALIGN_TOPDOWN);
  
  		vma = find_vma(mm, tmp_addr);
  		if (!vma || tmp_addr + len <= vma->vm_start)
  			/* remember the address as a hint for next time */
  			return mm->free_area_cache = tmp_addr;
  	}
  
  	if (mm->mmap_base < len)
  		goto bottomup;
  
  	addr = mm->mmap_base-len;
  
  	do {
  		addr = align_addr(addr, filp, ALIGN_TOPDOWN);
  		/*
  		 * Lookup failure means no vma is above this address,
  		 * else if new region fits below vma->vm_start,
  		 * return with success:
  		 */
  		vma = find_vma(mm, addr);
  		if (!vma || addr+len <= vma->vm_start)
  			/* remember the address as a hint for next time */
  			return mm->free_area_cache = addr;

  		/* remember the largest hole we saw so far */
  		if (addr + mm->cached_hole_size < vma->vm_start)
  			mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
  		addr = vma->vm_start-len;
  	} while (len < vma->vm_start);
  
  bottomup:
  	/*
  	 * A failed mmap() very likely causes application failure,
  	 * so fall back to the bottom-up function here. This scenario
  	 * can happen with large stack limits and large mmap()
  	 * allocations.
  	 */
  	mm->cached_hole_size = ~0UL;
  	mm->free_area_cache = TASK_UNMAPPED_BASE;
  	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
  	/*
  	 * Restore the topdown base:
  	 */
  	mm->free_area_cache = mm->mmap_base;
  	mm->cached_hole_size = ~0UL;
  
  	return addr;
  }
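
With this top-down allocator, back-to-back small anonymous mappings are normally handed out at decreasing addresses just below mm->mmap_base. A userspace illustration; exact addresses depend on ASLR and on which mmap layout the process ended up with, so treat the output as indicative only:

	/* illustration only, not kernel code */
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		void *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		void *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		printf("first  %p\nsecond %p\n", a, b); /* second usually just below first */
		return 0;
	}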