Commit fb7ba2114bcd8bb51640c20bc68f89164b29b9ed
Exists in master and in 39 other branches
Merge remote branch 'korg/drm-fixes' into drm-vmware-next
necessary for some of the vmware fixes to be pushed in.

Conflicts:
	drivers/gpu/drm/drm_gem.c
	drivers/gpu/drm/i915/intel_fb.c
	include/drm/drmP.h
Showing 81 changed files
- MAINTAINERS
- arch/alpha/kernel/entry.S
- arch/alpha/kernel/process.c
- arch/m32r/include/asm/signal.h
- arch/m32r/include/asm/unistd.h
- arch/m32r/kernel/entry.S
- arch/m32r/kernel/ptrace.c
- arch/m32r/kernel/signal.c
- arch/tile/kernel/intvec_32.S
- arch/x86/include/asm/cpufeature.h
- arch/x86/kernel/cpu/scattered.c
- block/blk-merge.c
- drivers/gpu/drm/drm_buffer.c
- drivers/gpu/drm/drm_gem.c
- drivers/gpu/drm/drm_info.c
- drivers/gpu/drm/drm_vm.c
- drivers/gpu/drm/i810/i810_dma.c
- drivers/gpu/drm/i830/i830_dma.c
- drivers/gpu/drm/i915/i915_gem.c
- drivers/gpu/drm/i915/intel_fb.c
- drivers/gpu/drm/nouveau/nouveau_connector.c
- drivers/gpu/drm/nouveau/nouveau_fbcon.c
- drivers/gpu/drm/nouveau/nouveau_gem.c
- drivers/gpu/drm/nouveau/nouveau_notifier.c
- drivers/gpu/drm/radeon/atombios.h
- drivers/gpu/drm/radeon/r600.c
- drivers/gpu/drm/radeon/radeon_atombios.c
- drivers/gpu/drm/radeon/radeon_display.c
- drivers/gpu/drm/radeon/radeon_fb.c
- drivers/gpu/drm/radeon/radeon_gem.c
- drivers/gpu/drm/radeon/radeon_kms.c
- drivers/gpu/drm/ttm/ttm_bo_util.c
- drivers/gpu/drm/ttm/ttm_page_alloc.c
- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
- drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
- drivers/gpu/vga/vgaarb.c
- drivers/hwmon/Kconfig
- drivers/hwmon/coretemp.c
- drivers/hwmon/lis3lv02d.c
- drivers/hwmon/pkgtemp.c
- drivers/staging/ti-st/st.h
- drivers/staging/ti-st/st_core.c
- drivers/staging/ti-st/st_core.h
- drivers/staging/ti-st/st_kim.c
- drivers/usb/core/Kconfig
- drivers/usb/core/file.c
- drivers/usb/core/message.c
- drivers/usb/musb/cppi_dma.c
- drivers/usb/musb/musb_gadget.c
- drivers/usb/musb/musb_gadget.h
- drivers/usb/musb/musb_gadget_ep0.c
- drivers/usb/musb/musb_host.c
- fs/ocfs2/acl.c
- fs/ocfs2/cluster/tcp.c
- fs/ocfs2/dir.c
- fs/ocfs2/dlm/dlmcommon.h
- fs/ocfs2/dlm/dlmdebug.c
- fs/ocfs2/dlm/dlmdomain.c
- fs/ocfs2/dlm/dlmmaster.c
- fs/ocfs2/dlmglue.h
- fs/ocfs2/ocfs2_fs.h
- fs/ocfs2/ocfs2_ioctl.h
- fs/ocfs2/refcounttree.c
- fs/ocfs2/reservations.c
- fs/ocfs2/suballoc.c
- fs/ocfs2/xattr.c
- include/drm/drmP.h
- include/drm/drm_pciids.h
- mm/fremap.c
- sound/pci/hda/patch_analog.c
- sound/pci/hda/patch_realtek.c
- sound/pci/oxygen/oxygen.c
- sound/pci/rme9652/hdsp.c
- sound/pci/rme9652/hdspm.c
- sound/soc/sh/migor.c
- sound/soc/soc-cache.c
MAINTAINERS
... | ... | @@ -2677,6 +2677,8 @@ |
2677 | 2677 | L: lm-sensors@lm-sensors.org |
2678 | 2678 | W: http://www.lm-sensors.org/ |
2679 | 2679 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ |
2680 | +T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/ | |
2681 | +T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git | |
2680 | 2682 | S: Maintained |
2681 | 2683 | F: Documentation/hwmon/ |
2682 | 2684 | F: drivers/hwmon/ |
arch/alpha/kernel/entry.S
... | ... | @@ -73,8 +73,6 @@ |
73 | 73 | ldq $20, HAE_REG($19); \ |
74 | 74 | stq $21, HAE_CACHE($19); \ |
75 | 75 | stq $21, 0($20); \ |
76 | - ldq $0, 0($sp); \ | |
77 | - ldq $1, 8($sp); \ | |
78 | 76 | 99:; \ |
79 | 77 | ldq $19, 72($sp); \ |
80 | 78 | ldq $20, 80($sp); \ |
... | ... | @@ -316,7 +314,7 @@ |
316 | 314 | cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ |
317 | 315 | ldq $0, SP_OFF($sp) |
318 | 316 | and $0, 8, $0 |
319 | - beq $0, restore_all | |
317 | + beq $0, ret_to_kernel | |
320 | 318 | ret_to_user: |
321 | 319 | /* Make sure need_resched and sigpending don't change between |
322 | 320 | sampling and the rti. */ |
... | ... | @@ -329,6 +327,11 @@ |
329 | 327 | RESTORE_ALL |
330 | 328 | call_pal PAL_rti |
331 | 329 | |
330 | +ret_to_kernel: | |
331 | + lda $16, 7 | |
332 | + call_pal PAL_swpipl | |
333 | + br restore_all | |
334 | + | |
332 | 335 | .align 3 |
333 | 336 | $syscall_error: |
334 | 337 | /* |
... | ... | @@ -657,7 +660,7 @@ |
657 | 660 | /* We don't actually care for a3 success widgetry in the kernel. |
658 | 661 | Not for positive errno values. */ |
659 | 662 | stq $0, 0($sp) /* $0 */ |
660 | - br restore_all | |
663 | + br ret_to_kernel | |
661 | 664 | .end kernel_thread |
662 | 665 | |
663 | 666 | /* |
arch/alpha/kernel/process.c
... | ... | @@ -356,7 +356,7 @@ |
356 | 356 | dest[27] = pt->r27; |
357 | 357 | dest[28] = pt->r28; |
358 | 358 | dest[29] = pt->gp; |
359 | - dest[30] = rdusp(); | |
359 | + dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp; | |
360 | 360 | dest[31] = pt->pc; |
361 | 361 | |
362 | 362 | /* Once upon a time this was the PS value. Which is stupid |
arch/m32r/include/asm/signal.h
arch/m32r/include/asm/unistd.h
arch/m32r/kernel/entry.S
... | ... | @@ -235,10 +235,9 @@ |
235 | 235 | work_notifysig: ; deal with pending signals and |
236 | 236 | ; notify-resume requests |
237 | 237 | mv r0, sp ; arg1 : struct pt_regs *regs |
238 | - ldi r1, #0 ; arg2 : sigset_t *oldset | |
239 | - mv r2, r9 ; arg3 : __u32 thread_info_flags | |
238 | + mv r1, r9 ; arg2 : __u32 thread_info_flags | |
240 | 239 | bl do_notify_resume |
241 | - bra restore_all | |
240 | + bra resume_userspace | |
242 | 241 | |
243 | 242 | ; perform syscall exit tracing |
244 | 243 | ALIGN |
arch/m32r/kernel/ptrace.c
... | ... | @@ -592,16 +592,17 @@ |
592 | 592 | |
593 | 593 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) |
594 | 594 | != sizeof(insn)) |
595 | - break; | |
595 | + return -EIO; | |
596 | 596 | |
597 | 597 | compute_next_pc(insn, pc, &next_pc, child); |
598 | 598 | if (next_pc & 0x80000000) |
599 | - break; | |
599 | + return -EIO; | |
600 | 600 | |
601 | 601 | if (embed_debug_trap(child, next_pc)) |
602 | - break; | |
602 | + return -EIO; | |
603 | 603 | |
604 | 604 | invalidate_cache(); |
605 | + return 0; | |
605 | 606 | } |
606 | 607 | |
607 | 608 | void user_disable_single_step(struct task_struct *child) |
arch/m32r/kernel/signal.c
... | ... | @@ -28,38 +28,7 @@ |
28 | 28 | |
29 | 29 | #define DEBUG_SIG 0 |
30 | 30 | |
31 | -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | |
32 | - | |
33 | -int do_signal(struct pt_regs *, sigset_t *); | |
34 | - | |
35 | 31 | asmlinkage int |
36 | -sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, | |
37 | - unsigned long r2, unsigned long r3, unsigned long r4, | |
38 | - unsigned long r5, unsigned long r6, struct pt_regs *regs) | |
39 | -{ | |
40 | - sigset_t newset; | |
41 | - | |
42 | - /* XXX: Don't preclude handling different sized sigset_t's. */ | |
43 | - if (sigsetsize != sizeof(sigset_t)) | |
44 | - return -EINVAL; | |
45 | - | |
46 | - if (copy_from_user(&newset, unewset, sizeof(newset))) | |
47 | - return -EFAULT; | |
48 | - sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
49 | - | |
50 | - spin_lock_irq(¤t->sighand->siglock); | |
51 | - current->saved_sigmask = current->blocked; | |
52 | - current->blocked = newset; | |
53 | - recalc_sigpending(); | |
54 | - spin_unlock_irq(¤t->sighand->siglock); | |
55 | - | |
56 | - current->state = TASK_INTERRUPTIBLE; | |
57 | - schedule(); | |
58 | - set_thread_flag(TIF_RESTORE_SIGMASK); | |
59 | - return -ERESTARTNOHAND; | |
60 | -} | |
61 | - | |
62 | -asmlinkage int | |
63 | 32 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
64 | 33 | unsigned long r2, unsigned long r3, unsigned long r4, |
65 | 34 | unsigned long r5, unsigned long r6, struct pt_regs *regs) |
... | ... | @@ -218,7 +187,7 @@ |
218 | 187 | return (void __user *)((sp - frame_size) & -8ul); |
219 | 188 | } |
220 | 189 | |
221 | -static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |
190 | +static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |
222 | 191 | sigset_t *set, struct pt_regs *regs) |
223 | 192 | { |
224 | 193 | struct rt_sigframe __user *frame; |
225 | 194 | |
226 | 195 | |
227 | 196 | |
228 | 197 | |
... | ... | @@ -275,22 +244,34 @@ |
275 | 244 | current->comm, current->pid, frame, regs->pc); |
276 | 245 | #endif |
277 | 246 | |
278 | - return; | |
247 | + return 0; | |
279 | 248 | |
280 | 249 | give_sigsegv: |
281 | 250 | force_sigsegv(sig, current); |
251 | + return -EFAULT; | |
282 | 252 | } |
283 | 253 | |
254 | +static int prev_insn(struct pt_regs *regs) | |
255 | +{ | |
256 | + u16 inst; | |
257 | + if (get_user(&inst, (u16 __user *)(regs->bpc - 2))) | |
258 | + return -EFAULT; | |
259 | + if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | |
260 | + regs->bpc -= 2; | |
261 | + else | |
262 | + regs->bpc -= 4; | |
263 | + regs->syscall_nr = -1; | |
264 | + return 0; | |
265 | +} | |
266 | + | |
284 | 267 | /* |
285 | 268 | * OK, we're invoking a handler |
286 | 269 | */ |
287 | 270 | |
288 | -static void | |
271 | +static int | |
289 | 272 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, |
290 | 273 | sigset_t *oldset, struct pt_regs *regs) |
291 | 274 | { |
292 | - unsigned short inst; | |
293 | - | |
294 | 275 | /* Are we from a system call? */ |
295 | 276 | if (regs->syscall_nr >= 0) { |
296 | 277 | /* If so, check system call restarting.. */ |
297 | 278 | |
... | ... | @@ -308,16 +289,14 @@ |
308 | 289 | /* fallthrough */ |
309 | 290 | case -ERESTARTNOINTR: |
310 | 291 | regs->r0 = regs->orig_r0; |
311 | - inst = *(unsigned short *)(regs->bpc - 2); | |
312 | - if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | |
313 | - regs->bpc -= 2; | |
314 | - else | |
315 | - regs->bpc -= 4; | |
292 | + if (prev_insn(regs) < 0) | |
293 | + return -EFAULT; | |
316 | 294 | } |
317 | 295 | } |
318 | 296 | |
319 | 297 | /* Set up the stack frame */ |
320 | - setup_rt_frame(sig, ka, info, oldset, regs); | |
298 | + if (setup_rt_frame(sig, ka, info, oldset, regs)) | |
299 | + return -EFAULT; | |
321 | 300 | |
322 | 301 | spin_lock_irq(¤t->sighand->siglock); |
323 | 302 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); |
... | ... | @@ -325,6 +304,7 @@ |
325 | 304 | sigaddset(¤t->blocked,sig); |
326 | 305 | recalc_sigpending(); |
327 | 306 | spin_unlock_irq(¤t->sighand->siglock); |
307 | + return 0; | |
328 | 308 | } |
329 | 309 | |
330 | 310 | /* |
331 | 311 | |
... | ... | @@ -332,12 +312,12 @@ |
332 | 312 | * want to handle. Thus you cannot kill init even with a SIGKILL even by |
333 | 313 | * mistake. |
334 | 314 | */ |
335 | -int do_signal(struct pt_regs *regs, sigset_t *oldset) | |
315 | +static void do_signal(struct pt_regs *regs) | |
336 | 316 | { |
337 | 317 | siginfo_t info; |
338 | 318 | int signr; |
339 | 319 | struct k_sigaction ka; |
340 | - unsigned short inst; | |
320 | + sigset_t *oldset; | |
341 | 321 | |
342 | 322 | /* |
343 | 323 | * We want the common case to go fast, which |
344 | 324 | |
... | ... | @@ -346,12 +326,14 @@ |
346 | 326 | * if so. |
347 | 327 | */ |
348 | 328 | if (!user_mode(regs)) |
349 | - return 1; | |
329 | + return; | |
350 | 330 | |
351 | 331 | if (try_to_freeze()) |
352 | 332 | goto no_signal; |
353 | 333 | |
354 | - if (!oldset) | |
334 | + if (test_thread_flag(TIF_RESTORE_SIGMASK)) | |
335 | + oldset = ¤t->saved_sigmask; | |
336 | + else | |
355 | 337 | oldset = ¤t->blocked; |
356 | 338 | |
357 | 339 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
... | ... | @@ -363,8 +345,10 @@ |
363 | 345 | */ |
364 | 346 | |
365 | 347 | /* Whee! Actually deliver the signal. */ |
366 | - handle_signal(signr, &ka, &info, oldset, regs); | |
367 | - return 1; | |
348 | + if (handle_signal(signr, &ka, &info, oldset, regs) == 0) | |
349 | + clear_thread_flag(TIF_RESTORE_SIGMASK); | |
350 | + | |
351 | + return; | |
368 | 352 | } |
369 | 353 | |
370 | 354 | no_signal: |
371 | 355 | |
372 | 356 | |
373 | 357 | |
... | ... | @@ -375,31 +359,24 @@ |
375 | 359 | regs->r0 == -ERESTARTSYS || |
376 | 360 | regs->r0 == -ERESTARTNOINTR) { |
377 | 361 | regs->r0 = regs->orig_r0; |
378 | - inst = *(unsigned short *)(regs->bpc - 2); | |
379 | - if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | |
380 | - regs->bpc -= 2; | |
381 | - else | |
382 | - regs->bpc -= 4; | |
383 | - } | |
384 | - if (regs->r0 == -ERESTART_RESTARTBLOCK){ | |
362 | + prev_insn(regs); | |
363 | + } else if (regs->r0 == -ERESTART_RESTARTBLOCK){ | |
385 | 364 | regs->r0 = regs->orig_r0; |
386 | 365 | regs->r7 = __NR_restart_syscall; |
387 | - inst = *(unsigned short *)(regs->bpc - 2); | |
388 | - if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | |
389 | - regs->bpc -= 2; | |
390 | - else | |
391 | - regs->bpc -= 4; | |
366 | + prev_insn(regs); | |
392 | 367 | } |
393 | 368 | } |
394 | - return 0; | |
369 | + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | |
370 | + clear_thread_flag(TIF_RESTORE_SIGMASK); | |
371 | + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | |
372 | + } | |
395 | 373 | } |
396 | 374 | |
397 | 375 | /* |
398 | 376 | * notification of userspace execution resumption |
399 | 377 | * - triggered by current->work.notify_resume |
400 | 378 | */ |
401 | -void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | |
402 | - __u32 thread_info_flags) | |
379 | +void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags) | |
403 | 380 | { |
404 | 381 | /* Pending single-step? */ |
405 | 382 | if (thread_info_flags & _TIF_SINGLESTEP) |
... | ... | @@ -407,7 +384,7 @@ |
407 | 384 | |
408 | 385 | /* deal with pending signal delivery */ |
409 | 386 | if (thread_info_flags & _TIF_SIGPENDING) |
410 | - do_signal(regs,oldset); | |
387 | + do_signal(regs); | |
411 | 388 | |
412 | 389 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
413 | 390 | clear_thread_flag(TIF_NOTIFY_RESUME); |
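The signal rework above follows the common TIF_RESTORE_SIGMASK protocol: pick the saved mask as the "old" set when the flag is set, and clear the flag only after a handler frame was built successfully, so a failed setup_rt_frame() leaves the saved mask intact for the retry. A condensed sketch, using only names from the hunk:

    /* choose which mask the handler frame should record */
    if (test_thread_flag(TIF_RESTORE_SIGMASK))
            oldset = &current->saved_sigmask;
    else
            oldset = &current->blocked;

    /* drop the flag only once delivery actually succeeded */
    if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
            clear_thread_flag(TIF_RESTORE_SIGMASK);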
arch/tile/kernel/intvec_32.S
... | ... | @@ -1506,13 +1506,6 @@ |
1506 | 1506 | } |
1507 | 1507 | STD_ENDPROC(handle_ill) |
1508 | 1508 | |
1509 | - .pushsection .rodata, "a" | |
1510 | - .align 8 | |
1511 | -bpt_code: | |
1512 | - bpt | |
1513 | - ENDPROC(bpt_code) | |
1514 | - .popsection | |
1515 | - | |
1516 | 1509 | /* Various stub interrupt handlers and syscall handlers */ |
1517 | 1510 | |
1518 | 1511 | STD_ENTRY_LOCAL(_kernel_double_fault) |
arch/x86/include/asm/cpufeature.h
... | ... | @@ -168,6 +168,7 @@ |
168 | 168 | #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ |
169 | 169 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ |
170 | 170 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ |
171 | +#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */ | |
171 | 172 | |
172 | 173 | /* Virtualization flags: Linux defined, word 8 */ |
173 | 174 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ |
arch/x86/kernel/cpu/scattered.c
... | ... | @@ -31,6 +31,7 @@ |
31 | 31 | const struct cpuid_bit *cb; |
32 | 32 | |
33 | 33 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { |
34 | + { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 }, | |
34 | 35 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, |
35 | 36 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, |
36 | 37 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, |
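With the table entry above, CPUID leaf 0x00000006 EAX bit 0 is mapped to the synthetic X86_FEATURE_DTS bit at boot, so drivers no longer have to issue cpuid() themselves. A usage sketch — this is exactly the test the coretemp hunk later in this commit adopts:

    struct cpuinfo_x86 *c = &cpu_data(cpu);

    if (!cpu_has(c, X86_FEATURE_DTS))
            return 0;       /* CPU has no digital thermal sensor */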
block/blk-merge.c
... | ... | @@ -362,6 +362,18 @@ |
362 | 362 | return 0; |
363 | 363 | |
364 | 364 | /* |
365 | + * Don't merge file system requests and discard requests | |
366 | + */ | |
367 | + if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD)) | |
368 | + return 0; | |
369 | + | |
370 | + /* | |
371 | + * Don't merge discard requests and secure discard requests | |
372 | + */ | |
373 | + if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE)) | |
374 | + return 0; | |
375 | + | |
376 | + /* | |
365 | 377 | * not contiguous |
366 | 378 | */ |
367 | 379 | if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) |
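The two guards above keep requests with different payload semantics apart: a REQ_DISCARD request carries no file-system data, and a REQ_SECURE discard must complete as a secure erase, so merging either with a plain request would change the meaning of the combined request. A minimal sketch of the guard pattern, with the enclosing merge-function context abbreviated:

    /* only requests whose DISCARD and SECURE flags match may merge */
    if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
            return 0;
    if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
            return 0;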
drivers/gpu/drm/drm_buffer.c
... | ... | @@ -98,8 +98,8 @@ |
98 | 98 | * user_data: A pointer the data that is copied to the buffer. |
99 | 99 | * size: The Number of bytes to copy. |
100 | 100 | */ |
101 | -extern int drm_buffer_copy_from_user(struct drm_buffer *buf, | |
102 | - void __user *user_data, int size) | |
101 | +int drm_buffer_copy_from_user(struct drm_buffer *buf, | |
102 | + void __user *user_data, int size) | |
103 | 103 | { |
104 | 104 | int nr_pages = size / PAGE_SIZE + 1; |
105 | 105 | int idx; |
... | ... | @@ -163,7 +163,7 @@ |
163 | 163 | { |
164 | 164 | int idx = drm_buffer_index(buf); |
165 | 165 | int page = drm_buffer_page(buf); |
166 | - void *obj = 0; | |
166 | + void *obj = NULL; | |
167 | 167 | |
168 | 168 | if (idx + objsize <= PAGE_SIZE) { |
169 | 169 | obj = &buf->data[page][idx]; |
drivers/gpu/drm/drm_gem.c
... | ... | @@ -142,7 +142,7 @@ |
142 | 142 | return -ENOMEM; |
143 | 143 | |
144 | 144 | kref_init(&obj->refcount); |
145 | - kref_init(&obj->handlecount); | |
145 | + atomic_set(&obj->handle_count, 0); | |
146 | 146 | obj->size = size; |
147 | 147 | |
148 | 148 | return 0; |
... | ... | @@ -448,26 +448,6 @@ |
448 | 448 | } |
449 | 449 | EXPORT_SYMBOL(drm_gem_object_free); |
450 | 450 | |
451 | -/** | |
452 | - * Called after the last reference to the object has been lost. | |
453 | - * Must be called without holding struct_mutex | |
454 | - * | |
455 | - * Frees the object | |
456 | - */ | |
457 | -void | |
458 | -drm_gem_object_free_unlocked(struct kref *kref) | |
459 | -{ | |
460 | - struct drm_gem_object *obj = (struct drm_gem_object *) kref; | |
461 | - struct drm_device *dev = obj->dev; | |
462 | - | |
463 | - if (dev->driver->gem_free_object != NULL) { | |
464 | - mutex_lock(&dev->struct_mutex); | |
465 | - dev->driver->gem_free_object(obj); | |
466 | - mutex_unlock(&dev->struct_mutex); | |
467 | - } | |
468 | -} | |
469 | -EXPORT_SYMBOL(drm_gem_object_free_unlocked); | |
470 | - | |
471 | 451 | static void drm_gem_object_ref_bug(struct kref *list_kref) |
472 | 452 | { |
473 | 453 | BUG(); |
474 | 454 | |
... | ... | @@ -480,12 +460,8 @@ |
480 | 460 | * called before drm_gem_object_free or we'll be touching |
481 | 461 | * freed memory |
482 | 462 | */ |
483 | -void | |
484 | -drm_gem_object_handle_free(struct kref *kref) | |
463 | +void drm_gem_object_handle_free(struct drm_gem_object *obj) | |
485 | 464 | { |
486 | - struct drm_gem_object *obj = container_of(kref, | |
487 | - struct drm_gem_object, | |
488 | - handlecount); | |
489 | 465 | struct drm_device *dev = obj->dev; |
490 | 466 | |
491 | 467 | /* Remove any name for this object */ |
... | ... | @@ -512,6 +488,10 @@ |
512 | 488 | struct drm_gem_object *obj = vma->vm_private_data; |
513 | 489 | |
514 | 490 | drm_gem_object_reference(obj); |
491 | + | |
492 | + mutex_lock(&obj->dev->struct_mutex); | |
493 | + drm_vm_open_locked(vma); | |
494 | + mutex_unlock(&obj->dev->struct_mutex); | |
515 | 495 | } |
516 | 496 | EXPORT_SYMBOL(drm_gem_vm_open); |
517 | 497 | |
... | ... | @@ -519,7 +499,10 @@ |
519 | 499 | { |
520 | 500 | struct drm_gem_object *obj = vma->vm_private_data; |
521 | 501 | |
522 | - drm_gem_object_unreference_unlocked(obj); | |
502 | + mutex_lock(&obj->dev->struct_mutex); | |
503 | + drm_vm_close_locked(vma); | |
504 | + drm_gem_object_unreference(obj); | |
505 | + mutex_unlock(&obj->dev->struct_mutex); | |
523 | 506 | } |
524 | 507 | EXPORT_SYMBOL(drm_gem_vm_close); |
525 | 508 |
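The GEM hunks above replace the kref-based handlecount with a plain atomic_t and take struct_mutex around the vma bookkeeping in the vm_open/vm_close hooks; the matching inline helpers live in include/drm/drmP.h, whose diff is not shown on this page. A hedged sketch of what the unlocked handle unreference plausibly becomes under this scheme (an assumption, not the verbatim drmP.h change): the last handle drops the object's name via drm_gem_object_handle_free() and then releases the reference the handle held.

    /* sketch only -- the real helper is in the drmP.h part of this commit */
    if (atomic_dec_and_test(&obj->handle_count))
            drm_gem_object_handle_free(obj);
    drm_gem_object_unreference_unlocked(obj);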
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_vm.c
... | ... | @@ -433,15 +433,7 @@ |
433 | 433 | mutex_unlock(&dev->struct_mutex); |
434 | 434 | } |
435 | 435 | |
436 | -/** | |
437 | - * \c close method for all virtual memory types. | |
438 | - * | |
439 | - * \param vma virtual memory area. | |
440 | - * | |
441 | - * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | |
442 | - * free it. | |
443 | - */ | |
444 | -static void drm_vm_close(struct vm_area_struct *vma) | |
436 | +void drm_vm_close_locked(struct vm_area_struct *vma) | |
445 | 437 | { |
446 | 438 | struct drm_file *priv = vma->vm_file->private_data; |
447 | 439 | struct drm_device *dev = priv->minor->dev; |
... | ... | @@ -451,7 +443,6 @@ |
451 | 443 | vma->vm_start, vma->vm_end - vma->vm_start); |
452 | 444 | atomic_dec(&dev->vma_count); |
453 | 445 | |
454 | - mutex_lock(&dev->struct_mutex); | |
455 | 446 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { |
456 | 447 | if (pt->vma == vma) { |
457 | 448 | list_del(&pt->head); |
... | ... | @@ -459,6 +450,23 @@ |
459 | 450 | break; |
460 | 451 | } |
461 | 452 | } |
453 | +} | |
454 | + | |
455 | +/** | |
456 | + * \c close method for all virtual memory types. | |
457 | + * | |
458 | + * \param vma virtual memory area. | |
459 | + * | |
460 | + * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | |
461 | + * free it. | |
462 | + */ | |
463 | +static void drm_vm_close(struct vm_area_struct *vma) | |
464 | +{ | |
465 | + struct drm_file *priv = vma->vm_file->private_data; | |
466 | + struct drm_device *dev = priv->minor->dev; | |
467 | + | |
468 | + mutex_lock(&dev->struct_mutex); | |
469 | + drm_vm_close_locked(vma); | |
462 | 470 | mutex_unlock(&dev->struct_mutex); |
463 | 471 | } |
464 | 472 |
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i830/i830_dma.c
drivers/gpu/drm/i915/i915_gem.c
... | ... | @@ -244,13 +244,11 @@ |
244 | 244 | return -ENOMEM; |
245 | 245 | |
246 | 246 | ret = drm_gem_handle_create(file_priv, obj, &handle); |
247 | + /* drop reference from allocate - handle holds it now */ | |
248 | + drm_gem_object_unreference_unlocked(obj); | |
247 | 249 | if (ret) { |
248 | - drm_gem_object_unreference_unlocked(obj); | |
249 | 250 | return ret; |
250 | 251 | } |
251 | - | |
252 | - /* Sink the floating reference from kref_init(handlecount) */ | |
253 | - drm_gem_object_handle_unreference_unlocked(obj); | |
254 | 252 | |
255 | 253 | args->handle = handle; |
256 | 254 | return 0; |
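This is the reference-counting pattern that i915, nouveau, and radeon all converge on in this commit: drm_gem_handle_create() takes its own reference, so the creator drops the allocation reference unconditionally and the handle becomes the sole owner. Condensed from the hunk above:

    ret = drm_gem_handle_create(file_priv, obj, &handle);
    drm_gem_object_unreference_unlocked(obj); /* handle holds the ref now */
    if (ret)
            return ret;

    args->handle = handle;
    return 0;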
drivers/gpu/drm/i915/intel_fb.c
... | ... | @@ -224,8 +224,10 @@ |
224 | 224 | drm_fb_helper_fini(&ifbdev->helper); |
225 | 225 | |
226 | 226 | drm_framebuffer_cleanup(&ifb->base); |
227 | - if (ifb->obj) | |
227 | + if (ifb->obj) { | |
228 | + drm_gem_object_handle_unreference_unlocked(ifb->obj); | |
228 | 229 | drm_gem_object_unreference_unlocked(ifb->obj); |
230 | + } | |
229 | 231 | } |
230 | 232 | |
231 | 233 | int intel_fbdev_init(struct drm_device *dev) |
drivers/gpu/drm/nouveau/nouveau_connector.c
... | ... | @@ -558,8 +558,10 @@ |
558 | 558 | if (nv_encoder->dcb->type == OUTPUT_LVDS && |
559 | 559 | (nv_encoder->dcb->lvdsconf.use_straps_for_mode || |
560 | 560 | dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { |
561 | - nv_connector->native_mode = drm_mode_create(dev); | |
562 | - nouveau_bios_fp_mode(dev, nv_connector->native_mode); | |
561 | + struct drm_display_mode mode; | |
562 | + | |
563 | + nouveau_bios_fp_mode(dev, &mode); | |
564 | + nv_connector->native_mode = drm_mode_duplicate(dev, &mode); | |
563 | 565 | } |
564 | 566 | |
565 | 567 | /* Find the native mode if this is a digital panel, if we didn't |
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_gem.c
... | ... | @@ -167,11 +167,9 @@ |
167 | 167 | goto out; |
168 | 168 | |
169 | 169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); |
170 | + /* drop reference from allocate - handle holds it now */ | |
171 | + drm_gem_object_unreference_unlocked(nvbo->gem); | |
170 | 172 | out: |
171 | - drm_gem_object_handle_unreference_unlocked(nvbo->gem); | |
172 | - | |
173 | - if (ret) | |
174 | - drm_gem_object_unreference_unlocked(nvbo->gem); | |
175 | 173 | return ret; |
176 | 174 | } |
177 | 175 |
drivers/gpu/drm/nouveau/nouveau_notifier.c
... | ... | @@ -79,6 +79,7 @@ |
79 | 79 | mutex_lock(&dev->struct_mutex); |
80 | 80 | nouveau_bo_unpin(chan->notifier_bo); |
81 | 81 | mutex_unlock(&dev->struct_mutex); |
82 | + drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem); | |
82 | 83 | drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); |
83 | 84 | drm_mm_takedown(&chan->notifier_heap); |
84 | 85 | } |
drivers/gpu/drm/radeon/atombios.h
... | ... | @@ -4999,7 +4999,7 @@ |
4999 | 4999 | #define SW_I2C_CNTL_WRITE1BIT 6 |
5000 | 5000 | |
5001 | 5001 | //==============================VESA definition Portion=============================== |
5002 | -#define VESA_OEM_PRODUCT_REV '01.00' | |
5002 | +#define VESA_OEM_PRODUCT_REV "01.00" | |
5003 | 5003 | #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support |
5004 | 5004 | #define VESA_MODE_WIN_ATTRIBUTE 7 |
5005 | 5005 | #define VESA_WIN_SIZE 64 |
drivers/gpu/drm/radeon/r600.c
... | ... | @@ -2729,7 +2729,7 @@ |
2729 | 2729 | if (i < rdev->usec_timeout) { |
2730 | 2730 | DRM_INFO("ib test succeeded in %u usecs\n", i); |
2731 | 2731 | } else { |
2732 | - DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", | |
2732 | + DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", | |
2733 | 2733 | scratch, tmp); |
2734 | 2734 | r = -EINVAL; |
2735 | 2735 | } |
... | ... | @@ -3528,7 +3528,8 @@ |
3528 | 3528 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
3529 | 3529 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
3530 | 3530 | */ |
3531 | - if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { | |
3531 | + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && | |
3532 | + rdev->vram_scratch.ptr) { | |
3532 | 3533 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
3533 | 3534 | u32 tmp; |
3534 | 3535 |
drivers/gpu/drm/radeon/radeon_atombios.c
... | ... | @@ -317,6 +317,15 @@ |
317 | 317 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
318 | 318 | } |
319 | 319 | |
320 | + /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ | |
321 | + if ((dev->pdev->device == 0x796e) && | |
322 | + (dev->pdev->subsystem_vendor == 0x1462) && | |
323 | + (dev->pdev->subsystem_device == 0x7302)) { | |
324 | + if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || | |
325 | + (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) | |
326 | + return false; | |
327 | + } | |
328 | + | |
320 | 329 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ |
321 | 330 | if ((dev->pdev->device == 0x7941) && |
322 | 331 | (dev->pdev->subsystem_vendor == 0x147b) && |
drivers/gpu/drm/radeon/radeon_display.c
... | ... | @@ -349,6 +349,8 @@ |
349 | 349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); |
350 | 350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) |
351 | 351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); |
352 | + if (devices & ATOM_DEVICE_DFP6_SUPPORT) | |
353 | + DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); | |
352 | 354 | if (devices & ATOM_DEVICE_TV1_SUPPORT) |
353 | 355 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); |
354 | 356 | if (devices & ATOM_DEVICE_CV_SUPPORT) |
355 | 357 | |
... | ... | @@ -841,8 +843,9 @@ |
841 | 843 | { |
842 | 844 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); |
843 | 845 | |
844 | - if (radeon_fb->obj) | |
846 | + if (radeon_fb->obj) { | |
845 | 847 | drm_gem_object_unreference_unlocked(radeon_fb->obj); |
848 | + } | |
846 | 849 | drm_framebuffer_cleanup(fb); |
847 | 850 | kfree(radeon_fb); |
848 | 851 | } |
drivers/gpu/drm/radeon/radeon_fb.c
... | ... | @@ -94,8 +94,10 @@ |
94 | 94 | ret = radeon_bo_reserve(rbo, false); |
95 | 95 | if (likely(ret == 0)) { |
96 | 96 | radeon_bo_kunmap(rbo); |
97 | + radeon_bo_unpin(rbo); | |
97 | 98 | radeon_bo_unreserve(rbo); |
98 | 99 | } |
100 | + drm_gem_object_handle_unreference(gobj); | |
99 | 101 | drm_gem_object_unreference_unlocked(gobj); |
100 | 102 | } |
101 | 103 | |
... | ... | @@ -325,8 +327,6 @@ |
325 | 327 | { |
326 | 328 | struct fb_info *info; |
327 | 329 | struct radeon_framebuffer *rfb = &rfbdev->rfb; |
328 | - struct radeon_bo *rbo; | |
329 | - int r; | |
330 | 330 | |
331 | 331 | if (rfbdev->helper.fbdev) { |
332 | 332 | info = rfbdev->helper.fbdev; |
... | ... | @@ -338,14 +338,8 @@ |
338 | 338 | } |
339 | 339 | |
340 | 340 | if (rfb->obj) { |
341 | - rbo = rfb->obj->driver_private; | |
342 | - r = radeon_bo_reserve(rbo, false); | |
343 | - if (likely(r == 0)) { | |
344 | - radeon_bo_kunmap(rbo); | |
345 | - radeon_bo_unpin(rbo); | |
346 | - radeon_bo_unreserve(rbo); | |
347 | - } | |
348 | - drm_gem_object_unreference_unlocked(rfb->obj); | |
341 | + radeonfb_destroy_pinned_object(rfb->obj); | |
342 | + rfb->obj = NULL; | |
349 | 343 | } |
350 | 344 | drm_fb_helper_fini(&rfbdev->helper); |
351 | 345 | drm_framebuffer_cleanup(&rfb->base); |
drivers/gpu/drm/radeon/radeon_gem.c
... | ... | @@ -201,11 +201,11 @@ |
201 | 201 | return r; |
202 | 202 | } |
203 | 203 | r = drm_gem_handle_create(filp, gobj, &handle); |
204 | + /* drop reference from allocate - handle holds it now */ | |
205 | + drm_gem_object_unreference_unlocked(gobj); | |
204 | 206 | if (r) { |
205 | - drm_gem_object_unreference_unlocked(gobj); | |
206 | 207 | return r; |
207 | 208 | } |
208 | - drm_gem_object_handle_unreference_unlocked(gobj); | |
209 | 209 | args->handle = handle; |
210 | 210 | return 0; |
211 | 211 | } |
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
... | ... | @@ -69,7 +69,7 @@ |
69 | 69 | spinlock_t lock; |
70 | 70 | bool fill_lock; |
71 | 71 | struct list_head list; |
72 | - int gfp_flags; | |
72 | + gfp_t gfp_flags; | |
73 | 73 | unsigned npages; |
74 | 74 | char *name; |
75 | 75 | unsigned long nfrees; |
... | ... | @@ -475,7 +475,7 @@ |
475 | 475 | * This function is reentrant if caller updates count depending on number of |
476 | 476 | * pages returned in pages array. |
477 | 477 | */ |
478 | -static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, | |
478 | +static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, | |
479 | 479 | int ttm_flags, enum ttm_caching_state cstate, unsigned count) |
480 | 480 | { |
481 | 481 | struct page **caching_array; |
... | ... | @@ -666,7 +666,7 @@ |
666 | 666 | { |
667 | 667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
668 | 668 | struct page *p = NULL; |
669 | - int gfp_flags = GFP_USER; | |
669 | + gfp_t gfp_flags = GFP_USER; | |
670 | 670 | int r; |
671 | 671 | |
672 | 672 | /* set zero flag for page allocation if required */ |
... | ... | @@ -818,7 +818,7 @@ |
818 | 818 | return 0; |
819 | 819 | } |
820 | 820 | |
821 | -void ttm_page_alloc_fini() | |
821 | +void ttm_page_alloc_fini(void) | |
822 | 822 | { |
823 | 823 | int i; |
824 | 824 |
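gfp_t is a sparse __bitwise type, so storing allocation flags in a plain int silently defeats static checking; the hunks above propagate the proper type through the pool structure and the allocator. The last hunk is a related C correctness fix: an empty parameter list declares an unprototyped function, so (void) is the right spelling. Sketch of the corrected signatures:

    static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                                   int ttm_flags, enum ttm_caching_state cstate,
                                   unsigned count);

    void ttm_page_alloc_fini(void);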
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
... | ... | @@ -148,13 +148,16 @@ |
148 | 148 | {0, 0, 0} |
149 | 149 | }; |
150 | 150 | |
151 | -static char *vmw_devname = "vmwgfx"; | |
151 | +static int enable_fbdev; | |
152 | 152 | |
153 | 153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
154 | 154 | static void vmw_master_init(struct vmw_master *); |
155 | 155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
156 | 156 | void *ptr); |
157 | 157 | |
158 | +MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); | |
159 | +module_param_named(enable_fbdev, enable_fbdev, int, 0600); | |
160 | + | |
158 | 161 | static void vmw_print_capabilities(uint32_t capabilities) |
159 | 162 | { |
160 | 163 | DRM_INFO("Capabilities:\n"); |
... | ... | @@ -192,8 +195,6 @@ |
192 | 195 | { |
193 | 196 | int ret; |
194 | 197 | |
195 | - vmw_kms_save_vga(dev_priv); | |
196 | - | |
197 | 198 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); |
198 | 199 | if (unlikely(ret != 0)) { |
199 | 200 | DRM_ERROR("Unable to initialize FIFO.\n"); |
200 | 201 | |
201 | 202 | |
... | ... | @@ -206,10 +207,36 @@ |
206 | 207 | static void vmw_release_device(struct vmw_private *dev_priv) |
207 | 208 | { |
208 | 209 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
209 | - vmw_kms_restore_vga(dev_priv); | |
210 | 210 | } |
211 | 211 | |
212 | +int vmw_3d_resource_inc(struct vmw_private *dev_priv) | |
213 | +{ | |
214 | + int ret = 0; | |
212 | 215 | |
216 | + mutex_lock(&dev_priv->release_mutex); | |
217 | + if (unlikely(dev_priv->num_3d_resources++ == 0)) { | |
218 | + ret = vmw_request_device(dev_priv); | |
219 | + if (unlikely(ret != 0)) | |
220 | + --dev_priv->num_3d_resources; | |
221 | + } | |
222 | + mutex_unlock(&dev_priv->release_mutex); | |
223 | + return ret; | |
224 | +} | |
225 | + | |
226 | + | |
227 | +void vmw_3d_resource_dec(struct vmw_private *dev_priv) | |
228 | +{ | |
229 | + int32_t n3d; | |
230 | + | |
231 | + mutex_lock(&dev_priv->release_mutex); | |
232 | + if (unlikely(--dev_priv->num_3d_resources == 0)) | |
233 | + vmw_release_device(dev_priv); | |
234 | + n3d = (int32_t) dev_priv->num_3d_resources; | |
235 | + mutex_unlock(&dev_priv->release_mutex); | |
236 | + | |
237 | + BUG_ON(n3d < 0); | |
238 | +} | |
239 | + | |
213 | 240 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
214 | 241 | { |
215 | 242 | struct vmw_private *dev_priv; |
... | ... | @@ -228,6 +255,7 @@ |
228 | 255 | dev_priv->last_read_sequence = (uint32_t) -100; |
229 | 256 | mutex_init(&dev_priv->hw_mutex); |
230 | 257 | mutex_init(&dev_priv->cmdbuf_mutex); |
258 | + mutex_init(&dev_priv->release_mutex); | |
231 | 259 | rwlock_init(&dev_priv->resource_lock); |
232 | 260 | idr_init(&dev_priv->context_idr); |
233 | 261 | idr_init(&dev_priv->surface_idr); |
... | ... | @@ -244,6 +272,8 @@ |
244 | 272 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); |
245 | 273 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); |
246 | 274 | |
275 | + dev_priv->enable_fb = enable_fbdev; | |
276 | + | |
247 | 277 | mutex_lock(&dev_priv->hw_mutex); |
248 | 278 | |
249 | 279 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
... | ... | @@ -343,17 +373,6 @@ |
343 | 373 | |
344 | 374 | dev->dev_private = dev_priv; |
345 | 375 | |
346 | - if (!dev->devname) | |
347 | - dev->devname = vmw_devname; | |
348 | - | |
349 | - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | |
350 | - ret = drm_irq_install(dev); | |
351 | - if (unlikely(ret != 0)) { | |
352 | - DRM_ERROR("Failed installing irq: %d\n", ret); | |
353 | - goto out_no_irq; | |
354 | - } | |
355 | - } | |
356 | - | |
357 | 376 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); |
358 | 377 | dev_priv->stealth = (ret != 0); |
359 | 378 | if (dev_priv->stealth) { |
360 | 379 | |
361 | 380 | |
362 | 381 | |
363 | 382 | |
364 | 383 | |
365 | 384 | |
... | ... | @@ -369,26 +388,52 @@ |
369 | 388 | goto out_no_device; |
370 | 389 | } |
371 | 390 | } |
372 | - ret = vmw_request_device(dev_priv); | |
391 | + ret = vmw_kms_init(dev_priv); | |
373 | 392 | if (unlikely(ret != 0)) |
374 | - goto out_no_device; | |
375 | - vmw_kms_init(dev_priv); | |
393 | + goto out_no_kms; | |
376 | 394 | vmw_overlay_init(dev_priv); |
377 | - vmw_fb_init(dev_priv); | |
395 | + if (dev_priv->enable_fb) { | |
396 | + ret = vmw_3d_resource_inc(dev_priv); | |
397 | + if (unlikely(ret != 0)) | |
398 | + goto out_no_fifo; | |
399 | + vmw_kms_save_vga(dev_priv); | |
400 | + vmw_fb_init(dev_priv); | |
401 | + DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? | |
402 | + "Detected device 3D availability.\n" : | |
403 | + "Detected no device 3D availability.\n"); | |
404 | + } else { | |
405 | + DRM_INFO("Delayed 3D detection since we're not " | |
406 | + "running the device in SVGA mode yet.\n"); | |
407 | + } | |
378 | 408 | |
409 | + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | |
410 | + ret = drm_irq_install(dev); | |
411 | + if (unlikely(ret != 0)) { | |
412 | + DRM_ERROR("Failed installing irq: %d\n", ret); | |
413 | + goto out_no_irq; | |
414 | + } | |
415 | + } | |
416 | + | |
379 | 417 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
380 | 418 | register_pm_notifier(&dev_priv->pm_nb); |
381 | 419 | |
382 | - DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); | |
383 | - | |
384 | 420 | return 0; |
385 | 421 | |
386 | -out_no_device: | |
387 | - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | |
388 | - drm_irq_uninstall(dev_priv->dev); | |
389 | - if (dev->devname == vmw_devname) | |
390 | - dev->devname = NULL; | |
391 | 422 | out_no_irq: |
423 | + if (dev_priv->enable_fb) { | |
424 | + vmw_fb_close(dev_priv); | |
425 | + vmw_kms_restore_vga(dev_priv); | |
426 | + vmw_3d_resource_dec(dev_priv); | |
427 | + } | |
428 | +out_no_fifo: | |
429 | + vmw_overlay_close(dev_priv); | |
430 | + vmw_kms_close(dev_priv); | |
431 | +out_no_kms: | |
432 | + if (dev_priv->stealth) | |
433 | + pci_release_region(dev->pdev, 2); | |
434 | + else | |
435 | + pci_release_regions(dev->pdev); | |
436 | +out_no_device: | |
392 | 437 | ttm_object_device_release(&dev_priv->tdev); |
393 | 438 | out_err4: |
394 | 439 | iounmap(dev_priv->mmio_virt); |
395 | 440 | |
396 | 441 | |
... | ... | @@ -415,19 +460,20 @@ |
415 | 460 | |
416 | 461 | unregister_pm_notifier(&dev_priv->pm_nb); |
417 | 462 | |
418 | - vmw_fb_close(dev_priv); | |
463 | + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | |
464 | + drm_irq_uninstall(dev_priv->dev); | |
465 | + if (dev_priv->enable_fb) { | |
466 | + vmw_fb_close(dev_priv); | |
467 | + vmw_kms_restore_vga(dev_priv); | |
468 | + vmw_3d_resource_dec(dev_priv); | |
469 | + } | |
419 | 470 | vmw_kms_close(dev_priv); |
420 | 471 | vmw_overlay_close(dev_priv); |
421 | - vmw_release_device(dev_priv); | |
422 | 472 | if (dev_priv->stealth) |
423 | 473 | pci_release_region(dev->pdev, 2); |
424 | 474 | else |
425 | 475 | pci_release_regions(dev->pdev); |
426 | 476 | |
427 | - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | |
428 | - drm_irq_uninstall(dev_priv->dev); | |
429 | - if (dev->devname == vmw_devname) | |
430 | - dev->devname = NULL; | |
431 | 477 | ttm_object_device_release(&dev_priv->tdev); |
432 | 478 | iounmap(dev_priv->mmio_virt); |
433 | 479 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, |
... | ... | @@ -500,7 +546,7 @@ |
500 | 546 | struct drm_ioctl_desc *ioctl = |
501 | 547 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
502 | 548 | |
503 | - if (unlikely(ioctl->cmd != cmd)) { | |
549 | + if (unlikely(ioctl->cmd_drv != cmd)) { | |
504 | 550 | DRM_ERROR("Invalid command format, ioctl %d\n", |
505 | 551 | nr - DRM_COMMAND_BASE); |
506 | 552 | return -EINVAL; |
... | ... | @@ -589,6 +635,16 @@ |
589 | 635 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
590 | 636 | int ret = 0; |
591 | 637 | |
638 | + if (!dev_priv->enable_fb) { | |
639 | + ret = vmw_3d_resource_inc(dev_priv); | |
640 | + if (unlikely(ret != 0)) | |
641 | + return ret; | |
642 | + vmw_kms_save_vga(dev_priv); | |
643 | + mutex_lock(&dev_priv->hw_mutex); | |
644 | + vmw_write(dev_priv, SVGA_REG_TRACES, 0); | |
645 | + mutex_unlock(&dev_priv->hw_mutex); | |
646 | + } | |
647 | + | |
592 | 648 | if (active) { |
593 | 649 | BUG_ON(active != &dev_priv->fbdev_master); |
594 | 650 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
... | ... | @@ -617,7 +673,13 @@ |
617 | 673 | return 0; |
618 | 674 | |
619 | 675 | out_no_active_lock: |
620 | - vmw_release_device(dev_priv); | |
676 | + if (!dev_priv->enable_fb) { | |
677 | + mutex_lock(&dev_priv->hw_mutex); | |
678 | + vmw_write(dev_priv, SVGA_REG_TRACES, 1); | |
679 | + mutex_unlock(&dev_priv->hw_mutex); | |
680 | + vmw_kms_restore_vga(dev_priv); | |
681 | + vmw_3d_resource_dec(dev_priv); | |
682 | + } | |
621 | 683 | return ret; |
622 | 684 | } |
623 | 685 | |
624 | 686 | |
... | ... | @@ -645,11 +707,23 @@ |
645 | 707 | |
646 | 708 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
647 | 709 | |
710 | + if (!dev_priv->enable_fb) { | |
711 | + ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | |
712 | + if (unlikely(ret != 0)) | |
713 | + DRM_ERROR("Unable to clean VRAM on master drop.\n"); | |
714 | + mutex_lock(&dev_priv->hw_mutex); | |
715 | + vmw_write(dev_priv, SVGA_REG_TRACES, 1); | |
716 | + mutex_unlock(&dev_priv->hw_mutex); | |
717 | + vmw_kms_restore_vga(dev_priv); | |
718 | + vmw_3d_resource_dec(dev_priv); | |
719 | + } | |
720 | + | |
648 | 721 | dev_priv->active_master = &dev_priv->fbdev_master; |
649 | 722 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
650 | 723 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
651 | 724 | |
652 | - vmw_fb_on(dev_priv); | |
725 | + if (dev_priv->enable_fb) | |
726 | + vmw_fb_on(dev_priv); | |
653 | 727 | } |
654 | 728 | |
655 | 729 | |
... | ... | @@ -722,6 +796,7 @@ |
722 | 796 | .irq_postinstall = vmw_irq_postinstall, |
723 | 797 | .irq_uninstall = vmw_irq_uninstall, |
724 | 798 | .irq_handler = vmw_irq_handler, |
799 | + .get_vblank_counter = vmw_get_vblank_counter, | |
725 | 800 | .reclaim_buffers_locked = NULL, |
726 | 801 | .ioctls = vmw_ioctls, |
727 | 802 | .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), |
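The new release_mutex/num_3d_resources pair turns device bring-up into a refcount: the first user of 3D state initializes the FIFO through vmw_request_device(), and the last one tears it down again. Usage sketch with the names introduced above:

    ret = vmw_3d_resource_inc(dev_priv);    /* first caller inits the FIFO */
    if (unlikely(ret != 0))
            return ret;
    /* ... touch FIFO / 3D state ... */
    vmw_3d_resource_dec(dev_priv);          /* last caller releases it */

This is what lets fbdev become optional (the new enable_fbdev module parameter): without a permanent fbdev consumer, the device is only held in SVGA mode while a master or a 3D resource actually needs it.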
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
... | ... | @@ -277,6 +277,7 @@ |
277 | 277 | |
278 | 278 | bool stealth; |
279 | 279 | bool is_opened; |
280 | + bool enable_fb; | |
280 | 281 | |
281 | 282 | /** |
282 | 283 | * Master management. |
... | ... | @@ -285,6 +286,9 @@ |
285 | 286 | struct vmw_master *active_master; |
286 | 287 | struct vmw_master fbdev_master; |
287 | 288 | struct notifier_block pm_nb; |
289 | + | |
290 | + struct mutex release_mutex; | |
291 | + uint32_t num_3d_resources; | |
288 | 292 | }; |
289 | 293 | |
290 | 294 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
... | ... | @@ -319,6 +323,9 @@ |
319 | 323 | return val; |
320 | 324 | } |
321 | 325 | |
326 | +int vmw_3d_resource_inc(struct vmw_private *dev_priv); | |
327 | +void vmw_3d_resource_dec(struct vmw_private *dev_priv); | |
328 | + | |
322 | 329 | /** |
323 | 330 | * GMR utilities - vmwgfx_gmr.c |
324 | 331 | */ |
... | ... | @@ -511,6 +518,7 @@ |
511 | 518 | unsigned bbp, unsigned depth); |
512 | 519 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
513 | 520 | struct drm_file *file_priv); |
521 | +u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); | |
514 | 522 | |
515 | 523 | /** |
516 | 524 | * Overlay control - vmwgfx_overlay.c |
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
... | ... | @@ -615,6 +615,11 @@ |
615 | 615 | if (unlikely(ret != 0)) |
616 | 616 | goto err_unlock; |
617 | 617 | |
618 | + if (bo->mem.mem_type == TTM_PL_VRAM && | |
619 | + bo->mem.mm_node->start < bo->num_pages) | |
620 | + (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | |
621 | + false, false); | |
622 | + | |
618 | 623 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); |
619 | 624 | |
620 | 625 | /* Could probably bug on */ |
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
... | ... | @@ -106,6 +106,7 @@ |
106 | 106 | mutex_lock(&dev_priv->hw_mutex); |
107 | 107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); |
108 | 108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); |
109 | + dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); | |
109 | 110 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); |
110 | 111 | |
111 | 112 | min = 4; |
... | ... | @@ -175,6 +176,8 @@ |
175 | 176 | dev_priv->config_done_state); |
176 | 177 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
177 | 178 | dev_priv->enable_state); |
179 | + vmw_write(dev_priv, SVGA_REG_TRACES, | |
180 | + dev_priv->traces_state); | |
178 | 181 | |
179 | 182 | mutex_unlock(&dev_priv->hw_mutex); |
180 | 183 | vmw_fence_queue_takedown(&fifo->fence_queue); |
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
... | ... | @@ -898,7 +898,19 @@ |
898 | 898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); |
899 | 899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); |
900 | 900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); |
901 | + if (i == 0 && vmw_priv->num_displays == 1 && | |
902 | + save->width == 0 && save->height == 0) { | |
903 | + | |
904 | + /* | |
905 | + * It should be fairly safe to assume that these | |
906 | + * values are uninitialized. | |
907 | + */ | |
908 | + | |
909 | + save->width = vmw_priv->vga_width - save->pos_x; | |
910 | + save->height = vmw_priv->vga_height - save->pos_y; | |
911 | + } | |
901 | 912 | } |
913 | + | |
902 | 914 | return 0; |
903 | 915 | } |
904 | 916 | |
... | ... | @@ -983,5 +995,10 @@ |
983 | 995 | out_unlock: |
984 | 996 | ttm_read_unlock(&vmaster->lock); |
985 | 997 | return ret; |
998 | +} | |
999 | + | |
1000 | +u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) | |
1001 | +{ | |
1002 | + return 0; | |
986 | 1003 | } |
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
... | ... | @@ -27,6 +27,8 @@ |
27 | 27 | |
28 | 28 | #include "vmwgfx_kms.h" |
29 | 29 | |
30 | +#define VMWGFX_LDU_NUM_DU 8 | |
31 | + | |
30 | 32 | #define vmw_crtc_to_ldu(x) \ |
31 | 33 | container_of(x, struct vmw_legacy_display_unit, base.crtc) |
32 | 34 | #define vmw_encoder_to_ldu(x) \ |
... | ... | @@ -536,6 +538,10 @@ |
536 | 538 | |
537 | 539 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) |
538 | 540 | { |
541 | + struct drm_device *dev = dev_priv->dev; | |
542 | + int i; | |
543 | + int ret; | |
544 | + | |
539 | 545 | if (dev_priv->ldu_priv) { |
540 | 546 | DRM_INFO("ldu system already on\n"); |
541 | 547 | return -EINVAL; |
542 | 548 | |
543 | 549 | |
544 | 550 | |
... | ... | @@ -553,23 +559,24 @@ |
553 | 559 | |
554 | 560 | drm_mode_create_dirty_info_property(dev_priv->dev); |
555 | 561 | |
556 | - vmw_ldu_init(dev_priv, 0); | |
557 | - /* for old hardware without multimon only enable one display */ | |
558 | 562 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { |
559 | - vmw_ldu_init(dev_priv, 1); | |
560 | - vmw_ldu_init(dev_priv, 2); | |
561 | - vmw_ldu_init(dev_priv, 3); | |
562 | - vmw_ldu_init(dev_priv, 4); | |
563 | - vmw_ldu_init(dev_priv, 5); | |
564 | - vmw_ldu_init(dev_priv, 6); | |
565 | - vmw_ldu_init(dev_priv, 7); | |
563 | + for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) | |
564 | + vmw_ldu_init(dev_priv, i); | |
565 | + ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); | |
566 | + } else { | |
567 | + /* for old hardware without multimon only enable one display */ | |
568 | + vmw_ldu_init(dev_priv, 0); | |
569 | + ret = drm_vblank_init(dev, 1); | |
566 | 570 | } |
567 | 571 | |
568 | - return 0; | |
572 | + return ret; | |
569 | 573 | } |
570 | 574 | |
571 | 575 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) |
572 | 576 | { |
577 | + struct drm_device *dev = dev_priv->dev; | |
578 | + | |
579 | + drm_vblank_cleanup(dev); | |
573 | 580 | if (!dev_priv->ldu_priv) |
574 | 581 | return -ENOSYS; |
575 | 582 |
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
... | ... | @@ -211,6 +211,7 @@ |
211 | 211 | cmd->body.cid = cpu_to_le32(res->id); |
212 | 212 | |
213 | 213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
214 | + vmw_3d_resource_dec(dev_priv); | |
214 | 215 | } |
215 | 216 | |
216 | 217 | static int vmw_context_init(struct vmw_private *dev_priv, |
... | ... | @@ -247,6 +248,7 @@ |
247 | 248 | cmd->body.cid = cpu_to_le32(res->id); |
248 | 249 | |
249 | 250 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
251 | + (void) vmw_3d_resource_inc(dev_priv); | |
250 | 252 | vmw_resource_activate(res, vmw_hw_context_destroy); |
251 | 253 | return 0; |
252 | 254 | } |
... | ... | @@ -406,6 +408,7 @@ |
406 | 408 | cmd->body.sid = cpu_to_le32(res->id); |
407 | 409 | |
408 | 410 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
411 | + vmw_3d_resource_dec(dev_priv); | |
409 | 412 | } |
410 | 413 | |
411 | 414 | void vmw_surface_res_free(struct vmw_resource *res) |
... | ... | @@ -473,6 +476,7 @@ |
473 | 476 | } |
474 | 477 | |
475 | 478 | vmw_fifo_commit(dev_priv, submit_size); |
479 | + (void) vmw_3d_resource_inc(dev_priv); | |
476 | 480 | vmw_resource_activate(res, vmw_hw_surface_destroy); |
477 | 481 | return 0; |
478 | 482 | } |
drivers/gpu/vga/vgaarb.c
... | ... | @@ -598,7 +598,7 @@ |
598 | 598 | pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); |
599 | 599 | } |
600 | 600 | |
601 | -void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) | |
601 | +static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) | |
602 | 602 | { |
603 | 603 | struct vga_device *vgadev; |
604 | 604 | unsigned long flags; |
drivers/hwmon/Kconfig
... | ... | @@ -409,7 +409,7 @@ |
409 | 409 | |
410 | 410 | config SENSORS_PKGTEMP |
411 | 411 | tristate "Intel processor package temperature sensor" |
412 | - depends on X86 && PCI && EXPERIMENTAL | |
412 | + depends on X86 && EXPERIMENTAL | |
413 | 413 | help |
414 | 414 | If you say yes here you get support for the package level temperature |
415 | 415 | sensor inside your CPU. Check documentation/driver for details. |
drivers/hwmon/coretemp.c
... | ... | @@ -423,10 +423,19 @@ |
423 | 423 | int err; |
424 | 424 | struct platform_device *pdev; |
425 | 425 | struct pdev_entry *pdev_entry; |
426 | -#ifdef CONFIG_SMP | |
427 | 426 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
428 | -#endif | |
429 | 427 | |
428 | + /* | |
429 | + * CPUID.06H.EAX[0] indicates whether the CPU has thermal | |
430 | + * sensors. We check this bit only, all the early CPUs | |
431 | + * without thermal sensors will be filtered out. | |
432 | + */ | |
433 | + if (!cpu_has(c, X86_FEATURE_DTS)) { | |
434 | + printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" | |
435 | + " has no thermal sensor.\n", c->x86_model); | |
436 | + return 0; | |
437 | + } | |
438 | + | |
430 | 439 | mutex_lock(&pdev_list_mutex); |
431 | 440 | |
432 | 441 | #ifdef CONFIG_SMP |
433 | 442 | |
... | ... | @@ -482,14 +491,22 @@ |
482 | 491 | |
483 | 492 | static void coretemp_device_remove(unsigned int cpu) |
484 | 493 | { |
485 | - struct pdev_entry *p, *n; | |
494 | + struct pdev_entry *p; | |
495 | + unsigned int i; | |
496 | + | |
486 | 497 | mutex_lock(&pdev_list_mutex); |
487 | - list_for_each_entry_safe(p, n, &pdev_list, list) { | |
488 | - if (p->cpu == cpu) { | |
489 | - platform_device_unregister(p->pdev); | |
490 | - list_del(&p->list); | |
491 | - kfree(p); | |
492 | - } | |
498 | + list_for_each_entry(p, &pdev_list, list) { | |
499 | + if (p->cpu != cpu) | |
500 | + continue; | |
501 | + | |
502 | + platform_device_unregister(p->pdev); | |
503 | + list_del(&p->list); | |
504 | + mutex_unlock(&pdev_list_mutex); | |
505 | + kfree(p); | |
506 | + for_each_cpu(i, cpu_sibling_mask(cpu)) | |
507 | + if (i != cpu && !coretemp_device_add(i)) | |
508 | + break; | |
509 | + return; | |
493 | 510 | } |
494 | 511 | mutex_unlock(&pdev_list_mutex); |
495 | 512 | } |
496 | 513 | |
497 | 514 | |
498 | 515 | |
... | ... | @@ -527,30 +544,21 @@ |
527 | 544 | if (err) |
528 | 545 | goto exit; |
529 | 546 | |
530 | - for_each_online_cpu(i) { | |
531 | - struct cpuinfo_x86 *c = &cpu_data(i); | |
532 | - /* | |
533 | - * CPUID.06H.EAX[0] indicates whether the CPU has thermal | |
534 | - * sensors. We check this bit only, all the early CPUs | |
535 | - * without thermal sensors will be filtered out. | |
536 | - */ | |
537 | - if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) | |
538 | - coretemp_device_add(i); | |
539 | - else { | |
540 | - printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" | |
541 | - " has no thermal sensor.\n", c->x86_model); | |
542 | - } | |
543 | - } | |
547 | + for_each_online_cpu(i) | |
548 | + coretemp_device_add(i); | |
549 | + | |
550 | +#ifndef CONFIG_HOTPLUG_CPU | |
544 | 551 | if (list_empty(&pdev_list)) { |
545 | 552 | err = -ENODEV; |
546 | 553 | goto exit_driver_unreg; |
547 | 554 | } |
555 | +#endif | |
548 | 556 | |
549 | 557 | register_hotcpu_notifier(&coretemp_cpu_notifier); |
550 | 558 | return 0; |
551 | 559 | |
552 | -exit_driver_unreg: | |
553 | 560 | #ifndef CONFIG_HOTPLUG_CPU |
561 | +exit_driver_unreg: | |
554 | 562 | platform_driver_unregister(&coretemp_driver); |
555 | 563 | #endif |
556 | 564 | exit: |
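Two details are worth noting in the reworked remove path above (a hedged reading of the hunk): pdev_list_mutex is dropped before re-adding a sibling's device, because coretemp_device_add() takes the same mutex and calling it with the lock held would deadlock; and since the function leaves the list as soon as the matching entry is found, list_for_each_entry() suffices and it returns directly. A condensed sketch of the safe shape:

    mutex_unlock(&pdev_list_mutex);         /* device_add retakes this lock */
    kfree(p);
    for_each_cpu(i, cpu_sibling_mask(cpu))
            if (i != cpu && !coretemp_device_add(i))
                    break;                  /* one sibling re-registers */
    return;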
drivers/hwmon/lis3lv02d.c
... | ... | @@ -277,7 +277,7 @@ |
277 | 277 | wake_up_interruptible(&lis3_dev.misc_wait); |
278 | 278 | kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); |
279 | 279 | out: |
280 | - if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && | |
280 | + if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev && | |
281 | 281 | lis3_dev.idev->input->users) |
282 | 282 | return IRQ_WAKE_THREAD; |
283 | 283 | return IRQ_HANDLED; |
... | ... | @@ -718,7 +718,7 @@ |
718 | 718 | * io-apic is not configurable (and generates a warning) but I keep it |
719 | 719 | * in case of support for other hardware. |
720 | 720 | */ |
721 | - if (dev->whoami == WAI_8B) | |
721 | + if (dev->pdata && dev->whoami == WAI_8B) | |
722 | 722 | thread_fn = lis302dl_interrupt_thread1_8b; |
723 | 723 | else |
724 | 724 | thread_fn = NULL; |
drivers/hwmon/pkgtemp.c
... | ... | @@ -33,7 +33,6 @@ |
33 | 33 | #include <linux/list.h> |
34 | 34 | #include <linux/platform_device.h> |
35 | 35 | #include <linux/cpu.h> |
36 | -#include <linux/pci.h> | |
37 | 36 | #include <asm/msr.h> |
38 | 37 | #include <asm/processor.h> |
39 | 38 | |
... | ... | @@ -224,7 +223,7 @@ |
224 | 223 | |
225 | 224 | err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); |
226 | 225 | if (err) |
227 | - goto exit_free; | |
226 | + goto exit_dev; | |
228 | 227 | |
229 | 228 | data->hwmon_dev = hwmon_device_register(&pdev->dev); |
230 | 229 | if (IS_ERR(data->hwmon_dev)) { |
... | ... | @@ -238,6 +237,8 @@ |
238 | 237 | |
239 | 238 | exit_class: |
240 | 239 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); |
240 | +exit_dev: | |
241 | + device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); | |
241 | 242 | exit_free: |
242 | 243 | kfree(data); |
243 | 244 | exit: |
... | ... | @@ -250,6 +251,7 @@ |
250 | 251 | |
251 | 252 | hwmon_device_unregister(data->hwmon_dev); |
252 | 253 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); |
254 | + device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); | |
253 | 255 | platform_set_drvdata(pdev, NULL); |
254 | 256 | kfree(data); |
255 | 257 | return 0; |
256 | 258 | |
257 | 259 | |
... | ... | @@ -281,10 +283,11 @@ |
281 | 283 | int err; |
282 | 284 | struct platform_device *pdev; |
283 | 285 | struct pdev_entry *pdev_entry; |
284 | -#ifdef CONFIG_SMP | |
285 | 286 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
286 | -#endif | |
287 | 287 | |
288 | + if (!cpu_has(c, X86_FEATURE_PTS)) | |
289 | + return 0; | |
290 | + | |
288 | 291 | mutex_lock(&pdev_list_mutex); |
289 | 292 | |
290 | 293 | #ifdef CONFIG_SMP |
291 | 294 | |
292 | 295 | |
... | ... | @@ -339,17 +342,18 @@ |
339 | 342 | #ifdef CONFIG_HOTPLUG_CPU |
340 | 343 | static void pkgtemp_device_remove(unsigned int cpu) |
341 | 344 | { |
342 | - struct pdev_entry *p, *n; | |
345 | + struct pdev_entry *p; | |
343 | 346 | unsigned int i; |
344 | 347 | int err; |
345 | 348 | |
346 | 349 | mutex_lock(&pdev_list_mutex); |
347 | - list_for_each_entry_safe(p, n, &pdev_list, list) { | |
350 | + list_for_each_entry(p, &pdev_list, list) { | |
348 | 351 | if (p->cpu != cpu) |
349 | 352 | continue; |
350 | 353 | |
351 | 354 | platform_device_unregister(p->pdev); |
352 | 355 | list_del(&p->list); |
356 | + mutex_unlock(&pdev_list_mutex); | |
353 | 357 | kfree(p); |
354 | 358 | for_each_cpu(i, cpu_core_mask(cpu)) { |
355 | 359 | if (i != cpu) { |
... | ... | @@ -358,7 +362,7 @@ |
358 | 362 | break; |
359 | 363 | } |
360 | 364 | } |
361 | - break; | |
365 | + return; | |
362 | 366 | } |
363 | 367 | mutex_unlock(&pdev_list_mutex); |
364 | 368 | } |
... | ... | @@ -399,11 +403,6 @@ |
399 | 403 | goto exit; |
400 | 404 | |
401 | 405 | for_each_online_cpu(i) { |
402 | - struct cpuinfo_x86 *c = &cpu_data(i); | |
403 | - | |
404 | - if (!cpu_has(c, X86_FEATURE_PTS)) | |
405 | - continue; | |
406 | - | |
407 | 406 | err = pkgtemp_device_add(i); |
408 | 407 | if (err) |
409 | 408 | goto exit_devices_unreg; |
drivers/staging/ti-st/st.h
drivers/staging/ti-st/st_core.c
... | ... | @@ -38,7 +38,6 @@ |
38 | 38 | #include "st_ll.h" |
39 | 39 | #include "st.h" |
40 | 40 | |
41 | -#define VERBOSE | |
42 | 41 | /* strings to be used for rfkill entries and by |
43 | 42 | * ST Core to be used for sysfs debug entry |
44 | 43 | */ |
... | ... | @@ -581,7 +580,7 @@ |
581 | 580 | long err = 0; |
582 | 581 | unsigned long flags = 0; |
583 | 582 | |
584 | - st_kim_ref(&st_gdata); | |
583 | + st_kim_ref(&st_gdata, 0); | |
585 | 584 | pr_info("%s(%d) ", __func__, new_proto->type); |
586 | 585 | if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL |
587 | 586 | || new_proto->reg_complete_cb == NULL) { |
... | ... | @@ -713,7 +712,7 @@ |
713 | 712 | |
714 | 713 | pr_debug("%s: %d ", __func__, type); |
715 | 714 | |
716 | - st_kim_ref(&st_gdata); | |
715 | + st_kim_ref(&st_gdata, 0); | |
717 | 716 | if (type < ST_BT || type >= ST_MAX) { |
718 | 717 | pr_err(" protocol %d not supported", type); |
719 | 718 | return -EPROTONOSUPPORT; |
... | ... | @@ -767,7 +766,7 @@ |
767 | 766 | #endif |
768 | 767 | long len; |
769 | 768 | |
770 | - st_kim_ref(&st_gdata); | |
769 | + st_kim_ref(&st_gdata, 0); | |
771 | 770 | if (unlikely(skb == NULL || st_gdata == NULL |
772 | 771 | || st_gdata->tty == NULL)) { |
773 | 772 | pr_err("data/tty unavailable to perform write"); |
... | ... | @@ -818,7 +817,7 @@ |
818 | 817 | struct st_data_s *st_gdata; |
819 | 818 | pr_info("%s ", __func__); |
820 | 819 | |
821 | - st_kim_ref(&st_gdata); | |
820 | + st_kim_ref(&st_gdata, 0); | |
822 | 821 | st_gdata->tty = tty; |
823 | 822 | tty->disc_data = st_gdata; |
824 | 823 |
drivers/staging/ti-st/st_core.h
drivers/staging/ti-st/st_kim.c
... | ... | @@ -72,11 +72,26 @@ |
72 | 72 | PROTO_ENTRY(ST_GPS, "GPS"), |
73 | 73 | }; |
74 | 74 | |
75 | +#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ | |
76 | +struct platform_device *st_kim_devices[MAX_ST_DEVICES]; | |
75 | 77 | |
76 | 78 | /**********************************************************************/ |
77 | 79 | /* internal functions */ |
78 | 80 | |
79 | 81 | /** |
82 | + * st_get_plat_device - | |
83 | + * returns a reference to the platform device requested by id; | 
84 | + * as of now only one such device exists (id=0). | 
85 | + * The context requesting the reference can obtain the id from | 
86 | + * either (a) the protocol driver which is registering, or | 
87 | + * (b) the tty device which is opened. | 
88 | + */ | |
89 | +static struct platform_device *st_get_plat_device(int id) | |
90 | +{ | |
91 | + return st_kim_devices[id]; | |
92 | +} | |
93 | + | |
94 | +/** | |
80 | 95 | * validate_firmware_response - |
81 | 96 | * function to return whether the firmware response was proper |
82 | 97 | * in case of error don't complete so that waiting for proper |
... | ... | @@ -353,7 +368,7 @@ |
353 | 368 | struct kim_data_s *kim_gdata; |
354 | 369 | pr_info(" %s ", __func__); |
355 | 370 | |
356 | - kim_pdev = st_get_plat_device(); | |
371 | + kim_pdev = st_get_plat_device(0); | |
357 | 372 | kim_gdata = dev_get_drvdata(&kim_pdev->dev); |
358 | 373 | |
359 | 374 | if (kim_gdata->gpios[type] == -1) { |
360 | 375 | |
... | ... | @@ -574,12 +589,12 @@ |
574 | 589 | * This would enable multiple such platform devices to exist |
575 | 590 | * on a given platform |
576 | 591 | */ |
577 | -void st_kim_ref(struct st_data_s **core_data) | |
592 | +void st_kim_ref(struct st_data_s **core_data, int id) | |
578 | 593 | { |
579 | 594 | struct platform_device *pdev; |
580 | 595 | struct kim_data_s *kim_gdata; |
581 | 596 | /* get kim_gdata reference from platform device */ |
582 | - pdev = st_get_plat_device(); | |
597 | + pdev = st_get_plat_device(id); | |
583 | 598 | kim_gdata = dev_get_drvdata(&pdev->dev); |
584 | 599 | *core_data = kim_gdata->core_data; |
585 | 600 | } |
... | ... | @@ -623,6 +638,7 @@ |
623 | 638 | long *gpios = pdev->dev.platform_data; |
624 | 639 | struct kim_data_s *kim_gdata; |
625 | 640 | |
641 | + st_kim_devices[pdev->id] = pdev; | |
626 | 642 | kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); |
627 | 643 | if (!kim_gdata) { |
628 | 644 | pr_err("no mem to allocate"); |
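
The new st_kim_devices[] array plus st_get_plat_device(id) is a tiny id-keyed registry: probe() records each platform device under its id, and st_kim_ref() resolves an id back to a device. A sketch of that shape; the bounds checks below are illustrative additions, not part of the patch:

    #include <stddef.h>

    #define MAX_ST_DEVICES 3

    struct platform_device;                 /* opaque for this sketch */

    static struct platform_device *st_kim_devices[MAX_ST_DEVICES];

    static int register_device(int id, struct platform_device *pdev)
    {
        if (id < 0 || id >= MAX_ST_DEVICES)
            return -1;                      /* id out of range */
        st_kim_devices[id] = pdev;          /* what kim_probe() now does */
        return 0;
    }

    static struct platform_device *lookup_device(int id)
    {
        if (id < 0 || id >= MAX_ST_DEVICES)
            return NULL;
        return st_kim_devices[id];          /* st_get_plat_device() analogue */
    }
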
drivers/usb/core/Kconfig
... | ... | @@ -91,12 +91,12 @@ |
91 | 91 | If you are unsure about this, say N here. |
92 | 92 | |
93 | 93 | config USB_SUSPEND |
94 | - bool "USB runtime power management (suspend/resume and wakeup)" | |
94 | + bool "USB runtime power management (autosuspend) and wakeup" | |
95 | 95 | depends on USB && PM_RUNTIME |
96 | 96 | help |
97 | 97 | If you say Y here, you can use driver calls or the sysfs |
98 | - "power/level" file to suspend or resume individual USB | |
99 | - peripherals and to enable or disable autosuspend (see | |
98 | + "power/control" file to enable or disable autosuspend for | |
99 | + individual USB peripherals (see | |
100 | 100 | Documentation/usb/power-management.txt for more details). |
101 | 101 | |
102 | 102 | Also, USB "remote wakeup" signaling is supported, whereby some |
drivers/usb/core/file.c
... | ... | @@ -159,9 +159,9 @@ |
159 | 159 | int usb_register_dev(struct usb_interface *intf, |
160 | 160 | struct usb_class_driver *class_driver) |
161 | 161 | { |
162 | - int retval = -EINVAL; | |
162 | + int retval; | |
163 | 163 | int minor_base = class_driver->minor_base; |
164 | - int minor = 0; | |
164 | + int minor; | |
165 | 165 | char name[20]; |
166 | 166 | char *temp; |
167 | 167 | |
... | ... | @@ -173,34 +173,31 @@ |
173 | 173 | */ |
174 | 174 | minor_base = 0; |
175 | 175 | #endif |
176 | - intf->minor = -1; | |
177 | 176 | |
178 | - dbg ("looking for a minor, starting at %d", minor_base); | |
179 | - | |
180 | 177 | if (class_driver->fops == NULL) |
181 | - goto exit; | |
178 | + return -EINVAL; | |
179 | + if (intf->minor >= 0) | |
180 | + return -EADDRINUSE; | |
182 | 181 | |
182 | + retval = init_usb_class(); | |
183 | + if (retval) | |
184 | + return retval; | |
185 | + | |
186 | + dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base); | |
187 | + | |
183 | 188 | down_write(&minor_rwsem); |
184 | 189 | for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { |
185 | 190 | if (usb_minors[minor]) |
186 | 191 | continue; |
187 | 192 | |
188 | 193 | usb_minors[minor] = class_driver->fops; |
189 | - | |
190 | - retval = 0; | |
194 | + intf->minor = minor; | |
191 | 195 | break; |
192 | 196 | } |
193 | 197 | up_write(&minor_rwsem); |
198 | + if (intf->minor < 0) | |
199 | + return -EXFULL; | |
194 | 200 | |
195 | - if (retval) | |
196 | - goto exit; | |
197 | - | |
198 | - retval = init_usb_class(); | |
199 | - if (retval) | |
200 | - goto exit; | |
201 | - | |
202 | - intf->minor = minor; | |
203 | - | |
204 | 201 | /* create a usb class device for this usb interface */ |
205 | 202 | snprintf(name, sizeof(name), class_driver->name, minor - minor_base); |
206 | 203 | temp = strrchr(name, '/'); |
207 | 204 | |
... | ... | @@ -213,11 +210,11 @@ |
213 | 210 | "%s", temp); |
214 | 211 | if (IS_ERR(intf->usb_dev)) { |
215 | 212 | down_write(&minor_rwsem); |
216 | - usb_minors[intf->minor] = NULL; | |
213 | + usb_minors[minor] = NULL; | |
214 | + intf->minor = -1; | |
217 | 215 | up_write(&minor_rwsem); |
218 | 216 | retval = PTR_ERR(intf->usb_dev); |
219 | 217 | } |
220 | -exit: | |
221 | 218 | return retval; |
222 | 219 | } |
223 | 220 | EXPORT_SYMBOL_GPL(usb_register_dev); |
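
The usb_register_dev() rework moves the failure-prone steps (the fops check, the busy-minor check, init_usb_class()) ahead of the table scan, so the minor table is only written once registration can no longer fail early. A hedged userspace sketch of the slot-claim portion; the error values mirror the patch, while the table and lock are illustrative:

    #include <pthread.h>
    #include <errno.h>
    #include <stddef.h>

    #define MAX_USB_MINORS 256

    static const void *usb_minors[MAX_USB_MINORS];
    static pthread_rwlock_t minor_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    static int alloc_minor(int minor_base, const void *fops)
    {
        int minor, found = -1;

        if (fops == NULL)
            return -EINVAL;                 /* validate before touching state */

        pthread_rwlock_wrlock(&minor_rwsem);
        for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
            if (usb_minors[minor])
                continue;
            usb_minors[minor] = fops;       /* claim the slot while locked */
            found = minor;
            break;
        }
        pthread_rwlock_unlock(&minor_rwsem);

        return found < 0 ? -EXFULL : found; /* -EXFULL: table exhausted */
    }
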
drivers/usb/core/message.c
... | ... | @@ -1802,6 +1802,7 @@ |
1802 | 1802 | intf->dev.groups = usb_interface_groups; |
1803 | 1803 | intf->dev.dma_mask = dev->dev.dma_mask; |
1804 | 1804 | INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); |
1805 | + intf->minor = -1; | |
1805 | 1806 | device_initialize(&intf->dev); |
1806 | 1807 | dev_set_name(&intf->dev, "%d-%s:%d.%d", |
1807 | 1808 | dev->bus->busnum, dev->devpath, |
drivers/usb/musb/cppi_dma.c
... | ... | @@ -322,6 +322,7 @@ |
322 | 322 | index, transmit ? 'T' : 'R', cppi_ch); |
323 | 323 | cppi_ch->hw_ep = ep; |
324 | 324 | cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; |
325 | + cppi_ch->channel.max_len = 0x7fffffff; | |
325 | 326 | |
326 | 327 | DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); |
327 | 328 | return &cppi_ch->channel; |
drivers/usb/musb/musb_gadget.c
... | ... | @@ -300,18 +300,18 @@ |
300 | 300 | #ifndef CONFIG_MUSB_PIO_ONLY |
301 | 301 | if (is_dma_capable() && musb_ep->dma) { |
302 | 302 | struct dma_controller *c = musb->dma_controller; |
303 | + size_t request_size; | |
303 | 304 | |
305 | + /* setup DMA, then program endpoint CSR */ | |
306 | + request_size = min_t(size_t, request->length - request->actual, | |
307 | + musb_ep->dma->max_len); | |
308 | + | |
304 | 309 | use_dma = (request->dma != DMA_ADDR_INVALID); |
305 | 310 | |
306 | 311 | /* MUSB_TXCSR_P_ISO is still set correctly */ |
307 | 312 | |
308 | 313 | #ifdef CONFIG_USB_INVENTRA_DMA |
309 | 314 | { |
310 | - size_t request_size; | |
311 | - | |
312 | - /* setup DMA, then program endpoint CSR */ | |
313 | - request_size = min_t(size_t, request->length, | |
314 | - musb_ep->dma->max_len); | |
315 | 315 | if (request_size < musb_ep->packet_sz) |
316 | 316 | musb_ep->dma->desired_mode = 0; |
317 | 317 | else |
... | ... | @@ -373,8 +373,8 @@ |
373 | 373 | use_dma = use_dma && c->channel_program( |
374 | 374 | musb_ep->dma, musb_ep->packet_sz, |
375 | 375 | 0, |
376 | - request->dma, | |
377 | - request->length); | |
376 | + request->dma + request->actual, | |
377 | + request_size); | |
378 | 378 | if (!use_dma) { |
379 | 379 | c->channel_release(musb_ep->dma); |
380 | 380 | musb_ep->dma = NULL; |
... | ... | @@ -386,8 +386,8 @@ |
386 | 386 | use_dma = use_dma && c->channel_program( |
387 | 387 | musb_ep->dma, musb_ep->packet_sz, |
388 | 388 | request->zero, |
389 | - request->dma, | |
390 | - request->length); | |
389 | + request->dma + request->actual, | |
390 | + request_size); | |
391 | 391 | #endif |
392 | 392 | } |
393 | 393 | #endif |
... | ... | @@ -501,26 +501,14 @@ |
501 | 501 | request->zero = 0; |
502 | 502 | } |
503 | 503 | |
504 | - /* ... or if not, then complete it. */ | |
505 | - musb_g_giveback(musb_ep, request, 0); | |
506 | - | |
507 | - /* | |
508 | - * Kickstart next transfer if appropriate; | |
509 | - * the packet that just completed might not | |
510 | - * be transmitted for hours or days. | |
511 | - * REVISIT for double buffering... | |
512 | - * FIXME revisit for stalls too... | |
513 | - */ | |
514 | - musb_ep_select(mbase, epnum); | |
515 | - csr = musb_readw(epio, MUSB_TXCSR); | |
516 | - if (csr & MUSB_TXCSR_FIFONOTEMPTY) | |
517 | - return; | |
518 | - | |
519 | - request = musb_ep->desc ? next_request(musb_ep) : NULL; | |
520 | - if (!request) { | |
521 | - DBG(4, "%s idle now\n", | |
522 | - musb_ep->end_point.name); | |
523 | - return; | |
504 | + if (request->actual == request->length) { | |
505 | + musb_g_giveback(musb_ep, request, 0); | |
506 | + request = musb_ep->desc ? next_request(musb_ep) : NULL; | |
507 | + if (!request) { | |
508 | + DBG(4, "%s idle now\n", | |
509 | + musb_ep->end_point.name); | |
510 | + return; | |
511 | + } | |
524 | 512 | } |
525 | 513 | } |
526 | 514 | |
... | ... | @@ -568,12 +556,20 @@ |
568 | 556 | { |
569 | 557 | const u8 epnum = req->epnum; |
570 | 558 | struct usb_request *request = &req->request; |
571 | - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | |
559 | + struct musb_ep *musb_ep; | |
572 | 560 | void __iomem *epio = musb->endpoints[epnum].regs; |
573 | 561 | unsigned fifo_count = 0; |
574 | - u16 len = musb_ep->packet_sz; | |
562 | + u16 len; | |
575 | 563 | u16 csr = musb_readw(epio, MUSB_RXCSR); |
564 | + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; | |
576 | 565 | |
566 | + if (hw_ep->is_shared_fifo) | |
567 | + musb_ep = &hw_ep->ep_in; | |
568 | + else | |
569 | + musb_ep = &hw_ep->ep_out; | |
570 | + | |
571 | + len = musb_ep->packet_sz; | |
572 | + | |
577 | 573 | /* We shouldn't get here while DMA is active, but we do... */ |
578 | 574 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { |
579 | 575 | DBG(4, "DMA pending...\n"); |
580 | 576 | |
... | ... | @@ -647,8 +643,8 @@ |
647 | 643 | */ |
648 | 644 | |
649 | 645 | csr |= MUSB_RXCSR_DMAENAB; |
650 | -#ifdef USE_MODE1 | |
651 | 646 | csr |= MUSB_RXCSR_AUTOCLEAR; |
647 | +#ifdef USE_MODE1 | |
652 | 648 | /* csr |= MUSB_RXCSR_DMAMODE; */ |
653 | 649 | |
654 | 650 | /* this special sequence (enabling and then |
655 | 651 | |
... | ... | @@ -663,10 +659,11 @@ |
663 | 659 | if (request->actual < request->length) { |
664 | 660 | int transfer_size = 0; |
665 | 661 | #ifdef USE_MODE1 |
666 | - transfer_size = min(request->length, | |
662 | + transfer_size = min(request->length - request->actual, | |
667 | 663 | channel->max_len); |
668 | 664 | #else |
669 | - transfer_size = len; | |
665 | + transfer_size = min(request->length - request->actual, | |
666 | + (unsigned)len); | |
670 | 667 | #endif |
671 | 668 | if (transfer_size <= musb_ep->packet_sz) |
672 | 669 | musb_ep->dma->desired_mode = 0; |
... | ... | @@ -740,10 +737,16 @@ |
740 | 737 | u16 csr; |
741 | 738 | struct usb_request *request; |
742 | 739 | void __iomem *mbase = musb->mregs; |
743 | - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | |
740 | + struct musb_ep *musb_ep; | |
744 | 741 | void __iomem *epio = musb->endpoints[epnum].regs; |
745 | 742 | struct dma_channel *dma; |
743 | + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; | |
746 | 744 | |
745 | + if (hw_ep->is_shared_fifo) | |
746 | + musb_ep = &hw_ep->ep_in; | |
747 | + else | |
748 | + musb_ep = &hw_ep->ep_out; | |
749 | + | |
747 | 750 | musb_ep_select(mbase, epnum); |
748 | 751 | |
749 | 752 | request = next_request(musb_ep); |
... | ... | @@ -1081,7 +1084,7 @@ |
1081 | 1084 | /* |
1082 | 1085 | * Context: controller locked, IRQs blocked. |
1083 | 1086 | */ |
1084 | -static void musb_ep_restart(struct musb *musb, struct musb_request *req) | |
1087 | +void musb_ep_restart(struct musb *musb, struct musb_request *req) | |
1085 | 1088 | { |
1086 | 1089 | DBG(3, "<== %s request %p len %u on hw_ep%d\n", |
1087 | 1090 | req->tx ? "TX/IN" : "RX/OUT", |
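
The recurring change in this file is that DMA is now programmed with request->dma + request->actual and a request_size clamped to the channel's max_len, so each programming covers only the untransferred remainder. The clamp itself, as a sketch:

    #include <stddef.h>

    /* bytes to program next, given total length, bytes done, and the
     * channel's per-transfer limit */
    static size_t next_chunk(size_t length, size_t actual, size_t max_len)
    {
        size_t remaining = length - actual;
        return remaining < max_len ? remaining : max_len;
    }

Pairing the clamped size with the advanced buffer address is what lets a request larger than max_len complete over several channel programmings.
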
drivers/usb/musb/musb_gadget.h
drivers/usb/musb/musb_gadget_ep0.c
... | ... | @@ -261,6 +261,7 @@ |
261 | 261 | ctrlrequest->wIndex & 0x0f; |
262 | 262 | struct musb_ep *musb_ep; |
263 | 263 | struct musb_hw_ep *ep; |
264 | + struct musb_request *request; | |
264 | 265 | void __iomem *regs; |
265 | 266 | int is_in; |
266 | 267 | u16 csr; |
... | ... | @@ -300,6 +301,14 @@ |
300 | 301 | csr &= ~(MUSB_RXCSR_P_SENDSTALL | |
301 | 302 | MUSB_RXCSR_P_SENTSTALL); |
302 | 303 | musb_writew(regs, MUSB_RXCSR, csr); |
304 | + } | |
305 | + | |
306 | + /* Maybe start the first request in the queue */ | |
307 | + request = to_musb_request( | |
308 | + next_request(musb_ep)); | |
309 | + if (!musb_ep->busy && request) { | |
310 | + DBG(3, "restarting the request\n"); | |
311 | + musb_ep_restart(musb, request); | |
303 | 312 | } |
304 | 313 | |
305 | 314 | /* select ep0 again */ |
drivers/usb/musb/musb_host.c
... | ... | @@ -660,6 +660,12 @@ |
660 | 660 | |
661 | 661 | qh->segsize = length; |
662 | 662 | |
663 | + /* | |
664 | + * Ensure the data reaches main memory before starting the | 
665 | + * DMA transfer | 
666 | + */ | |
667 | + wmb(); | |
668 | + | |
663 | 669 | if (!dma->channel_program(channel, pkt_size, mode, |
664 | 670 | urb->transfer_dma + offset, length)) { |
665 | 671 | dma->channel_release(channel); |
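
The wmb() above orders the CPU's writes to the transfer buffer before the register write that starts the DMA engine. In portable C11 the same ordering is a release fence ahead of the "go" signal; the doorbell flag below stands in for channel_program():

    #include <stdatomic.h>

    static char buffer[4096];
    static atomic_int doorbell;

    static void start_transfer(void)
    {
        buffer[0] = 42;                     /* fill the payload first */
        atomic_thread_fence(memory_order_release); /* wmb() analogue */
        atomic_store_explicit(&doorbell, 1, memory_order_relaxed);
    }
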
fs/ocfs2/acl.c
... | ... | @@ -209,7 +209,10 @@ |
209 | 209 | } |
210 | 210 | |
211 | 211 | inode->i_mode = new_mode; |
212 | + inode->i_ctime = CURRENT_TIME; | |
212 | 213 | di->i_mode = cpu_to_le16(inode->i_mode); |
214 | + di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); | |
215 | + di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | |
213 | 216 | |
214 | 217 | ocfs2_journal_dirty(handle, di_bh); |
215 | 218 |
fs/ocfs2/cluster/tcp.c
... | ... | @@ -977,7 +977,7 @@ |
977 | 977 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, |
978 | 978 | size_t caller_veclen, u8 target_node, int *status) |
979 | 979 | { |
980 | - int ret; | |
980 | + int ret = 0; | |
981 | 981 | struct o2net_msg *msg = NULL; |
982 | 982 | size_t veclen, caller_bytes = 0; |
983 | 983 | struct kvec *vec = NULL; |
fs/ocfs2/dir.c
... | ... | @@ -3931,6 +3931,15 @@ |
3931 | 3931 | goto out_commit; |
3932 | 3932 | } |
3933 | 3933 | |
3934 | + cpos = split_hash; | |
3935 | + ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, | |
3936 | + data_ac, meta_ac, new_dx_leaves, | |
3937 | + num_dx_leaves); | |
3938 | + if (ret) { | |
3939 | + mlog_errno(ret); | |
3940 | + goto out_commit; | |
3941 | + } | |
3942 | + | |
3934 | 3943 | for (i = 0; i < num_dx_leaves; i++) { |
3935 | 3944 | ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), |
3936 | 3945 | orig_dx_leaves[i], |
3937 | 3946 | |
... | ... | @@ -3939,15 +3948,14 @@ |
3939 | 3948 | mlog_errno(ret); |
3940 | 3949 | goto out_commit; |
3941 | 3950 | } |
3942 | - } | |
3943 | 3951 | |
3944 | - cpos = split_hash; | |
3945 | - ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, | |
3946 | - data_ac, meta_ac, new_dx_leaves, | |
3947 | - num_dx_leaves); | |
3948 | - if (ret) { | |
3949 | - mlog_errno(ret); | |
3950 | - goto out_commit; | |
3952 | + ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), | |
3953 | + new_dx_leaves[i], | |
3954 | + OCFS2_JOURNAL_ACCESS_WRITE); | |
3955 | + if (ret) { | |
3956 | + mlog_errno(ret); | |
3957 | + goto out_commit; | |
3958 | + } | |
3951 | 3959 | } |
3952 | 3960 | |
3953 | 3961 | ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, |
fs/ocfs2/dlm/dlmcommon.h
... | ... | @@ -1030,6 +1030,7 @@ |
1030 | 1030 | struct dlm_lock_resource *res); |
1031 | 1031 | void dlm_clean_master_list(struct dlm_ctxt *dlm, |
1032 | 1032 | u8 dead_node); |
1033 | +void dlm_force_free_mles(struct dlm_ctxt *dlm); | |
1033 | 1034 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); |
1034 | 1035 | int __dlm_lockres_has_locks(struct dlm_lock_resource *res); |
1035 | 1036 | int __dlm_lockres_unused(struct dlm_lock_resource *res); |
fs/ocfs2/dlm/dlmdebug.c
... | ... | @@ -636,8 +636,14 @@ |
636 | 636 | spin_lock(&dlm->track_lock); |
637 | 637 | if (oldres) |
638 | 638 | track_list = &oldres->tracking; |
639 | - else | |
639 | + else { | |
640 | 640 | track_list = &dlm->tracking_list; |
641 | + if (list_empty(track_list)) { | |
642 | + dl = NULL; | |
643 | + spin_unlock(&dlm->track_lock); | |
644 | + goto bail; | |
645 | + } | |
646 | + } | |
641 | 647 | |
642 | 648 | list_for_each_entry(res, track_list, tracking) { |
643 | 649 | if (&res->tracking == &dlm->tracking_list) |
... | ... | @@ -660,6 +666,7 @@ |
660 | 666 | } else |
661 | 667 | dl = NULL; |
662 | 668 | |
669 | +bail: | |
663 | 670 | /* passed to seq_show */ |
664 | 671 | return dl; |
665 | 672 | } |
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
... | ... | @@ -3433,4 +3433,44 @@ |
3433 | 3433 | wake_up(&res->wq); |
3434 | 3434 | wake_up(&dlm->migration_wq); |
3435 | 3435 | } |
3436 | + | |
3437 | +void dlm_force_free_mles(struct dlm_ctxt *dlm) | |
3438 | +{ | |
3439 | + int i; | |
3440 | + struct hlist_head *bucket; | |
3441 | + struct dlm_master_list_entry *mle; | |
3442 | + struct hlist_node *tmp, *list; | |
3443 | + | |
3444 | + /* | |
3445 | + * We notified all other nodes that we are exiting the domain and | |
3446 | + * set the dlm state to DLM_CTXT_LEAVING. If any mles are still | 
3447 | + * around, we force free them and wake any processes that are | 
3448 | + * waiting on them. | 
3449 | + */ | |
3450 | + spin_lock(&dlm->spinlock); | |
3451 | + spin_lock(&dlm->master_lock); | |
3452 | + | |
3453 | + BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); | |
3454 | + BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES)); | |
3455 | + | |
3456 | + for (i = 0; i < DLM_HASH_BUCKETS; i++) { | |
3457 | + bucket = dlm_master_hash(dlm, i); | |
3458 | + hlist_for_each_safe(list, tmp, bucket) { | |
3459 | + mle = hlist_entry(list, struct dlm_master_list_entry, | |
3460 | + master_hash_node); | |
3461 | + if (mle->type != DLM_MLE_BLOCK) { | |
3462 | + mlog(ML_ERROR, "bad mle: %p\n", mle); | |
3463 | + dlm_print_one_mle(mle); | |
3464 | + } | |
3465 | + atomic_set(&mle->woken, 1); | |
3466 | + wake_up(&mle->wq); | |
3467 | + | |
3468 | + __dlm_unlink_mle(dlm, mle); | |
3469 | + __dlm_mle_detach_hb_events(dlm, mle); | |
3470 | + __dlm_put_mle(mle); | |
3471 | + } | |
3472 | + } | |
3473 | + spin_unlock(&dlm->master_lock); | |
3474 | + spin_unlock(&dlm->spinlock); | |
3475 | +} |
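
dlm_force_free_mles() walks each bucket with hlist_for_each_safe() because it unlinks and may free entries mid-walk; the safe variant snapshots the next pointer before the body runs. The same idea on a plain singly linked list:

    #include <stdlib.h>

    struct node {
        struct node *next;
    };

    static void free_all(struct node **head)
    {
        struct node *n, *tmp;

        for (n = *head; n != NULL; n = tmp) {
            tmp = n->next;      /* save the successor before freeing */
            free(n);            /* n->next is now unusable; tmp is not */
        }
        *head = NULL;
    }
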
fs/ocfs2/dlmglue.h
fs/ocfs2/ocfs2_fs.h
... | ... | @@ -235,18 +235,31 @@ |
235 | 235 | #define OCFS2_HAS_REFCOUNT_FL (0x0010) |
236 | 236 | |
237 | 237 | /* Inode attributes, keep in sync with EXT2 */ |
238 | -#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ | |
239 | -#define OCFS2_UNRM_FL (0x00000002) /* Undelete */ | |
240 | -#define OCFS2_COMPR_FL (0x00000004) /* Compress file */ | |
241 | -#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */ | |
242 | -#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */ | |
243 | -#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */ | |
244 | -#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */ | |
245 | -#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */ | |
246 | -#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */ | |
238 | +#define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */ | |
239 | +#define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */ | |
240 | +#define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */ | |
241 | +#define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */ | |
242 | +#define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */ | |
243 | +#define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */ | |
244 | +#define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */ | |
245 | +#define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */ | |
246 | +/* Reserved for compression usage... */ | |
247 | +#define OCFS2_DIRTY_FL FS_DIRTY_FL | |
248 | +#define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */ | |
249 | +#define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */ | |
250 | +#define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */ | |
251 | +/* End compression flags --- maybe not all used */ | |
252 | +#define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */ | |
253 | +#define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */ | |
254 | +#define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */ | |
255 | +#define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */ | |
256 | +#define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */ | |
257 | +#define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */ | |
258 | +#define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/ | |
259 | +#define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */ | |
247 | 260 | |
248 | -#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */ | |
249 | -#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ | |
261 | +#define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */ | |
262 | +#define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */ | |
250 | 263 | |
251 | 264 | /* |
252 | 265 | * Extent record flags (e_node.leaf.flags) |
fs/ocfs2/ocfs2_ioctl.h
... | ... | @@ -23,10 +23,10 @@ |
23 | 23 | /* |
24 | 24 | * ioctl commands |
25 | 25 | */ |
26 | -#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) | |
27 | -#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) | |
28 | -#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int) | |
29 | -#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int) | |
26 | +#define OCFS2_IOC_GETFLAGS FS_IOC_GETFLAGS | |
27 | +#define OCFS2_IOC_SETFLAGS FS_IOC_SETFLAGS | |
28 | +#define OCFS2_IOC32_GETFLAGS FS_IOC32_GETFLAGS | |
29 | +#define OCFS2_IOC32_SETFLAGS FS_IOC32_SETFLAGS | |
30 | 30 | |
31 | 31 | /* |
32 | 32 | * Space reservation / allocation / free ioctls and argument structure |
fs/ocfs2/refcounttree.c
... | ... | @@ -4201,8 +4201,9 @@ |
4201 | 4201 | goto out; |
4202 | 4202 | } |
4203 | 4203 | |
4204 | - mutex_lock(&new_inode->i_mutex); | |
4205 | - ret = ocfs2_inode_lock(new_inode, &new_bh, 1); | |
4204 | + mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD); | |
4205 | + ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1, | |
4206 | + OI_LS_REFLINK_TARGET); | |
4206 | 4207 | if (ret) { |
4207 | 4208 | mlog_errno(ret); |
4208 | 4209 | goto out_unlock; |
fs/ocfs2/reservations.c
... | ... | @@ -732,25 +732,23 @@ |
732 | 732 | struct ocfs2_alloc_reservation *resv, |
733 | 733 | int *cstart, int *clen) |
734 | 734 | { |
735 | - unsigned int wanted = *clen; | |
736 | - | |
737 | 735 | if (resv == NULL || ocfs2_resmap_disabled(resmap)) |
738 | 736 | return -ENOSPC; |
739 | 737 | |
740 | 738 | spin_lock(&resv_lock); |
741 | 739 | |
742 | - /* | |
743 | - * We don't want to over-allocate for temporary | |
744 | - * windows. Otherwise, we run the risk of fragmenting the | |
745 | - * allocation space. | |
746 | - */ | |
747 | - wanted = ocfs2_resv_window_bits(resmap, resv); | |
748 | - if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) | |
749 | - wanted = *clen; | |
750 | - | |
751 | 740 | if (ocfs2_resv_empty(resv)) { |
752 | - mlog(0, "empty reservation, find new window\n"); | |
741 | + /* | |
742 | + * We don't want to over-allocate for temporary | |
743 | + * windows. Otherwise, we run the risk of fragmenting the | |
744 | + * allocation space. | |
745 | + */ | |
746 | + unsigned int wanted = ocfs2_resv_window_bits(resmap, resv); | |
753 | 747 | |
748 | + if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) | |
749 | + wanted = *clen; | |
750 | + | |
751 | + mlog(0, "empty reservation, find new window\n"); | |
754 | 752 | /* |
755 | 753 | * Try to get a window here. If it works, we must fall |
756 | 754 | * through and test the bitmap. This avoids some 
fs/ocfs2/suballoc.c
... | ... | @@ -357,7 +357,7 @@ |
357 | 357 | static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, |
358 | 358 | struct ocfs2_group_desc *bg, |
359 | 359 | struct ocfs2_chain_list *cl, |
360 | - u64 p_blkno, u32 clusters) | |
360 | + u64 p_blkno, unsigned int clusters) | |
361 | 361 | { |
362 | 362 | struct ocfs2_extent_list *el = &bg->bg_list; |
363 | 363 | struct ocfs2_extent_rec *rec; |
... | ... | @@ -369,7 +369,7 @@ |
369 | 369 | rec->e_blkno = cpu_to_le64(p_blkno); |
370 | 370 | rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) / |
371 | 371 | le16_to_cpu(cl->cl_bpc)); |
372 | - rec->e_leaf_clusters = cpu_to_le32(clusters); | |
372 | + rec->e_leaf_clusters = cpu_to_le16(clusters); | |
373 | 373 | le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); |
374 | 374 | le16_add_cpu(&bg->bg_free_bits_count, |
375 | 375 | clusters * le16_to_cpu(cl->cl_bpc)); |
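
The small-looking fix above (cpu_to_le32 -> cpu_to_le16) matters on big-endian hosts: e_leaf_clusters is a 16-bit little-endian field, and byte-swapping with the 32-bit helper before truncating keeps the wrong half of the swapped value. A standalone illustration using raw byte swaps in place of the cpu_to_leNN helpers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t swap32(uint32_t v) { return __builtin_bswap32(v); }
    static uint16_t swap16(uint16_t v) { return __builtin_bswap16(v); }

    int main(void)
    {
        uint16_t clusters = 0x0102;

        /* on a big-endian host, cpu_to_leNN is a byte swap */
        uint16_t bad  = (uint16_t)swap32(clusters); /* 0x0000 -- data lost */
        uint16_t good = swap16(clusters);           /* 0x0201 -- correct  */

        printf("bad=%#x good=%#x\n", bad, good);
        return 0;
    }
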
fs/ocfs2/xattr.c
... | ... | @@ -1286,13 +1286,11 @@ |
1286 | 1286 | xis.inode_bh = xbs.inode_bh = di_bh; |
1287 | 1287 | di = (struct ocfs2_dinode *)di_bh->b_data; |
1288 | 1288 | |
1289 | - down_read(&oi->ip_xattr_sem); | |
1290 | 1289 | ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer, |
1291 | 1290 | buffer_size, &xis); |
1292 | 1291 | if (ret == -ENODATA && di->i_xattr_loc) |
1293 | 1292 | ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, |
1294 | 1293 | buffer_size, &xbs); |
1295 | - up_read(&oi->ip_xattr_sem); | |
1296 | 1294 | |
1297 | 1295 | return ret; |
1298 | 1296 | } |
1299 | 1297 | |
... | ... | @@ -1316,8 +1314,10 @@ |
1316 | 1314 | mlog_errno(ret); |
1317 | 1315 | return ret; |
1318 | 1316 | } |
1317 | + down_read(&OCFS2_I(inode)->ip_xattr_sem); | |
1319 | 1318 | ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, |
1320 | 1319 | name, buffer, buffer_size); |
1320 | + up_read(&OCFS2_I(inode)->ip_xattr_sem); | |
1321 | 1321 | |
1322 | 1322 | ocfs2_inode_unlock(inode, 0); |
1323 | 1323 |
include/drm/drmP.h
... | ... | @@ -612,7 +612,7 @@ |
612 | 612 | struct kref refcount; |
613 | 613 | |
614 | 614 | /** Handle count of this object. Each handle also holds a reference */ |
615 | - struct kref handlecount; | |
615 | + atomic_t handle_count; /* number of handles on this object */ | |
616 | 616 | |
617 | 617 | /** Related drm device */ |
618 | 618 | struct drm_device *dev; |
... | ... | @@ -1151,6 +1151,7 @@ |
1151 | 1151 | extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); |
1152 | 1152 | extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); |
1153 | 1153 | extern void drm_vm_open_locked(struct vm_area_struct *vma); |
1154 | +extern void drm_vm_close_locked(struct vm_area_struct *vma); | |
1154 | 1155 | extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); |
1155 | 1156 | |
1156 | 1157 | /* Memory management support (drm_memory.h) */ |
1157 | 1158 | |
... | ... | @@ -1411,12 +1412,11 @@ |
1411 | 1412 | void drm_gem_destroy(struct drm_device *dev); |
1412 | 1413 | void drm_gem_object_release(struct drm_gem_object *obj); |
1413 | 1414 | void drm_gem_object_free(struct kref *kref); |
1414 | -void drm_gem_object_free_unlocked(struct kref *kref); | |
1415 | 1415 | struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, |
1416 | 1416 | size_t size); |
1417 | 1417 | int drm_gem_object_init(struct drm_device *dev, |
1418 | 1418 | struct drm_gem_object *obj, size_t size); |
1419 | -void drm_gem_object_handle_free(struct kref *kref); | |
1419 | +void drm_gem_object_handle_free(struct drm_gem_object *obj); | |
1420 | 1420 | void drm_gem_vm_open(struct vm_area_struct *vma); |
1421 | 1421 | void drm_gem_vm_close(struct vm_area_struct *vma); |
1422 | 1422 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
... | ... | @@ -1439,8 +1439,12 @@ |
1439 | 1439 | static inline void |
1440 | 1440 | drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) |
1441 | 1441 | { |
1442 | - if (obj != NULL) | |
1443 | - kref_put(&obj->refcount, drm_gem_object_free_unlocked); | |
1442 | + if (obj != NULL) { | |
1443 | + struct drm_device *dev = obj->dev; | |
1444 | + mutex_lock(&dev->struct_mutex); | |
1445 | + kref_put(&obj->refcount, drm_gem_object_free); | |
1446 | + mutex_unlock(&dev->struct_mutex); | |
1447 | + } | |
1444 | 1448 | } |
1445 | 1449 | |
1446 | 1450 | int drm_gem_handle_create(struct drm_file *file_priv, |
... | ... | @@ -1451,7 +1455,7 @@ |
1451 | 1455 | drm_gem_object_handle_reference(struct drm_gem_object *obj) |
1452 | 1456 | { |
1453 | 1457 | drm_gem_object_reference(obj); |
1454 | - kref_get(&obj->handlecount); | |
1458 | + atomic_inc(&obj->handle_count); | |
1455 | 1459 | } |
1456 | 1460 | |
1457 | 1461 | static inline void |
1458 | 1462 | |
... | ... | @@ -1460,12 +1464,15 @@ |
1460 | 1464 | if (obj == NULL) |
1461 | 1465 | return; |
1462 | 1466 | |
1467 | + if (atomic_read(&obj->handle_count) == 0) | |
1468 | + return; | |
1463 | 1469 | /* |
1464 | 1470 | * Must bump handle count first as this may be the last |
1465 | 1471 | * ref, in which case the object would disappear before we |
1466 | 1472 | * checked for a name |
1467 | 1473 | */ |
1468 | - kref_put(&obj->handlecount, drm_gem_object_handle_free); | |
1474 | + if (atomic_dec_and_test(&obj->handle_count)) | |
1475 | + drm_gem_object_handle_free(obj); | |
1469 | 1476 | drm_gem_object_unreference(obj); |
1470 | 1477 | } |
1471 | 1478 | |
1472 | 1479 | |
... | ... | @@ -1475,12 +1482,17 @@ |
1475 | 1482 | if (obj == NULL) |
1476 | 1483 | return; |
1477 | 1484 | |
1485 | + if (atomic_read(&obj->handle_count) == 0) | |
1486 | + return; | |
1487 | + | |
1478 | 1488 | /* |
1479 | 1489 | * Must bump handle count first as this may be the last |
1480 | 1490 | * ref, in which case the object would disappear before we |
1481 | 1491 | * checked for a name |
1482 | 1492 | */ |
1483 | - kref_put(&obj->handlecount, drm_gem_object_handle_free); | |
1493 | + | |
1494 | + if (atomic_dec_and_test(&obj->handle_count)) | |
1495 | + drm_gem_object_handle_free(obj); | |
1484 | 1496 | drm_gem_object_unreference_unlocked(obj); |
1485 | 1497 | } |
1486 | 1498 |
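
The drmP.h hunks replace the handlecount kref with a bare atomic_t and open-code the last-put check. The dec-and-test idiom they rely on, sketched with C11 atomics (the object type is illustrative):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct object {
        atomic_int handle_count;
    };

    static void handle_free(struct object *obj) { free(obj); }

    static void handle_unreference(struct object *obj)
    {
        if (obj == NULL)
            return;
        if (atomic_load(&obj->handle_count) == 0)
            return;                         /* mirrors the added zero guard */
        /* fetch_sub returns the old value: old == 1 means we dropped the
         * last handle and must run teardown, like atomic_dec_and_test() */
        if (atomic_fetch_sub(&obj->handle_count, 1) == 1)
            handle_free(obj);
    }
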
include/drm/drm_pciids.h
... | ... | @@ -85,7 +85,6 @@ |
85 | 85 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
86 | 86 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
87 | 87 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
88 | - {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | |
89 | 88 | {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
90 | 89 | {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
91 | 90 | {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
... | ... | @@ -103,6 +102,7 @@ |
103 | 102 | {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
104 | 103 | {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
105 | 104 | {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
105 | + {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ | |
106 | 106 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ |
107 | 107 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ |
108 | 108 | {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ |
mm/fremap.c
... | ... | @@ -125,7 +125,6 @@ |
125 | 125 | { |
126 | 126 | struct mm_struct *mm = current->mm; |
127 | 127 | struct address_space *mapping; |
128 | - unsigned long end = start + size; | |
129 | 128 | struct vm_area_struct *vma; |
130 | 129 | int err = -EINVAL; |
131 | 130 | int has_write_lock = 0; |
... | ... | @@ -142,6 +141,10 @@ |
142 | 141 | if (start + size <= start) |
143 | 142 | return err; |
144 | 143 | |
144 | + /* Does pgoff wrap? */ | |
145 | + if (pgoff + (size >> PAGE_SHIFT) < pgoff) | |
146 | + return err; | |
147 | + | |
145 | 148 | /* Can we represent this offset inside this architecture's pte's? */ |
146 | 149 | #if PTE_FILE_MAX_BITS < BITS_PER_LONG |
147 | 150 | if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) |
... | ... | @@ -168,7 +171,7 @@ |
168 | 171 | if (!(vma->vm_flags & VM_CAN_NONLINEAR)) |
169 | 172 | goto out; |
170 | 173 | |
171 | - if (end <= start || start < vma->vm_start || end > vma->vm_end) | |
174 | + if (start < vma->vm_start || start + size > vma->vm_end) | |
172 | 175 | goto out; |
173 | 176 | |
174 | 177 | /* Must set VM_NONLINEAR before any pages are populated. */ |
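
The added "Does pgoff wrap?" test is the standard unsigned-overflow check: for unsigned a and b, a + b < a exactly when the addition wrapped. Isolated:

    #include <stdbool.h>

    #define PAGE_SHIFT 12

    static bool pgoff_wraps(unsigned long pgoff, unsigned long size)
    {
        /* true iff pgoff + (size >> PAGE_SHIFT) overflowed */
        return pgoff + (size >> PAGE_SHIFT) < pgoff;
    }
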
sound/pci/hda/patch_analog.c
... | ... | @@ -3641,6 +3641,7 @@ |
3641 | 3641 | /* Lenovo Thinkpad T61/X61 */ |
3642 | 3642 | SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD), |
3643 | 3643 | SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP), |
3644 | + SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP), | |
3644 | 3645 | {} |
3645 | 3646 | }; |
3646 | 3647 |
sound/pci/hda/patch_realtek.c
... | ... | @@ -1594,12 +1594,22 @@ |
1594 | 1594 | } |
1595 | 1595 | |
1596 | 1596 | if (spec->autocfg.dig_in_pin) { |
1597 | - hda_nid_t dig_nid; | |
1598 | - err = snd_hda_get_connections(codec, | |
1599 | - spec->autocfg.dig_in_pin, | |
1600 | - &dig_nid, 1); | |
1601 | - if (err > 0) | |
1602 | - spec->dig_in_nid = dig_nid; | |
1597 | + dig_nid = codec->start_nid; | |
1598 | + for (i = 0; i < codec->num_nodes; i++, dig_nid++) { | |
1599 | + unsigned int wcaps = get_wcaps(codec, dig_nid); | |
1600 | + if (get_wcaps_type(wcaps) != AC_WID_AUD_IN) | |
1601 | + continue; | |
1602 | + if (!(wcaps & AC_WCAP_DIGITAL)) | |
1603 | + continue; | |
1604 | + if (!(wcaps & AC_WCAP_CONN_LIST)) | |
1605 | + continue; | |
1606 | + err = get_connection_index(codec, dig_nid, | |
1607 | + spec->autocfg.dig_in_pin); | |
1608 | + if (err >= 0) { | |
1609 | + spec->dig_in_nid = dig_nid; | |
1610 | + break; | |
1611 | + } | |
1612 | + } | |
1603 | 1613 | } |
1604 | 1614 | } |
1605 | 1615 |
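
The replacement digital-input probe no longer trusts a single get-connections call; it scans every codec node, filters on widget capability bits, and keeps the first digital audio-in widget that connects to the pin. A generic sketch of that filter-then-match scan (the constants and node layout are illustrative):

    #define CAP_AUDIO_IN  (1u << 0)
    #define CAP_DIGITAL   (1u << 1)
    #define CAP_CONN_LIST (1u << 2)

    struct node { unsigned int caps; int connects_to; };

    static int find_digital_in(const struct node *nodes, int count, int pin)
    {
        for (int i = 0; i < count; i++) {
            unsigned int caps = nodes[i].caps;
            if (!(caps & CAP_AUDIO_IN) || !(caps & CAP_DIGITAL) ||
                !(caps & CAP_CONN_LIST))
                continue;               /* not a connectable digital input */
            if (nodes[i].connects_to == pin)
                return i;               /* first match wins, as in the patch */
        }
        return -1;
    }
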
sound/pci/oxygen/oxygen.c
... | ... | @@ -543,6 +543,10 @@ |
543 | 543 | chip->model.suspend = claro_suspend; |
544 | 544 | chip->model.resume = claro_resume; |
545 | 545 | chip->model.set_adc_params = set_ak5385_params; |
546 | + chip->model.device_config = PLAYBACK_0_TO_I2S | | |
547 | + PLAYBACK_1_TO_SPDIF | | |
548 | + CAPTURE_0_FROM_I2S_2 | | |
549 | + CAPTURE_1_FROM_SPDIF; | |
546 | 550 | break; |
547 | 551 | } |
548 | 552 | if (id->driver_data == MODEL_MERIDIAN || |
sound/pci/rme9652/hdsp.c
... | ... | @@ -4609,6 +4609,7 @@ |
4609 | 4609 | if (err < 0) |
4610 | 4610 | return err; |
4611 | 4611 | |
4612 | + memset(&info, 0, sizeof(info)); | |
4612 | 4613 | spin_lock_irqsave(&hdsp->lock, flags); |
4613 | 4614 | info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp); |
4614 | 4615 | info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp); |
sound/pci/rme9652/hdspm.c
... | ... | @@ -4127,6 +4127,7 @@ |
4127 | 4127 | |
4128 | 4128 | case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO: |
4129 | 4129 | |
4130 | + memset(&info, 0, sizeof(info)); | |
4130 | 4131 | spin_lock_irq(&hdspm->lock); |
4131 | 4132 | info.pref_sync_ref = hdspm_pref_sync_ref(hdspm); |
4132 | 4133 | info.wordclock_sync_check = hdspm_wc_sync_check(hdspm); |
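
Both RME hunks add the same hardening: zero the ioctl reply struct before filling it, since any field or padding byte left unset would leak stale kernel stack to userspace via the later copy-out. Generic sketch of the pattern:

    #include <string.h>

    struct config_info {
        unsigned char pref_sync_ref;
        unsigned char wordclock_sync_check;
        /* ... more fields, plus compiler-inserted padding ... */
    };

    static void fill_info(struct config_info *out)
    {
        struct config_info info;

        memset(&info, 0, sizeof(info)); /* no uninitialized bytes escape */
        info.pref_sync_ref = 1;
        info.wordclock_sync_check = 0;
        *out = info;                    /* stands in for copy_to_user() */
    }
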
sound/soc/sh/migor.c
... | ... | @@ -12,6 +12,7 @@ |
12 | 12 | #include <linux/firmware.h> |
13 | 13 | #include <linux/module.h> |
14 | 14 | |
15 | +#include <asm/clkdev.h> | |
15 | 16 | #include <asm/clock.h> |
16 | 17 | |
17 | 18 | #include <cpu/sh7722.h> |
18 | 19 | |
... | ... | @@ -40,12 +41,12 @@ |
40 | 41 | }; |
41 | 42 | |
42 | 43 | static struct clk siumckb_clk = { |
43 | - .name = "siumckb_clk", | |
44 | - .id = -1, | |
45 | 44 | .ops = &siumckb_clk_ops, |
46 | 45 | .rate = 0, /* initialised at run-time */ |
47 | 46 | }; |
48 | 47 | |
48 | +static struct clk_lookup *siumckb_lookup; | |
49 | + | |
49 | 50 | static int migor_hw_params(struct snd_pcm_substream *substream, |
50 | 51 | struct snd_pcm_hw_params *params) |
51 | 52 | { |
... | ... | @@ -180,6 +181,13 @@ |
180 | 181 | if (ret < 0) |
181 | 182 | return ret; |
182 | 183 | |
184 | + siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL); | |
185 | + if (!siumckb_lookup) { | |
186 | + ret = -ENOMEM; | |
187 | + goto eclkdevalloc; | |
188 | + } | |
189 | + clkdev_add(siumckb_lookup); | |
190 | + | |
183 | 191 | /* Port number used on this machine: port B */ |
184 | 192 | migor_snd_device = platform_device_alloc("soc-audio", 1); |
185 | 193 | if (!migor_snd_device) { |
186 | 194 | |
... | ... | @@ -200,12 +208,15 @@ |
200 | 208 | epdevadd: |
201 | 209 | platform_device_put(migor_snd_device); |
202 | 210 | epdevalloc: |
211 | + clkdev_drop(siumckb_lookup); | |
212 | +eclkdevalloc: | |
203 | 213 | clk_unregister(&siumckb_clk); |
204 | 214 | return ret; |
205 | 215 | } |
206 | 216 | |
207 | 217 | static void __exit migor_exit(void) |
208 | 218 | { |
219 | + clkdev_drop(siumckb_lookup); | |
209 | 220 | clk_unregister(&siumckb_clk); |
210 | 221 | platform_device_unregister(migor_snd_device); |
211 | 222 | } |
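
The migor init path now owns one more resource (the clkdev lookup), so its unwind chain gains one more label: a failure after clkdev_alloc()/clkdev_add() must drop the lookup before unregistering the clock. The kernel's goto-unwind convention in miniature, with stubbed acquire/release pairs standing in for the real calls:

    static int acquire_a(void) { return 0; }
    static int acquire_b(void) { return 0; }
    static int acquire_c(void) { return 0; }
    static void release_a(void) { }
    static void release_b(void) { }

    static int init_chain(void)
    {
        int ret;

        ret = acquire_a();
        if (ret)
            return ret;         /* nothing to undo yet */

        ret = acquire_b();
        if (ret)
            goto err_a;         /* undo A only */

        ret = acquire_c();
        if (ret)
            goto err_b;         /* undo B, then fall through to undo A */

        return 0;

    err_b:
        release_b();
    err_a:
        release_a();
        return ret;
    }
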
sound/soc/soc-cache.c
... | ... | @@ -203,8 +203,9 @@ |
203 | 203 | data[1] = (value >> 8) & 0xff; |
204 | 204 | data[2] = value & 0xff; |
205 | 205 | |
206 | - if (!snd_soc_codec_volatile_register(codec, reg)) | |
207 | - reg_cache[reg] = value; | |
206 | + if (!snd_soc_codec_volatile_register(codec, reg) | |
207 | + && reg < codec->reg_cache_size) | |
208 | + reg_cache[reg] = value; | |
208 | 209 | |
209 | 210 | if (codec->cache_only) { |
210 | 211 | codec->cache_sync = 1; |