Commit 0d8762c9ee40cf83d5dbf3a22843bc566912b592

Authored by Linus Torvalds

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: fix irqs on/off ip tracing
  lockdep: minor fix for debug_show_all_locks()
  x86: restore the old swiotlb alloc_coherent behavior
  x86: use GFP_DMA for 24bit coherent_dma_mask
  swiotlb: remove panic for alloc_coherent failure
  xen: compilation fix of drivers/xen/events.c on IA64
  xen: portability clean up and some minor clean up for xencomm.c
  xen: don't reload cr3 on suspend
  kernel/resource: fix reserve_region_with_split() section mismatch
  printk: remove unused code from kernel/printk.c

Showing 9 changed files

arch/x86/include/asm/dma-mapping.h
@@ -255,9 +255,11 @@
 
 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
-#ifdef CONFIG_X86_64
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
+	if (dma_mask <= DMA_24BIT_MASK)
+		gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
 	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
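For context, a minimal and entirely hypothetical driver fragment (the probe routine below is illustrative, not part of this commit): with the hunk above, a device that declares a 24-bit coherent DMA mask gets GFP_DMA added to its allocation flags, so its coherent buffer comes from ZONE_DMA (below 16MB), which is the memory such hardware can actually address.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe routine, for illustration only. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void *vaddr;
	dma_addr_t dma_handle;

	/* Declare that the device can only address the low 16MB. */
	if (pci_set_consistent_dma_mask(pdev, DMA_24BIT_MASK))
		return -EIO;

	/* dma_alloc_coherent_gfp_flags() now adds GFP_DMA for this mask,
	 * so the buffer is backed by memory the device can reach. */
	vaddr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, use vaddr from the CPU ... */
	return 0;
}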
arch/x86/kernel/pci-swiotlb_64.c
@@ -18,9 +18,21 @@
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
+static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flags)
+{
+	void *vaddr;
+
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	if (vaddr)
+		return vaddr;
+
+	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+}
+
 struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = swiotlb_alloc_coherent,
+	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
 	.map_single = swiotlb_map_single_phys,
 	.unmap_single = swiotlb_unmap_single,
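The wrapper above tries the regular page allocator first (via dma_generic_alloc_coherent()) and only falls back to the swiotlb pool when that fails, which is what "restore the old swiotlb alloc_coherent behavior" in the merge summary refers to. A simplified sketch of the generic path's mask check, written here only to show the fallback logic and not copied from the kernel source:

#include <linux/mm.h>
#include <linux/dma-mapping.h>

/* Simplified illustration; the real dma_generic_alloc_coherent() lives in
 * arch/x86/kernel/pci-dma.c and handles a few more cases. */
static void *generic_alloc_sketch(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
	struct page *page;

	page = alloc_pages(gfp, get_order(size));
	if (!page)
		return NULL;

	*dma_handle = page_to_phys(page);
	if (*dma_handle + size - 1 > dma_mask) {
		/* The allocation does not fit the device's DMA mask:
		 * release it and let the caller fall back to swiotlb. */
		__free_pages(page, get_order(size));
		return NULL;
	}
	return page_address(page);
}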
drivers/xen/events.c
@@ -774,7 +774,7 @@
 
 	poll.nr_ports = 1;
 	poll.timeout = 0;
-	poll.ports = &evtchn;
+	set_xen_guest_handle(poll.ports, &evtchn);
 
 	if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
 		BUG();
drivers/xen/manage.c
@@ -39,8 +39,6 @@
 
 	BUG_ON(!irqs_disabled());
 
-	load_cr3(swapper_pg_dir);
-
 	err = device_power_down(PMSG_SUSPEND);
 	if (err) {
 		printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
drivers/xen/xencomm.c
@@ -23,14 +23,8 @@
 #include <asm/page.h>
 #include <xen/xencomm.h>
 #include <xen/interface/xen.h>
-#ifdef __ia64__
-#include <asm/xen/xencomm.h> /* for is_kern_addr() */
-#endif
+#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */
 
-#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-#include <xen/platform-compat.h>
-#endif
-
 static int xencomm_init(struct xencomm_desc *desc,
 			void *buffer, unsigned long bytes)
 {
@@ -157,20 +151,11 @@
 	return 0;
 }
 
-/* check if memory address is within VMALLOC region */
-static int is_phys_contiguous(unsigned long addr)
-{
-	if (!is_kernel_addr(addr))
-		return 0;
-
-	return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
-}
-
 static struct xencomm_handle *xencomm_create_inline(void *ptr)
 {
 	unsigned long paddr;
 
-	BUG_ON(!is_phys_contiguous((unsigned long)ptr));
+	BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
 
 	paddr = (unsigned long)xencomm_pa(ptr);
 	BUG_ON(paddr & XENCOMM_INLINE_FLAG);
@@ -202,7 +187,7 @@
 	int rc;
 	struct xencomm_desc *desc;
 
-	if (is_phys_contiguous((unsigned long)ptr))
+	if (xencomm_is_phys_contiguous((unsigned long)ptr))
 		return xencomm_create_inline(ptr);
 
 	rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
@@ -219,7 +204,7 @@
 	int rc;
 	struct xencomm_desc *desc = NULL;
 
-	if (is_phys_contiguous((unsigned long)ptr))
+	if (xencomm_is_phys_contiguous((unsigned long)ptr))
 		return xencomm_create_inline(ptr);
 
 	rc = xencomm_create_mini(ptr, bytes, xc_desc,
kernel/lockdep.c
@@ -2169,12 +2169,11 @@
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
-	unsigned long ip;
 
-	time_hardirqs_on(CALLER_ADDR0, a0);
+	time_hardirqs_on(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2188,7 +2187,6 @@
 	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
-	ip = (unsigned long) __builtin_return_address(0);
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
@@ -2224,11 +2222,11 @@
 
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_off(CALLER_ADDR0, a0);
+	time_hardirqs_off(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2241,7 +2239,7 @@
 		 * We have done an ON -> OFF transition:
 		 */
 		curr->hardirqs_enabled = 0;
-		curr->hardirq_disable_ip = _RET_IP_;
+		curr->hardirq_disable_ip = ip;
 		curr->hardirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(&hardirqs_off_events);
 	} else
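For reference, the reason passing ip straight through now records the right address: the public entry points forward their own caller address, so hardirq_enable_ip/hardirq_disable_ip end up pointing at the real irq-on/off call site rather than somewhere inside lockdep. Paraphrased from the wrappers in kernel/lockdep.c of this era (shown only for illustration, slightly abridged):

void trace_hardirqs_on(void)
{
	trace_hardirqs_on_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	trace_hardirqs_off_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);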
@@ -3417,9 +3415,10 @@
 		}
 		printk(" ignoring it.\n");
 		unlock = 0;
+	} else {
+		if (count != 10)
+			printk(KERN_CONT " locked it.\n");
 	}
-	if (count != 10)
-		printk(" locked it.\n");
 
 	do_each_thread(g, p) {
 		/*
kernel/printk.c
@@ -233,45 +233,6 @@
 #endif
 
 /*
- * Return the number of unread characters in the log buffer.
- */
-static int log_buf_get_len(void)
-{
-	return logged_chars;
-}
-
-/*
- * Copy a range of characters from the log buffer.
- */
-int log_buf_copy(char *dest, int idx, int len)
-{
-	int ret, max;
-	bool took_lock = false;
-
-	if (!oops_in_progress) {
-		spin_lock_irq(&logbuf_lock);
-		took_lock = true;
-	}
-
-	max = log_buf_get_len();
-	if (idx < 0 || idx >= max) {
-		ret = -1;
-	} else {
-		if (len > max)
-			len = max;
-		ret = len;
-		idx += (log_end - max);
-		while (len-- > 0)
-			dest[len] = LOG_BUF(idx + len);
-	}
-
-	if (took_lock)
-		spin_unlock_irq(&logbuf_lock);
-
-	return ret;
-}
-
-/*
  * Commands to do_syslog:
  *
  * 0 -- Close the log. Currently a NOP.
kernel/resource.c
@@ -571,7 +571,7 @@
 
 }
 
-void reserve_region_with_split(struct resource *root,
+void __init reserve_region_with_split(struct resource *root,
 		resource_size_t start, resource_size_t end,
 		const char *name)
 {
lib/swiotlb.c
@@ -497,8 +497,10 @@
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)*hwdev->dma_mask,
 		       (unsigned long long)dev_addr);
-		panic("swiotlb_alloc_coherent: allocated memory is out of "
-		      "range for device");
+
+		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		return NULL;
 	}
 	*dma_handle = dev_addr;
 	return ret;
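With the panic gone, an out-of-range allocation now simply fails: the buffer is handed back via unmap_single() and NULL is returned, so the error reaches the driver through the normal dma_alloc_coherent() return value. A minimal, hypothetical caller sketch of what that means in practice (the function and names below are illustrative, not from this commit):

#include <linux/dma-mapping.h>

/* Illustration only: the allocation failure is now reported, not fatal. */
static int example_alloc(struct device *dev, size_t size)
{
	dma_addr_t dma_handle;
	void *buf;

	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;	/* swiotlb previously panicked here instead of failing */

	/* ... use buf / dma_handle ... */
	return 0;
}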