Commit 1170532bb49f9468aedabdc1d5a560e2521a2bcc
Committed by
Linus Torvalds
1 parent
756a025f00
Exists in
smarc_imx_lf-5.15.y
and in
27 other branches
mm: convert printk(KERN_<LEVEL> to pr_<level>
Most of the mm subsystem uses pr_<level> so make it consistent.

Miscellanea:

- Realign arguments
- Add missing newline to format
- kmemleak-test.c has a "kmemleak: " prefix added to the "Kmemleak testing" logging message via pr_fmt

Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Tejun Heo <tj@kernel.org> [percpu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 20 changed files with 118 additions and 150 deletions Side-by-side Diff
mm/backing-dev.c
... | ... | @@ -1026,8 +1026,8 @@ |
1026 | 1026 | |
1027 | 1027 | if (copy_to_user(buffer, kbuf, sizeof(kbuf))) |
1028 | 1028 | return -EFAULT; |
1029 | - printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n", | |
1030 | - table->procname); | |
1029 | + pr_warn_once("%s exported in /proc is scheduled for removal\n", | |
1030 | + table->procname); | |
1031 | 1031 | |
1032 | 1032 | *lenp = 2; |
1033 | 1033 | *ppos += *lenp; |
mm/bootmem.c
... | ... | @@ -50,8 +50,7 @@ |
50 | 50 | |
51 | 51 | #define bdebug(fmt, args...) ({ \ |
52 | 52 | if (unlikely(bootmem_debug)) \ |
53 | - printk(KERN_INFO \ | |
54 | - "bootmem::%s " fmt, \ | |
53 | + pr_info("bootmem::%s " fmt, \ | |
55 | 54 | __func__, ## args); \ |
56 | 55 | }) |
57 | 56 | |
... | ... | @@ -680,7 +679,7 @@ |
680 | 679 | /* |
681 | 680 | * Whoops, we cannot satisfy the allocation request. |
682 | 681 | */ |
683 | - printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); | |
682 | + pr_alert("bootmem alloc of %lu bytes failed!\n", size); | |
684 | 683 | panic("Out of memory"); |
685 | 684 | return NULL; |
686 | 685 | } |
... | ... | @@ -755,7 +754,7 @@ |
755 | 754 | if (ptr) |
756 | 755 | return ptr; |
757 | 756 | |
758 | - printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); | |
757 | + pr_alert("bootmem alloc of %lu bytes failed!\n", size); | |
759 | 758 | panic("Out of memory"); |
760 | 759 | return NULL; |
761 | 760 | } |
mm/dmapool.c
... | ... | @@ -294,8 +294,7 @@ |
294 | 294 | "dma_pool_destroy %s, %p busy\n", |
295 | 295 | pool->name, page->vaddr); |
296 | 296 | else |
297 | - printk(KERN_ERR | |
298 | - "dma_pool_destroy %s, %p busy\n", | |
297 | + pr_err("dma_pool_destroy %s, %p busy\n", | |
299 | 298 | pool->name, page->vaddr); |
300 | 299 | /* leak the still-in-use consistent memory */ |
301 | 300 | list_del(&page->page_list); |
... | ... | @@ -424,7 +423,7 @@ |
424 | 423 | "dma_pool_free %s, %p/%lx (bad dma)\n", |
425 | 424 | pool->name, vaddr, (unsigned long)dma); |
426 | 425 | else |
427 | - printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", | |
426 | + pr_err("dma_pool_free %s, %p/%lx (bad dma)\n", | |
428 | 427 | pool->name, vaddr, (unsigned long)dma); |
429 | 428 | return; |
430 | 429 | } |
... | ... | @@ -438,8 +437,7 @@ |
438 | 437 | "dma_pool_free %s, %p (bad vaddr)/%Lx\n", |
439 | 438 | pool->name, vaddr, (unsigned long long)dma); |
440 | 439 | else |
441 | - printk(KERN_ERR | |
442 | - "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | |
440 | + pr_err("dma_pool_free %s, %p (bad vaddr)/%Lx\n", | |
443 | 441 | pool->name, vaddr, (unsigned long long)dma); |
444 | 442 | return; |
445 | 443 | } |
... | ... | @@ -455,8 +453,8 @@ |
455 | 453 | dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n", |
456 | 454 | pool->name, (unsigned long long)dma); |
457 | 455 | else |
458 | - printk(KERN_ERR "dma_pool_free %s, dma %Lx already free\n", | |
459 | - pool->name, (unsigned long long)dma); | |
456 | + pr_err("dma_pool_free %s, dma %Lx already free\n", | |
457 | + pool->name, (unsigned long long)dma); | |
460 | 458 | return; |
461 | 459 | } |
462 | 460 | } |
mm/internal.h
... | ... | @@ -386,7 +386,7 @@ |
386 | 386 | do { \ |
387 | 387 | if (level < mminit_loglevel) { \ |
388 | 388 | if (level <= MMINIT_WARNING) \ |
389 | - printk(KERN_WARNING "mminit::" prefix " " fmt, ##arg); \ | |
389 | + pr_warn("mminit::" prefix " " fmt, ##arg); \ | |
390 | 390 | else \ |
391 | 391 | printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \ |
392 | 392 | } \ |
mm/kmemcheck.c
... | ... | @@ -20,7 +20,7 @@ |
20 | 20 | shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order); |
21 | 21 | if (!shadow) { |
22 | 22 | if (printk_ratelimit()) |
23 | - printk(KERN_ERR "kmemcheck: failed to allocate shadow bitmap\n"); | |
23 | + pr_err("kmemcheck: failed to allocate shadow bitmap\n"); | |
24 | 24 | return; |
25 | 25 | } |
26 | 26 |
mm/kmemleak-test.c
mm/memory-failure.c
... | ... | @@ -184,9 +184,8 @@ |
184 | 184 | struct siginfo si; |
185 | 185 | int ret; |
186 | 186 | |
187 | - printk(KERN_ERR | |
188 | - "MCE %#lx: Killing %s:%d due to hardware memory corruption\n", | |
189 | - pfn, t->comm, t->pid); | |
187 | + pr_err("MCE %#lx: Killing %s:%d due to hardware memory corruption\n", | |
188 | + pfn, t->comm, t->pid); | |
190 | 189 | si.si_signo = SIGBUS; |
191 | 190 | si.si_errno = 0; |
192 | 191 | si.si_addr = (void *)addr; |
... | ... | @@ -209,8 +208,8 @@ |
209 | 208 | ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */ |
210 | 209 | } |
211 | 210 | if (ret < 0) |
212 | - printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n", | |
213 | - t->comm, t->pid, ret); | |
211 | + pr_info("MCE: Error sending signal to %s:%d: %d\n", | |
212 | + t->comm, t->pid, ret); | |
214 | 213 | return ret; |
215 | 214 | } |
216 | 215 | |
... | ... | @@ -290,8 +289,7 @@ |
290 | 289 | } else { |
291 | 290 | tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); |
292 | 291 | if (!tk) { |
293 | - printk(KERN_ERR | |
294 | - "MCE: Out of memory while machine check handling\n"); | |
292 | + pr_err("MCE: Out of memory while machine check handling\n"); | |
295 | 293 | return; |
296 | 294 | } |
297 | 295 | } |
... | ... | @@ -336,9 +334,8 @@ |
336 | 334 | * signal and then access the memory. Just kill it. |
337 | 335 | */ |
338 | 336 | if (fail || tk->addr_valid == 0) { |
339 | - printk(KERN_ERR | |
340 | - "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", | |
341 | - pfn, tk->tsk->comm, tk->tsk->pid); | |
337 | + pr_err("MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", | |
338 | + pfn, tk->tsk->comm, tk->tsk->pid); | |
342 | 339 | force_sig(SIGKILL, tk->tsk); |
343 | 340 | } |
344 | 341 | |
... | ... | @@ -350,9 +347,8 @@ |
350 | 347 | */ |
351 | 348 | else if (kill_proc(tk->tsk, tk->addr, trapno, |
352 | 349 | pfn, page, flags) < 0) |
353 | - printk(KERN_ERR | |
354 | - "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", | |
355 | - pfn, tk->tsk->comm, tk->tsk->pid); | |
350 | + pr_err("MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", | |
351 | + pfn, tk->tsk->comm, tk->tsk->pid); | |
356 | 352 | } |
357 | 353 | put_task_struct(tk->tsk); |
358 | 354 | kfree(tk); |
... | ... | @@ -563,7 +559,7 @@ |
563 | 559 | */ |
564 | 560 | static int me_unknown(struct page *p, unsigned long pfn) |
565 | 561 | { |
566 | - printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn); | |
562 | + pr_err("MCE %#lx: Unknown page state\n", pfn); | |
567 | 563 | return MF_FAILED; |
568 | 564 | } |
569 | 565 | |
... | ... | @@ -608,8 +604,8 @@ |
608 | 604 | if (mapping->a_ops->error_remove_page) { |
609 | 605 | err = mapping->a_ops->error_remove_page(mapping, p); |
610 | 606 | if (err != 0) { |
611 | - printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n", | |
612 | - pfn, err); | |
607 | + pr_info("MCE %#lx: Failed to punch page: %d\n", | |
608 | + pfn, err); | |
613 | 609 | } else if (page_has_private(p) && |
614 | 610 | !try_to_release_page(p, GFP_NOIO)) { |
615 | 611 | pr_info("MCE %#lx: failed to release buffers\n", pfn); |
... | ... | @@ -624,8 +620,7 @@ |
624 | 620 | if (invalidate_inode_page(p)) |
625 | 621 | ret = MF_RECOVERED; |
626 | 622 | else |
627 | - printk(KERN_INFO "MCE %#lx: Failed to invalidate\n", | |
628 | - pfn); | |
623 | + pr_info("MCE %#lx: Failed to invalidate\n", pfn); | |
629 | 624 | } |
630 | 625 | return ret; |
631 | 626 | } |
... | ... | @@ -854,8 +849,7 @@ |
854 | 849 | if (ps->action == me_swapcache_dirty && result == MF_DELAYED) |
855 | 850 | count--; |
856 | 851 | if (count != 0) { |
857 | - printk(KERN_ERR | |
858 | - "MCE %#lx: %s still referenced by %d users\n", | |
852 | + pr_err("MCE %#lx: %s still referenced by %d users\n", | |
859 | 853 | pfn, action_page_types[ps->type], count); |
860 | 854 | result = MF_FAILED; |
861 | 855 | } |
... | ... | @@ -934,8 +928,7 @@ |
934 | 928 | } |
935 | 929 | |
936 | 930 | if (PageSwapCache(p)) { |
937 | - printk(KERN_ERR | |
938 | - "MCE %#lx: keeping poisoned page in swap cache\n", pfn); | |
931 | + pr_err("MCE %#lx: keeping poisoned page in swap cache\n", pfn); | |
939 | 932 | ttu |= TTU_IGNORE_HWPOISON; |
940 | 933 | } |
941 | 934 | |
... | ... | @@ -953,8 +946,7 @@ |
953 | 946 | } else { |
954 | 947 | kill = 0; |
955 | 948 | ttu |= TTU_IGNORE_HWPOISON; |
956 | - printk(KERN_INFO | |
957 | - "MCE %#lx: corrupted page was clean: dropped without side effects\n", | |
949 | + pr_info("MCE %#lx: corrupted page was clean: dropped without side effects\n", | |
958 | 950 | pfn); |
959 | 951 | } |
960 | 952 | } |
... | ... | @@ -972,8 +964,8 @@ |
972 | 964 | |
973 | 965 | ret = try_to_unmap(hpage, ttu); |
974 | 966 | if (ret != SWAP_SUCCESS) |
975 | - printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", | |
976 | - pfn, page_mapcount(hpage)); | |
967 | + pr_err("MCE %#lx: failed to unmap page (mapcount=%d)\n", | |
968 | + pfn, page_mapcount(hpage)); | |
977 | 969 | |
978 | 970 | /* |
979 | 971 | * Now that the dirty bit has been propagated to the |
980 | 972 | |
... | ... | @@ -1040,16 +1032,14 @@ |
1040 | 1032 | panic("Memory failure from trap %d on page %lx", trapno, pfn); |
1041 | 1033 | |
1042 | 1034 | if (!pfn_valid(pfn)) { |
1043 | - printk(KERN_ERR | |
1044 | - "MCE %#lx: memory outside kernel control\n", | |
1045 | - pfn); | |
1035 | + pr_err("MCE %#lx: memory outside kernel control\n", pfn); | |
1046 | 1036 | return -ENXIO; |
1047 | 1037 | } |
1048 | 1038 | |
1049 | 1039 | p = pfn_to_page(pfn); |
1050 | 1040 | orig_head = hpage = compound_head(p); |
1051 | 1041 | if (TestSetPageHWPoison(p)) { |
1052 | - printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn); | |
1042 | + pr_err("MCE %#lx: already hardware poisoned\n", pfn); | |
1053 | 1043 | return 0; |
1054 | 1044 | } |
1055 | 1045 | |
... | ... | @@ -1180,7 +1170,7 @@ |
1180 | 1170 | * unpoison always clear PG_hwpoison inside page lock |
1181 | 1171 | */ |
1182 | 1172 | if (!PageHWPoison(p)) { |
1183 | - printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); | |
1173 | + pr_err("MCE %#lx: just unpoisoned\n", pfn); | |
1184 | 1174 | num_poisoned_pages_sub(nr_pages); |
1185 | 1175 | unlock_page(hpage); |
1186 | 1176 | put_hwpoison_page(hpage); |
mm/memory.c
... | ... | @@ -660,9 +660,8 @@ |
660 | 660 | return; |
661 | 661 | } |
662 | 662 | if (nr_unshown) { |
663 | - printk(KERN_ALERT | |
664 | - "BUG: Bad page map: %lu messages suppressed\n", | |
665 | - nr_unshown); | |
663 | + pr_alert("BUG: Bad page map: %lu messages suppressed\n", | |
664 | + nr_unshown); | |
666 | 665 | nr_unshown = 0; |
667 | 666 | } |
668 | 667 | nr_shown = 0; |
669 | 668 | |
... | ... | @@ -673,15 +672,13 @@ |
673 | 672 | mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; |
674 | 673 | index = linear_page_index(vma, addr); |
675 | 674 | |
676 | - printk(KERN_ALERT | |
677 | - "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", | |
678 | - current->comm, | |
679 | - (long long)pte_val(pte), (long long)pmd_val(*pmd)); | |
675 | + pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", | |
676 | + current->comm, | |
677 | + (long long)pte_val(pte), (long long)pmd_val(*pmd)); | |
680 | 678 | if (page) |
681 | 679 | dump_page(page, "bad pte"); |
682 | - printk(KERN_ALERT | |
683 | - "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", | |
684 | - (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); | |
680 | + pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", | |
681 | + (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); | |
685 | 682 | /* |
686 | 683 | * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y |
687 | 684 | */ |
mm/mm_init.c
... | ... | @@ -55,13 +55,12 @@ |
55 | 55 | /* Iterate the zonelist */ |
56 | 56 | for_each_zone_zonelist(zone, z, zonelist, zoneid) { |
57 | 57 | #ifdef CONFIG_NUMA |
58 | - printk(KERN_CONT "%d:%s ", | |
59 | - zone->node, zone->name); | |
58 | + pr_cont("%d:%s ", zone->node, zone->name); | |
60 | 59 | #else |
61 | - printk(KERN_CONT "0:%s ", zone->name); | |
60 | + pr_cont("0:%s ", zone->name); | |
62 | 61 | #endif /* CONFIG_NUMA */ |
63 | 62 | } |
64 | - printk(KERN_CONT "\n"); | |
63 | + pr_cont("\n"); | |
65 | 64 | } |
66 | 65 | } |
67 | 66 | } |
mm/nobootmem.c
... | ... | @@ -288,7 +288,7 @@ |
288 | 288 | /* |
289 | 289 | * Whoops, we cannot satisfy the allocation request. |
290 | 290 | */ |
291 | - printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); | |
291 | + pr_alert("bootmem alloc of %lu bytes failed!\n", size); | |
292 | 292 | panic("Out of memory"); |
293 | 293 | return NULL; |
294 | 294 | } |
... | ... | @@ -360,7 +360,7 @@ |
360 | 360 | if (ptr) |
361 | 361 | return ptr; |
362 | 362 | |
363 | - printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); | |
363 | + pr_alert("bootmem alloc of %lu bytes failed!\n", size); | |
364 | 364 | panic("Out of memory"); |
365 | 365 | return NULL; |
366 | 366 | } |
mm/page_alloc.c
... | ... | @@ -544,11 +544,11 @@ |
544 | 544 | unsigned long res; |
545 | 545 | |
546 | 546 | if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { |
547 | - printk(KERN_ERR "Bad debug_guardpage_minorder value\n"); | |
547 | + pr_err("Bad debug_guardpage_minorder value\n"); | |
548 | 548 | return 0; |
549 | 549 | } |
550 | 550 | _debug_guardpage_minorder = res; |
551 | - printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res); | |
551 | + pr_info("Setting debug_guardpage_minorder to %lu\n", res); | |
552 | 552 | return 0; |
553 | 553 | } |
554 | 554 | __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup); |
... | ... | @@ -4073,8 +4073,7 @@ |
4073 | 4073 | } else if (*s == 'z' || *s == 'Z') { |
4074 | 4074 | user_zonelist_order = ZONELIST_ORDER_ZONE; |
4075 | 4075 | } else { |
4076 | - printk(KERN_WARNING | |
4077 | - "Ignoring invalid numa_zonelist_order value: %s\n", s); | |
4076 | + pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s); | |
4078 | 4077 | return -EINVAL; |
4079 | 4078 | } |
4080 | 4079 | return 0; |
... | ... | @@ -5458,8 +5457,7 @@ |
5458 | 5457 | " %s zone: %lu pages used for memmap\n", |
5459 | 5458 | zone_names[j], memmap_pages); |
5460 | 5459 | } else |
5461 | - printk(KERN_WARNING | |
5462 | - " %s zone: %lu pages exceeds freesize %lu\n", | |
5460 | + pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", | |
5463 | 5461 | zone_names[j], memmap_pages, freesize); |
5464 | 5462 | } |
5465 | 5463 | |
... | ... | @@ -5667,8 +5665,7 @@ |
5667 | 5665 | min_pfn = min(min_pfn, start_pfn); |
5668 | 5666 | |
5669 | 5667 | if (min_pfn == ULONG_MAX) { |
5670 | - printk(KERN_WARNING | |
5671 | - "Could not find start_pfn for node %d\n", nid); | |
5668 | + pr_warn("Could not find start_pfn for node %d\n", nid); | |
5672 | 5669 | return 0; |
5673 | 5670 | } |
5674 | 5671 | |
... | ... | @@ -6686,11 +6683,8 @@ |
6686 | 6683 | if (!table) |
6687 | 6684 | panic("Failed to allocate %s hash table\n", tablename); |
6688 | 6685 | |
6689 | - printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", | |
6690 | - tablename, | |
6691 | - (1UL << log2qty), | |
6692 | - ilog2(size) - PAGE_SHIFT, | |
6693 | - size); | |
6686 | + pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n", | |
6687 | + tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size); | |
6694 | 6688 | |
6695 | 6689 | if (_hash_shift) |
6696 | 6690 | *_hash_shift = log2qty; |
... | ... | @@ -7191,8 +7185,8 @@ |
7191 | 7185 | BUG_ON(!PageBuddy(page)); |
7192 | 7186 | order = page_order(page); |
7193 | 7187 | #ifdef CONFIG_DEBUG_VM |
7194 | - printk(KERN_INFO "remove from free list %lx %d %lx\n", | |
7195 | - pfn, 1 << order, end_pfn); | |
7188 | + pr_info("remove from free list %lx %d %lx\n", | |
7189 | + pfn, 1 << order, end_pfn); | |
7196 | 7190 | #endif |
7197 | 7191 | list_del(&page->lru); |
7198 | 7192 | rmv_page_order(page); |
mm/page_io.c
... | ... | @@ -56,10 +56,10 @@ |
56 | 56 | * Also clear PG_reclaim to avoid rotate_reclaimable_page() |
57 | 57 | */ |
58 | 58 | set_page_dirty(page); |
59 | - printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n", | |
60 | - imajor(bio->bi_bdev->bd_inode), | |
61 | - iminor(bio->bi_bdev->bd_inode), | |
62 | - (unsigned long long)bio->bi_iter.bi_sector); | |
59 | + pr_alert("Write-error on swap-device (%u:%u:%llu)\n", | |
60 | + imajor(bio->bi_bdev->bd_inode), | |
61 | + iminor(bio->bi_bdev->bd_inode), | |
62 | + (unsigned long long)bio->bi_iter.bi_sector); | |
63 | 63 | ClearPageReclaim(page); |
64 | 64 | } |
65 | 65 | end_page_writeback(page); |
... | ... | @@ -73,10 +73,10 @@ |
73 | 73 | if (bio->bi_error) { |
74 | 74 | SetPageError(page); |
75 | 75 | ClearPageUptodate(page); |
76 | - printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", | |
77 | - imajor(bio->bi_bdev->bd_inode), | |
78 | - iminor(bio->bi_bdev->bd_inode), | |
79 | - (unsigned long long)bio->bi_iter.bi_sector); | |
76 | + pr_alert("Read-error on swap-device (%u:%u:%llu)\n", | |
77 | + imajor(bio->bi_bdev->bd_inode), | |
78 | + iminor(bio->bi_bdev->bd_inode), | |
79 | + (unsigned long long)bio->bi_iter.bi_sector); | |
80 | 80 | goto out; |
81 | 81 | } |
82 | 82 | |
... | ... | @@ -216,7 +216,7 @@ |
216 | 216 | out: |
217 | 217 | return ret; |
218 | 218 | bad_bmap: |
219 | - printk(KERN_ERR "swapon: swapfile has holes\n"); | |
219 | + pr_err("swapon: swapfile has holes\n"); | |
220 | 220 | ret = -EINVAL; |
221 | 221 | goto out; |
222 | 222 | } |
... | ... | @@ -290,8 +290,8 @@ |
290 | 290 | */ |
291 | 291 | set_page_dirty(page); |
292 | 292 | ClearPageReclaim(page); |
293 | - pr_err_ratelimited("Write error on dio swapfile (%Lu)\n", | |
294 | - page_file_offset(page)); | |
293 | + pr_err_ratelimited("Write error on dio swapfile (%llu)\n", | |
294 | + page_file_offset(page)); | |
295 | 295 | } |
296 | 296 | end_page_writeback(page); |
297 | 297 | return ret; |
mm/percpu-km.c
... | ... | @@ -95,7 +95,7 @@ |
95 | 95 | |
96 | 96 | /* all units must be in a single group */ |
97 | 97 | if (ai->nr_groups != 1) { |
98 | - printk(KERN_CRIT "percpu: can't handle more than one groups\n"); | |
98 | + pr_crit("percpu: can't handle more than one groups\n"); | |
99 | 99 | return -EINVAL; |
100 | 100 | } |
101 | 101 | |
... | ... | @@ -103,8 +103,8 @@ |
103 | 103 | alloc_pages = roundup_pow_of_two(nr_pages); |
104 | 104 | |
105 | 105 | if (alloc_pages > nr_pages) |
106 | - printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n", | |
107 | - alloc_pages - nr_pages); | |
106 | + pr_warn("percpu: wasting %zu pages per chunk\n", | |
107 | + alloc_pages - nr_pages); | |
108 | 108 | |
109 | 109 | return 0; |
110 | 110 | } |
mm/percpu.c
... | ... | @@ -1449,20 +1449,20 @@ |
1449 | 1449 | for (alloc_end += gi->nr_units / upa; |
1450 | 1450 | alloc < alloc_end; alloc++) { |
1451 | 1451 | if (!(alloc % apl)) { |
1452 | - printk(KERN_CONT "\n"); | |
1452 | + pr_cont("\n"); | |
1453 | 1453 | printk("%spcpu-alloc: ", lvl); |
1454 | 1454 | } |
1455 | - printk(KERN_CONT "[%0*d] ", group_width, group); | |
1455 | + pr_cont("[%0*d] ", group_width, group); | |
1456 | 1456 | |
1457 | 1457 | for (unit_end += upa; unit < unit_end; unit++) |
1458 | 1458 | if (gi->cpu_map[unit] != NR_CPUS) |
1459 | - printk(KERN_CONT "%0*d ", cpu_width, | |
1460 | - gi->cpu_map[unit]); | |
1459 | + pr_cont("%0*d ", | |
1460 | + cpu_width, gi->cpu_map[unit]); | |
1461 | 1461 | else |
1462 | - printk(KERN_CONT "%s ", empty_str); | |
1462 | + pr_cont("%s ", empty_str); | |
1463 | 1463 | } |
1464 | 1464 | } |
1465 | - printk(KERN_CONT "\n"); | |
1465 | + pr_cont("\n"); | |
1466 | 1466 | } |
1467 | 1467 | |
1468 | 1468 | /** |
mm/shmem.c
... | ... | @@ -2823,9 +2823,8 @@ |
2823 | 2823 | if ((value = strchr(this_char,'=')) != NULL) { |
2824 | 2824 | *value++ = 0; |
2825 | 2825 | } else { |
2826 | - printk(KERN_ERR | |
2827 | - "tmpfs: No value for mount option '%s'\n", | |
2828 | - this_char); | |
2826 | + pr_err("tmpfs: No value for mount option '%s'\n", | |
2827 | + this_char); | |
2829 | 2828 | goto error; |
2830 | 2829 | } |
2831 | 2830 | |
... | ... | @@ -2880,8 +2879,7 @@ |
2880 | 2879 | if (mpol_parse_str(value, &mpol)) |
2881 | 2880 | goto bad_val; |
2882 | 2881 | } else { |
2883 | - printk(KERN_ERR "tmpfs: Bad mount option %s\n", | |
2884 | - this_char); | |
2882 | + pr_err("tmpfs: Bad mount option %s\n", this_char); | |
2885 | 2883 | goto error; |
2886 | 2884 | } |
2887 | 2885 | } |
... | ... | @@ -2889,7 +2887,7 @@ |
2889 | 2887 | return 0; |
2890 | 2888 | |
2891 | 2889 | bad_val: |
2892 | - printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", | |
2890 | + pr_err("tmpfs: Bad value '%s' for mount option '%s'\n", | |
2893 | 2891 | value, this_char); |
2894 | 2892 | error: |
2895 | 2893 | mpol_put(mpol); |
2896 | 2894 | |
... | ... | @@ -3286,14 +3284,14 @@ |
3286 | 3284 | |
3287 | 3285 | error = register_filesystem(&shmem_fs_type); |
3288 | 3286 | if (error) { |
3289 | - printk(KERN_ERR "Could not register tmpfs\n"); | |
3287 | + pr_err("Could not register tmpfs\n"); | |
3290 | 3288 | goto out2; |
3291 | 3289 | } |
3292 | 3290 | |
3293 | 3291 | shm_mnt = kern_mount(&shmem_fs_type); |
3294 | 3292 | if (IS_ERR(shm_mnt)) { |
3295 | 3293 | error = PTR_ERR(shm_mnt); |
3296 | - printk(KERN_ERR "Could not kern_mount tmpfs\n"); | |
3294 | + pr_err("Could not kern_mount tmpfs\n"); | |
3297 | 3295 | goto out1; |
3298 | 3296 | } |
3299 | 3297 | return 0; |
mm/slab.c
... | ... | @@ -474,7 +474,7 @@ |
474 | 474 | static void __slab_error(const char *function, struct kmem_cache *cachep, |
475 | 475 | char *msg) |
476 | 476 | { |
477 | - printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", | |
477 | + pr_err("slab error in %s(): cache `%s': %s\n", | |
478 | 478 | function, cachep->name, msg); |
479 | 479 | dump_stack(); |
480 | 480 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); |
... | ... | @@ -1553,7 +1553,7 @@ |
1553 | 1553 | unsigned char error = 0; |
1554 | 1554 | int bad_count = 0; |
1555 | 1555 | |
1556 | - printk(KERN_ERR "%03x: ", offset); | |
1556 | + pr_err("%03x: ", offset); | |
1557 | 1557 | for (i = 0; i < limit; i++) { |
1558 | 1558 | if (data[offset + i] != POISON_FREE) { |
1559 | 1559 | error = data[offset + i]; |
1560 | 1560 | |
1561 | 1561 | |
... | ... | @@ -1566,11 +1566,11 @@ |
1566 | 1566 | if (bad_count == 1) { |
1567 | 1567 | error ^= POISON_FREE; |
1568 | 1568 | if (!(error & (error - 1))) { |
1569 | - printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n"); | |
1569 | + pr_err("Single bit error detected. Probably bad RAM.\n"); | |
1570 | 1570 | #ifdef CONFIG_X86 |
1571 | - printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n"); | |
1571 | + pr_err("Run memtest86+ or a similar memory test tool.\n"); | |
1572 | 1572 | #else |
1573 | - printk(KERN_ERR "Run a memory test tool.\n"); | |
1573 | + pr_err("Run a memory test tool.\n"); | |
1574 | 1574 | #endif |
1575 | 1575 | } |
1576 | 1576 | } |
1577 | 1577 | |
... | ... | @@ -1585,13 +1585,13 @@ |
1585 | 1585 | char *realobj; |
1586 | 1586 | |
1587 | 1587 | if (cachep->flags & SLAB_RED_ZONE) { |
1588 | - printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n", | |
1589 | - *dbg_redzone1(cachep, objp), | |
1590 | - *dbg_redzone2(cachep, objp)); | |
1588 | + pr_err("Redzone: 0x%llx/0x%llx\n", | |
1589 | + *dbg_redzone1(cachep, objp), | |
1590 | + *dbg_redzone2(cachep, objp)); | |
1591 | 1591 | } |
1592 | 1592 | |
1593 | 1593 | if (cachep->flags & SLAB_STORE_USER) { |
1594 | - printk(KERN_ERR "Last user: [<%p>](%pSR)\n", | |
1594 | + pr_err("Last user: [<%p>](%pSR)\n", | |
1595 | 1595 | *dbg_userword(cachep, objp), |
1596 | 1596 | *dbg_userword(cachep, objp)); |
1597 | 1597 | } |
... | ... | @@ -1627,9 +1627,9 @@ |
1627 | 1627 | /* Mismatch ! */ |
1628 | 1628 | /* Print header */ |
1629 | 1629 | if (lines == 0) { |
1630 | - printk(KERN_ERR | |
1631 | - "Slab corruption (%s): %s start=%p, len=%d\n", | |
1632 | - print_tainted(), cachep->name, realobj, size); | |
1630 | + pr_err("Slab corruption (%s): %s start=%p, len=%d\n", | |
1631 | + print_tainted(), cachep->name, | |
1632 | + realobj, size); | |
1633 | 1633 | print_objinfo(cachep, objp, 0); |
1634 | 1634 | } |
1635 | 1635 | /* Hexdump the affected line */ |
1636 | 1636 | |
... | ... | @@ -1656,15 +1656,13 @@ |
1656 | 1656 | if (objnr) { |
1657 | 1657 | objp = index_to_obj(cachep, page, objnr - 1); |
1658 | 1658 | realobj = (char *)objp + obj_offset(cachep); |
1659 | - printk(KERN_ERR "Prev obj: start=%p, len=%d\n", | |
1660 | - realobj, size); | |
1659 | + pr_err("Prev obj: start=%p, len=%d\n", realobj, size); | |
1661 | 1660 | print_objinfo(cachep, objp, 2); |
1662 | 1661 | } |
1663 | 1662 | if (objnr + 1 < cachep->num) { |
1664 | 1663 | objp = index_to_obj(cachep, page, objnr + 1); |
1665 | 1664 | realobj = (char *)objp + obj_offset(cachep); |
1666 | - printk(KERN_ERR "Next obj: start=%p, len=%d\n", | |
1667 | - realobj, size); | |
1665 | + pr_err("Next obj: start=%p, len=%d\n", realobj, size); | |
1668 | 1666 | print_objinfo(cachep, objp, 2); |
1669 | 1667 | } |
1670 | 1668 | } |
... | ... | @@ -2463,7 +2461,7 @@ |
2463 | 2461 | /* Verify double free bug */ |
2464 | 2462 | for (i = page->active; i < cachep->num; i++) { |
2465 | 2463 | if (get_free_obj(page, i) == objnr) { |
2466 | - printk(KERN_ERR "slab: double free detected in cache '%s', objp %p\n", | |
2464 | + pr_err("slab: double free detected in cache '%s', objp %p\n", | |
2467 | 2465 | cachep->name, objp); |
2468 | 2466 | BUG(); |
2469 | 2467 | } |
... | ... | @@ -2583,7 +2581,7 @@ |
2583 | 2581 | static void kfree_debugcheck(const void *objp) |
2584 | 2582 | { |
2585 | 2583 | if (!virt_addr_valid(objp)) { |
2586 | - printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", | |
2584 | + pr_err("kfree_debugcheck: out of range ptr %lxh\n", | |
2587 | 2585 | (unsigned long)objp); |
2588 | 2586 | BUG(); |
2589 | 2587 | } |
... | ... | @@ -2607,8 +2605,8 @@ |
2607 | 2605 | else |
2608 | 2606 | slab_error(cache, "memory outside object was overwritten"); |
2609 | 2607 | |
2610 | - printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", | |
2611 | - obj, redzone1, redzone2); | |
2608 | + pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", | |
2609 | + obj, redzone1, redzone2); | |
2612 | 2610 | } |
2613 | 2611 | |
2614 | 2612 | static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, |
... | ... | @@ -2896,10 +2894,9 @@ |
2896 | 2894 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || |
2897 | 2895 | *dbg_redzone2(cachep, objp) != RED_INACTIVE) { |
2898 | 2896 | slab_error(cachep, "double free, or memory outside object was overwritten"); |
2899 | - printk(KERN_ERR | |
2900 | - "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", | |
2901 | - objp, *dbg_redzone1(cachep, objp), | |
2902 | - *dbg_redzone2(cachep, objp)); | |
2897 | + pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", | |
2898 | + objp, *dbg_redzone1(cachep, objp), | |
2899 | + *dbg_redzone2(cachep, objp)); | |
2903 | 2900 | } |
2904 | 2901 | *dbg_redzone1(cachep, objp) = RED_ACTIVE; |
2905 | 2902 | *dbg_redzone2(cachep, objp) = RED_ACTIVE; |
... | ... | @@ -2910,7 +2907,7 @@ |
2910 | 2907 | cachep->ctor(objp); |
2911 | 2908 | if (ARCH_SLAB_MINALIGN && |
2912 | 2909 | ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { |
2913 | - printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", | |
2910 | + pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", | |
2914 | 2911 | objp, (int)ARCH_SLAB_MINALIGN); |
2915 | 2912 | } |
2916 | 2913 | return objp; |
... | ... | @@ -3837,7 +3834,7 @@ |
3837 | 3834 | skip_setup: |
3838 | 3835 | err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); |
3839 | 3836 | if (err) |
3840 | - printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", | |
3837 | + pr_err("enable_cpucache failed for %s, error %d\n", | |
3841 | 3838 | cachep->name, -err); |
3842 | 3839 | return err; |
3843 | 3840 | } |
... | ... | @@ -3993,7 +3990,7 @@ |
3993 | 3990 | |
3994 | 3991 | name = cachep->name; |
3995 | 3992 | if (error) |
3996 | - printk(KERN_ERR "slab: cache %s error: %s\n", name, error); | |
3993 | + pr_err("slab: cache %s error: %s\n", name, error); | |
3997 | 3994 | |
3998 | 3995 | sinfo->active_objs = active_objs; |
3999 | 3996 | sinfo->num_objs = num_objs; |
mm/slab_common.c
... | ... | @@ -442,7 +442,7 @@ |
442 | 442 | panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", |
443 | 443 | name, err); |
444 | 444 | else { |
445 | - printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d", | |
445 | + pr_warn("kmem_cache_create(%s) failed with error %d\n", | |
446 | 446 | name, err); |
447 | 447 | dump_stack(); |
448 | 448 | } |
mm/sparse-vmemmap.c
... | ... | @@ -166,8 +166,8 @@ |
166 | 166 | int actual_node = early_pfn_to_nid(pfn); |
167 | 167 | |
168 | 168 | if (node_distance(actual_node, node) > LOCAL_DISTANCE) |
169 | - printk(KERN_WARNING "[%lx-%lx] potential offnode page_structs\n", | |
170 | - start, end - 1); | |
169 | + pr_warn("[%lx-%lx] potential offnode page_structs\n", | |
170 | + start, end - 1); | |
171 | 171 | } |
172 | 172 | |
173 | 173 | pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) |
... | ... | @@ -292,7 +292,7 @@ |
292 | 292 | if (map_map[pnum]) |
293 | 293 | continue; |
294 | 294 | ms = __nr_to_section(pnum); |
295 | - printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n", | |
295 | + pr_err("%s: sparsemem memory map backing failed some memory will not be available\n", | |
296 | 296 | __func__); |
297 | 297 | ms->section_mem_map = 0; |
298 | 298 | } |
mm/sparse.c
... | ... | @@ -313,9 +313,8 @@ |
313 | 313 | |
314 | 314 | usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr)); |
315 | 315 | if (usemap_nid != nid) { |
316 | - printk(KERN_INFO | |
317 | - "node %d must be removed before remove section %ld\n", | |
318 | - nid, usemap_snr); | |
316 | + pr_info("node %d must be removed before remove section %ld\n", | |
317 | + nid, usemap_snr); | |
319 | 318 | return; |
320 | 319 | } |
321 | 320 | /* |
... | ... | @@ -324,10 +323,8 @@ |
324 | 323 | * gather other removable sections for dynamic partitioning. |
325 | 324 | * Just notify un-removable section's number here. |
326 | 325 | */ |
327 | - printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr, | |
328 | - pgdat_snr, nid); | |
329 | - printk(KERN_CONT | |
330 | - " have a circular dependency on usemap and pgdat allocations\n"); | |
326 | + pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n", | |
327 | + usemap_snr, pgdat_snr, nid); | |
331 | 328 | } |
332 | 329 | #else |
333 | 330 | static unsigned long * __init |
... | ... | @@ -355,7 +352,7 @@ |
355 | 352 | usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid), |
356 | 353 | size * usemap_count); |
357 | 354 | if (!usemap) { |
358 | - printk(KERN_WARNING "%s: allocation failed\n", __func__); | |
355 | + pr_warn("%s: allocation failed\n", __func__); | |
359 | 356 | return; |
360 | 357 | } |
361 | 358 | |
... | ... | @@ -428,7 +425,7 @@ |
428 | 425 | if (map_map[pnum]) |
429 | 426 | continue; |
430 | 427 | ms = __nr_to_section(pnum); |
431 | - printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n", | |
428 | + pr_err("%s: sparsemem memory map backing failed some memory will not be available\n", | |
432 | 429 | __func__); |
433 | 430 | ms->section_mem_map = 0; |
434 | 431 | } |
... | ... | @@ -456,7 +453,7 @@ |
456 | 453 | if (map) |
457 | 454 | return map; |
458 | 455 | |
459 | - printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n", | |
456 | + pr_err("%s: sparsemem memory map backing failed some memory will not be available\n", | |
460 | 457 | __func__); |
461 | 458 | ms->section_mem_map = 0; |
462 | 459 | return NULL; |
mm/swap_cgroup.c
... | ... | @@ -174,9 +174,8 @@ |
174 | 174 | |
175 | 175 | return 0; |
176 | 176 | nomem: |
177 | - printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); | |
178 | - printk(KERN_INFO | |
179 | - "swap_cgroup can be disabled by swapaccount=0 boot option\n"); | |
177 | + pr_info("couldn't allocate enough memory for swap_cgroup\n"); | |
178 | + pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n"); | |
180 | 179 | return -ENOMEM; |
181 | 180 | } |
182 | 181 |