Commit e722d8daafb974b9ad1bbaf42f384a5ea5929f5f

Authored by Sebastian Andrzej Siewior
Committed by Ingo Molnar
1 parent 24f73b9971

profile: Convert to hotplug state machine

Install the callbacks via the state machine and let the core invoke
the callbacks on the already online CPUs. A lot of code is removed because
a for-loop now handles both per-CPU hit buffers, and create_hash_tables()
is removed since its purpose is covered by the startup / teardown hooks.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153337.649867675@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 2 changed files with 66 additions and 116 deletions Side-by-side Diff

include/linux/cpuhotplug.h
... ... @@ -16,6 +16,7 @@
16 16 CPUHP_X86_APB_DEAD,
17 17 CPUHP_WORKQUEUE_PREP,
18 18 CPUHP_HRTIMERS_PREPARE,
  19 + CPUHP_PROFILE_PREPARE,
19 20 CPUHP_TIMERS_DEAD,
20 21 CPUHP_NOTIFY_PREPARE,
21 22 CPUHP_BRINGUP_CPU,
... ... @@ -328,68 +328,57 @@
328 328 put_cpu();
329 329 }
330 330  
331   -static int profile_cpu_callback(struct notifier_block *info,
332   - unsigned long action, void *__cpu)
  331 +static int profile_dead_cpu(unsigned int cpu)
333 332 {
334   - int node, cpu = (unsigned long)__cpu;
335 333 struct page *page;
  334 + int i;
336 335  
337   - switch (action) {
338   - case CPU_UP_PREPARE:
339   - case CPU_UP_PREPARE_FROZEN:
340   - node = cpu_to_mem(cpu);
341   - per_cpu(cpu_profile_flip, cpu) = 0;
342   - if (!per_cpu(cpu_profile_hits, cpu)[1]) {
343   - page = __alloc_pages_node(node,
344   - GFP_KERNEL | __GFP_ZERO,
345   - 0);
346   - if (!page)
347   - return notifier_from_errno(-ENOMEM);
348   - per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
349   - }
350   - if (!per_cpu(cpu_profile_hits, cpu)[0]) {
351   - page = __alloc_pages_node(node,
352   - GFP_KERNEL | __GFP_ZERO,
353   - 0);
354   - if (!page)
355   - goto out_free;
356   - per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
357   - }
358   - break;
359   -out_free:
360   - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
361   - per_cpu(cpu_profile_hits, cpu)[1] = NULL;
362   - __free_page(page);
363   - return notifier_from_errno(-ENOMEM);
364   - case CPU_ONLINE:
365   - case CPU_ONLINE_FROZEN:
366   - if (prof_cpu_mask != NULL)
367   - cpumask_set_cpu(cpu, prof_cpu_mask);
368   - break;
369   - case CPU_UP_CANCELED:
370   - case CPU_UP_CANCELED_FROZEN:
371   - case CPU_DEAD:
372   - case CPU_DEAD_FROZEN:
373   - if (prof_cpu_mask != NULL)
374   - cpumask_clear_cpu(cpu, prof_cpu_mask);
375   - if (per_cpu(cpu_profile_hits, cpu)[0]) {
376   - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
377   - per_cpu(cpu_profile_hits, cpu)[0] = NULL;
  336 + if (prof_cpu_mask != NULL)
  337 + cpumask_clear_cpu(cpu, prof_cpu_mask);
  338 +
  339 + for (i = 0; i < 2; i++) {
  340 + if (per_cpu(cpu_profile_hits, cpu)[i]) {
  341 + page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
  342 + per_cpu(cpu_profile_hits, cpu)[i] = NULL;
378 343 __free_page(page);
379 344 }
380   - if (per_cpu(cpu_profile_hits, cpu)[1]) {
381   - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
382   - per_cpu(cpu_profile_hits, cpu)[1] = NULL;
383   - __free_page(page);
  345 + }
  346 + return 0;
  347 +}
  348 +
  349 +static int profile_prepare_cpu(unsigned int cpu)
  350 +{
  351 + int i, node = cpu_to_mem(cpu);
  352 + struct page *page;
  353 +
  354 + per_cpu(cpu_profile_flip, cpu) = 0;
  355 +
  356 + for (i = 0; i < 2; i++) {
  357 + if (per_cpu(cpu_profile_hits, cpu)[i])
  358 + continue;
  359 +
  360 + page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
  361 + if (!page) {
  362 + profile_dead_cpu(cpu);
  363 + return -ENOMEM;
384 364 }
385   - break;
  365 + per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
  366 +
386 367 }
387   - return NOTIFY_OK;
  368 + return 0;
388 369 }
  370 +
  371 +static int profile_online_cpu(unsigned int cpu)
  372 +{
  373 + if (prof_cpu_mask != NULL)
  374 + cpumask_set_cpu(cpu, prof_cpu_mask);
  375 +
  376 + return 0;
  377 +}
  378 +
389 379 #else /* !CONFIG_SMP */
390 380 #define profile_flip_buffers() do { } while (0)
391 381 #define profile_discard_flip_buffers() do { } while (0)
392   -#define profile_cpu_callback NULL
393 382  
394 383 static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
395 384 {
396 385  
397 386  
398 387  
399 388  
400 389  
401 390  
402 391  
... ... @@ -531,83 +520,43 @@
531 520 .llseek = default_llseek,
532 521 };
533 522  
534   -#ifdef CONFIG_SMP
535   -static void profile_nop(void *unused)
  523 +int __ref create_proc_profile(void)
536 524 {
537   -}
538   -
539   -static int create_hash_tables(void)
540   -{
541   - int cpu;
542   -
543   - for_each_online_cpu(cpu) {
544   - int node = cpu_to_mem(cpu);
545   - struct page *page;
546   -
547   - page = __alloc_pages_node(node,
548   - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
549   - 0);
550   - if (!page)
551   - goto out_cleanup;
552   - per_cpu(cpu_profile_hits, cpu)[1]
553   - = (struct profile_hit *)page_address(page);
554   - page = __alloc_pages_node(node,
555   - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
556   - 0);
557   - if (!page)
558   - goto out_cleanup;
559   - per_cpu(cpu_profile_hits, cpu)[0]
560   - = (struct profile_hit *)page_address(page);
561   - }
562   - return 0;
563   -out_cleanup:
564   - prof_on = 0;
565   - smp_mb();
566   - on_each_cpu(profile_nop, NULL, 1);
567   - for_each_online_cpu(cpu) {
568   - struct page *page;
569   -
570   - if (per_cpu(cpu_profile_hits, cpu)[0]) {
571   - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
572   - per_cpu(cpu_profile_hits, cpu)[0] = NULL;
573   - __free_page(page);
574   - }
575   - if (per_cpu(cpu_profile_hits, cpu)[1]) {
576   - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
577   - per_cpu(cpu_profile_hits, cpu)[1] = NULL;
578   - __free_page(page);
579   - }
580   - }
581   - return -1;
582   -}
583   -#else
584   -#define create_hash_tables() ({ 0; })
  525 + struct proc_dir_entry *entry;
  526 +#ifdef CONFIG_SMP
  527 + enum cpuhp_state online_state;
585 528 #endif
586 529  
587   -int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
588   -{
589   - struct proc_dir_entry *entry;
590 530 int err = 0;
591 531  
592 532 if (!prof_on)
593 533 return 0;
  534 +#ifdef CONFIG_SMP
  535 + err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
  536 + profile_prepare_cpu, profile_dead_cpu);
  537 + if (err)
  538 + return err;
594 539  
595   - cpu_notifier_register_begin();
596   -
597   - if (create_hash_tables()) {
598   - err = -ENOMEM;
599   - goto out;
600   - }
601   -
  540 + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
  541 + profile_online_cpu, NULL);
  542 + if (err < 0)
  543 + goto err_state_prep;
  544 + online_state = err;
  545 + err = 0;
  546 +#endif
602 547 entry = proc_create("profile", S_IWUSR | S_IRUGO,
603 548 NULL, &proc_profile_operations);
604 549 if (!entry)
605   - goto out;
  550 + goto err_state_onl;
606 551 proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
607   - __hotcpu_notifier(profile_cpu_callback, 0);
608 552  
609   -out:
610   - cpu_notifier_register_done();
  553 + return err;
  554 +err_state_onl:
  555 +#ifdef CONFIG_SMP
  556 + cpuhp_remove_state(online_state);
  557 +err_state_prep:
  558 + cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
  559 +#endif
611 560 return err;
612 561 }
613 562 subsys_initcall(create_proc_profile);