Commit c52851b60cc0aaaf974ff0e49989fb698220447d
Committed by Len Brown
1 parent 09b4d1ee88
Exists in master and in 7 other branches
P-state software coordination for speedstep-centrino
http://bugzilla.kernel.org/show_bug.cgi?id=5737

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Showing 1 changed file with 180 additions and 66 deletions (side-by-side diff)
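In short, the patch replaces the driver's single static acpi_processor_performance with one structure per CPU, pre-registers them so the ACPI core can evaluate the _PSD dependency domains, inherits the resulting shared_cpu_map/shared_type into the cpufreq policy, and coordinates P-state writes across every CPU in a domain. A rough plain-C sketch of the coordination rule (types and names below are invented for illustration; the kernel uses cpumask_t and the CPUFREQ_SHARED_TYPE_* constants):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's coordination types. */
enum shared_type { SHARED_TYPE_ALL, SHARED_TYPE_ANY };

struct domain {
	unsigned int cpu_mask;		/* CPUs sharing the P-state domain */
	enum shared_type type;		/* how transitions must be coordinated */
};

/* Return the mask of CPUs whose MSR must be written for one transition. */
static unsigned int cpus_to_write(const struct domain *d, unsigned int initiator)
{
	return (d->type == SHARED_TYPE_ANY) ? (1u << initiator) : d->cpu_mask;
}

int main(void)
{
	struct domain d = { .cpu_mask = 0x3, .type = SHARED_TYPE_ANY };

	printf("ANY: write mask %#x\n", cpus_to_write(&d, 0));
	d.type = SHARED_TYPE_ALL;
	printf("ALL: write mask %#x\n", cpus_to_write(&d, 0));
	return 0;
}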
arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
... | ... | @@ -351,9 +351,38 @@ |
351 | 351 | |
352 | 352 | #ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI |
353 | 353 | |
354 | -static struct acpi_processor_performance p; | |
354 | +static struct acpi_processor_performance *acpi_perf_data[NR_CPUS]; | |
355 | 355 | |
356 | 356 | /* |
357 | + * centrino_cpu_early_init_acpi - Do the preregistering with ACPI P-States | |
358 | + * library | |
359 | + * | |
360 | + * Before doing the actual init, we need to do _PSD related setup whenever | |
361 | + * supported by the BIOS. These are handled by this early_init routine. | |
362 | + */ | |
363 | +static int centrino_cpu_early_init_acpi(void) | |
364 | +{ | |
365 | + unsigned int i, j; | |
366 | + struct acpi_processor_performance *data; | |
367 | + | |
368 | + for_each_cpu(i) { | |
369 | + data = kzalloc(sizeof(struct acpi_processor_performance), | |
370 | + GFP_KERNEL); | |
371 | + if (!data) { | |
372 | + for_each_cpu(j) { | |
373 | + kfree(acpi_perf_data[j]); | |
374 | + acpi_perf_data[j] = NULL; | |
375 | + } | |
376 | + return (-ENOMEM); | |
377 | + } | |
378 | + acpi_perf_data[i] = data; | |
379 | + } | |
380 | + | |
381 | + acpi_processor_preregister_performance(acpi_perf_data); | |
382 | + return 0; | |
383 | +} | |
384 | + | |
385 | +/* | |
357 | 386 | * centrino_cpu_init_acpi - register with ACPI P-States library |
358 | 387 | * |
359 | 388 | * Register with the ACPI P-States library (part of drivers/acpi/processor.c) |
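The hunk above allocates an acpi_processor_performance for every possible CPU and hands the whole array to acpi_processor_preregister_performance(), which fills in the _PSD domain information before any per-policy init runs; if any allocation fails, everything allocated so far is released. The same allocate-all-or-roll-back pattern as a self-contained userspace sketch (NCPUS, perf_data and early_init are stand-ins, not the kernel's names):

#include <stdlib.h>

#define NCPUS 8				/* stand-in for NR_CPUS */

struct perf_data { int state_count; };	/* stand-in for acpi_processor_performance */

static struct perf_data *perf[NCPUS];

/* Allocate a slot for every CPU, or release everything and fail. */
static int early_init(void)
{
	unsigned int i, j;

	for (i = 0; i < NCPUS; i++) {
		perf[i] = calloc(1, sizeof(*perf[i]));
		if (!perf[i]) {
			for (j = 0; j < NCPUS; j++) {
				free(perf[j]);	/* free(NULL) is a no-op */
				perf[j] = NULL;
			}
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return early_init() ? 1 : 0;
}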
... | ... | @@ -365,46 +394,51 @@ |
365 | 394 | unsigned long cur_freq; |
366 | 395 | int result = 0, i; |
367 | 396 | unsigned int cpu = policy->cpu; |
397 | + struct acpi_processor_performance *p; | |
368 | 398 | |
399 | + p = acpi_perf_data[cpu]; | |
400 | + | |
369 | 401 | /* register with ACPI core */ |
370 | - if (acpi_processor_register_performance(&p, cpu)) { | |
402 | + if (acpi_processor_register_performance(p, cpu)) { | |
371 | 403 | dprintk(KERN_INFO PFX "obtaining ACPI data failed\n"); |
372 | 404 | return -EIO; |
373 | 405 | } |
406 | + policy->cpus = p->shared_cpu_map; | |
407 | + policy->shared_type = p->shared_type; | |
374 | 408 | |
375 | 409 | /* verify the acpi_data */ |
376 | - if (p.state_count <= 1) { | |
410 | + if (p->state_count <= 1) { | |
377 | 411 | dprintk("No P-States\n"); |
378 | 412 | result = -ENODEV; |
379 | 413 | goto err_unreg; |
380 | 414 | } |
381 | 415 | |
382 | - if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || | |
383 | - (p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | |
416 | + if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || | |
417 | + (p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | |
384 | 418 | dprintk("Invalid control/status registers (%x - %x)\n", |
385 | - p.control_register.space_id, p.status_register.space_id); | |
419 | + p->control_register.space_id, p->status_register.space_id); | |
386 | 420 | result = -EIO; |
387 | 421 | goto err_unreg; |
388 | 422 | } |
389 | 423 | |
390 | - for (i=0; i<p.state_count; i++) { | |
391 | - if (p.states[i].control != p.states[i].status) { | |
424 | + for (i=0; i<p->state_count; i++) { | |
425 | + if (p->states[i].control != p->states[i].status) { | |
392 | 426 | dprintk("Different control (%llu) and status values (%llu)\n", |
393 | - p.states[i].control, p.states[i].status); | |
427 | + p->states[i].control, p->states[i].status); | |
394 | 428 | result = -EINVAL; |
395 | 429 | goto err_unreg; |
396 | 430 | } |
397 | 431 | |
398 | - if (!p.states[i].core_frequency) { | |
432 | + if (!p->states[i].core_frequency) { | |
399 | 433 | dprintk("Zero core frequency for state %u\n", i); |
400 | 434 | result = -EINVAL; |
401 | 435 | goto err_unreg; |
402 | 436 | } |
403 | 437 | |
404 | - if (p.states[i].core_frequency > p.states[0].core_frequency) { | |
438 | + if (p->states[i].core_frequency > p->states[0].core_frequency) { | |
405 | 439 | dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i, |
406 | - p.states[i].core_frequency, p.states[0].core_frequency); | |
407 | - p.states[i].core_frequency = 0; | |
440 | + p->states[i].core_frequency, p->states[0].core_frequency); | |
441 | + p->states[i].core_frequency = 0; | |
408 | 442 | continue; |
409 | 443 | } |
410 | 444 | } |
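With the per-CPU structure in place, the per-policy init now also inherits the coordination domain from ACPI, taking policy->cpus from p->shared_cpu_map and policy->shared_type from p->shared_type, while the existing sanity checks simply switch from the old static p to the per-CPU pointer. The table-level checks themselves are unchanged: at least two P-states, matching control/status values, non-zero core frequencies, and nothing faster than P0 (such states are zeroed and later marked invalid). A condensed userspace rendering of those checks (struct px and validate are simplified stand-ins):

#include <stdio.h>

struct px { unsigned long long control, status, core_frequency; };

/* Apply the table-level checks from centrino_cpu_init_acpi to a P-state list. */
static int validate(struct px *states, int count)
{
	int i;

	if (count <= 1)
		return -1;			/* no usable P-states */

	for (i = 0; i < count; i++) {
		if (states[i].control != states[i].status)
			return -1;		/* control and status must match */
		if (!states[i].core_frequency)
			return -1;		/* zero core frequency is invalid */
		if (states[i].core_frequency > states[0].core_frequency)
			states[i].core_frequency = 0;	/* faster than P0: mark for skipping */
	}
	return 0;
}

int main(void)
{
	struct px t[] = { { 0x0a, 0x0a, 1600 }, { 0x08, 0x08, 1200 } };

	printf("table valid: %s\n", validate(t, 2) == 0 ? "yes" : "no");
	return 0;
}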
... | ... | @@ -416,26 +450,26 @@ |
416 | 450 | } |
417 | 451 | |
418 | 452 | centrino_model[cpu]->model_name=NULL; |
419 | - centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000; | |
453 | + centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000; | |
420 | 454 | centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) * |
421 | - (p.state_count + 1), GFP_KERNEL); | |
455 | + (p->state_count + 1), GFP_KERNEL); | |
422 | 456 | if (!centrino_model[cpu]->op_points) { |
423 | 457 | result = -ENOMEM; |
424 | 458 | goto err_kfree; |
425 | 459 | } |
426 | 460 | |
427 | - for (i=0; i<p.state_count; i++) { | |
428 | - centrino_model[cpu]->op_points[i].index = p.states[i].control; | |
429 | - centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000; | |
461 | + for (i=0; i<p->state_count; i++) { | |
462 | + centrino_model[cpu]->op_points[i].index = p->states[i].control; | |
463 | + centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000; | |
430 | 464 | dprintk("adding state %i with frequency %u and control value %04x\n", |
431 | 465 | i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index); |
432 | 466 | } |
433 | - centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END; | |
467 | + centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END; | |
434 | 468 | |
435 | 469 | cur_freq = get_cur_freq(cpu); |
436 | 470 | |
437 | - for (i=0; i<p.state_count; i++) { | |
438 | - if (!p.states[i].core_frequency) { | |
471 | + for (i=0; i<p->state_count; i++) { | |
472 | + if (!p->states[i].core_frequency) { | |
439 | 473 | dprintk("skipping state %u\n", i); |
440 | 474 | centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID; |
441 | 475 | continue; |
... | ... | @@ -451,7 +485,7 @@ |
451 | 485 | } |
452 | 486 | |
453 | 487 | if (cur_freq == centrino_model[cpu]->op_points[i].frequency) |
454 | - p.state = i; | |
488 | + p->state = i; | |
455 | 489 | } |
456 | 490 | |
457 | 491 | /* notify BIOS that we exist */ |
... | ... | @@ -464,12 +498,13 @@ |
464 | 498 | err_kfree: |
465 | 499 | kfree(centrino_model[cpu]); |
466 | 500 | err_unreg: |
467 | - acpi_processor_unregister_performance(&p, cpu); | |
501 | + acpi_processor_unregister_performance(p, cpu); | |
468 | 502 | dprintk(KERN_INFO PFX "invalid ACPI data\n"); |
469 | 503 | return (result); |
470 | 504 | } |
471 | 505 | #else |
472 | 506 | static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; } |
507 | +static inline int centrino_cpu_early_init_acpi(void) { return 0; } | |
473 | 508 | #endif |
474 | 509 | |
475 | 510 | static int centrino_cpu_init(struct cpufreq_policy *policy) |
... | ... | @@ -557,10 +592,15 @@ |
557 | 592 | |
558 | 593 | #ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI |
559 | 594 | if (!centrino_model[cpu]->model_name) { |
560 | - dprintk("unregistering and freeing ACPI data\n"); | |
561 | - acpi_processor_unregister_performance(&p, cpu); | |
562 | - kfree(centrino_model[cpu]->op_points); | |
563 | - kfree(centrino_model[cpu]); | |
595 | + static struct acpi_processor_performance *p; | |
596 | + | |
597 | + if (acpi_perf_data[cpu]) { | |
598 | + p = acpi_perf_data[cpu]; | |
599 | + dprintk("unregistering and freeing ACPI data\n"); | |
600 | + acpi_processor_unregister_performance(p, cpu); | |
601 | + kfree(centrino_model[cpu]->op_points); | |
602 | + kfree(centrino_model[cpu]); | |
603 | + } | |
564 | 604 | } |
565 | 605 | #endif |
566 | 606 | |
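In the per-CPU exit path, the unregister-and-free sequence is now guarded by acpi_perf_data[cpu] being non-NULL, since early init may have failed; the acpi_perf_data entries themselves are released later in centrino_exit(). A minimal sketch of that guarded-cleanup shape (all names invented for the example):

#include <stdlib.h>

struct perf_data { int dummy; };		/* stand-in for acpi_processor_performance */
struct model_data { int *op_points; };		/* stand-in for the centrino model entry */

static struct perf_data *perf_for_cpu;		/* stand-in for acpi_perf_data[cpu] */
static struct model_data *model_for_cpu;	/* stand-in for centrino_model[cpu] */

/* Mirror the shape of the guarded cleanup: only tear down if ACPI data exists. */
static void cpu_exit_cleanup(void)
{
	if (perf_for_cpu && model_for_cpu) {
		/* the real code calls acpi_processor_unregister_performance() here */
		free(model_for_cpu->op_points);
		free(model_for_cpu);
		model_for_cpu = NULL;
	}
}

int main(void)
{
	perf_for_cpu = calloc(1, sizeof(*perf_for_cpu));
	model_for_cpu = calloc(1, sizeof(*model_for_cpu));
	if (model_for_cpu)
		model_for_cpu->op_points = calloc(4, sizeof(int));

	cpu_exit_cleanup();
	free(perf_for_cpu);	/* the real driver frees acpi_perf_data[] in centrino_exit() */
	return 0;
}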
... | ... | @@ -594,63 +634,124 @@ |
594 | 634 | unsigned int relation) |
595 | 635 | { |
596 | 636 | unsigned int newstate = 0; |
597 | - unsigned int msr, oldmsr, h, cpu = policy->cpu; | |
637 | + unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; | |
598 | 638 | struct cpufreq_freqs freqs; |
639 | + cpumask_t online_policy_cpus; | |
599 | 640 | cpumask_t saved_mask; |
600 | - int retval; | |
641 | + cpumask_t set_mask; | |
642 | + cpumask_t covered_cpus; | |
643 | + int retval = 0; | |
644 | + unsigned int j, k, first_cpu, tmp; | |
601 | 645 | |
602 | - if (centrino_model[cpu] == NULL) | |
646 | + if (unlikely(centrino_model[cpu] == NULL)) | |
603 | 647 | return -ENODEV; |
604 | 648 | |
605 | - /* | |
606 | - * Support for SMP systems. | |
607 | - * Make sure we are running on the CPU that wants to change frequency | |
608 | - */ | |
609 | - saved_mask = current->cpus_allowed; | |
610 | - set_cpus_allowed(current, policy->cpus); | |
611 | - if (!cpu_isset(smp_processor_id(), policy->cpus)) { | |
612 | - dprintk("couldn't limit to CPUs in this domain\n"); | |
613 | - return(-EAGAIN); | |
649 | + if (unlikely(cpufreq_frequency_table_target(policy, | |
650 | + centrino_model[cpu]->op_points, | |
651 | + target_freq, | |
652 | + relation, | |
653 | + &newstate))) { | |
654 | + return -EINVAL; | |
614 | 655 | } |
615 | 656 | |
616 | - if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq, | |
617 | - relation, &newstate)) { | |
618 | - retval = -EINVAL; | |
619 | - goto migrate_end; | |
620 | - } | |
657 | + /* cpufreq holds the hotplug lock, so we are safe from here on */ | |
658 | + cpus_and(online_policy_cpus, cpu_online_map, policy->cpus); | |
621 | 659 | |
622 | - msr = centrino_model[cpu]->op_points[newstate].index; | |
623 | - rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); | |
660 | + saved_mask = current->cpus_allowed; | |
661 | + first_cpu = 1; | |
662 | + cpus_clear(covered_cpus); | |
663 | + for_each_cpu_mask(j, online_policy_cpus) { | |
664 | + /* | |
665 | + * Support for SMP systems. | |
666 | + * Make sure we are running on CPU that wants to change freq | |
667 | + */ | |
668 | + cpus_clear(set_mask); | |
669 | + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) | |
670 | + cpus_or(set_mask, set_mask, online_policy_cpus); | |
671 | + else | |
672 | + cpu_set(j, set_mask); | |
624 | 673 | |
625 | - if (msr == (oldmsr & 0xffff)) { | |
626 | - retval = 0; | |
627 | - dprintk("no change needed - msr was and needs to be %x\n", oldmsr); | |
628 | - goto migrate_end; | |
629 | - } | |
674 | + set_cpus_allowed(current, set_mask); | |
675 | + if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) { | |
676 | + dprintk("couldn't limit to CPUs in this domain\n"); | |
677 | + retval = -EAGAIN; | |
678 | + if (first_cpu) { | |
679 | + /* We haven't started the transition yet. */ | |
680 | + goto migrate_end; | |
681 | + } | |
682 | + break; | |
683 | + } | |
630 | 684 | |
631 | - freqs.cpu = cpu; | |
632 | - freqs.old = extract_clock(oldmsr, cpu, 0); | |
633 | - freqs.new = extract_clock(msr, cpu, 0); | |
685 | + msr = centrino_model[cpu]->op_points[newstate].index; | |
634 | 686 | |
635 | - dprintk("target=%dkHz old=%d new=%d msr=%04x\n", | |
636 | - target_freq, freqs.old, freqs.new, msr); | |
687 | + if (first_cpu) { | |
688 | + rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); | |
689 | + if (msr == (oldmsr & 0xffff)) { | |
690 | + dprintk("no change needed - msr was and needs " | |
691 | + "to be %x\n", oldmsr); | |
692 | + retval = 0; | |
693 | + goto migrate_end; | |
694 | + } | |
637 | 695 | |
638 | - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | |
696 | + freqs.old = extract_clock(oldmsr, cpu, 0); | |
697 | + freqs.new = extract_clock(msr, cpu, 0); | |
639 | 698 | |
640 | - /* all but 16 LSB are "reserved", so treat them with | |
641 | - care */ | |
642 | - oldmsr &= ~0xffff; | |
643 | - msr &= 0xffff; | |
644 | - oldmsr |= msr; | |
699 | + dprintk("target=%dkHz old=%d new=%d msr=%04x\n", | |
700 | + target_freq, freqs.old, freqs.new, msr); | |
645 | 701 | |
646 | - wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); | |
702 | + for_each_cpu_mask(k, online_policy_cpus) { | |
703 | + freqs.cpu = k; | |
704 | + cpufreq_notify_transition(&freqs, | |
705 | + CPUFREQ_PRECHANGE); | |
706 | + } | |
647 | 707 | |
648 | - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | |
708 | + first_cpu = 0; | |
709 | + /* all but 16 LSB are reserved, treat them with care */ | |
710 | + oldmsr &= ~0xffff; | |
711 | + msr &= 0xffff; | |
712 | + oldmsr |= msr; | |
713 | + } | |
649 | 714 | |
650 | - retval = 0; | |
715 | + wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); | |
716 | + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) | |
717 | + break; | |
718 | + | |
719 | + cpu_set(j, covered_cpus); | |
720 | + } | |
721 | + | |
722 | + for_each_cpu_mask(k, online_policy_cpus) { | |
723 | + freqs.cpu = k; | |
724 | + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | |
725 | + } | |
726 | + | |
727 | + if (unlikely(retval)) { | |
728 | + /* | |
729 | + * We have failed halfway through the frequency change. | |
730 | + * We have sent callbacks to policy->cpus and | |
731 | + * MSRs have already been written on covered_cpus. |
732 | + * Best effort undo.. | |
733 | + */ | |
734 | + | |
735 | + if (!cpus_empty(covered_cpus)) { | |
736 | + for_each_cpu_mask(j, covered_cpus) { | |
737 | + set_cpus_allowed(current, cpumask_of_cpu(j)); | |
738 | + wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); | |
739 | + } | |
740 | + } | |
741 | + | |
742 | + tmp = freqs.new; | |
743 | + freqs.new = freqs.old; | |
744 | + freqs.old = tmp; | |
745 | + for_each_cpu_mask(j, online_policy_cpus) { | |
746 | + freqs.cpu = j; | |
747 | + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | |
748 | + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | |
749 | + } | |
750 | + } | |
751 | + | |
651 | 752 | migrate_end: |
652 | 753 | set_cpus_allowed(current, saved_mask); |
653 | - return (retval); | |
754 | + return 0; | |
654 | 755 | } |
655 | 756 | |
656 | 757 | static struct freq_attr* centrino_attr[] = { |
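The rewritten centrino_target() walks every online CPU in the policy instead of migrating once. For CPUFREQ_SHARED_TYPE_ANY domains a single MSR write, performed from any CPU in the domain, is enough; otherwise the task migrates to each CPU in turn and writes its MSR, with covered_cpus tracking progress so a mid-sequence failure can be undone best-effort and the PRE/POSTCHANGE notifications replayed with old and new frequencies swapped. A compressed, userspace-runnable skeleton of that control flow (write_msr(), the bitmask CPU sets and the failure handling are stand-ins; the real code migrates with set_cpus_allowed() and uses rdmsr/wrmsr):

#include <stdio.h>

#define NCPUS 4

enum shared_type { SHARED_TYPE_ALL, SHARED_TYPE_ANY };

/* Pretend MSR write; always succeeds here, return nonzero to simulate failure. */
static int write_msr(int cpu, unsigned int val)
{
	printf("cpu%d <- %#x\n", cpu, val);
	return 0;
}

/*
 * Coordinate a P-state change across a domain, mirroring the shape of the
 * new centrino_target(): one write for ANY coordination, per-CPU writes
 * otherwise, with best-effort rollback of CPUs already written on failure.
 */
static int set_domain_pstate(unsigned int online_mask, enum shared_type type,
			     unsigned int new_val, unsigned int old_val)
{
	unsigned int covered = 0;
	int cpu, retval = 0;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (!(online_mask & (1u << cpu)))
			continue;
		retval = write_msr(cpu, new_val);
		if (retval)
			break;
		if (type == SHARED_TYPE_ANY)
			return 0;	/* one write covers the whole domain */
		covered |= 1u << cpu;
	}

	if (retval) {
		/* Undo the CPUs already written, as the "best effort undo" block intends. */
		for (cpu = 0; cpu < NCPUS; cpu++)
			if (covered & (1u << cpu))
				write_msr(cpu, old_val);
	}
	return retval;
}

int main(void)
{
	return set_domain_pstate(0xf, SHARED_TYPE_ALL, 0x0815, 0x0a1c);
}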
... | ... | @@ -692,12 +793,25 @@ |
692 | 793 | if (!cpu_has(cpu, X86_FEATURE_EST)) |
693 | 794 | return -ENODEV; |
694 | 795 | |
796 | + centrino_cpu_early_init_acpi(); | |
797 | + | |
695 | 798 | return cpufreq_register_driver(¢rino_driver); |
696 | 799 | } |
697 | 800 | |
698 | 801 | static void __exit centrino_exit(void) |
699 | 802 | { |
803 | +#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI | |
804 | + unsigned int j; | |
805 | +#endif | |
806 | + | |
700 | 807 | cpufreq_unregister_driver(¢rino_driver); |
808 | + | |
809 | +#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI | |
810 | + for_each_cpu(j) { | |
811 | + kfree(acpi_perf_data[j]); | |
812 | + acpi_perf_data[j] = NULL; | |
813 | + } | |
814 | +#endif | |
701 | 815 | } |
702 | 816 | |
703 | 817 | MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>"); |