Commit f72a209a3e694ecb8d3ceed4671d98c4364e00e3

Authored by Linus Torvalds

Merge branches 'irq-urgent-for-linus', 'x86-urgent-for-linus' and 'sched-urgent-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip

* 'irq-urgent-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
  irq: Fix check for already initialized irq_domain in irq_domain_add
  irq: Add declaration of irq_domain_simple_ops to irqdomain.h

* 'x86-urgent-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
  x86/rtc: Don't recursively acquire rtc_lock

* 'sched-urgent-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
  posix-cpu-timers: Cure SMP wobbles
  sched: Fix up wchan borkage
  sched/rt: Migrate equal priority tasks to available CPUs

Showing 8 changed files (side-by-side diff)

arch/x86/kernel/rtc.c
... ... @@ -42,8 +42,11 @@
42 42 {
43 43 int real_seconds, real_minutes, cmos_minutes;
44 44 unsigned char save_control, save_freq_select;
  45 + unsigned long flags;
45 46 int retval = 0;
46 47  
  48 + spin_lock_irqsave(&rtc_lock, flags);
  49 +
47 50 /* tell the clock it's being set */
48 51 save_control = CMOS_READ(RTC_CONTROL);
49 52 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
50 53  
51 54  
... ... @@ -93,13 +96,18 @@
93 96 CMOS_WRITE(save_control, RTC_CONTROL);
94 97 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
95 98  
  99 + spin_unlock_irqrestore(&rtc_lock, flags);
  100 +
96 101 return retval;
97 102 }
98 103  
99 104 unsigned long mach_get_cmos_time(void)
100 105 {
101 106 unsigned int status, year, mon, day, hour, min, sec, century = 0;
  107 + unsigned long flags;
102 108  
  109 + spin_lock_irqsave(&rtc_lock, flags);
  110 +
103 111 /*
104 112 * If UIP is clear, then we have >= 244 microseconds before
105 113 * RTC registers will be updated. Spec sheet says that this
... ... @@ -125,6 +133,8 @@
125 133 status = CMOS_READ(RTC_CONTROL);
126 134 WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
127 135  
  136 + spin_unlock_irqrestore(&rtc_lock, flags);
  137 +
128 138 if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
129 139 sec = bcd2bin(sec);
130 140 min = bcd2bin(min);
131 141  
132 142  
133 143  
... ... @@ -169,24 +179,15 @@
169 179  
170 180 int update_persistent_clock(struct timespec now)
171 181 {
172   - unsigned long flags;
173   - int retval;
174   -
175   - spin_lock_irqsave(&rtc_lock, flags);
176   - retval = x86_platform.set_wallclock(now.tv_sec);
177   - spin_unlock_irqrestore(&rtc_lock, flags);
178   -
179   - return retval;
  182 + return x86_platform.set_wallclock(now.tv_sec);
180 183 }
181 184  
182 185 /* not static: needed by APM */
183 186 void read_persistent_clock(struct timespec *ts)
184 187 {
185   - unsigned long retval, flags;
  188 + unsigned long retval;
186 189  
187   - spin_lock_irqsave(&rtc_lock, flags);
188 190 retval = x86_platform.get_wallclock();
189   - spin_unlock_irqrestore(&rtc_lock, flags);
190 191  
191 192 ts->tv_sec = retval;
192 193 ts->tv_nsec = 0;
arch/x86/platform/mrst/vrtc.c
... ... @@ -58,8 +58,11 @@
58 58 unsigned long vrtc_get_time(void)
59 59 {
60 60 u8 sec, min, hour, mday, mon;
  61 + unsigned long flags;
61 62 u32 year;
62 63  
  64 + spin_lock_irqsave(&rtc_lock, flags);
  65 +
63 66 while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
64 67 cpu_relax();
65 68  
... ... @@ -70,6 +73,8 @@
70 73 mon = vrtc_cmos_read(RTC_MONTH);
71 74 year = vrtc_cmos_read(RTC_YEAR);
72 75  
  76 + spin_unlock_irqrestore(&rtc_lock, flags);
  77 +
73 78 /* vRTC YEAR reg contains the offset to 1960 */
74 79 year += 1960;
75 80  
76 81  
... ... @@ -83,8 +88,10 @@
83 88 int vrtc_set_mmss(unsigned long nowtime)
84 89 {
85 90 int real_sec, real_min;
  91 + unsigned long flags;
86 92 int vrtc_min;
87 93  
  94 + spin_lock_irqsave(&rtc_lock, flags);
88 95 vrtc_min = vrtc_cmos_read(RTC_MINUTES);
89 96  
90 97 real_sec = nowtime % 60;
... ... @@ -95,6 +102,8 @@
95 102  
96 103 vrtc_cmos_write(real_sec, RTC_SECONDS);
97 104 vrtc_cmos_write(real_min, RTC_MINUTES);
  105 + spin_unlock_irqrestore(&rtc_lock, flags);
  106 +
98 107 return 0;
99 108 }
100 109  
include/linux/irqdomain.h
... ... @@ -80,6 +80,7 @@
80 80 #endif /* CONFIG_IRQ_DOMAIN */
81 81  
82 82 #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
  83 +extern struct irq_domain_ops irq_domain_simple_ops;
83 84 extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
84 85 extern void irq_domain_generate_simple(const struct of_device_id *match,
85 86 u64 phys_base, unsigned int irq_start);
include/linux/sched.h
... ... @@ -1956,7 +1956,6 @@
1956 1956  
1957 1957 extern unsigned long long
1958 1958 task_sched_runtime(struct task_struct *task);
1959   -extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
1960 1959  
1961 1960 /* sched_exec is called by processes performing an exec */
1962 1961 #ifdef CONFIG_SMP
kernel/irq/irqdomain.c
... ... @@ -29,7 +29,11 @@
29 29 */
30 30 for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
31 31 d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
32   - if (d || d->domain) {
  32 + if (!d) {
  33 + WARN(1, "error: assigning domain to non existant irq_desc");
  34 + return;
  35 + }
  36 + if (d->domain) {
33 37 /* things are broken; just report, don't clean up */
34 38 WARN(1, "error: irq_desc already assigned to a domain");
35 39 return;
kernel/posix-cpu-timers.c
... ... @@ -250,7 +250,7 @@
250 250 do {
251 251 times->utime = cputime_add(times->utime, t->utime);
252 252 times->stime = cputime_add(times->stime, t->stime);
253   - times->sum_exec_runtime += t->se.sum_exec_runtime;
  253 + times->sum_exec_runtime += task_sched_runtime(t);
254 254 } while_each_thread(tsk, t);
255 255 out:
256 256 rcu_read_unlock();
... ... @@ -312,7 +312,8 @@
312 312 cpu->cpu = cputime.utime;
313 313 break;
314 314 case CPUCLOCK_SCHED:
315   - cpu->sched = thread_group_sched_runtime(p);
  315 + thread_group_cputime(p, &cputime);
  316 + cpu->sched = cputime.sum_exec_runtime;
316 317 break;
317 318 }
318 319 return 0;
... ... @@ -3725,30 +3725,6 @@
3725 3725 }
3726 3726  
3727 3727 /*
3728   - * Return sum_exec_runtime for the thread group.
3729   - * In case the task is currently running, return the sum plus current's
3730   - * pending runtime that have not been accounted yet.
3731   - *
3732   - * Note that the thread group might have other running tasks as well,
3733   - * so the return value not includes other pending runtime that other
3734   - * running tasks might have.
3735   - */
3736   -unsigned long long thread_group_sched_runtime(struct task_struct *p)
3737   -{
3738   - struct task_cputime totals;
3739   - unsigned long flags;
3740   - struct rq *rq;
3741   - u64 ns;
3742   -
3743   - rq = task_rq_lock(p, &flags);
3744   - thread_group_cputime(p, &totals);
3745   - ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
3746   - task_rq_unlock(rq, p, &flags);
3747   -
3748   - return ns;
3749   -}
3750   -
3751   -/*
3752 3728 * Account user cpu time to a process.
3753 3729 * @p: the process that the cpu time gets accounted to
3754 3730 * @cputime: the cpu time spent in user space since the last update
... ... @@ -4372,7 +4348,7 @@
4372 4348 blk_schedule_flush_plug(tsk);
4373 4349 }
4374 4350  
4375   -asmlinkage void schedule(void)
  4351 +asmlinkage void __sched schedule(void)
4376 4352 {
4377 4353 struct task_struct *tsk = current;
4378 4354  
... ... @@ -1050,7 +1050,7 @@
1050 1050 */
1051 1051 if (curr && unlikely(rt_task(curr)) &&
1052 1052 (curr->rt.nr_cpus_allowed < 2 ||
1053   - curr->prio < p->prio) &&
  1053 + curr->prio <= p->prio) &&
1054 1054 (p->rt.nr_cpus_allowed > 1)) {
1055 1055 int target = find_lowest_rq(p);
1056 1056  
... ... @@ -1581,7 +1581,7 @@
1581 1581 p->rt.nr_cpus_allowed > 1 &&
1582 1582 rt_task(rq->curr) &&
1583 1583 (rq->curr->rt.nr_cpus_allowed < 2 ||
1584   - rq->curr->prio < p->prio))
  1584 + rq->curr->prio <= p->prio))
1585 1585 push_rt_tasks(rq);
1586 1586 }
1587 1587