Commit e107be36efb2a233833e8c9899039a370e4b2318
Committed by: Ingo Molnar
Parent: b47e8608a0
Exists in: master and 7 other branches
[PATCH] sched: arch preempt notifier mechanism
This adds a general mechanism whereby a task can request the scheduler to
notify it whenever it is preempted or scheduled back in. This allows the
task to swap any special-purpose registers, such as the FPU or Intel's VT
registers.

Signed-off-by: Avi Kivity <avi@qumranet.com>
[ mingo@elte.hu: fixes, cleanups ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
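As a rough illustration of how a consumer of the new interface is intended to
use it (a hedged sketch assuming CONFIG_PREEMPT_NOTIFIERS=y; the fpu_guard_*
names are hypothetical and not part of this patch): the task sets up a
struct preempt_ops, initializes and registers a struct preempt_notifier while
its special-purpose register state is live, and unregisters it when done.

    #include <linux/preempt.h>
    #include <linux/sched.h>

    /* Hypothetical callbacks: a real user (e.g. a hypervisor) would save and
     * restore its special-purpose registers here. */
    static void fpu_guard_sched_out(struct preempt_notifier *notifier,
                                    struct task_struct *next)
    {
            /* current is about to lose the cpu to @next: save register state */
    }

    static void fpu_guard_sched_in(struct preempt_notifier *notifier, int cpu)
    {
            /* current runs again, possibly on another cpu: restore register state */
    }

    static struct preempt_ops fpu_guard_ops = {
            .sched_in  = fpu_guard_sched_in,
            .sched_out = fpu_guard_sched_out,
    };

    static void fpu_guard_run(struct preempt_notifier *notifier)
    {
            preempt_notifier_init(notifier, &fpu_guard_ops);
            preempt_notifier_register(notifier);    /* links into current->preempt_notifiers */

            /* ... work with special-purpose registers live ... */

            preempt_notifier_unregister(notifier);
    }

Both callbacks are invoked from the context-switch path (prepare_task_switch()
and finish_task_switch() below), so they must be fast and must not sleep.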
Showing 4 changed files with 123 additions and 2 deletions.
include/linux/preempt.h
@@ -8,6 +8,7 @@
 
 #include <linux/thread_info.h>
 #include <linux/linkage.h>
+#include <linux/list.h>
 
 #ifdef CONFIG_DEBUG_PREEMPT
 extern void fastcall add_preempt_count(int val);
@@ -57,6 +58,49 @@
 #define preempt_enable_no_resched()    do { } while (0)
 #define preempt_enable()               do { } while (0)
 #define preempt_check_resched()        do { } while (0)
+
+#endif
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+struct preempt_notifier;
+
+/**
+ * preempt_ops - notifiers called when a task is preempted and rescheduled
+ * @sched_in: we're about to be rescheduled:
+ *    notifier: struct preempt_notifier for the task being scheduled
+ *    cpu: cpu we're scheduled on
+ * @sched_out: we've just been preempted
+ *    notifier: struct preempt_notifier for the task being preempted
+ *    next: the task that's kicking us out
+ */
+struct preempt_ops {
+        void (*sched_in)(struct preempt_notifier *notifier, int cpu);
+        void (*sched_out)(struct preempt_notifier *notifier,
+                          struct task_struct *next);
+};
+
+/**
+ * preempt_notifier - key for installing preemption notifiers
+ * @link: internal use
+ * @ops: defines the notifier functions to be called
+ *
+ * Usually used in conjunction with container_of().
+ */
+struct preempt_notifier {
+        struct hlist_node link;
+        struct preempt_ops *ops;
+};
+
+void preempt_notifier_register(struct preempt_notifier *notifier);
+void preempt_notifier_unregister(struct preempt_notifier *notifier);
+
+static inline void preempt_notifier_init(struct preempt_notifier *notifier,
+                                         struct preempt_ops *ops)
+{
+        INIT_HLIST_NODE(&notifier->link);
+        notifier->ops = ops;
+}
 
 #endif
 
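As the kerneldoc above suggests, the notifier is typically embedded in a larger
per-task object, and the callbacks recover that object with container_of(). A
minimal sketch of the pattern, again with hypothetical names (my_vcpu and
my_vcpu_preempt_ops are illustrative only, not part of this patch):

    #include <linux/kernel.h>
    #include <linux/preempt.h>
    #include <linux/sched.h>

    struct my_vcpu {
            int last_cpu;                           /* illustrative per-vcpu state */
            bool loaded;
            struct preempt_notifier preempt_notifier;      /* embedded key */
    };

    static void my_vcpu_sched_in(struct preempt_notifier *pn, int cpu)
    {
            /* recover the containing object from the embedded notifier */
            struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, preempt_notifier);

            vcpu->last_cpu = cpu;   /* e.g. reload per-cpu register state here */
            vcpu->loaded = true;
    }

    static void my_vcpu_sched_out(struct preempt_notifier *pn,
                                  struct task_struct *next)
    {
            struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, preempt_notifier);

            vcpu->loaded = false;   /* e.g. stash register state before @next runs */
    }

    static struct preempt_ops my_vcpu_preempt_ops = {
            .sched_in  = my_vcpu_sched_in,
            .sched_out = my_vcpu_sched_out,
    };

A vcpu thread would then call preempt_notifier_init(&vcpu->preempt_notifier,
&my_vcpu_preempt_ops) once and register/unregister around the region where its
state is live.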
include/linux/sched.h
@@ -935,6 +935,11 @@
 	struct sched_class *sched_class;
 	struct sched_entity se;
 
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+	/* list of struct preempt_notifier: */
+	struct hlist_head preempt_notifiers;
+#endif
+
 	unsigned short ioprio;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
kernel/Kconfig.preempt
kernel/sched.c
@@ -1592,6 +1592,10 @@
 	INIT_LIST_HEAD(&p->run_list);
 	p->se.on_rq = 0;
 
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+	INIT_HLIST_HEAD(&p->preempt_notifiers);
+#endif
+
 	/*
 	 * We mark the process as running here, but have not actually
 	 * inserted it onto the runqueue yet. This guarantees that
@@ -1673,7 +1677,64 @@
 	task_rq_unlock(rq, &flags);
 }
 
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
 /**
+ * preempt_notifier_register - tell me when current is being preempted
+ *                             and rescheduled
+ */
+void preempt_notifier_register(struct preempt_notifier *notifier)
+{
+	hlist_add_head(&notifier->link, &current->preempt_notifiers);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_register);
+
+/**
+ * preempt_notifier_unregister - no longer interested in preemption notifications
+ *
+ * This is safe to call from within a preemption notifier.
+ */
+void preempt_notifier_unregister(struct preempt_notifier *notifier)
+{
+	hlist_del(&notifier->link);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
+
+static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+	struct preempt_notifier *notifier;
+	struct hlist_node *node;
+
+	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+		notifier->ops->sched_in(notifier, raw_smp_processor_id());
+}
+
+static void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+				 struct task_struct *next)
+{
+	struct preempt_notifier *notifier;
+	struct hlist_node *node;
+
+	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+		notifier->ops->sched_out(notifier, next);
+}
+
+#else
+
+static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+}
+
+static void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+				 struct task_struct *next)
+{
+}
+
+#endif
+
+/**
  * prepare_task_switch - prepare to switch tasks
  * @rq: the runqueue preparing to switch
  * @next: the task we are going to switch to.
@@ -1685,8 +1746,11 @@
  * prepare_task_switch sets up locking and calls architecture specific
  * hooks.
  */
-static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
+static inline void
+prepare_task_switch(struct rq *rq, struct task_struct *prev,
+		    struct task_struct *next)
 {
+	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
 }
@@ -1728,6 +1792,7 @@
 	prev_state = prev->state;
 	finish_arch_switch(prev);
 	finish_lock_switch(rq, prev);
+	fire_sched_in_preempt_notifiers(current);
 	if (mm)
 		mmdrop(mm);
 	if (unlikely(prev_state == TASK_DEAD)) {
@@ -1768,7 +1833,7 @@
 {
 	struct mm_struct *mm, *oldmm;
 
-	prepare_task_switch(rq, next);
+	prepare_task_switch(rq, prev, next);
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -6334,6 +6399,10 @@
 	}
 
 	set_load_weight(&init_task);
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
+#endif
 
 #ifdef CONFIG_SMP
 	nr_cpu_ids = highest_cpu + 1;