Commit b3ac022cb9dc5883505a88b159d1b240ad1ef405
Committed by: Linus Torvalds
Parent: dd98acf747
Exists in: master and 7 other branches
proc: turn signal_struct->count into "int nr_threads"
No functional changes, just s/atomic_t count/int nr_threads/.

With the recent changes this counter has a single user, get_nr_threads(). And none of its callers need a really accurate number of threads, not to mention that each caller obviously races with fork/exit. It is only used to report this value to user-space, except that first_tid() uses it to avoid an unnecessary while_each_thread() loop in the unlikely case.

It is a bit sad we need a word in struct signal_struct for this; perhaps we can change get_nr_threads() to approximate the number of threads using signal->live and kill ->nr_threads later.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
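A minimal sketch (not part of this patch) of the follow-up idea floated in the last paragraph above: if ->nr_threads were later removed, get_nr_threads() could approximate the thread count from signal->live instead. The helper name get_nr_threads_approx() is hypothetical, and the accuracy caveats are exactly the ones the existing callers already tolerate.

	/* Illustrative only: approximate the thread count via ->live. */
	static inline int get_nr_threads_approx(struct task_struct *tsk)
	{
		/*
		 * ->live is decremented in do_exit() before the thread is
		 * unhashed, so the result may briefly drift from the real
		 * count -- acceptable, since callers only report it to
		 * user-space and race with fork/exit anyway.
		 */
		return atomic_read(&tsk->signal->live);
	}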
Showing 4 changed files with 8 additions and 11 deletions
include/linux/init_task.h
@@ -16,7 +16,7 @@
 extern struct fs_struct init_fs;
 
 #define INIT_SIGNALS(sig) {						\
-	.count		= ATOMIC_INIT(1),				\
+	.nr_threads	= 1,						\
 	.wait_chldexit	= __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
 	.shared_pending	= {						\
 		.list = LIST_HEAD_INIT(sig.shared_pending.list),	\
include/linux/sched.h
@@ -527,8 +527,8 @@
  */
 struct signal_struct {
 	atomic_t		sigcnt;
-	atomic_t		count;
 	atomic_t		live;
+	int			nr_threads;
 
 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
 
@@ -2149,7 +2149,7 @@
 
 static inline int get_nr_threads(struct task_struct *tsk)
 {
-	return atomic_read(&tsk->signal->count);
+	return tsk->signal->nr_threads;
 }
 
 /* de_thread depends on thread_group_leader not being a pid based check */
kernel/exit.c
@@ -83,14 +83,10 @@
 	struct sighand_struct *sighand;
 	struct tty_struct *uninitialized_var(tty);
 
-	BUG_ON(!sig);
-	BUG_ON(!atomic_read(&sig->count));
-
 	sighand = rcu_dereference_check(tsk->sighand,
 					rcu_read_lock_held() ||
 					lockdep_tasklist_lock_is_held());
 	spin_lock(&sighand->siglock);
-	atomic_dec(&sig->count);
 
 	posix_cpu_timers_exit(tsk);
 	if (group_dead) {
@@ -130,6 +126,7 @@
 		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
 	}
 
+	sig->nr_threads--;
 	__unhash_process(tsk, group_dead);
 
 	/*
kernel/fork.c
@@ -877,9 +877,9 @@
 	if (!sig)
 		return -ENOMEM;
 
-	atomic_set(&sig->sigcnt, 1);
-	atomic_set(&sig->count, 1);
+	sig->nr_threads = 1;
 	atomic_set(&sig->live, 1);
+	atomic_set(&sig->sigcnt, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
 	if (clone_flags & CLONE_NEWPID)
 		sig->flags |= SIGNAL_UNKILLABLE;
 
@@ -1256,9 +1256,9 @@
 	}
 
 	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->sigcnt);
-		atomic_inc(&current->signal->count);
+		current->signal->nr_threads++;
 		atomic_inc(&current->signal->live);
+		atomic_inc(&current->signal->sigcnt);
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 	}
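For context, the callers this commit has in mind read the new plain int without taking ->siglock, because they only report the value to user-space. A minimal, hypothetical /proc-style reader sketch (the function name and seq_file plumbing are illustrative, not part of this diff):

	/* Illustrative only: report an approximate thread count to user-space. */
	static void report_thread_count(struct seq_file *m, struct task_struct *task)
	{
		/*
		 * No locking: nr_threads is updated under ->siglock by
		 * fork/exit, but a racy, slightly stale read is fine for
		 * reporting purposes.
		 */
		seq_printf(m, "Threads:\t%d\n", get_nr_threads(task));
	}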