Commit 79637a41e466bbe7dfe394bac3c9d86a92fd55b1

Authored by Linus Torvalds

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  gcc-4.6: kernel/*: Fix unused but set warnings
  mutex: Fix annotations to include it in kernel-locking docbook
  pid: make setpgid() system call use RCU read-side critical section
  MAINTAINERS: Add RCU's public git tree

Showing 11 changed files

Documentation/DocBook/kernel-locking.tmpl
... ... @@ -1961,6 +1961,12 @@
1961 1961 </sect1>
1962 1962 </chapter>
1963 1963  
  1964 + <chapter id="apiref">
  1965 + <title>Mutex API reference</title>
  1966 +!Iinclude/linux/mutex.h
  1967 +!Ekernel/mutex.c
  1968 + </chapter>
  1969 +
1964 1970 <chapter id="references">
1965 1971 <title>Further reading</title>
1966 1972  
Documentation/mutex-design.txt
... ... @@ -9,7 +9,7 @@
9 9 mutex semantics are sufficient for your code, then there are a couple
10 10 of advantages of mutexes:
11 11  
12   - - 'struct mutex' is smaller on most architectures: .e.g on x86,
  12 + - 'struct mutex' is smaller on most architectures: E.g. on x86,
13 13 'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
14 14 A smaller structure size means less RAM footprint, and better
15 15 CPU-cache utilization.
... ... @@ -136,4 +136,5 @@
136 136 void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
137 137 int mutex_lock_interruptible_nested(struct mutex *lock,
138 138 unsigned int subclass);
  139 + int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
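
The new line documents atomic_dec_and_mutex_lock(), which decrements a reference count and returns holding the mutex only when the count reaches zero. A minimal sketch of the usual drop-to-zero release pattern; struct my_obj, obj_lock and my_obj_put() are illustrative names, not part of this commit:

static void my_obj_put(struct my_obj *obj)
{
	/* Returns 1 with obj_lock held iff the count dropped to zero. */
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj_lock))
		return;

	list_del(&obj->node);		/* teardown serialized by obj_lock */
	mutex_unlock(&obj_lock);
	kfree(obj);
}
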
MAINTAINERS
... ... @@ -4810,6 +4810,7 @@
4810 4810 M: Josh Triplett <josh@freedesktop.org>
4811 4811 M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
4812 4812 S: Supported
  4813 +T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
4813 4814 F: Documentation/RCU/torture.txt
4814 4815 F: kernel/rcutorture.c
4815 4816  
... ... @@ -4834,6 +4835,7 @@
4834 4835 M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
4835 4836 W: http://www.rdrop.com/users/paulmck/rclock/
4836 4837 S: Supported
  4838 +T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
4837 4839 F: Documentation/RCU/
4838 4840 F: include/linux/rcu*
4839 4841 F: include/linux/srcu*
include/linux/mutex.h
... ... @@ -78,6 +78,14 @@
78 78 # include <linux/mutex-debug.h>
79 79 #else
80 80 # define __DEBUG_MUTEX_INITIALIZER(lockname)
  81 +/**
  82 + * mutex_init - initialize the mutex
  83 + * @mutex: the mutex to be initialized
  84 + *
  85 + * Initialize the mutex to unlocked state.
  86 + *
  87 + * It is not allowed to initialize an already locked mutex.
  88 + */
81 89 # define mutex_init(mutex) \
82 90 do { \
83 91 static struct lock_class_key __key; \
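
The kernel-doc block added above describes mutex_init(), which must only be used on a mutex that is not currently locked; statically allocated mutexes can use DEFINE_MUTEX() instead. A short usage sketch under assumed names (struct mydata and mydata_setup() are made up for illustration):

struct mydata {
	struct mutex lock;
	int value;
};

static void mydata_setup(struct mydata *d)
{
	mutex_init(&d->lock);		/* starts out unlocked */

	mutex_lock(&d->lock);
	d->value = 42;
	mutex_unlock(&d->lock);
}
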
kernel/debug/kdb/kdb_bp.c
... ... @@ -274,7 +274,6 @@
274 274 int i, bpno;
275 275 kdb_bp_t *bp, *bp_check;
276 276 int diag;
277   - int free;
278 277 char *symname = NULL;
279 278 long offset = 0ul;
280 279 int nextarg;
... ... @@ -305,7 +304,6 @@
305 304 /*
306 305 * Find an empty bp structure to allocate
307 306 */
308   - free = KDB_MAXBPT;
309 307 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
310 308 if (bp->bp_free)
311 309 break;
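
This hunk, like the hrtimer, sched and ring_buffer ones further down, addresses gcc 4.6's new -Wunused-but-set-variable warning by deleting locals that are assigned but never read. A contrived sketch of the pattern gcc flags (compute() is a placeholder):

static void example(void)
{
	int unused;

	/* gcc 4.6: warning: variable 'unused' set but not used
	 * [-Wunused-but-set-variable] -- the value is never read back. */
	unused = compute();
}
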
kernel/hrtimer.c
... ... @@ -1091,11 +1091,10 @@
1091 1091 */
1092 1092 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1093 1093 {
1094   - struct hrtimer_clock_base *base;
1095 1094 unsigned long flags;
1096 1095 ktime_t rem;
1097 1096  
1098   - base = lock_hrtimer_base(timer, &flags);
  1097 + lock_hrtimer_base(timer, &flags);
1099 1098 rem = hrtimer_expires_remaining(timer);
1100 1099 unlock_hrtimer_base(timer, &flags);
1101 1100  
kernel/mutex.c
... ... @@ -36,15 +36,6 @@
36 36 # include <asm/mutex.h>
37 37 #endif
38 38  
39   -/***
40   - * mutex_init - initialize the mutex
41   - * @lock: the mutex to be initialized
42   - * @key: the lock_class_key for the class; used by mutex lock debugging
43   - *
44   - * Initialize the mutex to unlocked state.
45   - *
46   - * It is not allowed to initialize an already locked mutex.
47   - */
48 39 void
49 40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
50 41 {
... ... @@ -68,7 +59,7 @@
68 59 static __used noinline void __sched
69 60 __mutex_lock_slowpath(atomic_t *lock_count);
70 61  
71   -/***
  62 +/**
72 63 * mutex_lock - acquire the mutex
73 64 * @lock: the mutex to be acquired
74 65 *
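
The '/***' to '/**' changes in this file are what make these comments visible to the new DocBook chapter: scripts/kernel-doc only recognizes comments that open with exactly '/**', so the '/***' blocks were skipped by the !I/!E directives added to kernel-locking.tmpl above. The expected layout, roughly, with placeholder names:

/**
 * my_func - one-line summary
 * @arg: what the argument means
 *
 * Longer description, extracted into the generated documentation.
 */
int my_func(int arg);
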
... ... @@ -105,7 +96,7 @@
105 96  
106 97 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
107 98  
108   -/***
  99 +/**
109 100 * mutex_unlock - release the mutex
110 101 * @lock: the mutex to be released
111 102 *
... ... @@ -364,8 +355,8 @@
364 355 static noinline int __sched
365 356 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
366 357  
367   -/***
368   - * mutex_lock_interruptible - acquire the mutex, interruptable
  358 +/**
  359 + * mutex_lock_interruptible - acquire the mutex, interruptible
369 360 * @lock: the mutex to be acquired
370 361 *
371 362 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
372 363  
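
mutex_lock_interruptible() returns 0 on success and a negative error if a signal arrives while the caller sleeps, so the return value must be checked and propagated. A hedged sketch of a typical call site (do_work() and its argument are illustrative):

static int do_work(struct mutex *lock)
{
	int ret;

	ret = mutex_lock_interruptible(lock);
	if (ret)
		return ret;	/* interrupted by a signal, give up */

	/* ... critical section ... */

	mutex_unlock(lock);
	return 0;
}
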
... ... @@ -456,15 +447,15 @@
456 447 return prev == 1;
457 448 }
458 449  
459   -/***
460   - * mutex_trylock - try acquire the mutex, without waiting
  450 +/**
  451 + * mutex_trylock - try to acquire the mutex, without waiting
461 452 * @lock: the mutex to be acquired
462 453 *
463 454 * Try to acquire the mutex atomically. Returns 1 if the mutex
464 455 * has been acquired successfully, and 0 on contention.
465 456 *
466 457 * NOTE: this function follows the spin_trylock() convention, so
467   - * it is negated to the down_trylock() return values! Be careful
  458 + * it is negated from the down_trylock() return values! Be careful
468 459 * about this when converting semaphore users to mutexes.
469 460 *
470 461 * This function must not be used in interrupt context. The
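
Because mutex_trylock() follows the spin_trylock() convention (1 = acquired, 0 = contended), its sense is the opposite of down_trylock(), which returns 0 on success. A small sketch of the checked-return idiom; stats_lock and update_stats() are assumed names:

if (mutex_trylock(&stats_lock)) {
	/* got the lock without sleeping */
	update_stats();
	mutex_unlock(&stats_lock);
} else {
	/* contended: skip or defer the work rather than block */
}
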
kernel/sched_fair.c
... ... @@ -1313,7 +1313,7 @@
1313 1313 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1314 1314 int this_cpu, int load_idx)
1315 1315 {
1316   - struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
  1316 + struct sched_group *idlest = NULL, *group = sd->groups;
1317 1317 unsigned long min_load = ULONG_MAX, this_load = 0;
1318 1318 int imbalance = 100 + (sd->imbalance_pct-100)/2;
1319 1319  
... ... @@ -1348,7 +1348,6 @@
1348 1348  
1349 1349 if (local_group) {
1350 1350 this_load = avg_load;
1351   - this = group;
1352 1351 } else if (avg_load < min_load) {
1353 1352 min_load = avg_load;
1354 1353 idlest = group;
kernel/sys.c
... ... @@ -931,6 +931,7 @@
931 931 pgid = pid;
932 932 if (pgid < 0)
933 933 return -EINVAL;
  934 + rcu_read_lock();
934 935  
935 936 /* From this point forward we keep holding onto the tasklist lock
936 937 * so that our parent does not change from under us. -DaveM
... ... @@ -984,6 +985,7 @@
984 985 out:
985 986 /* All paths lead to here, thus we are safe. -DaveM */
986 987 write_unlock_irq(&tasklist_lock);
  988 + rcu_read_unlock();
987 989 return err;
988 990 }
989 991  
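
The added rcu_read_lock()/rcu_read_unlock() pair puts the pid-to-task lookups in setpgid() inside an RCU read-side critical section, which helpers such as find_task_by_vpid() require when the tasklist lock alone is not held early enough. The general pattern, sketched with error handling trimmed:

struct task_struct *p;

rcu_read_lock();
p = find_task_by_vpid(pid);	/* result valid only while RCU read lock is held */
if (p)
	get_task_struct(p);	/* take a reference before dropping the read lock */
rcu_read_unlock();
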
kernel/sysctl.c
... ... @@ -1713,10 +1713,7 @@
1713 1713 {
1714 1714 sysctl_set_parent(NULL, root_table);
1715 1715 #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
1716   - {
1717   - int err;
1718   - err = sysctl_check_table(current->nsproxy, root_table);
1719   - }
  1716 + sysctl_check_table(current->nsproxy, root_table);
1720 1717 #endif
1721 1718 return 0;
1722 1719 }
kernel/trace/ring_buffer.c
... ... @@ -2985,13 +2985,11 @@
2985 2985  
2986 2986 static void rb_advance_iter(struct ring_buffer_iter *iter)
2987 2987 {
2988   - struct ring_buffer *buffer;
2989 2988 struct ring_buffer_per_cpu *cpu_buffer;
2990 2989 struct ring_buffer_event *event;
2991 2990 unsigned length;
2992 2991  
2993 2992 cpu_buffer = iter->cpu_buffer;
2994   - buffer = cpu_buffer->buffer;
2995 2993  
2996 2994 /*
2997 2995 * Check if we are at the end of the buffer.