Commit 72fd4a35a824331d7a0f4168d7576502d95d34b3

Authored by Robert P. J. Day
Committed by Linus Torvalds
1 parent 262086cf5b

[PATCH] Numerous fixes to kernel-doc info in source files.

A variety of (mostly) innocuous fixes to the embedded kernel-doc content in
source files, including:

  * make multi-line initial descriptions single line
  * denote some function names, constants and structs as such
  * change erroneous opening '/*' to '/**' in a few places
  * reword some text for clarity

Signed-off-by: Robert P. J. Day <rpjday@mindspring.com>
Cc: "Randy.Dunlap" <rdunlap@xenotime.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 33 changed files, with 105 additions and 116 deletions (side-by-side diff view).

include/asm-i386/atomic.h
... ... @@ -211,12 +211,12 @@
211 211 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
212 212  
213 213 /**
214   - * atomic_add_unless - add unless the number is a given value
  214 + * atomic_add_unless - add unless the number is already a given value
215 215 * @v: pointer of type atomic_t
216 216 * @a: the amount to add to v...
217 217 * @u: ...unless v is equal to u.
218 218 *
219   - * Atomically adds @a to @v, so long as it was not @u.
  219 + * Atomically adds @a to @v, so long as @v was not already @u.
220 220 * Returns non-zero if @v was not @u, and zero otherwise.
221 221 */
222 222 #define atomic_add_unless(v, a, u) \
include/asm-i386/bitops.h
... ... @@ -371,7 +371,7 @@
371 371 *
372 372 * This is defined the same way as
373 373 * the libc and compiler builtin ffs routines, therefore
374   - * differs in spirit from the above ffz (man ffs).
  374 + * differs in spirit from the above ffz() (man ffs).
375 375 */
376 376 static inline int ffs(int x)
377 377 {
... ... @@ -388,7 +388,7 @@
388 388 * fls - find last bit set
389 389 * @x: the word to search
390 390 *
391   - * This is defined the same way as ffs.
  391 + * This is defined the same way as ffs().
392 392 */
393 393 static inline int fls(int x)
394 394 {
include/linux/init.h
... ... @@ -172,7 +172,7 @@
172 172 * module_init() - driver initialization entry point
173 173 * @x: function to be run at kernel boot time or module insertion
174 174 *
175   - * module_init() will either be called during do_initcalls (if
  175 + * module_init() will either be called during do_initcalls() (if
176 176 * builtin) or at module insertion time (if a module). There can only
177 177 * be one per module.
178 178 */
include/linux/kfifo.h
... ... @@ -74,7 +74,7 @@
74 74 * @buffer: the data to be added.
75 75 * @len: the length of the data to be added.
76 76 *
77   - * This function copies at most 'len' bytes from the 'buffer' into
  77 + * This function copies at most @len bytes from the @buffer into
78 78 * the FIFO depending on the free space, and returns the number of
79 79 * bytes copied.
80 80 */
... ... @@ -99,8 +99,8 @@
99 99 * @buffer: where the data must be copied.
100 100 * @len: the size of the destination buffer.
101 101 *
102   - * This function copies at most 'len' bytes from the FIFO into the
103   - * 'buffer' and returns the number of copied bytes.
  102 + * This function copies at most @len bytes from the FIFO into the
  103 + * @buffer and returns the number of copied bytes.
104 104 */
105 105 static inline unsigned int kfifo_get(struct kfifo *fifo,
106 106 unsigned char *buffer, unsigned int len)
include/linux/ktime.h
... ... @@ -163,7 +163,7 @@
163 163 * @add1: addend1
164 164 * @add2: addend2
165 165 *
166   - * Returns the sum of addend1 and addend2
  166 + * Returns the sum of @add1 and @add2.
167 167 */
168 168 static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
169 169 {
... ... @@ -189,7 +189,7 @@
189 189 * @kt: addend
190 190 * @nsec: the scalar nsec value to add
191 191 *
192   - * Returns the sum of kt and nsec in ktime_t format
  192 + * Returns the sum of @kt and @nsec in ktime_t format
193 193 */
194 194 extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
195 195  
... ... @@ -246,7 +246,7 @@
246 246 * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
247 247 * @kt: the ktime_t variable to convert
248 248 *
249   - * Returns the scalar nanoseconds representation of kt
  249 + * Returns the scalar nanoseconds representation of @kt
250 250 */
251 251 static inline s64 ktime_to_ns(const ktime_t kt)
252 252 {
include/linux/list.h
... ... @@ -161,7 +161,7 @@
161 161 /**
162 162 * list_del - deletes entry from list.
163 163 * @entry: the element to delete from the list.
164   - * Note: list_empty on entry does not return true after this, the entry is
  164 + * Note: list_empty() on entry does not return true after this, the entry is
165 165 * in an undefined state.
166 166 */
167 167 #ifndef CONFIG_DEBUG_LIST
... ... @@ -179,7 +179,7 @@
179 179 * list_del_rcu - deletes entry from list without re-initialization
180 180 * @entry: the element to delete from the list.
181 181 *
182   - * Note: list_empty on entry does not return true after this,
  182 + * Note: list_empty() on entry does not return true after this,
183 183 * the entry is in an undefined state. It is useful for RCU based
184 184 * lockfree traversal.
185 185 *
... ... @@ -209,7 +209,8 @@
209 209 * list_replace - replace old entry by new one
210 210 * @old : the element to be replaced
211 211 * @new : the new element to insert
212   - * Note: if 'old' was empty, it will be overwritten.
  212 + *
  213 + * If @old was empty, it will be overwritten.
213 214 */
214 215 static inline void list_replace(struct list_head *old,
215 216 struct list_head *new)
216 217  
... ... @@ -488,12 +489,12 @@
488 489 pos = list_entry(pos->member.prev, typeof(*pos), member))
489 490  
490 491 /**
491   - * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue
  492 + * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
492 493 * @pos: the type * to use as a start point
493 494 * @head: the head of the list
494 495 * @member: the name of the list_struct within the struct.
495 496 *
496   - * Prepares a pos entry for use as a start point in list_for_each_entry_continue.
  497 + * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
497 498 */
498 499 #define list_prepare_entry(pos, head, member) \
499 500 ((pos) ? : list_entry(head, typeof(*pos), member))
... ... @@ -150,7 +150,7 @@
150 150 * ipc_init - initialise IPC subsystem
151 151 *
152 152 * The various system5 IPC resources (semaphores, messages and shared
153   - * memory are initialised
  153 + * memory) are initialised
154 154 */
155 155  
156 156 static int __init ipc_init(void)
... ... @@ -207,8 +207,7 @@
207 207 #ifdef CONFIG_PROC_FS
208 208 static struct file_operations sysvipc_proc_fops;
209 209 /**
210   - * ipc_init_proc_interface - Create a proc interface for sysipc types
211   - * using a seq_file interface.
  210 + * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface.
212 211 * @path: Path in procfs
213 212 * @header: Banner to be printed at the beginning of the file.
214 213 * @ids: ipc id table to iterate.
... ... @@ -417,7 +416,7 @@
417 416 * @ptr: pointer returned by ipc_alloc
418 417 * @size: size of block
419 418 *
420   - * Free a block created with ipc_alloc. The caller must know the size
  419 + * Free a block created with ipc_alloc(). The caller must know the size
421 420 * used in the allocation call.
422 421 */
423 422  
... ... @@ -524,7 +523,7 @@
524 523 * @head: RCU callback structure for queued work
525 524 *
526 525 * Since RCU callback function is called in bh,
527   - * we need to defer the vfree to schedule_work
  526 + * we need to defer the vfree to schedule_work().
528 527 */
529 528 static void ipc_schedule_free(struct rcu_head *head)
530 529 {
... ... @@ -541,7 +540,7 @@
541 540 * ipc_immediate_free - free ipc + rcu space
542 541 * @head: RCU callback structure that contains pointer to be freed
543 542 *
544   - * Free from the RCU callback context
  543 + * Free from the RCU callback context.
545 544 */
546 545 static void ipc_immediate_free(struct rcu_head *head)
547 546 {
... ... @@ -603,8 +602,8 @@
603 602 * @in: kernel permissions
604 603 * @out: new style IPC permissions
605 604 *
606   - * Turn the kernel object 'in' into a set of permissions descriptions
607   - * for returning to userspace (out).
  605 + * Turn the kernel object @in into a set of permissions descriptions
  606 + * for returning to userspace (@out).
608 607 */
609 608  
610 609  
... ... @@ -624,8 +623,8 @@
624 623 * @in: new style IPC permissions
625 624 * @out: old style IPC permissions
626 625 *
627   - * Turn the new style permissions object in into a compatibility
628   - * object and store it into the 'out' pointer.
  626 + * Turn the new style permissions object @in into a compatibility
  627 + * object and store it into the @out pointer.
629 628 */
630 629  
631 630 void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
... ... @@ -722,7 +721,7 @@
722 721 * @cmd: pointer to command
723 722 *
724 723 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
725   - * The cmd value is turned from an encoding command and version into
  724 + * The @cmd value is turned from an encoding command and version into
726 725 * just the command code.
727 726 */
728 727  
... ... @@ -257,8 +257,7 @@
257 257 }
258 258  
259 259 /**
260   - * reparent_to_init - Reparent the calling kernel thread to the init task
261   - * of the pid space that the thread belongs to.
  260 + * reparent_to_init - Reparent the calling kernel thread to the init task of the pid space that the thread belongs to.
262 261 *
263 262 * If a kernel thread is launched as a result of a system call, or if
264 263 * it ever exits, it should generally reparent itself to init so that
... ... @@ -102,7 +102,7 @@
102 102 *
103 103 * The function calculates the monotonic clock from the realtime
104 104 * clock and the wall_to_monotonic offset and stores the result
105   - * in normalized timespec format in the variable pointed to by ts.
  105 + * in normalized timespec format in the variable pointed to by @ts.
106 106 */
107 107 void ktime_get_ts(struct timespec *ts)
108 108 {
... ... @@ -583,8 +583,8 @@
583 583 * @which_clock: which clock to query
584 584 * @tp: pointer to timespec variable to store the resolution
585 585 *
586   - * Store the resolution of the clock selected by which_clock in the
587   - * variable pointed to by tp.
  586 + * Store the resolution of the clock selected by @which_clock in the
  587 + * variable pointed to by @tp.
588 588 */
589 589 int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
590 590 {
... ... @@ -32,8 +32,8 @@
32 32 * @gfp_mask: get_free_pages mask, passed to kmalloc()
33 33 * @lock: the lock to be used to protect the fifo buffer
34 34 *
35   - * Do NOT pass the kfifo to kfifo_free() after use ! Simply free the
36   - * struct kfifo with kfree().
  35 + * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
  36 + * &struct kfifo with kfree().
37 37 */
38 38 struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
39 39 gfp_t gfp_mask, spinlock_t *lock)
... ... @@ -108,7 +108,7 @@
108 108 * @buffer: the data to be added.
109 109 * @len: the length of the data to be added.
110 110 *
111   - * This function copies at most 'len' bytes from the 'buffer' into
  111 + * This function copies at most @len bytes from the @buffer into
112 112 * the FIFO depending on the free space, and returns the number of
113 113 * bytes copied.
114 114 *
... ... @@ -155,8 +155,8 @@
155 155 * @buffer: where the data must be copied.
156 156 * @len: the size of the destination buffer.
157 157 *
158   - * This function copies at most 'len' bytes from the FIFO into the
159   - * 'buffer' and returns the number of copied bytes.
  158 + * This function copies at most @len bytes from the FIFO into the
  159 + * @buffer and returns the number of copied bytes.
160 160 *
161 161 * Note that with only one concurrent reader and one concurrent
162 162 * writer, you don't need extra locking to use these functions.
... ... @@ -50,7 +50,7 @@
50 50 /**
51 51 * kthread_should_stop - should this kthread return now?
52 52 *
53   - * When someone calls kthread_stop on your kthread, it will be woken
  53 + * When someone calls kthread_stop() on your kthread, it will be woken
54 54 * and this will return true. You should then return, and your return
55 55 * value will be passed through to kthread_stop().
56 56 */
... ... @@ -143,7 +143,7 @@
143 143 * it. See also kthread_run(), kthread_create_on_cpu().
144 144 *
145 145 * When woken, the thread will run @threadfn() with @data as its
146   - * argument. @threadfn can either call do_exit() directly if it is a
  146 + * argument. @threadfn() can either call do_exit() directly if it is a
147 147 * standalone thread for which noone will call kthread_stop(), or
148 148 * return when 'kthread_should_stop()' is true (which means
149 149 * kthread_stop() has been called). The return value should be zero
... ... @@ -192,7 +192,7 @@
192 192 *
193 193 * Description: This function is equivalent to set_cpus_allowed(),
194 194 * except that @cpu doesn't need to be online, and the thread must be
195   - * stopped (i.e., just returned from kthread_create().
  195 + * stopped (i.e., just returned from kthread_create()).
196 196 */
197 197 void kthread_bind(struct task_struct *k, unsigned int cpu)
198 198 {
... ... @@ -483,7 +483,7 @@
483 483 * printk - print a kernel message
484 484 * @fmt: format string
485 485 *
486   - * This is printk. It can be called from any context. We want it to work.
  486 + * This is printk(). It can be called from any context. We want it to work.
487 487 *
488 488 * We try to grab the console_sem. If we succeed, it's easy - we log the output and
489 489 * call the console drivers. If we fail to get the semaphore we place the output
... ... @@ -328,7 +328,7 @@
328 328 * @buf: the channel buffer
329 329 * @init: 1 if this is a first-time initialization
330 330 *
331   - * See relay_reset for description of effect.
  331 + * See relay_reset() for description of effect.
332 332 */
333 333 static void __relay_reset(struct rchan_buf *buf, unsigned int init)
334 334 {
... ... @@ -364,7 +364,7 @@
364 364 * and restarting the channel in its initial state. The buffers
365 365 * are not freed, so any mappings are still in effect.
366 366 *
367   - * NOTE: Care should be taken that the channel isn't actually
  367 + * NOTE. Care should be taken that the channel isn't actually
368 368 * being used by anything when this call is made.
369 369 */
370 370 void relay_reset(struct rchan *chan)
... ... @@ -528,7 +528,7 @@
528 528 * Creates a channel buffer for each cpu using the sizes and
529 529 * attributes specified. The created channel buffer files
530 530 * will be named base_filename0...base_filenameN-1. File
531   - * permissions will be S_IRUSR.
  531 + * permissions will be %S_IRUSR.
532 532 */
533 533 struct rchan *relay_open(const char *base_filename,
534 534 struct dentry *parent,
... ... @@ -648,7 +648,7 @@
648 648 * subbufs_consumed should be the number of sub-buffers newly consumed,
649 649 * not the total consumed.
650 650 *
651   - * NOTE: Kernel clients don't need to call this function if the channel
  651 + * NOTE. Kernel clients don't need to call this function if the channel
652 652 * mode is 'overwrite'.
653 653 */
654 654 void relay_subbufs_consumed(struct rchan *chan,
... ... @@ -749,7 +749,7 @@
749 749 * @filp: the file
750 750 * @vma: the vma describing what to map
751 751 *
752   - * Calls upon relay_mmap_buf to map the file into user space.
  752 + * Calls upon relay_mmap_buf() to map the file into user space.
753 753 */
754 754 static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
755 755 {
... ... @@ -891,7 +891,7 @@
891 891 * @read_pos: file read position
892 892 * @buf: relay channel buffer
893 893 *
894   - * If the read_pos is in the middle of padding, return the
  894 + * If the @read_pos is in the middle of padding, return the
895 895 * position of the first actually available byte, otherwise
896 896 * return the original value.
897 897 */
... ... @@ -4203,13 +4203,12 @@
4203 4203 }
4204 4204  
4205 4205 /**
4206   - * sched_setscheduler - change the scheduling policy and/or RT priority of
4207   - * a thread.
  4206 + * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4208 4207 * @p: the task in question.
4209 4208 * @policy: new policy.
4210 4209 * @param: structure containing the new RT priority.
4211 4210 *
4212   - * NOTE: the task may be already dead
  4211 + * NOTE that the task may be already dead.
4213 4212 */
4214 4213 int sched_setscheduler(struct task_struct *p, int policy,
4215 4214 struct sched_param *param)
... ... @@ -4577,7 +4576,7 @@
4577 4576 /**
4578 4577 * sys_sched_yield - yield the current processor to other threads.
4579 4578 *
4580   - * this function yields the current CPU by moving the calling thread
  4579 + * This function yields the current CPU by moving the calling thread
4581 4580 * to the expired array. If there are no other threads running on this
4582 4581 * CPU then this function will return.
4583 4582 */
... ... @@ -4704,7 +4703,7 @@
4704 4703 /**
4705 4704 * yield - yield the current processor to other threads.
4706 4705 *
4707   - * this is a shortcut for kernel-space yielding - it marks the
  4706 + * This is a shortcut for kernel-space yielding - it marks the
4708 4707 * thread runnable and calls sys_sched_yield().
4709 4708 */
4710 4709 void __sched yield(void)
... ... @@ -2282,7 +2282,7 @@
2282 2282 * @pid: the PID of the thread
2283 2283 * @sig: signal to be sent
2284 2284 *
2285   - * This syscall also checks the tgid and returns -ESRCH even if the PID
  2285 + * This syscall also checks the @tgid and returns -ESRCH even if the PID
2286 2286 * exists but it's not belonging to the target process anymore. This
2287 2287 * method solves the problem of threads exiting and PIDs getting reused.
2288 2288 */
... ... @@ -215,7 +215,7 @@
215 215 * This routine uses RCU to synchronize with changes to the chain.
216 216 *
217 217 * If the return value of the notifier can be and'ed
218   - * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
  218 + * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
219 219 * will return immediately, with the return value of
220 220 * the notifier function which halted execution.
221 221 * Otherwise the return value is the return value
... ... @@ -313,7 +313,7 @@
313 313 * run in a process context, so they are allowed to block.
314 314 *
315 315 * If the return value of the notifier can be and'ed
316   - * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
  316 + * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
317 317 * will return immediately, with the return value of
318 318 * the notifier function which halted execution.
319 319 * Otherwise the return value is the return value
... ... @@ -393,7 +393,7 @@
393 393 * All locking must be provided by the caller.
394 394 *
395 395 * If the return value of the notifier can be and'ed
396   - * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
  396 + * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
397 397 * will return immediately, with the return value of
398 398 * the notifier function which halted execution.
399 399 * Otherwise the return value is the return value
... ... @@ -487,7 +487,7 @@
487 487 * run in a process context, so they are allowed to block.
488 488 *
489 489 * If the return value of the notifier can be and'ed
490   - * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain
  490 + * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
491 491 * will return immediately, with the return value of
492 492 * the notifier function which halted execution.
493 493 * Otherwise the return value is the return value
... ... @@ -538,7 +538,7 @@
538 538 * Registers a function with the list of functions
539 539 * to be called at reboot time.
540 540 *
541   - * Currently always returns zero, as blocking_notifier_chain_register
  541 + * Currently always returns zero, as blocking_notifier_chain_register()
542 542 * always returns zero.
543 543 */
544 544  
... ... @@ -85,7 +85,7 @@
85 85 * @j: the time in (absolute) jiffies that should be rounded
86 86 * @cpu: the processor number on which the timeout will happen
87 87 *
88   - * __round_jiffies rounds an absolute time in the future (in jiffies)
  88 + * __round_jiffies() rounds an absolute time in the future (in jiffies)
89 89 * up or down to (approximately) full seconds. This is useful for timers
90 90 * for which the exact time they fire does not matter too much, as long as
91 91 * they fire approximately every X seconds.
... ... @@ -98,7 +98,7 @@
98 98 * processors firing at the exact same time, which could lead
99 99 * to lock contention or spurious cache line bouncing.
100 100 *
101   - * The return value is the rounded version of the "j" parameter.
  101 + * The return value is the rounded version of the @j parameter.
102 102 */
103 103 unsigned long __round_jiffies(unsigned long j, int cpu)
104 104 {
... ... @@ -142,7 +142,7 @@
142 142 * @j: the time in (relative) jiffies that should be rounded
143 143 * @cpu: the processor number on which the timeout will happen
144 144 *
145   - * __round_jiffies_relative rounds a time delta in the future (in jiffies)
  145 + * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
146 146 * up or down to (approximately) full seconds. This is useful for timers
147 147 * for which the exact time they fire does not matter too much, as long as
148 148 * they fire approximately every X seconds.
... ... @@ -155,7 +155,7 @@
155 155 * processors firing at the exact same time, which could lead
156 156 * to lock contention or spurious cache line bouncing.
157 157 *
158   - * The return value is the rounded version of the "j" parameter.
  158 + * The return value is the rounded version of the @j parameter.
159 159 */
160 160 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
161 161 {
... ... @@ -173,7 +173,7 @@
173 173 * round_jiffies - function to round jiffies to a full second
174 174 * @j: the time in (absolute) jiffies that should be rounded
175 175 *
176   - * round_jiffies rounds an absolute time in the future (in jiffies)
  176 + * round_jiffies() rounds an absolute time in the future (in jiffies)
177 177 * up or down to (approximately) full seconds. This is useful for timers
178 178 * for which the exact time they fire does not matter too much, as long as
179 179 * they fire approximately every X seconds.
... ... @@ -182,7 +182,7 @@
182 182 * at the same time, rather than at various times spread out. The goal
183 183 * of this is to have the CPU wake up less, which saves power.
184 184 *
185   - * The return value is the rounded version of the "j" parameter.
  185 + * The return value is the rounded version of the @j parameter.
186 186 */
187 187 unsigned long round_jiffies(unsigned long j)
188 188 {
... ... @@ -194,7 +194,7 @@
194 194 * round_jiffies_relative - function to round jiffies to a full second
195 195 * @j: the time in (relative) jiffies that should be rounded
196 196 *
197   - * round_jiffies_relative rounds a time delta in the future (in jiffies)
  197 + * round_jiffies_relative() rounds a time delta in the future (in jiffies)
198 198 * up or down to (approximately) full seconds. This is useful for timers
199 199 * for which the exact time they fire does not matter too much, as long as
200 200 * they fire approximately every X seconds.
... ... @@ -203,7 +203,7 @@
203 203 * at the same time, rather than at various times spread out. The goal
204 204 * of this is to have the CPU wake up less, which saves power.
205 205 *
206   - * The return value is the rounded version of the "j" parameter.
  206 + * The return value is the rounded version of the @j parameter.
207 207 */
208 208 unsigned long round_jiffies_relative(unsigned long j)
209 209 {
... ... @@ -387,7 +387,7 @@
387 387 * @timer: the timer to be modified
388 388 * @expires: new timeout in jiffies
389 389 *
390   - * mod_timer is a more efficient way to update the expire field of an
  390 + * mod_timer() is a more efficient way to update the expire field of an
391 391 * active timer (if the timer is inactive it will be activated)
392 392 *
393 393 * mod_timer(timer, expires) is equivalent to:
... ... @@ -490,7 +490,7 @@
490 490 * the timer it also makes sure the handler has finished executing on other
491 491 * CPUs.
492 492 *
493   - * Synchronization rules: callers must prevent restarting of the timer,
  493 + * Synchronization rules: Callers must prevent restarting of the timer,
494 494 * otherwise this function is meaningless. It must not be called from
495 495 * interrupt contexts. The caller must not hold locks which would prevent
496 496 * completion of the timer's handler. The timer's handler must not call
... ... @@ -656,8 +656,7 @@
656 656 EXPORT_SYMBOL(flush_scheduled_work);
657 657  
658 658 /**
659   - * cancel_rearming_delayed_workqueue - reliably kill off a delayed
660   - * work whose handler rearms the delayed work.
  659 + * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
661 660 * @wq: the controlling workqueue structure
662 661 * @dwork: the delayed work struct
663 662 */
... ... @@ -670,8 +669,7 @@
670 669 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
671 670  
672 671 /**
673   - * cancel_rearming_delayed_work - reliably kill off a delayed keventd
674   - * work whose handler rearms the delayed work.
  672 + * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
675 673 * @dwork: the delayed work struct
676 674 */
677 675 void cancel_rearming_delayed_work(struct delayed_work *dwork)
... ... @@ -95,7 +95,7 @@
95 95 }
96 96 EXPORT_SYMBOL(__bitmap_complement);
97 97  
98   -/*
  98 +/**
99 99 * __bitmap_shift_right - logical right shift of the bits in a bitmap
100 100 * @dst - destination bitmap
101 101 * @src - source bitmap
... ... @@ -139,7 +139,7 @@
139 139 EXPORT_SYMBOL(__bitmap_shift_right);
140 140  
141 141  
142   -/*
  142 +/**
143 143 * __bitmap_shift_left - logical left shift of the bits in a bitmap
144 144 * @dst - destination bitmap
145 145 * @src - source bitmap
... ... @@ -529,7 +529,7 @@
529 529 }
530 530 EXPORT_SYMBOL(bitmap_parselist);
531 531  
532   -/*
  532 +/**
533 533 * bitmap_pos_to_ord(buf, pos, bits)
534 534 * @buf: pointer to a bitmap
535 535 * @pos: a bit position in @buf (0 <= @pos < @bits)
... ... @@ -804,7 +804,7 @@
804 804 * @pos: beginning of bit region to release
805 805 * @order: region size (log base 2 of number of bits) to release
806 806 *
807   - * This is the complement to __bitmap_find_free_region and releases
  807 + * This is the complement to __bitmap_find_free_region() and releases
808 808 * the found region (by clearing it in the bitmap).
809 809 *
810 810 * No return value.
... ... @@ -43,10 +43,10 @@
43 43 * comma as well.
44 44 *
45 45 * Return values:
46   - * 0 : no int in string
47   - * 1 : int found, no subsequent comma
48   - * 2 : int found including a subsequent comma
49   - * 3 : hyphen found to denote a range
  46 + * 0 - no int in string
  47 + * 1 - int found, no subsequent comma
  48 + * 2 - int found including a subsequent comma
  49 + * 3 - hyphen found to denote a range
50 50 */
51 51  
52 52 int get_option (char **str, int *pint)
... ... @@ -329,8 +329,8 @@
329 329  
330 330 /**
331 331 * idr_remove - remove the given id and free it's slot
332   - * idp: idr handle
333   - * id: uniqueue key
  332 + * @idp: idr handle
  333 + * @id: unique key
334 334 */
335 335 void idr_remove(struct idr *idp, int id)
336 336 {
... ... @@ -97,11 +97,12 @@
97 97 }
98 98  
99 99 /**
100   - * kobject_get_path - generate and return the path associated with a given kobj
101   - * and kset pair. The result must be freed by the caller with kfree().
  100 + * kobject_get_path - generate and return the path associated with a given kobj and kset pair.
102 101 *
103 102 * @kobj: kobject in question, with which to build the path
104 103 * @gfp_mask: the allocation type used to allocate the path
  104 + *
  105 + * The result must be freed by the caller with kfree().
105 106 */
106 107 char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
107 108 {
... ... @@ -20,8 +20,8 @@
20 20 #define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */
21 21 #define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */
22 22  
23   -/*
24   - * sha_transform: single block SHA1 transform
  23 +/**
  24 + * sha_transform - single block SHA1 transform
25 25 *
26 26 * @digest: 160 bit digest to update
27 27 * @data: 512 bits of data to hash
... ... @@ -80,9 +80,8 @@
80 80 }
81 81 EXPORT_SYMBOL(sha_transform);
82 82  
83   -/*
84   - * sha_init: initialize the vectors for a SHA1 digest
85   - *
  83 +/**
  84 + * sha_init - initialize the vectors for a SHA1 digest
86 85 * @buf: vector to initialize
87 86 */
88 87 void sha_init(__u32 *buf)
... ... @@ -27,7 +27,7 @@
27 27 } while (--size > 0);
28 28 }
29 29  
30   -/*
  30 +/**
31 31 * sort - sort an array of elements
32 32 * @base: pointer to data to sort
33 33 * @num: number of elements
... ... @@ -160,7 +160,7 @@
160 160 * @src: The string to append to it
161 161 * @count: The maximum numbers of bytes to copy
162 162 *
163   - * Note that in contrast to strncpy, strncat ensures the result is
  163 + * Note that in contrast to strncpy(), strncat() ensures the result is
164 164 * terminated.
165 165 */
166 166 char *strncat(char *dest, const char *src, size_t count)
... ... @@ -366,8 +366,7 @@
366 366  
367 367 #ifndef __HAVE_ARCH_STRSPN
368 368 /**
369   - * strspn - Calculate the length of the initial substring of @s which only
370   - * contain letters in @accept
  369 + * strspn - Calculate the length of the initial substring of @s which only contain letters in @accept
371 370 * @s: The string to be searched
372 371 * @accept: The string to search for
373 372 */
... ... @@ -394,8 +393,7 @@
394 393  
395 394 #ifndef __HAVE_ARCH_STRCSPN
396 395 /**
397   - * strcspn - Calculate the length of the initial substring of @s which does
398   - * not contain letters in @reject
  396 + * strcspn - Calculate the length of the initial substring of @s which does not contain letters in @reject
399 397 * @s: The string to be searched
400 398 * @reject: The string to avoid
401 399 */
... ... @@ -218,7 +218,7 @@
218 218 * Call textsearch_next() to retrieve subsequent matches.
219 219 *
220 220 * Returns the position of first occurrence of the pattern or
221   - * UINT_MAX if no occurrence was found.
  221 + * %UINT_MAX if no occurrence was found.
222 222 */
223 223 unsigned int textsearch_find_continuous(struct ts_config *conf,
224 224 struct ts_state *state,
... ... @@ -247,12 +247,12 @@
247 247 * be generated for the given input, excluding the trailing
248 248 * '\0', as per ISO C99. If you want to have the exact
249 249 * number of characters written into @buf as return value
250   - * (not including the trailing '\0'), use vscnprintf. If the
  250 + * (not including the trailing '\0'), use vscnprintf(). If the
251 251 * return is greater than or equal to @size, the resulting
252 252 * string is truncated.
253 253 *
254 254 * Call this function if you are already dealing with a va_list.
255   - * You probably want snprintf instead.
  255 + * You probably want snprintf() instead.
256 256 */
257 257 int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
258 258 {
... ... @@ -509,7 +509,7 @@
509 509 * returns 0.
510 510 *
511 511 * Call this function if you are already dealing with a va_list.
512   - * You probably want scnprintf instead.
  512 + * You probably want scnprintf() instead.
513 513 */
514 514 int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
515 515 {
516 516  
... ... @@ -577,11 +577,11 @@
577 577 * @args: Arguments for the format string
578 578 *
579 579 * The function returns the number of characters written
580   - * into @buf. Use vsnprintf or vscnprintf in order to avoid
  580 + * into @buf. Use vsnprintf() or vscnprintf() in order to avoid
581 581 * buffer overflows.
582 582 *
583 583 * Call this function if you are already dealing with a va_list.
584   - * You probably want sprintf instead.
  584 + * You probably want sprintf() instead.
585 585 */
586 586 int vsprintf(char *buf, const char *fmt, va_list args)
587 587 {
... ... @@ -597,7 +597,7 @@
597 597 * @...: Arguments for the format string
598 598 *
599 599 * The function returns the number of characters written
600   - * into @buf. Use snprintf or scnprintf in order to avoid
  600 + * into @buf. Use snprintf() or scnprintf() in order to avoid
601 601 * buffer overflows.
602 602 */
603 603 int sprintf(char * buf, const char *fmt, ...)
... ... @@ -327,7 +327,7 @@
327 327 * @pos: beginning offset in pages to write
328 328 * @count: number of bytes to write
329 329 *
330   - * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
  330 + * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
331 331 * as it forces O_SYNC writers to different parts of the same file
332 332 * to be serialised right until io completion.
333 333 */
... ... @@ -784,7 +784,7 @@
784 784 * @mapping: target address_space
785 785 * @index: the page index
786 786 *
787   - * Same as grab_cache_page, but do not wait if the page is unavailable.
  787 + * Same as grab_cache_page(), but do not wait if the page is unavailable.
788 788 * This is intended for speculative data generators, where the data can
789 789 * be regenerated if the page couldn't be grabbed. This routine should
790 790 * be safe to call while holding the lock for another page.
... ... @@ -1775,9 +1775,7 @@
1775 1775 }
1776 1776  
1777 1777 /**
1778   - * unmap_mapping_range - unmap the portion of all mmaps
1779   - * in the specified address_space corresponding to the specified
1780   - * page range in the underlying file.
  1778 + * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
1781 1779 * @mapping: the address space containing mmaps to be unmapped.
1782 1780 * @holebegin: byte in first page to unmap, relative to the start of
1783 1781 * the underlying file. This will be rounded down to a PAGE_SIZE
... ... @@ -46,9 +46,9 @@
46 46 * @pool_data: optional private data available to the user-defined functions.
47 47 *
48 48 * this function creates and allocates a guaranteed size, preallocated
49   - * memory pool. The pool can be used from the mempool_alloc and mempool_free
  49 + * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
50 50 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
51   - * functions might sleep - as long as the mempool_alloc function is not called
  51 + * functions might sleep - as long as the mempool_alloc() function is not called
52 52 * from IRQ contexts.
53 53 */
54 54 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
... ... @@ -195,7 +195,7 @@
195 195 * mempool_create().
196 196 * @gfp_mask: the usual allocation bitmask.
197 197 *
198   - * this function only sleeps if the alloc_fn function sleeps or
  198 + * this function only sleeps if the alloc_fn() function sleeps or
199 199 * returns NULL. Note that due to preallocation, this function
200 200 * *never* fails when called from process contexts. (it might
201 201 * fail if called from an IRQ context.)
... ... @@ -549,9 +549,7 @@
549 549 }
550 550  
551 551 /**
552   - * generic_writepages - walk the list of dirty pages of the given
553   - * address space and writepage() all of them.
554   - *
  552 + * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
555 553 * @mapping: address space structure to write
556 554 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
557 555 *
... ... @@ -698,7 +696,6 @@
698 696  
699 697 /**
700 698 * write_one_page - write out a single page and optionally wait on I/O
701   - *
702 699 * @page: the page to write
703 700 * @wait: if true, wait on writeout
704 701 *
... ... @@ -2520,7 +2520,7 @@
2520 2520 * kmem_cache_destroy - delete a cache
2521 2521 * @cachep: the cache to destroy
2522 2522 *
2523   - * Remove a struct kmem_cache object from the slab cache.
  2523 + * Remove a &struct kmem_cache object from the slab cache.
2524 2524 *
2525 2525 * It is expected this function will be called by a module when it is
2526 2526 * unloaded. This will remove the cache completely, and avoid a duplicate
... ... @@ -699,7 +699,7 @@
699 699 * that it is big enough to cover the vma. Will return failure if
700 700 * that criteria isn't met.
701 701 *
702   - * Similar to remap_pfn_range (see mm/memory.c)
  702 + * Similar to remap_pfn_range() (see mm/memory.c)
703 703 */
704 704 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
705 705 unsigned long pgoff)