Commit 183ff22bb6bd8188c904ebfb479656ae52230b72

Authored by Simon Arlott
Committed by Adrian Bunk
1 parent 676b1855de

spelling fixes: mm/

Spelling fixes in mm/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>

Showing 11 changed files with 18 additions and 18 deletions

mm/hugetlb.c
... ... @@ -1017,7 +1017,7 @@
1017 1017  
1018 1018 /* If we are below the current region then a new region is required.
1019 1019 * Subtle, allocate a new region at the position but make it zero
1020   - * size such that we can guarentee to record the reservation. */
  1020 + * size such that we can guarantee to record the reservation. */
1021 1021 if (&rg->link == head || t < rg->from) {
1022 1022 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
1023 1023 if (!nrg)
mm/memory.c
... ... @@ -2713,7 +2713,7 @@
2713 2713 return 0;
2714 2714  
2715 2715 down_read(&mm->mmap_sem);
2716   - /* ignore errors, just check how much was sucessfully transfered */
  2716 + /* ignore errors, just check how much was successfully transferred */
2717 2717 while (len) {
2718 2718 int bytes, ret, offset;
2719 2719 void *maddr;
mm/memory_hotplug.c
... ... @@ -121,7 +121,7 @@
121 121 err = __add_section(zone, i << PFN_SECTION_SHIFT);
122 122  
123 123 /*
124   - * EEXIST is finally dealed with by ioresource collision
  124 + * EEXIST is finally dealt with by ioresource collision
125 125 * check. see add_memory() => register_memory_resource()
126 126 * Warning will be printed if there is collision.
127 127 */
mm/mempool.c
... ... @@ -299,7 +299,7 @@
299 299  
300 300 /*
301 301 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
302   - * specfied by pool_data
  302 + * specified by pool_data
303 303 */
304 304 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
305 305 {
mm/page-writeback.c
... ... @@ -989,7 +989,7 @@
989 989 * mapping is pinned by the vma's ->vm_file reference.
990 990 *
991 991 * We take care to handle the case where the page was truncated from the
992   - * mapping by re-checking page_mapping() insode tree_lock.
  992 + * mapping by re-checking page_mapping() inside tree_lock.
993 993 */
994 994 int __set_page_dirty_nobuffers(struct page *page)
995 995 {
mm/page_alloc.c
... ... @@ -123,7 +123,7 @@
123 123  
124 124 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
125 125 /*
126   - * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct
  126 + * MAX_ACTIVE_REGIONS determines the maximum number of distinct
127 127 * ranges of memory (RAM) that may be registered with add_active_range().
128 128 * Ranges passed to add_active_range() will be merged if possible
129 129 * so the number of times add_active_range() can be called is
... ... @@ -1260,7 +1260,7 @@
1260 1260 * skip over zones that are not allowed by the cpuset, or that have
1261 1261 * been recently (in last second) found to be nearly full. See further
1262 1262 * comments in mmzone.h. Reduces cache footprint of zonelist scans
1263   - * that have to skip over alot of full or unallowed zones.
  1263 + * that have to skip over a lot of full or unallowed zones.
1264 1264 *
1265 1265 * If the zonelist cache is present in the passed in zonelist, then
1266 1266 * returns a pointer to the allowed node mask (either the current
... ... @@ -2358,7 +2358,7 @@
2358 2358 __build_all_zonelists(NULL);
2359 2359 cpuset_init_current_mems_allowed();
2360 2360 } else {
2361   - /* we have to stop all cpus to guaranntee there is no user
  2361 + /* we have to stop all cpus to guarantee there is no user
2362 2362 of zonelist */
2363 2363 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
2364 2364 /* cpuset refresh routine should be here */
... ... @@ -2864,7 +2864,7 @@
2864 2864  
2865 2865 /*
2866 2866 * Basic iterator support. Return the next active range of PFNs for a node
2867   - * Note: nid == MAX_NUMNODES returns next region regardles of node
  2867 + * Note: nid == MAX_NUMNODES returns next region regardless of node
2868 2868 */
2869 2869 static int __meminit next_active_region_index_in_nid(int index, int nid)
2870 2870 {
mm/prio_tree.c
... ... @@ -34,7 +34,7 @@
34 34 * Radix priority search tree for address_space->i_mmap
35 35 *
36 36 * For each vma that map a unique set of file pages i.e., unique [radix_index,
37   - * heap_index] value, we have a corresponing priority search tree node. If
  37 + * heap_index] value, we have a corresponding priority search tree node. If
38 38 * multiple vmas have identical [radix_index, heap_index] value, then one of
39 39 * them is used as a tree node and others are stored in a vm_set list. The tree
40 40 * node points to the first vma (head) of the list using vm_set.head.
mm/slab.c
... ... @@ -26,7 +26,7 @@
26 26 * initialized objects.
27 27 *
28 28 * This means, that your constructor is used only for newly allocated
29   - * slabs and you must pass objects with the same intializations to
  29 + * slabs and you must pass objects with the same initializations to
30 30 * kmem_cache_free.
31 31 *
32 32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
... ... @@ -1369,7 +1369,7 @@
1369 1369 * structure is usually allocated from kmem_cache_create() and
1370 1370 * gets destroyed at kmem_cache_destroy().
1371 1371 */
1372   - /* fall thru */
  1372 + /* fall through */
1373 1373 #endif
1374 1374 case CPU_UP_CANCELED:
1375 1375 case CPU_UP_CANCELED_FROZEN:
... ... @@ -3806,7 +3806,7 @@
3806 3806 EXPORT_SYMBOL_GPL(kmem_cache_name);
3807 3807  
3808 3808 /*
3809   - * This initializes kmem_list3 or resizes varioius caches for all nodes.
  3809 + * This initializes kmem_list3 or resizes various caches for all nodes.
3810 3810 */
3811 3811 static int alloc_kmemlist(struct kmem_cache *cachep)
3812 3812 {
mm/swap.c
... ... @@ -5,7 +5,7 @@
5 5 */
6 6  
7 7 /*
8   - * This file contains the default values for the opereation of the
  8 + * This file contains the default values for the operation of the
9 9 * Linux VM subsystem. Fine-tuning documentation can be found in
10 10 * Documentation/sysctl/vm.txt.
11 11 * Started 18.12.91
mm/vmalloc.c
... ... @@ -247,7 +247,7 @@
247 247 EXPORT_SYMBOL_GPL(__get_vm_area);
248 248  
249 249 /**
250   - * get_vm_area - reserve a contingous kernel virtual area
  250 + * get_vm_area - reserve a contiguous kernel virtual area
251 251 * @size: size of the area
252 252 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
253 253 *
... ... @@ -303,7 +303,7 @@
303 303 }
304 304  
305 305 /**
306   - * remove_vm_area - find and remove a contingous kernel virtual area
  306 + * remove_vm_area - find and remove a continuous kernel virtual area
307 307 * @addr: base address
308 308 *
309 309 * Search for the kernel VM area starting at @addr, and remove it.
... ... @@ -364,7 +364,7 @@
364 364 * vfree - release memory allocated by vmalloc()
365 365 * @addr: memory base address
366 366 *
367   - * Free the virtually contiguous memory area starting at @addr, as
  367 + * Free the virtually continuous memory area starting at @addr, as
368 368 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
369 369 * NULL, no operation is performed.
370 370 *
mm/vmscan.c
... ... @@ -141,7 +141,7 @@
141 141 * percentages of the lru and ageable caches. This should balance the seeks
142 142 * generated by these structures.
143 143 *
144   - * If the vm encounted mapped pages on the LRU it increase the pressure on
  144 + * If the vm encountered mapped pages on the LRU it increase the pressure on
145 145 * slab to avoid swapping.
146 146 *
147 147 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.