Commit 2441d15c97d498b18f03ae9fba262ffeae42a08b

Authored by Tejun Heo
1 parent 6a242909b0

percpu: cosmetic renames in pcpu_setup_first_chunk()

Impact: cosmetic, preparation for future changes

Make the following renames in pcpu_setup_first_chunk() in preparation
for future changes.

* s/free_size/dyn_size/
* s/static_vm/first_vm/
* s/static_chunk/schunk/

Signed-off-by: Tejun Heo <tj@kernel.org>

Showing 2 changed files with 30 additions and 30 deletions Side-by-side Diff

include/linux/percpu.h
... ... @@ -118,7 +118,7 @@
118 118  
119 119 extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
120 120 size_t static_size, size_t unit_size,
121   - size_t free_size, void *base_addr,
  121 + size_t dyn_size, void *base_addr,
122 122 pcpu_populate_pte_fn_t populate_pte_fn);
123 123  
124 124 /*
... ... @@ -831,7 +831,7 @@
831 831 * @get_page_fn: callback to fetch page pointer
832 832 * @static_size: the size of static percpu area in bytes
833 833 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, 0 for auto
834   - * @free_size: free size in bytes, 0 for auto
  834 + * @dyn_size: free size for dynamic allocation in bytes, 0 for auto
835 835 * @base_addr: mapped address, NULL for auto
836 836 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
837 837 *
838 838  
839 839  
... ... @@ -849,12 +849,12 @@
849 849 * return the same number of pages for all cpus.
850 850 *
851 851 * @unit_size, if non-zero, determines unit size and must be aligned
852   - * to PAGE_SIZE and equal to or larger than @static_size + @free_size.
  852 + * to PAGE_SIZE and equal to or larger than @static_size + @dyn_size.
853 853 *
854   - * @free_size determines the number of free bytes after the static
  854 + * @dyn_size determines the number of free bytes after the static
855 855 * area in the first chunk. If zero, whatever left is available.
856 856 * Specifying non-zero value make percpu leave the area after
857   - * @static_size + @free_size alone.
  857 + * @static_size + @dyn_size alone.
858 858 *
859 859 * Non-null @base_addr means that the caller already allocated virtual
860 860 * region for the first chunk and mapped it. percpu must not mess
861 861  
862 862  
... ... @@ -870,19 +870,19 @@
870 870 */
871 871 size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
872 872 size_t static_size, size_t unit_size,
873   - size_t free_size, void *base_addr,
  873 + size_t dyn_size, void *base_addr,
874 874 pcpu_populate_pte_fn_t populate_pte_fn)
875 875 {
876   - static struct vm_struct static_vm;
877   - struct pcpu_chunk *static_chunk;
  876 + static struct vm_struct first_vm;
  877 + struct pcpu_chunk *schunk;
878 878 unsigned int cpu;
879 879 int nr_pages;
880 880 int err, i;
881 881  
882 882 /* santiy checks */
883 883 BUG_ON(!static_size);
884   - BUG_ON(!unit_size && free_size);
885   - BUG_ON(unit_size && unit_size < static_size + free_size);
  884 + BUG_ON(!unit_size && dyn_size);
  885 + BUG_ON(unit_size && unit_size < static_size + dyn_size);
886 886 BUG_ON(unit_size & ~PAGE_MASK);
887 887 BUG_ON(base_addr && !unit_size);
888 888 BUG_ON(base_addr && populate_pte_fn);
889 889  
890 890  
891 891  
892 892  
893 893  
... ... @@ -908,24 +908,24 @@
908 908 for (i = 0; i < pcpu_nr_slots; i++)
909 909 INIT_LIST_HEAD(&pcpu_slot[i]);
910 910  
911   - /* init static_chunk */
912   - static_chunk = alloc_bootmem(pcpu_chunk_struct_size);
913   - INIT_LIST_HEAD(&static_chunk->list);
914   - static_chunk->vm = &static_vm;
  911 + /* init static chunk */
  912 + schunk = alloc_bootmem(pcpu_chunk_struct_size);
  913 + INIT_LIST_HEAD(&schunk->list);
  914 + schunk->vm = &first_vm;
915 915  
916   - if (free_size)
917   - static_chunk->free_size = free_size;
  916 + if (dyn_size)
  917 + schunk->free_size = dyn_size;
918 918 else
919   - static_chunk->free_size = pcpu_unit_size - pcpu_static_size;
  919 + schunk->free_size = pcpu_unit_size - pcpu_static_size;
920 920  
921   - static_chunk->contig_hint = static_chunk->free_size;
  921 + schunk->contig_hint = schunk->free_size;
922 922  
923 923 /* allocate vm address */
924   - static_vm.flags = VM_ALLOC;
925   - static_vm.size = pcpu_chunk_size;
  924 + first_vm.flags = VM_ALLOC;
  925 + first_vm.size = pcpu_chunk_size;
926 926  
927 927 if (!base_addr)
928   - vm_area_register_early(&static_vm, PAGE_SIZE);
  928 + vm_area_register_early(&first_vm, PAGE_SIZE);
929 929 else {
930 930 /*
931 931 * Pages already mapped. No need to remap into
... ... @@ -933,8 +933,8 @@
933 933 * be mapped or unmapped by percpu and is marked
934 934 * immutable.
935 935 */
936   - static_vm.addr = base_addr;
937   - static_chunk->immutable = true;
  936 + first_vm.addr = base_addr;
  937 + schunk->immutable = true;
938 938 }
939 939  
940 940 /* assign pages */
... ... @@ -945,7 +945,7 @@
945 945  
946 946 if (!page)
947 947 break;
948   - *pcpu_chunk_pagep(static_chunk, cpu, i) = page;
  948 + *pcpu_chunk_pagep(schunk, cpu, i) = page;
949 949 }
950 950  
951 951 BUG_ON(i < PFN_UP(pcpu_static_size));
952 952  
953 953  
954 954  
... ... @@ -960,21 +960,21 @@
960 960 if (populate_pte_fn) {
961 961 for_each_possible_cpu(cpu)
962 962 for (i = 0; i < nr_pages; i++)
963   - populate_pte_fn(pcpu_chunk_addr(static_chunk,
  963 + populate_pte_fn(pcpu_chunk_addr(schunk,
964 964 cpu, i));
965 965  
966   - err = pcpu_map(static_chunk, 0, nr_pages);
  966 + err = pcpu_map(schunk, 0, nr_pages);
967 967 if (err)
968 968 panic("failed to setup static percpu area, err=%d\n",
969 969 err);
970 970 }
971 971  
972   - /* link static_chunk in */
973   - pcpu_chunk_relocate(static_chunk, -1);
974   - pcpu_chunk_addr_insert(static_chunk);
  972 + /* link the first chunk in */
  973 + pcpu_chunk_relocate(schunk, -1);
  974 + pcpu_chunk_addr_insert(schunk);
975 975  
976 976 /* we're done */
977   - pcpu_base_addr = (void *)pcpu_chunk_addr(static_chunk, 0, 0);
  977 + pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
978 978 return pcpu_unit_size;
979 979 }