Commit 66cdef663cd7a97aff6bbbf41a81a0205dc81ba2

Authored by Ganesh Mahendran
Committed by Linus Torvalds
1 parent 136f49b917

mm/zsmalloc: adjust order of functions

Currently, the functions in zsmalloc.c are not arranged in a readable and
reasonable order.  As more and more functions are added, this becomes
inconvenient.  For example:

Current functions:

    void zs_init()
    {
    }

    static void get_maxobj_per_zspage()
    {
    }

Now suppose I want to add a func_1() that is called from zs_init(), and this
newly added func_1() uses get_maxobj_per_zspage(), which is defined below
zs_init():

    void func_1()
    {
        get_maxobj_per_zspage();
    }

    void zs_init()
    {
        func_1();
    }

    static void get_maxobj_per_zspage()
    {
    }

This will cause a compile error, so we must add a declaration:

    static void get_maxobj_per_zspage();

before func_1(), unless we define get_maxobj_per_zspage() before func_1().
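
With the ordering this patch adopts, the helper simply sits above its callers
and no extra declaration is needed.  Below is a minimal, self-contained sketch
of that layout (hypothetical names and a hard-coded page size, not the actual
zsmalloc code):

    #include <stdio.h>

    /*
     * Stand-in for the zsmalloc helper; the page size is hard-coded to
     * 4096 here only to keep the sketch self-contained.
     */
    static unsigned int get_maxobj_per_zspage(unsigned int size,
                                              unsigned int pages_per_zspage)
    {
        return pages_per_zspage * 4096 / size;
    }

    /* Callers below the helper need no forward declaration. */
    static void func_1(void)
    {
        printf("%u objects per zspage\n", get_maxobj_per_zspage(32, 1));
    }

    int main(void)
    {
        func_1();
        return 0;
    }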

In addition, putting the module_[init|exit] functions at the bottom of the
file conforms to the usual kernel convention.

So, this patch adjusts the function order to:

    /* helper functions */
    ...
    obj_location_to_handle()
    ...

    /* Some exported functions */
    ...

    zs_map_object()
    zs_unmap_object()

    zs_malloc()
    zs_free()

    zs_init()
    zs_exit()
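
As a rough sketch of that bottom-of-file convention, here is a generic module
skeleton (illustrative only, with hypothetical example_init/example_exit names,
not the zsmalloc code):

    #include <linux/init.h>
    #include <linux/module.h>

    /* ... helpers and exported functions would live above this point ... */

    static int __init example_init(void)
    {
        return 0;       /* nothing to set up in this skeleton */
    }

    static void __exit example_exit(void)
    {
        /* nothing to tear down */
    }

    /* The registration macros conventionally close out the file. */
    module_init(example_init);
    module_exit(example_exit);

    MODULE_LICENSE("GPL");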

Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 187 additions and 187 deletions

... ... @@ -884,19 +884,6 @@
884 884 .notifier_call = zs_cpu_notifier
885 885 };
886 886  
887   -static void zs_unregister_cpu_notifier(void)
888   -{
889   - int cpu;
890   -
891   - cpu_notifier_register_begin();
892   -
893   - for_each_online_cpu(cpu)
894   - zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
895   - __unregister_cpu_notifier(&zs_cpu_nb);
896   -
897   - cpu_notifier_register_done();
898   -}
899   -
900 887 static int zs_register_cpu_notifier(void)
901 888 {
902 889 int cpu, uninitialized_var(ret);
... ... @@ -914,6 +901,19 @@
914 901 return notifier_to_errno(ret);
915 902 }
916 903  
  904 +static void zs_unregister_cpu_notifier(void)
  905 +{
  906 + int cpu;
  907 +
  908 + cpu_notifier_register_begin();
  909 +
  910 + for_each_online_cpu(cpu)
  911 + zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
  912 + __unregister_cpu_notifier(&zs_cpu_nb);
  913 +
  914 + cpu_notifier_register_done();
  915 +}
  916 +
917 917 static void init_zs_size_classes(void)
918 918 {
919 919 int nr;
... ... @@ -925,31 +925,6 @@
925 925 zs_size_classes = nr;
926 926 }
927 927  
928   -static void __exit zs_exit(void)
929   -{
930   -#ifdef CONFIG_ZPOOL
931   - zpool_unregister_driver(&zs_zpool_driver);
932   -#endif
933   - zs_unregister_cpu_notifier();
934   -}
935   -
936   -static int __init zs_init(void)
937   -{
938   - int ret = zs_register_cpu_notifier();
939   -
940   - if (ret) {
941   - zs_unregister_cpu_notifier();
942   - return ret;
943   - }
944   -
945   - init_zs_size_classes();
946   -
947   -#ifdef CONFIG_ZPOOL
948   - zpool_register_driver(&zs_zpool_driver);
949   -#endif
950   - return 0;
951   -}
952   -
953 928 static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
954 929 {
955 930 return pages_per_zspage * PAGE_SIZE / size;
... ... @@ -967,113 +942,101 @@
967 942 return true;
968 943 }
969 944  
  945 +unsigned long zs_get_total_pages(struct zs_pool *pool)
  946 +{
  947 + return atomic_long_read(&pool->pages_allocated);
  948 +}
  949 +EXPORT_SYMBOL_GPL(zs_get_total_pages);
  950 +
970 951 /**
971   - * zs_create_pool - Creates an allocation pool to work from.
972   - * @flags: allocation flags used to allocate pool metadata
  952 + * zs_map_object - get address of allocated object from handle.
  953 + * @pool: pool from which the object was allocated
  954 + * @handle: handle returned from zs_malloc
973 955 *
974   - * This function must be called before anything when using
975   - * the zsmalloc allocator.
  956 + * Before using an object allocated from zs_malloc, it must be mapped using
  957 + * this function. When done with the object, it must be unmapped using
  958 + * zs_unmap_object.
976 959 *
977   - * On success, a pointer to the newly created pool is returned,
978   - * otherwise NULL.
  960 + * Only one object can be mapped per cpu at a time. There is no protection
  961 + * against nested mappings.
  962 + *
  963 + * This function returns with preemption and page faults disabled.
979 964 */
980   -struct zs_pool *zs_create_pool(gfp_t flags)
  965 +void *zs_map_object(struct zs_pool *pool, unsigned long handle,
  966 + enum zs_mapmode mm)
981 967 {
982   - int i;
983   - struct zs_pool *pool;
984   - struct size_class *prev_class = NULL;
  968 + struct page *page;
  969 + unsigned long obj_idx, off;
985 970  
986   - pool = kzalloc(sizeof(*pool), GFP_KERNEL);
987   - if (!pool)
988   - return NULL;
  971 + unsigned int class_idx;
  972 + enum fullness_group fg;
  973 + struct size_class *class;
  974 + struct mapping_area *area;
  975 + struct page *pages[2];
989 976  
990   - pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
991   - GFP_KERNEL);
992   - if (!pool->size_class) {
993   - kfree(pool);
994   - return NULL;
995   - }
  977 + BUG_ON(!handle);
996 978  
997 979 /*
998   - * Iterate reversly, because, size of size_class that we want to use
999   - * for merging should be larger or equal to current size.
  980 + * Because we use per-cpu mapping areas shared among the
  981 + * pools/users, we can't allow mapping in interrupt context
  982 + * because it can corrupt another users mappings.
1000 983 */
1001   - for (i = zs_size_classes - 1; i >= 0; i--) {
1002   - int size;
1003   - int pages_per_zspage;
1004   - struct size_class *class;
  984 + BUG_ON(in_interrupt());
1005 985  
1006   - size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
1007   - if (size > ZS_MAX_ALLOC_SIZE)
1008   - size = ZS_MAX_ALLOC_SIZE;
1009   - pages_per_zspage = get_pages_per_zspage(size);
  986 + obj_handle_to_location(handle, &page, &obj_idx);
  987 + get_zspage_mapping(get_first_page(page), &class_idx, &fg);
  988 + class = pool->size_class[class_idx];
  989 + off = obj_idx_to_offset(page, obj_idx, class->size);
1010 990  
1011   - /*
1012   - * size_class is used for normal zsmalloc operation such
1013   - * as alloc/free for that size. Although it is natural that we
1014   - * have one size_class for each size, there is a chance that we
1015   - * can get more memory utilization if we use one size_class for
1016   - * many different sizes whose size_class have same
1017   - * characteristics. So, we makes size_class point to
1018   - * previous size_class if possible.
1019   - */
1020   - if (prev_class) {
1021   - if (can_merge(prev_class, size, pages_per_zspage)) {
1022   - pool->size_class[i] = prev_class;
1023   - continue;
1024   - }
1025   - }
1026   -
1027   - class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
1028   - if (!class)
1029   - goto err;
1030   -
1031   - class->size = size;
1032   - class->index = i;
1033   - class->pages_per_zspage = pages_per_zspage;
1034   - spin_lock_init(&class->lock);
1035   - pool->size_class[i] = class;
1036   -
1037   - prev_class = class;
  991 + area = &get_cpu_var(zs_map_area);
  992 + area->vm_mm = mm;
  993 + if (off + class->size <= PAGE_SIZE) {
  994 + /* this object is contained entirely within a page */
  995 + area->vm_addr = kmap_atomic(page);
  996 + return area->vm_addr + off;
1038 997 }
1039 998  
1040   - pool->flags = flags;
  999 + /* this object spans two pages */
  1000 + pages[0] = page;
  1001 + pages[1] = get_next_page(page);
  1002 + BUG_ON(!pages[1]);
1041 1003  
1042   - return pool;
1043   -
1044   -err:
1045   - zs_destroy_pool(pool);
1046   - return NULL;
  1004 + return __zs_map_object(area, pages, off, class->size);
1047 1005 }
1048   -EXPORT_SYMBOL_GPL(zs_create_pool);
  1006 +EXPORT_SYMBOL_GPL(zs_map_object);
1049 1007  
1050   -void zs_destroy_pool(struct zs_pool *pool)
  1008 +void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1051 1009 {
1052   - int i;
  1010 + struct page *page;
  1011 + unsigned long obj_idx, off;
1053 1012  
1054   - for (i = 0; i < zs_size_classes; i++) {
1055   - int fg;
1056   - struct size_class *class = pool->size_class[i];
  1013 + unsigned int class_idx;
  1014 + enum fullness_group fg;
  1015 + struct size_class *class;
  1016 + struct mapping_area *area;
1057 1017  
1058   - if (!class)
1059   - continue;
  1018 + BUG_ON(!handle);
1060 1019  
1061   - if (class->index != i)
1062   - continue;
  1020 + obj_handle_to_location(handle, &page, &obj_idx);
  1021 + get_zspage_mapping(get_first_page(page), &class_idx, &fg);
  1022 + class = pool->size_class[class_idx];
  1023 + off = obj_idx_to_offset(page, obj_idx, class->size);
1063 1024  
1064   - for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
1065   - if (class->fullness_list[fg]) {
1066   - pr_info("Freeing non-empty class with size %db, fullness group %d\n",
1067   - class->size, fg);
1068   - }
1069   - }
1070   - kfree(class);
1071   - }
  1025 + area = this_cpu_ptr(&zs_map_area);
  1026 + if (off + class->size <= PAGE_SIZE)
  1027 + kunmap_atomic(area->vm_addr);
  1028 + else {
  1029 + struct page *pages[2];
1072 1030  
1073   - kfree(pool->size_class);
1074   - kfree(pool);
  1031 + pages[0] = page;
  1032 + pages[1] = get_next_page(page);
  1033 + BUG_ON(!pages[1]);
  1034 +
  1035 + __zs_unmap_object(area, pages, off, class->size);
  1036 + }
  1037 + put_cpu_var(zs_map_area);
1075 1038 }
1076   -EXPORT_SYMBOL_GPL(zs_destroy_pool);
  1039 +EXPORT_SYMBOL_GPL(zs_unmap_object);
1077 1040  
1078 1041 /**
1079 1042 * zs_malloc - Allocate block of given size from pool.
... ... @@ -1176,100 +1139,137 @@
1176 1139 EXPORT_SYMBOL_GPL(zs_free);
1177 1140  
1178 1141 /**
1179   - * zs_map_object - get address of allocated object from handle.
1180   - * @pool: pool from which the object was allocated
1181   - * @handle: handle returned from zs_malloc
  1142 + * zs_create_pool - Creates an allocation pool to work from.
  1143 + * @flags: allocation flags used to allocate pool metadata
1182 1144 *
1183   - * Before using an object allocated from zs_malloc, it must be mapped using
1184   - * this function. When done with the object, it must be unmapped using
1185   - * zs_unmap_object.
  1145 + * This function must be called before anything when using
  1146 + * the zsmalloc allocator.
1186 1147 *
1187   - * Only one object can be mapped per cpu at a time. There is no protection
1188   - * against nested mappings.
1189   - *
1190   - * This function returns with preemption and page faults disabled.
  1148 + * On success, a pointer to the newly created pool is returned,
  1149 + * otherwise NULL.
1191 1150 */
1192   -void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1193   - enum zs_mapmode mm)
  1151 +struct zs_pool *zs_create_pool(gfp_t flags)
1194 1152 {
1195   - struct page *page;
1196   - unsigned long obj_idx, off;
  1153 + int i;
  1154 + struct zs_pool *pool;
  1155 + struct size_class *prev_class = NULL;
1197 1156  
1198   - unsigned int class_idx;
1199   - enum fullness_group fg;
1200   - struct size_class *class;
1201   - struct mapping_area *area;
1202   - struct page *pages[2];
  1157 + pool = kzalloc(sizeof(*pool), GFP_KERNEL);
  1158 + if (!pool)
  1159 + return NULL;
1203 1160  
1204   - BUG_ON(!handle);
  1161 + pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
  1162 + GFP_KERNEL);
  1163 + if (!pool->size_class) {
  1164 + kfree(pool);
  1165 + return NULL;
  1166 + }
1205 1167  
1206 1168 /*
1207   - * Because we use per-cpu mapping areas shared among the
1208   - * pools/users, we can't allow mapping in interrupt context
1209   - * because it can corrupt another users mappings.
  1169 + * Iterate reversly, because, size of size_class that we want to use
  1170 + * for merging should be larger or equal to current size.
1210 1171 */
1211   - BUG_ON(in_interrupt());
  1172 + for (i = zs_size_classes - 1; i >= 0; i--) {
  1173 + int size;
  1174 + int pages_per_zspage;
  1175 + struct size_class *class;
1212 1176  
1213   - obj_handle_to_location(handle, &page, &obj_idx);
1214   - get_zspage_mapping(get_first_page(page), &class_idx, &fg);
1215   - class = pool->size_class[class_idx];
1216   - off = obj_idx_to_offset(page, obj_idx, class->size);
  1177 + size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
  1178 + if (size > ZS_MAX_ALLOC_SIZE)
  1179 + size = ZS_MAX_ALLOC_SIZE;
  1180 + pages_per_zspage = get_pages_per_zspage(size);
1217 1181  
1218   - area = &get_cpu_var(zs_map_area);
1219   - area->vm_mm = mm;
1220   - if (off + class->size <= PAGE_SIZE) {
1221   - /* this object is contained entirely within a page */
1222   - area->vm_addr = kmap_atomic(page);
1223   - return area->vm_addr + off;
  1182 + /*
  1183 + * size_class is used for normal zsmalloc operation such
  1184 + * as alloc/free for that size. Although it is natural that we
  1185 + * have one size_class for each size, there is a chance that we
  1186 + * can get more memory utilization if we use one size_class for
  1187 + * many different sizes whose size_class have same
  1188 + * characteristics. So, we makes size_class point to
  1189 + * previous size_class if possible.
  1190 + */
  1191 + if (prev_class) {
  1192 + if (can_merge(prev_class, size, pages_per_zspage)) {
  1193 + pool->size_class[i] = prev_class;
  1194 + continue;
  1195 + }
  1196 + }
  1197 +
  1198 + class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
  1199 + if (!class)
  1200 + goto err;
  1201 +
  1202 + class->size = size;
  1203 + class->index = i;
  1204 + class->pages_per_zspage = pages_per_zspage;
  1205 + spin_lock_init(&class->lock);
  1206 + pool->size_class[i] = class;
  1207 +
  1208 + prev_class = class;
1224 1209 }
1225 1210  
1226   - /* this object spans two pages */
1227   - pages[0] = page;
1228   - pages[1] = get_next_page(page);
1229   - BUG_ON(!pages[1]);
  1211 + pool->flags = flags;
1230 1212  
1231   - return __zs_map_object(area, pages, off, class->size);
  1213 + return pool;
  1214 +
  1215 +err:
  1216 + zs_destroy_pool(pool);
  1217 + return NULL;
1232 1218 }
1233   -EXPORT_SYMBOL_GPL(zs_map_object);
  1219 +EXPORT_SYMBOL_GPL(zs_create_pool);
1234 1220  
1235   -void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
  1221 +void zs_destroy_pool(struct zs_pool *pool)
1236 1222 {
1237   - struct page *page;
1238   - unsigned long obj_idx, off;
  1223 + int i;
1239 1224  
1240   - unsigned int class_idx;
1241   - enum fullness_group fg;
1242   - struct size_class *class;
1243   - struct mapping_area *area;
  1225 + for (i = 0; i < zs_size_classes; i++) {
  1226 + int fg;
  1227 + struct size_class *class = pool->size_class[i];
1244 1228  
1245   - BUG_ON(!handle);
  1229 + if (!class)
  1230 + continue;
1246 1231  
1247   - obj_handle_to_location(handle, &page, &obj_idx);
1248   - get_zspage_mapping(get_first_page(page), &class_idx, &fg);
1249   - class = pool->size_class[class_idx];
1250   - off = obj_idx_to_offset(page, obj_idx, class->size);
  1232 + if (class->index != i)
  1233 + continue;
1251 1234  
1252   - area = this_cpu_ptr(&zs_map_area);
1253   - if (off + class->size <= PAGE_SIZE)
1254   - kunmap_atomic(area->vm_addr);
1255   - else {
1256   - struct page *pages[2];
  1235 + for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
  1236 + if (class->fullness_list[fg]) {
  1237 + pr_info("Freeing non-empty class with size %db, fullness group %d\n",
  1238 + class->size, fg);
  1239 + }
  1240 + }
  1241 + kfree(class);
  1242 + }
1257 1243  
1258   - pages[0] = page;
1259   - pages[1] = get_next_page(page);
1260   - BUG_ON(!pages[1]);
  1244 + kfree(pool->size_class);
  1245 + kfree(pool);
  1246 +}
  1247 +EXPORT_SYMBOL_GPL(zs_destroy_pool);
1261 1248  
1262   - __zs_unmap_object(area, pages, off, class->size);
  1249 +static int __init zs_init(void)
  1250 +{
  1251 + int ret = zs_register_cpu_notifier();
  1252 +
  1253 + if (ret) {
  1254 + zs_unregister_cpu_notifier();
  1255 + return ret;
1263 1256 }
1264   - put_cpu_var(zs_map_area);
  1257 +
  1258 + init_zs_size_classes();
  1259 +
  1260 +#ifdef CONFIG_ZPOOL
  1261 + zpool_register_driver(&zs_zpool_driver);
  1262 +#endif
  1263 + return 0;
1265 1264 }
1266   -EXPORT_SYMBOL_GPL(zs_unmap_object);
1267 1265  
1268   -unsigned long zs_get_total_pages(struct zs_pool *pool)
  1266 +static void __exit zs_exit(void)
1269 1267 {
1270   - return atomic_long_read(&pool->pages_allocated);
  1268 +#ifdef CONFIG_ZPOOL
  1269 + zpool_unregister_driver(&zs_zpool_driver);
  1270 +#endif
  1271 + zs_unregister_cpu_notifier();
1271 1272 }
1272   -EXPORT_SYMBOL_GPL(zs_get_total_pages);
1273 1273  
1274 1274 module_init(zs_init);
1275 1275 module_exit(zs_exit);