Commit e3239ff92a17976ac5d26fa0fe40ef3a9daf2523

Authored by Benjamin Herrenschmidt
1 parent f1c2c19c49

memblock: Rename memblock_region to memblock_type and memblock_property to memblock_region

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Showing 10 changed files with 118 additions and 120 deletions Side-by-side Diff

arch/arm/mm/init.c
... ... @@ -237,7 +237,7 @@
237 237 #ifndef CONFIG_SPARSEMEM
238 238 int pfn_valid(unsigned long pfn)
239 239 {
240   - struct memblock_region *mem = &memblock.memory;
  240 + struct memblock_type *mem = &memblock.memory;
241 241 unsigned int left = 0, right = mem->cnt;
242 242  
243 243 do {
arch/arm/plat-omap/fb.c
... ... @@ -173,7 +173,7 @@
173 173  
174 174 static int valid_sdram(unsigned long addr, unsigned long size)
175 175 {
176   - struct memblock_property res;
  176 + struct memblock_region res;
177 177  
178 178 res.base = addr;
179 179 res.size = size;
arch/microblaze/mm/init.c
... ... @@ -77,8 +77,8 @@
77 77  
78 78 /* Find main memory where is the kernel */
79 79 for (i = 0; i < memblock.memory.cnt; i++) {
80   - memory_start = (u32) memblock.memory.region[i].base;
81   - memory_end = (u32) memblock.memory.region[i].base
  80 + memory_start = (u32) memblock.memory.regions[i].base;
  81 + memory_end = (u32) memblock.memory.regions[i].base
82   - + (u32) memblock.memory.region[i].size;
  82 + + (u32) memblock.memory.regions[i].size;
83 83 if ((memory_start <= (u32)_text) &&
84 84 ((u32)_text <= memory_end)) {
arch/powerpc/mm/hash_utils_64.c
... ... @@ -660,7 +660,7 @@
660 660  
661 661 /* create bolted the linear mapping in the hash table */
662 662 for (i=0; i < memblock.memory.cnt; i++) {
663   - base = (unsigned long)__va(memblock.memory.region[i].base);
  663 + base = (unsigned long)__va(memblock.memory.regions[i].base);
664   - size = memblock.memory.region[i].size;
  664 + size = memblock.memory.regions[i].size;
665 665  
666 666 DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
arch/powerpc/mm/mem.c
... ... @@ -86,10 +86,10 @@
86 86 for (i=0; i < memblock.memory.cnt; i++) {
87 87 unsigned long base;
88 88  
89   - base = memblock.memory.region[i].base;
  89 + base = memblock.memory.regions[i].base;
90 90  
91 91 if ((paddr >= base) &&
92   - (paddr < (base + memblock.memory.region[i].size))) {
  92 + (paddr < (base + memblock.memory.regions[i].size))) {
93 93 return 1;
94 94 }
95 95 }
... ... @@ -149,7 +149,7 @@
149 149 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
150 150 void *arg, int (*func)(unsigned long, unsigned long, void *))
151 151 {
152   - struct memblock_property res;
  152 + struct memblock_region res;
153 153 unsigned long pfn, len;
154 154 u64 end;
155 155 int ret = -1;
... ... @@ -206,7 +206,7 @@
206 206 /* Add active regions with valid PFNs */
207 207 for (i = 0; i < memblock.memory.cnt; i++) {
208 208 unsigned long start_pfn, end_pfn;
209   - start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
  209 + start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT;
210 210 end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
211 211 add_active_range(0, start_pfn, end_pfn);
212 212 }
213 213  
214 214  
215 215  
... ... @@ -219,16 +219,16 @@
219 219  
220 220 /* reserve the sections we're already using */
221 221 for (i = 0; i < memblock.reserved.cnt; i++) {
222   - unsigned long addr = memblock.reserved.region[i].base +
  222 + unsigned long addr = memblock.reserved.regions[i].base +
223 223 memblock_size_bytes(&memblock.reserved, i) - 1;
224 224 if (addr < lowmem_end_addr)
225   - reserve_bootmem(memblock.reserved.region[i].base,
  225 + reserve_bootmem(memblock.reserved.regions[i].base,
226 226 memblock_size_bytes(&memblock.reserved, i),
227 227 BOOTMEM_DEFAULT);
228   - else if (memblock.reserved.region[i].base < lowmem_end_addr) {
  228 + else if (memblock.reserved.regions[i].base < lowmem_end_addr) {
229 229 unsigned long adjusted_size = lowmem_end_addr -
230   - memblock.reserved.region[i].base;
231   - reserve_bootmem(memblock.reserved.region[i].base,
  230 + memblock.reserved.regions[i].base;
  231 + reserve_bootmem(memblock.reserved.regions[i].base,
232 232 adjusted_size, BOOTMEM_DEFAULT);
233 233 }
234 234 }
... ... @@ -237,7 +237,7 @@
237 237  
238 238 /* reserve the sections we're already using */
239 239 for (i = 0; i < memblock.reserved.cnt; i++)
240   - reserve_bootmem(memblock.reserved.region[i].base,
  240 + reserve_bootmem(memblock.reserved.regions[i].base,
241 241 memblock_size_bytes(&memblock.reserved, i),
242 242 BOOTMEM_DEFAULT);
243 243  
244 244  
... ... @@ -257,10 +257,10 @@
257 257  
258 258 for (i = 0; i < memblock.memory.cnt - 1; i++) {
259 259 memblock_region_max_pfn =
260   - (memblock.memory.region[i].base >> PAGE_SHIFT) +
261   - (memblock.memory.region[i].size >> PAGE_SHIFT);
  260 + (memblock.memory.regions[i].base >> PAGE_SHIFT) +
  261 + (memblock.memory.regions[i].size >> PAGE_SHIFT);
262 262 memblock_next_region_start_pfn =
263   - memblock.memory.region[i+1].base >> PAGE_SHIFT;
  263 + memblock.memory.regions[i+1].base >> PAGE_SHIFT;
264 264  
265 265 if (memblock_region_max_pfn < memblock_next_region_start_pfn)
266 266 register_nosave_region(memblock_region_max_pfn,
arch/powerpc/platforms/embedded6xx/wii.c
... ... @@ -65,7 +65,7 @@
65 65  
66 66 void __init wii_memory_fixups(void)
67 67 {
68   - struct memblock_property *p = memblock.memory.region;
  68 + struct memblock_region *p = memblock.memory.regions;
69 69  
70 70 /*
71 71 * This is part of a workaround to allow the use of two
arch/sparc/mm/init_64.c
... ... @@ -978,7 +978,7 @@
978 978 unsigned long size = memblock_size_bytes(&memblock.memory, i);
979 979 unsigned long start, end;
980 980  
981   - start = memblock.memory.region[i].base;
  981 + start = memblock.memory.regions[i].base;
982 982 end = start + size;
983 983 while (start < end) {
984 984 unsigned long this_end;
... ... @@ -1299,7 +1299,7 @@
1299 1299 if (!size)
1300 1300 continue;
1301 1301  
1302   - start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
  1302 + start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT;
1303 1303 end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
1304 1304 add_active_range(0, start_pfn, end_pfn);
1305 1305 }
... ... @@ -1339,7 +1339,7 @@
1339 1339 numadbg(" trim_reserved_in_node(%d)\n", nid);
1340 1340  
1341 1341 for (i = 0; i < memblock.reserved.cnt; i++) {
1342   - unsigned long start = memblock.reserved.region[i].base;
  1342 + unsigned long start = memblock.reserved.regions[i].base;
1343 1343 unsigned long size = memblock_size_bytes(&memblock.reserved, i);
1344 1344 unsigned long end = start + size;
1345 1345  
drivers/video/omap2/vram.c
... ... @@ -554,7 +554,7 @@
554 554 size = PAGE_ALIGN(size);
555 555  
556 556 if (paddr) {
557   - struct memblock_property res;
  557 + struct memblock_region res;
558 558  
559 559 res.base = paddr;
560 560 res.size = size;
include/linux/memblock.h
... ... @@ -18,22 +18,22 @@
18 18  
19 19 #define MAX_MEMBLOCK_REGIONS 128
20 20  
21   -struct memblock_property {
  21 +struct memblock_region {
22 22 u64 base;
23 23 u64 size;
24 24 };
25 25  
26   -struct memblock_region {
  26 +struct memblock_type {
27 27 unsigned long cnt;
28 28 u64 size;
29   - struct memblock_property region[MAX_MEMBLOCK_REGIONS+1];
  29 + struct memblock_region regions[MAX_MEMBLOCK_REGIONS+1];
30 30 };
31 31  
32 32 struct memblock {
33 33 unsigned long debug;
34 34 u64 rmo_size;
35   - struct memblock_region memory;
36   - struct memblock_region reserved;
  35 + struct memblock_type memory;
  36 + struct memblock_type reserved;
37 37 };
38 38  
39 39 extern struct memblock memblock;
40 40  
41 41  
42 42  
43 43  
44 44  
45 45  
... ... @@ -56,27 +56,27 @@
56 56 extern void __init memblock_enforce_memory_limit(u64 memory_limit);
57 57 extern int __init memblock_is_reserved(u64 addr);
58 58 extern int memblock_is_region_reserved(u64 base, u64 size);
59   -extern int memblock_find(struct memblock_property *res);
  59 +extern int memblock_find(struct memblock_region *res);
60 60  
61 61 extern void memblock_dump_all(void);
62 62  
63 63 static inline u64
64   -memblock_size_bytes(struct memblock_region *type, unsigned long region_nr)
  64 +memblock_size_bytes(struct memblock_type *type, unsigned long region_nr)
65 65 {
66   - return type->region[region_nr].size;
  66 + return type->regions[region_nr].size;
67 67 }
68 68 static inline u64
69   -memblock_size_pages(struct memblock_region *type, unsigned long region_nr)
  69 +memblock_size_pages(struct memblock_type *type, unsigned long region_nr)
70 70 {
71 71 return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT;
72 72 }
73 73 static inline u64
74   -memblock_start_pfn(struct memblock_region *type, unsigned long region_nr)
  74 +memblock_start_pfn(struct memblock_type *type, unsigned long region_nr)
75 75 {
76   - return type->region[region_nr].base >> PAGE_SHIFT;
  76 + return type->regions[region_nr].base >> PAGE_SHIFT;
77 77 }
78 78 static inline u64
79   -memblock_end_pfn(struct memblock_region *type, unsigned long region_nr)
  79 +memblock_end_pfn(struct memblock_type *type, unsigned long region_nr)
80 80 {
81 81 return memblock_start_pfn(type, region_nr) +
82 82 memblock_size_pages(type, region_nr);
lib/memblock.c
... ... @@ -29,7 +29,7 @@
29 29 }
30 30 early_param("memblock", early_memblock);
31 31  
32   -static void memblock_dump(struct memblock_region *region, char *name)
  32 +static void memblock_dump(struct memblock_type *region, char *name)
33 33 {
34 34 unsigned long long base, size;
35 35 int i;
... ... @@ -37,8 +37,8 @@
37 37 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
38 38  
39 39 for (i = 0; i < region->cnt; i++) {
40   - base = region->region[i].base;
41   - size = region->region[i].size;
  40 + base = region->regions[i].base;
  41 + size = region->regions[i].size;
42 42  
43 43 pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
44 44 name, i, base, base + size - 1, size);
45 45  
46 46  
47 47  
48 48  
49 49  
50 50  
... ... @@ -74,34 +74,34 @@
74 74 return 0;
75 75 }
76 76  
77   -static long memblock_regions_adjacent(struct memblock_region *rgn,
  77 +static long memblock_regions_adjacent(struct memblock_type *type,
78 78 unsigned long r1, unsigned long r2)
79 79 {
80   - u64 base1 = rgn->region[r1].base;
81   - u64 size1 = rgn->region[r1].size;
82   - u64 base2 = rgn->region[r2].base;
83   - u64 size2 = rgn->region[r2].size;
  80 + u64 base1 = type->regions[r1].base;
  81 + u64 size1 = type->regions[r1].size;
  82 + u64 base2 = type->regions[r2].base;
  83 + u64 size2 = type->regions[r2].size;
84 84  
85 85 return memblock_addrs_adjacent(base1, size1, base2, size2);
86 86 }
87 87  
88   -static void memblock_remove_region(struct memblock_region *rgn, unsigned long r)
  88 +static void memblock_remove_region(struct memblock_type *type, unsigned long r)
89 89 {
90 90 unsigned long i;
91 91  
92   - for (i = r; i < rgn->cnt - 1; i++) {
93   - rgn->region[i].base = rgn->region[i + 1].base;
94   - rgn->region[i].size = rgn->region[i + 1].size;
  92 + for (i = r; i < type->cnt - 1; i++) {
  93 + type->regions[i].base = type->regions[i + 1].base;
  94 + type->regions[i].size = type->regions[i + 1].size;
95 95 }
96   - rgn->cnt--;
  96 + type->cnt--;
97 97 }
98 98  
99 99 /* Assumption: base addr of region 1 < base addr of region 2 */
100   -static void memblock_coalesce_regions(struct memblock_region *rgn,
  100 +static void memblock_coalesce_regions(struct memblock_type *type,
101 101 unsigned long r1, unsigned long r2)
102 102 {
103   - rgn->region[r1].size += rgn->region[r2].size;
104   - memblock_remove_region(rgn, r2);
  103 + type->regions[r1].size += type->regions[r2].size;
  104 + memblock_remove_region(type, r2);
105 105 }
106 106  
107 107 void __init memblock_init(void)
108 108  
... ... @@ -109,13 +109,13 @@
109 109 /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
110 110 * This simplifies the memblock_add() code below...
111 111 */
112   - memblock.memory.region[0].base = 0;
113   - memblock.memory.region[0].size = 0;
  112 + memblock.memory.regions[0].base = 0;
  113 + memblock.memory.regions[0].size = 0;
114 114 memblock.memory.cnt = 1;
115 115  
116 116 /* Ditto. */
117   - memblock.reserved.region[0].base = 0;
118   - memblock.reserved.region[0].size = 0;
  117 + memblock.reserved.regions[0].base = 0;
  118 + memblock.reserved.regions[0].size = 0;
119 119 memblock.reserved.cnt = 1;
120 120 }
121 121  
122 122  
123 123  
124 124  
... ... @@ -126,24 +126,24 @@
126 126 memblock.memory.size = 0;
127 127  
128 128 for (i = 0; i < memblock.memory.cnt; i++)
129   - memblock.memory.size += memblock.memory.region[i].size;
  129 + memblock.memory.size += memblock.memory.regions[i].size;
130 130 }
131 131  
132   -static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size)
  132 +static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
133 133 {
134 134 unsigned long coalesced = 0;
135 135 long adjacent, i;
136 136  
137   - if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
138   - rgn->region[0].base = base;
139   - rgn->region[0].size = size;
  137 + if ((type->cnt == 1) && (type->regions[0].size == 0)) {
  138 + type->regions[0].base = base;
  139 + type->regions[0].size = size;
140 140 return 0;
141 141 }
142 142  
143 143 /* First try and coalesce this MEMBLOCK with another. */
144   - for (i = 0; i < rgn->cnt; i++) {
145   - u64 rgnbase = rgn->region[i].base;
146   - u64 rgnsize = rgn->region[i].size;
  144 + for (i = 0; i < type->cnt; i++) {
  145 + u64 rgnbase = type->regions[i].base;
  146 + u64 rgnsize = type->regions[i].size;
147 147  
148 148 if ((rgnbase == base) && (rgnsize == size))
149 149 /* Already have this region, so we're done */
150 150  
151 151  
152 152  
153 153  
154 154  
155 155  
156 156  
157 157  
158 158  
159 159  
... ... @@ -151,61 +151,59 @@
151 151  
152 152 adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
153 153 if (adjacent > 0) {
154   - rgn->region[i].base -= size;
155   - rgn->region[i].size += size;
  154 + type->regions[i].base -= size;
  155 + type->regions[i].size += size;
156 156 coalesced++;
157 157 break;
158 158 } else if (adjacent < 0) {
159   - rgn->region[i].size += size;
  159 + type->regions[i].size += size;
160 160 coalesced++;
161 161 break;
162 162 }
163 163 }
164 164  
165   - if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) {
166   - memblock_coalesce_regions(rgn, i, i+1);
  165 + if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1)) {
  166 + memblock_coalesce_regions(type, i, i+1);
167 167 coalesced++;
168 168 }
169 169  
170 170 if (coalesced)
171 171 return coalesced;
172   - if (rgn->cnt >= MAX_MEMBLOCK_REGIONS)
  172 + if (type->cnt >= MAX_MEMBLOCK_REGIONS)
173 173 return -1;
174 174  
175 175 /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
176   - for (i = rgn->cnt - 1; i >= 0; i--) {
177   - if (base < rgn->region[i].base) {
178   - rgn->region[i+1].base = rgn->region[i].base;
179   - rgn->region[i+1].size = rgn->region[i].size;
  176 + for (i = type->cnt - 1; i >= 0; i--) {
  177 + if (base < type->regions[i].base) {
  178 + type->regions[i+1].base = type->regions[i].base;
  179 + type->regions[i+1].size = type->regions[i].size;
180 180 } else {
181   - rgn->region[i+1].base = base;
182   - rgn->region[i+1].size = size;
  181 + type->regions[i+1].base = base;
  182 + type->regions[i+1].size = size;
183 183 break;
184 184 }
185 185 }
186 186  
187   - if (base < rgn->region[0].base) {
188   - rgn->region[0].base = base;
189   - rgn->region[0].size = size;
  187 + if (base < type->regions[0].base) {
  188 + type->regions[0].base = base;
  189 + type->regions[0].size = size;
190 190 }
191   - rgn->cnt++;
  191 + type->cnt++;
192 192  
193 193 return 0;
194 194 }
195 195  
196 196 long memblock_add(u64 base, u64 size)
197 197 {
198   - struct memblock_region *_rgn = &memblock.memory;
199   -
200 198 /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
201 199 if (base == 0)
202 200 memblock.rmo_size = size;
203 201  
204   - return memblock_add_region(_rgn, base, size);
  202 + return memblock_add_region(&memblock.memory, base, size);
205 203  
206 204 }
207 205  
208   -static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
  206 +static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
209 207 {
210 208 u64 rgnbegin, rgnend;
211 209 u64 end = base + size;
212 210  
213 211  
214 212  
215 213  
... ... @@ -214,34 +212,34 @@
214 212 rgnbegin = rgnend = 0; /* supress gcc warnings */
215 213  
216 214 /* Find the region where (base, size) belongs to */
217   - for (i=0; i < rgn->cnt; i++) {
218   - rgnbegin = rgn->region[i].base;
219   - rgnend = rgnbegin + rgn->region[i].size;
  215 + for (i=0; i < type->cnt; i++) {
  216 + rgnbegin = type->regions[i].base;
  217 + rgnend = rgnbegin + type->regions[i].size;
220 218  
221 219 if ((rgnbegin <= base) && (end <= rgnend))
222 220 break;
223 221 }
224 222  
225 223 /* Didn't find the region */
226   - if (i == rgn->cnt)
  224 + if (i == type->cnt)
227 225 return -1;
228 226  
229 227 /* Check to see if we are removing entire region */
230 228 if ((rgnbegin == base) && (rgnend == end)) {
231   - memblock_remove_region(rgn, i);
  229 + memblock_remove_region(type, i);
232 230 return 0;
233 231 }
234 232  
235 233 /* Check to see if region is matching at the front */
236 234 if (rgnbegin == base) {
237   - rgn->region[i].base = end;
238   - rgn->region[i].size -= size;
  235 + type->regions[i].base = end;
  236 + type->regions[i].size -= size;
239 237 return 0;
240 238 }
241 239  
242 240 /* Check to see if the region is matching at the end */
243 241 if (rgnend == end) {
244   - rgn->region[i].size -= size;
  242 + type->regions[i].size -= size;
245 243 return 0;
246 244 }
247 245  
... ... @@ -249,8 +247,8 @@
249 247 * We need to split the entry - adjust the current one to the
250 248 * beginging of the hole and add the region after hole.
251 249 */
252   - rgn->region[i].size = base - rgn->region[i].base;
253   - return memblock_add_region(rgn, end, rgnend - end);
  250 + type->regions[i].size = base - type->regions[i].base;
  251 + return memblock_add_region(type, end, rgnend - end);
254 252 }
255 253  
256 254 long memblock_remove(u64 base, u64 size)
257 255  
258 256  
259 257  
... ... @@ -265,25 +263,25 @@
265 263  
266 264 long __init memblock_reserve(u64 base, u64 size)
267 265 {
268   - struct memblock_region *_rgn = &memblock.reserved;
  266 + struct memblock_type *_rgn = &memblock.reserved;
269 267  
270 268 BUG_ON(0 == size);
271 269  
272 270 return memblock_add_region(_rgn, base, size);
273 271 }
274 272  
275   -long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size)
  273 +long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
276 274 {
277 275 unsigned long i;
278 276  
279   - for (i = 0; i < rgn->cnt; i++) {
280   - u64 rgnbase = rgn->region[i].base;
281   - u64 rgnsize = rgn->region[i].size;
  277 + for (i = 0; i < type->cnt; i++) {
  278 + u64 rgnbase = type->regions[i].base;
  279 + u64 rgnsize = type->regions[i].size;
282 280 if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
283 281 break;
284 282 }
285 283  
286   - return (i < rgn->cnt) ? i : -1;
  284 + return (i < type->cnt) ? i : -1;
287 285 }
288 286  
289 287 static u64 memblock_align_down(u64 addr, u64 size)
... ... @@ -311,7 +309,7 @@
311 309 base = ~(u64)0;
312 310 return base;
313 311 }
314   - res_base = memblock.reserved.region[j].base;
  312 + res_base = memblock.reserved.regions[j].base;
315 313 if (res_base < size)
316 314 break;
317 315 base = memblock_align_down(res_base - size, align);
... ... @@ -320,7 +318,7 @@
320 318 return ~(u64)0;
321 319 }
322 320  
323   -static u64 __init memblock_alloc_nid_region(struct memblock_property *mp,
  321 +static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
324 322 u64 (*nid_range)(u64, u64, int *),
325 323 u64 size, u64 align, int nid)
326 324 {
... ... @@ -350,7 +348,7 @@
350 348 u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
351 349 u64 (*nid_range)(u64 start, u64 end, int *nid))
352 350 {
353   - struct memblock_region *mem = &memblock.memory;
  351 + struct memblock_type *mem = &memblock.memory;
354 352 int i;
355 353  
356 354 BUG_ON(0 == size);
... ... @@ -358,7 +356,7 @@
358 356 size = memblock_align_up(size, align);
359 357  
360 358 for (i = 0; i < mem->cnt; i++) {
361   - u64 ret = memblock_alloc_nid_region(&mem->region[i],
  359 + u64 ret = memblock_alloc_nid_region(&mem->regions[i],
362 360 nid_range,
363 361 size, align, nid);
364 362 if (ret != ~(u64)0)
... ... @@ -402,8 +400,8 @@
402 400 max_addr = MEMBLOCK_REAL_LIMIT;
403 401  
404 402 for (i = memblock.memory.cnt - 1; i >= 0; i--) {
405   - u64 memblockbase = memblock.memory.region[i].base;
406   - u64 memblocksize = memblock.memory.region[i].size;
  403 + u64 memblockbase = memblock.memory.regions[i].base;
  404 + u64 memblocksize = memblock.memory.regions[i].size;
407 405  
408 406 if (memblocksize < size)
409 407 continue;
... ... @@ -423,7 +421,7 @@
423 421 return 0;
424 422 return base;
425 423 }
426   - res_base = memblock.reserved.region[j].base;
  424 + res_base = memblock.reserved.regions[j].base;
427 425 if (res_base < size)
428 426 break;
429 427 base = memblock_align_down(res_base - size, align);
... ... @@ -442,7 +440,7 @@
442 440 {
443 441 int idx = memblock.memory.cnt - 1;
444 442  
445   - return (memblock.memory.region[idx].base + memblock.memory.region[idx].size);
  443 + return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
446 444 }
447 445  
448 446 /* You must call memblock_analyze() after this. */
... ... @@ -450,7 +448,7 @@
450 448 {
451 449 unsigned long i;
452 450 u64 limit;
453   - struct memblock_property *p;
  451 + struct memblock_region *p;
454 452  
455 453 if (!memory_limit)
456 454 return;
457 455  
458 456  
459 457  
... ... @@ -458,24 +456,24 @@
458 456 /* Truncate the memblock regions to satisfy the memory limit. */
459 457 limit = memory_limit;
460 458 for (i = 0; i < memblock.memory.cnt; i++) {
461   - if (limit > memblock.memory.region[i].size) {
462   - limit -= memblock.memory.region[i].size;
  459 + if (limit > memblock.memory.regions[i].size) {
  460 + limit -= memblock.memory.regions[i].size;
463 461 continue;
464 462 }
465 463  
466   - memblock.memory.region[i].size = limit;
  464 + memblock.memory.regions[i].size = limit;
467 465 memblock.memory.cnt = i + 1;
468 466 break;
469 467 }
470 468  
471   - if (memblock.memory.region[0].size < memblock.rmo_size)
472   - memblock.rmo_size = memblock.memory.region[0].size;
  469 + if (memblock.memory.regions[0].size < memblock.rmo_size)
  470 + memblock.rmo_size = memblock.memory.regions[0].size;
473 471  
474 472 memory_limit = memblock_end_of_DRAM();
475 473  
476 474 /* And truncate any reserves above the limit also. */
477 475 for (i = 0; i < memblock.reserved.cnt; i++) {
478   - p = &memblock.reserved.region[i];
  476 + p = &memblock.reserved.regions[i];
479 477  
480 478 if (p->base > memory_limit)
481 479 p->size = 0;
... ... @@ -494,9 +492,9 @@
494 492 int i;
495 493  
496 494 for (i = 0; i < memblock.reserved.cnt; i++) {
497   - u64 upper = memblock.reserved.region[i].base +
498   - memblock.reserved.region[i].size - 1;
499   - if ((addr >= memblock.reserved.region[i].base) && (addr <= upper))
  495 + u64 upper = memblock.reserved.regions[i].base +
  496 + memblock.reserved.regions[i].size - 1;
  497 + if ((addr >= memblock.reserved.regions[i].base) && (addr <= upper))
500 498 return 1;
501 499 }
502 500 return 0;
... ... @@ -511,7 +509,7 @@
511 509 * Given a <base, len>, find which memory regions belong to this range.
512 510 * Adjust the request and return a contiguous chunk.
513 511 */
514   -int memblock_find(struct memblock_property *res)
  512 +int memblock_find(struct memblock_region *res)
515 513 {
516 514 int i;
517 515 u64 rstart, rend;
... ... @@ -520,8 +518,8 @@
520 518 rend = rstart + res->size - 1;
521 519  
522 520 for (i = 0; i < memblock.memory.cnt; i++) {
523   - u64 start = memblock.memory.region[i].base;
524   - u64 end = start + memblock.memory.region[i].size - 1;
  521 + u64 start = memblock.memory.regions[i].base;
  522 + u64 end = start + memblock.memory.regions[i].size - 1;
525 523  
526 524 if (start > rend)
527 525 return -1;