Commit 930b59529176bb05ea61ef9a40bf6384227f991e
Committed by: Stefano Babic
Parent: c1aae21d89
Exists in: smarc_8mq_lf_v2020.04 and 12 other branches
imx8: add mmu and dram related functions
Add the MMU memory map. Some memory regions are reserved by the M4 core and the Arm Trusted Firmware, so the owned memory regions have to be queried through the SCFW API and used to build the memory map.

Add the dram_init, dram_init_banksize and get_effective_memsize functions, based on those owned memory regions.

Signed-off-by: Peng Fan <peng.fan@nxp.com>
Reviewed-by: Anatolij Gustschin <agust@denx.de>
Cc: Stefano Babic <sbabic@denx.de>
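For background, U-Boot's armv8 MMU code consumes a global mem_map pointer to an array of struct mm_region entries (declared in asm/armv8/mmu.h) and maps each region in turn; a zero-size entry terminates the list. Below is a minimal static sketch of such a map. The 0x1c000000 device window and the attribute macros match this patch, but the DRAM base and size (0x80000000, 1 GiB) are illustrative assumptions; the patch itself builds the DRAM entries dynamically from the SCFW-owned regions instead.

#include <asm/armv8/mmu.h>

/*
 * Illustrative only: a fixed memory map with one device window and one
 * DRAM bank. The patch below replaces the static DRAM entry with regions
 * queried from the SCFW at runtime.
 */
static struct mm_region example_mem_map[] = {
	{
		/* Peripheral registers: device memory, not executable */
		.virt = 0x1c000000UL,
		.phys = 0x1c000000UL,
		.size = 0x64000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* DRAM: normal, cacheable memory (base/size assumed) */
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,
		.size = 0x40000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_OUTER_SHARE
	}, {
		/* List terminator: size == 0 */
		0,
	}
};

struct mm_region *mem_map = example_mem_map;

Terminating with a zero-size entry is what lets enable_caches() in this patch hand a variable number of discovered regions to the generic MMU code without a separate count.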
Showing 1 changed file with 284 additions and 0 deletions
arch/arm/mach-imx/imx8/cpu.c
@@ -14,6 +14,7 @@
 #include <asm/arch/sys_proto.h>
 #include <asm/arch-imx/cpu.h>
 #include <asm/armv8/cpu.h>
+#include <asm/armv8/mmu.h>
 #include <asm/mach-imx/boot_mode.h>
 
 DECLARE_GLOBAL_DATA_PTR;
@@ -206,6 +207,289 @@
 	}
 
 	return board_mmc_get_env_dev(devno);
+}
+#endif
+
+#define MEMSTART_ALIGNMENT	SZ_2M /* Align the memory start to 2MB */
+
+static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
+			    sc_faddr_t *addr_end)
+{
+	sc_faddr_t start, end;
+	int ret;
+	bool owned;
+
+	owned = sc_rm_is_memreg_owned(-1, mr);
+	if (owned) {
+		ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
+		if (ret) {
+			printf("Memreg get info failed, %d\n", ret);
+			return -EINVAL;
+		}
+		debug("0x%llx -- 0x%llx\n", start, end);
+		*addr_start = start;
+		*addr_end = end;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+phys_size_t get_effective_memsize(void)
+{
+	sc_rm_mr_t mr;
+	sc_faddr_t start, end, end1;
+	int err;
+
+	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
+
+	for (mr = 0; mr < 64; mr++) {
+		err = get_owned_memreg(mr, &start, &end);
+		if (!err) {
+			start = roundup(start, MEMSTART_ALIGNMENT);
+			/* Too small memory region, do not use it */
+			if (start > end)
+				continue;
+
+			/* Find the memory region that U-Boot runs in */
+			if (start >= PHYS_SDRAM_1 && start <= end1 &&
+			    (start <= CONFIG_SYS_TEXT_BASE &&
+			    end >= CONFIG_SYS_TEXT_BASE)) {
+				if ((end + 1) <= ((sc_faddr_t)PHYS_SDRAM_1 +
+				    PHYS_SDRAM_1_SIZE))
+					return (end - PHYS_SDRAM_1 + 1);
+				else
+					return PHYS_SDRAM_1_SIZE;
+			}
+		}
+	}
+
+	return PHYS_SDRAM_1_SIZE;
+}
+
+int dram_init(void)
+{
+	sc_rm_mr_t mr;
+	sc_faddr_t start, end, end1, end2;
+	int err;
+
+	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
+	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
+	for (mr = 0; mr < 64; mr++) {
+		err = get_owned_memreg(mr, &start, &end);
+		if (!err) {
+			start = roundup(start, MEMSTART_ALIGNMENT);
+			/* Too small memory region, do not use it */
+			if (start > end)
+				continue;
+
+			if (start >= PHYS_SDRAM_1 && start <= end1) {
+				if ((end + 1) <= end1)
+					gd->ram_size += end - start + 1;
+				else
+					gd->ram_size += end1 - start;
+			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
+				if ((end + 1) <= end2)
+					gd->ram_size += end - start + 1;
+				else
+					gd->ram_size += end2 - start;
+			}
+		}
+	}
+
+	/* If nothing is found, fall back to the default values */
+	if (!gd->ram_size) {
+		gd->ram_size = PHYS_SDRAM_1_SIZE;
+		gd->ram_size += PHYS_SDRAM_2_SIZE;
+	}
+	return 0;
+}
+
+/* Bubble the new bank down so bi_dram[] stays sorted by start address */
+static void dram_bank_sort(int current_bank)
+{
+	phys_addr_t start;
+	phys_size_t size;
+
+	while (current_bank > 0) {
+		if (gd->bd->bi_dram[current_bank - 1].start >
+		    gd->bd->bi_dram[current_bank].start) {
+			start = gd->bd->bi_dram[current_bank - 1].start;
+			size = gd->bd->bi_dram[current_bank - 1].size;
+
+			gd->bd->bi_dram[current_bank - 1].start =
+				gd->bd->bi_dram[current_bank].start;
+			gd->bd->bi_dram[current_bank - 1].size =
+				gd->bd->bi_dram[current_bank].size;
+
+			gd->bd->bi_dram[current_bank].start = start;
+			gd->bd->bi_dram[current_bank].size = size;
+		}
+		current_bank--;
+	}
+}
+
+int dram_init_banksize(void)
+{
+	sc_rm_mr_t mr;
+	sc_faddr_t start, end, end1, end2;
+	int i = 0;
+	int err;
+
+	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
+	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
+
+	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
+		err = get_owned_memreg(mr, &start, &end);
+		if (!err) {
+			start = roundup(start, MEMSTART_ALIGNMENT);
+			/* Too small memory region, do not use it */
+			if (start > end)
+				continue;
+
+			if (start >= PHYS_SDRAM_1 && start <= end1) {
+				gd->bd->bi_dram[i].start = start;
+
+				if ((end + 1) <= end1)
+					gd->bd->bi_dram[i].size =
+						end - start + 1;
+				else
+					gd->bd->bi_dram[i].size = end1 - start;
+
+				dram_bank_sort(i);
+				i++;
+			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
+				gd->bd->bi_dram[i].start = start;
+
+				if ((end + 1) <= end2)
+					gd->bd->bi_dram[i].size =
+						end - start + 1;
+				else
+					gd->bd->bi_dram[i].size = end2 - start;
+
+				dram_bank_sort(i);
+				i++;
+			}
+		}
+	}
+
+	/* If nothing is found, fall back to the default values */
+	if (!i) {
+		gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
+		gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
+		gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
+		gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
+	}
+
+	return 0;
+}
+
+static u64 get_block_attrs(sc_faddr_t addr_start)
+{
+	u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
+		PTE_BLOCK_PXN | PTE_BLOCK_UXN;
+
+	if ((addr_start >= PHYS_SDRAM_1 &&
+	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
+	    (addr_start >= PHYS_SDRAM_2 &&
+	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
+		return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);
+
+	return attr;
+}
+
+static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
+{
+	sc_faddr_t end1, end2;
+
+	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
+	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
+
+	if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
+		if ((addr_end + 1) > end1)
+			return end1 - addr_start;
+	} else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
+		if ((addr_end + 1) > end2)
+			return end2 - addr_start;
+	}
+
+	return (addr_end - addr_start + 1);
+}
+
+#define MAX_PTE_ENTRIES 512
+#define MAX_MEM_MAP_REGIONS 16
+
+static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
+struct mm_region *mem_map = imx8_mem_map;
+
+void enable_caches(void)
+{
+	sc_rm_mr_t mr;
+	sc_faddr_t start, end;
+	int err, i;
+
+	/* Create a map for register access from 0x1c000000 to 0x80000000 */
+	imx8_mem_map[0].virt = 0x1c000000UL;
+	imx8_mem_map[0].phys = 0x1c000000UL;
+	imx8_mem_map[0].size = 0x64000000UL;
+	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+		PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;
+
+	i = 1;
+	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
+		err = get_owned_memreg(mr, &start, &end);
+		if (!err) {
+			imx8_mem_map[i].virt = start;
+			imx8_mem_map[i].phys = start;
+			imx8_mem_map[i].size = get_block_size(start, end);
+			imx8_mem_map[i].attrs = get_block_attrs(start);
+			i++;
+		}
+	}
+
+	if (i < MAX_MEM_MAP_REGIONS) {
+		imx8_mem_map[i].size = 0;
+		imx8_mem_map[i].attrs = 0;
+	} else {
+		puts("Error, need more MEM MAP REGIONS reserved\n");
+		icache_enable();
+		return;
+	}
+
+	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
+		debug("[%d] virt = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
+		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
+		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
+	}
+
+	icache_enable();
+	dcache_enable();
+}
+
+#ifndef CONFIG_SYS_DCACHE_OFF
+u64 get_page_table_size(void)
+{
+	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
+	u64 size = 0;
+
+	/*
+	 * For each memory region, the max table size:
+	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
+	 */
+	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;
+
+	/*
+	 * We need to duplicate our page table once to have an emergency pt to
+	 * resort to when splitting page tables later on
+	 */
+	size *= 2;
+
+	/*
+	 * We may need to split page tables later on if dcache settings change,
+	 * so reserve up to 4 (random pick) page tables for that.
+	 */
+	size += one_pt * 4;
+
+	return size;
 }
 #endif
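As a sanity check on the reservation in get_page_table_size() (simple arithmetic from the constants in this patch): one page table is MAX_PTE_ENTRIES * sizeof(u64) = 512 * 8 bytes = 4 KiB, so the first term works out to (2 + 2 + 1) * 4 KiB * 16 regions + 4 KiB = 324 KiB; doubling for the emergency copy gives 648 KiB, and the four spare tables bring the total to 664 KiB.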