Commit 588039c3367dffaf19775468528d57e904c965fb

Authored by Michel Dänzer
Committed by Greg Kroah-Hartman
1 parent c818bed668

drm/radeon: Split off gart_get_page_entry ASIC hook from set_page_entry

commit cb65890610dca287718a63bd8a5d9ce3dc80c3d7 upstream.

get_page_entry calculates the GART page table entry, which is just written
to the GART page table by set_page_entry.

This is a prerequisite for the following fix.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 9 changed files with 100 additions and 37 deletions (side-by-side diff)

drivers/gpu/drm/radeon/r100.c
... ... @@ -644,6 +644,7 @@
644 644 return r;
645 645 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
646 646 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
  647 + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
647 648 rdev->asic->gart.set_page = &r100_pci_gart_set_page;
648 649 return radeon_gart_table_ram_alloc(rdev);
649 650 }
650 651  
651 652  
... ... @@ -681,11 +682,16 @@
681 682 WREG32(RADEON_AIC_HI_ADDR, 0);
682 683 }
683 684  
  685 +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
  686 +{
  687 + return addr;
  688 +}
  689 +
684 690 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
685   - uint64_t addr, uint32_t flags)
  691 + uint64_t entry)
686 692 {
687 693 u32 *gtt = rdev->gart.ptr;
688   - gtt[i] = cpu_to_le32(lower_32_bits(addr));
  694 + gtt[i] = cpu_to_le32(lower_32_bits(entry));
689 695 }
690 696  
691 697 void r100_pci_gart_fini(struct radeon_device *rdev)
drivers/gpu/drm/radeon/r300.c
... ... @@ -73,11 +73,8 @@
73 73 #define R300_PTE_WRITEABLE (1 << 2)
74 74 #define R300_PTE_READABLE (1 << 3)
75 75  
76   -void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
77   - uint64_t addr, uint32_t flags)
  76 +uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
78 77 {
79   - void __iomem *ptr = rdev->gart.ptr;
80   -
81 78 addr = (lower_32_bits(addr) >> 8) |
82 79 ((upper_32_bits(addr) & 0xff) << 24);
83 80 if (flags & RADEON_GART_PAGE_READ)
84 81  
... ... @@ -86,10 +83,18 @@
86 83 addr |= R300_PTE_WRITEABLE;
87 84 if (!(flags & RADEON_GART_PAGE_SNOOP))
88 85 addr |= R300_PTE_UNSNOOPED;
  86 + return addr;
  87 +}
  88 +
  89 +void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
  90 + uint64_t entry)
  91 +{
  92 + void __iomem *ptr = rdev->gart.ptr;
  93 +
89 94 /* on x86 we want this to be CPU endian, on powerpc
90 95 * on powerpc without HW swappers, it'll get swapped on way
91 96 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
92   - writel(addr, ((void __iomem *)ptr) + (i * 4));
  97 + writel(entry, ((void __iomem *)ptr) + (i * 4));
93 98 }
94 99  
95 100 int rv370_pcie_gart_init(struct radeon_device *rdev)
... ... @@ -109,6 +114,7 @@
109 114 DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
110 115 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
111 116 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
  117 + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
112 118 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
113 119 return radeon_gart_table_vram_alloc(rdev);
114 120 }
drivers/gpu/drm/radeon/radeon.h
... ... @@ -245,6 +245,7 @@
245 245 * Dummy page
246 246 */
247 247 struct radeon_dummy_page {
  248 + uint64_t entry;
248 249 struct page *page;
249 250 dma_addr_t addr;
250 251 };
... ... @@ -626,6 +627,7 @@
626 627 unsigned table_size;
627 628 struct page **pages;
628 629 dma_addr_t *pages_addr;
  630 + uint64_t *pages_entry;
629 631 bool ready;
630 632 };
631 633  
632 634  
... ... @@ -1819,8 +1821,9 @@
1819 1821 /* gart */
1820 1822 struct {
1821 1823 void (*tlb_flush)(struct radeon_device *rdev);
  1824 + uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
1822 1825 void (*set_page)(struct radeon_device *rdev, unsigned i,
1823   - uint64_t addr, uint32_t flags);
  1826 + uint64_t entry);
1824 1827 } gart;
1825 1828 struct {
1826 1829 int (*init)(struct radeon_device *rdev);
... ... @@ -2818,7 +2821,8 @@
2818 2821 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
2819 2822 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
2820 2823 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
2821   -#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
  2824 +#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
  2825 +#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
2822 2826 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
2823 2827 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
2824 2828 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
drivers/gpu/drm/radeon/radeon_asic.c
... ... @@ -159,11 +159,13 @@
159 159 DRM_INFO("Forcing AGP to PCIE mode\n");
160 160 rdev->flags |= RADEON_IS_PCIE;
161 161 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
  162 + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
162 163 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
163 164 } else {
164 165 DRM_INFO("Forcing AGP to PCI mode\n");
165 166 rdev->flags |= RADEON_IS_PCI;
166 167 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
  168 + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
167 169 rdev->asic->gart.set_page = &r100_pci_gart_set_page;
168 170 }
169 171 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
... ... @@ -199,6 +201,7 @@
199 201 .mc_wait_for_idle = &r100_mc_wait_for_idle,
200 202 .gart = {
201 203 .tlb_flush = &r100_pci_gart_tlb_flush,
  204 + .get_page_entry = &r100_pci_gart_get_page_entry,
202 205 .set_page = &r100_pci_gart_set_page,
203 206 },
204 207 .ring = {
... ... @@ -265,6 +268,7 @@
265 268 .mc_wait_for_idle = &r100_mc_wait_for_idle,
266 269 .gart = {
267 270 .tlb_flush = &r100_pci_gart_tlb_flush,
  271 + .get_page_entry = &r100_pci_gart_get_page_entry,
268 272 .set_page = &r100_pci_gart_set_page,
269 273 },
270 274 .ring = {
... ... @@ -359,6 +363,7 @@
359 363 .mc_wait_for_idle = &r300_mc_wait_for_idle,
360 364 .gart = {
361 365 .tlb_flush = &r100_pci_gart_tlb_flush,
  366 + .get_page_entry = &r100_pci_gart_get_page_entry,
362 367 .set_page = &r100_pci_gart_set_page,
363 368 },
364 369 .ring = {
... ... @@ -425,6 +430,7 @@
425 430 .mc_wait_for_idle = &r300_mc_wait_for_idle,
426 431 .gart = {
427 432 .tlb_flush = &rv370_pcie_gart_tlb_flush,
  433 + .get_page_entry = &rv370_pcie_gart_get_page_entry,
428 434 .set_page = &rv370_pcie_gart_set_page,
429 435 },
430 436 .ring = {
... ... @@ -491,6 +497,7 @@
491 497 .mc_wait_for_idle = &r300_mc_wait_for_idle,
492 498 .gart = {
493 499 .tlb_flush = &rv370_pcie_gart_tlb_flush,
  500 + .get_page_entry = &rv370_pcie_gart_get_page_entry,
494 501 .set_page = &rv370_pcie_gart_set_page,
495 502 },
496 503 .ring = {
... ... @@ -557,6 +564,7 @@
557 564 .mc_wait_for_idle = &rs400_mc_wait_for_idle,
558 565 .gart = {
559 566 .tlb_flush = &rs400_gart_tlb_flush,
  567 + .get_page_entry = &rs400_gart_get_page_entry,
560 568 .set_page = &rs400_gart_set_page,
561 569 },
562 570 .ring = {
... ... @@ -623,6 +631,7 @@
623 631 .mc_wait_for_idle = &rs600_mc_wait_for_idle,
624 632 .gart = {
625 633 .tlb_flush = &rs600_gart_tlb_flush,
  634 + .get_page_entry = &rs600_gart_get_page_entry,
626 635 .set_page = &rs600_gart_set_page,
627 636 },
628 637 .ring = {
... ... @@ -691,6 +700,7 @@
691 700 .mc_wait_for_idle = &rs690_mc_wait_for_idle,
692 701 .gart = {
693 702 .tlb_flush = &rs400_gart_tlb_flush,
  703 + .get_page_entry = &rs400_gart_get_page_entry,
694 704 .set_page = &rs400_gart_set_page,
695 705 },
696 706 .ring = {
... ... @@ -759,6 +769,7 @@
759 769 .mc_wait_for_idle = &rv515_mc_wait_for_idle,
760 770 .gart = {
761 771 .tlb_flush = &rv370_pcie_gart_tlb_flush,
  772 + .get_page_entry = &rv370_pcie_gart_get_page_entry,
762 773 .set_page = &rv370_pcie_gart_set_page,
763 774 },
764 775 .ring = {
... ... @@ -825,6 +836,7 @@
825 836 .mc_wait_for_idle = &r520_mc_wait_for_idle,
826 837 .gart = {
827 838 .tlb_flush = &rv370_pcie_gart_tlb_flush,
  839 + .get_page_entry = &rv370_pcie_gart_get_page_entry,
828 840 .set_page = &rv370_pcie_gart_set_page,
829 841 },
830 842 .ring = {
... ... @@ -919,6 +931,7 @@
919 931 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
920 932 .gart = {
921 933 .tlb_flush = &r600_pcie_gart_tlb_flush,
  934 + .get_page_entry = &rs600_gart_get_page_entry,
922 935 .set_page = &rs600_gart_set_page,
923 936 },
924 937 .ring = {
... ... @@ -1004,6 +1017,7 @@
1004 1017 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1005 1018 .gart = {
1006 1019 .tlb_flush = &r600_pcie_gart_tlb_flush,
  1020 + .get_page_entry = &rs600_gart_get_page_entry,
1007 1021 .set_page = &rs600_gart_set_page,
1008 1022 },
1009 1023 .ring = {
... ... @@ -1095,6 +1109,7 @@
1095 1109 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1096 1110 .gart = {
1097 1111 .tlb_flush = &r600_pcie_gart_tlb_flush,
  1112 + .get_page_entry = &rs600_gart_get_page_entry,
1098 1113 .set_page = &rs600_gart_set_page,
1099 1114 },
1100 1115 .ring = {
... ... @@ -1199,6 +1214,7 @@
1199 1214 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1200 1215 .gart = {
1201 1216 .tlb_flush = &r600_pcie_gart_tlb_flush,
  1217 + .get_page_entry = &rs600_gart_get_page_entry,
1202 1218 .set_page = &rs600_gart_set_page,
1203 1219 },
1204 1220 .ring = {
... ... @@ -1317,6 +1333,7 @@
1317 1333 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1318 1334 .gart = {
1319 1335 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
  1336 + .get_page_entry = &rs600_gart_get_page_entry,
1320 1337 .set_page = &rs600_gart_set_page,
1321 1338 },
1322 1339 .ring = {
... ... @@ -1409,6 +1426,7 @@
1409 1426 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1410 1427 .gart = {
1411 1428 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
  1429 + .get_page_entry = &rs600_gart_get_page_entry,
1412 1430 .set_page = &rs600_gart_set_page,
1413 1431 },
1414 1432 .ring = {
... ... @@ -1500,6 +1518,7 @@
1500 1518 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1501 1519 .gart = {
1502 1520 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
  1521 + .get_page_entry = &rs600_gart_get_page_entry,
1503 1522 .set_page = &rs600_gart_set_page,
1504 1523 },
1505 1524 .ring = {
... ... @@ -1635,6 +1654,7 @@
1635 1654 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1636 1655 .gart = {
1637 1656 .tlb_flush = &cayman_pcie_gart_tlb_flush,
  1657 + .get_page_entry = &rs600_gart_get_page_entry,
1638 1658 .set_page = &rs600_gart_set_page,
1639 1659 },
1640 1660 .vm = {
... ... @@ -1738,6 +1758,7 @@
1738 1758 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1739 1759 .gart = {
1740 1760 .tlb_flush = &cayman_pcie_gart_tlb_flush,
  1761 + .get_page_entry = &rs600_gart_get_page_entry,
1741 1762 .set_page = &rs600_gart_set_page,
1742 1763 },
1743 1764 .vm = {
... ... @@ -1871,6 +1892,7 @@
1871 1892 .get_gpu_clock_counter = &si_get_gpu_clock_counter,
1872 1893 .gart = {
1873 1894 .tlb_flush = &si_pcie_gart_tlb_flush,
  1895 + .get_page_entry = &rs600_gart_get_page_entry,
1874 1896 .set_page = &rs600_gart_set_page,
1875 1897 },
1876 1898 .vm = {
... ... @@ -2032,6 +2054,7 @@
2032 2054 .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
2033 2055 .gart = {
2034 2056 .tlb_flush = &cik_pcie_gart_tlb_flush,
  2057 + .get_page_entry = &rs600_gart_get_page_entry,
2035 2058 .set_page = &rs600_gart_set_page,
2036 2059 },
2037 2060 .vm = {
... ... @@ -2139,6 +2162,7 @@
2139 2162 .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
2140 2163 .gart = {
2141 2164 .tlb_flush = &cik_pcie_gart_tlb_flush,
  2165 + .get_page_entry = &rs600_gart_get_page_entry,
2142 2166 .set_page = &rs600_gart_set_page,
2143 2167 },
2144 2168 .vm = {
drivers/gpu/drm/radeon/radeon_asic.h
... ... @@ -67,8 +67,9 @@
67 67 int r100_asic_reset(struct radeon_device *rdev);
68 68 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
69 69 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
  70 +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
70 71 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
71   - uint64_t addr, uint32_t flags);
  72 + uint64_t entry);
72 73 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
73 74 int r100_irq_set(struct radeon_device *rdev);
74 75 int r100_irq_process(struct radeon_device *rdev);
75 76  
... ... @@ -172,8 +173,9 @@
172 173 struct radeon_fence *fence);
173 174 extern int r300_cs_parse(struct radeon_cs_parser *p);
174 175 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
  176 +extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
175 177 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
176   - uint64_t addr, uint32_t flags);
  178 + uint64_t entry);
177 179 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
178 180 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
179 181 extern void r300_set_reg_safe(struct radeon_device *rdev);
180 182  
... ... @@ -208,8 +210,9 @@
208 210 extern int rs400_suspend(struct radeon_device *rdev);
209 211 extern int rs400_resume(struct radeon_device *rdev);
210 212 void rs400_gart_tlb_flush(struct radeon_device *rdev);
  213 +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
211 214 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
212   - uint64_t addr, uint32_t flags);
  215 + uint64_t entry);
213 216 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
214 217 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
215 218 int rs400_gart_init(struct radeon_device *rdev);
216 219  
... ... @@ -232,8 +235,9 @@
232 235 void rs600_irq_disable(struct radeon_device *rdev);
233 236 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
234 237 void rs600_gart_tlb_flush(struct radeon_device *rdev);
  238 +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
235 239 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
236   - uint64_t addr, uint32_t flags);
  240 + uint64_t entry);
237 241 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
238 242 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
239 243 void rs600_bandwidth_update(struct radeon_device *rdev);
drivers/gpu/drm/radeon/radeon_device.c
... ... @@ -743,6 +743,8 @@
743 743 rdev->dummy_page.page = NULL;
744 744 return -ENOMEM;
745 745 }
  746 + rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
  747 + RADEON_GART_PAGE_DUMMY);
746 748 return 0;
747 749 }
748 750  
drivers/gpu/drm/radeon/radeon_gart.c
... ... @@ -228,7 +228,6 @@
228 228 unsigned t;
229 229 unsigned p;
230 230 int i, j;
231   - u64 page_base;
232 231  
233 232 if (!rdev->gart.ready) {
234 233 WARN(1, "trying to unbind memory from uninitialized GART !\n");
235 234  
236 235  
237 236  
... ... @@ -240,13 +239,12 @@
240 239 if (rdev->gart.pages[p]) {
241 240 rdev->gart.pages[p] = NULL;
242 241 rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
243   - page_base = rdev->gart.pages_addr[p];
244 242 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
  243 + rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
245 244 if (rdev->gart.ptr) {
246   - radeon_gart_set_page(rdev, t, page_base,
247   - RADEON_GART_PAGE_DUMMY);
  245 + radeon_gart_set_page(rdev, t,
  246 + rdev->dummy_page.entry);
248 247 }
249   - page_base += RADEON_GPU_PAGE_SIZE;
250 248 }
251 249 }
252 250 }
... ... @@ -274,7 +272,7 @@
274 272 {
275 273 unsigned t;
276 274 unsigned p;
277   - uint64_t page_base;
  275 + uint64_t page_base, page_entry;
278 276 int i, j;
279 277  
280 278 if (!rdev->gart.ready) {
281 279  
... ... @@ -287,12 +285,14 @@
287 285 for (i = 0; i < pages; i++, p++) {
288 286 rdev->gart.pages_addr[p] = dma_addr[i];
289 287 rdev->gart.pages[p] = pagelist[i];
290   - if (rdev->gart.ptr) {
291   - page_base = rdev->gart.pages_addr[p];
292   - for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
293   - radeon_gart_set_page(rdev, t, page_base, flags);
294   - page_base += RADEON_GPU_PAGE_SIZE;
  288 + page_base = dma_addr[i];
  289 + for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
  290 + page_entry = radeon_gart_get_page_entry(page_base, flags);
  291 + rdev->gart.pages_entry[t] = page_entry;
  292 + if (rdev->gart.ptr) {
  293 + radeon_gart_set_page(rdev, t, page_entry);
295 294 }
  295 + page_base += RADEON_GPU_PAGE_SIZE;
296 296 }
297 297 }
298 298 mb();
299 299  
300 300  
... ... @@ -340,10 +340,17 @@
340 340 radeon_gart_fini(rdev);
341 341 return -ENOMEM;
342 342 }
  343 + rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
  344 + rdev->gart.num_gpu_pages);
  345 + if (rdev->gart.pages_entry == NULL) {
  346 + radeon_gart_fini(rdev);
  347 + return -ENOMEM;
  348 + }
343 349 /* set GART entry to point to the dummy page by default */
344   - for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
  350 + for (i = 0; i < rdev->gart.num_cpu_pages; i++)
345 351 rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
346   - }
  352 + for (i = 0; i < rdev->gart.num_gpu_pages; i++)
  353 + rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
347 354 return 0;
348 355 }
349 356  
350 357  
351 358  
... ... @@ -356,15 +363,17 @@
356 363 */
357 364 void radeon_gart_fini(struct radeon_device *rdev)
358 365 {
359   - if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
  366 + if (rdev->gart.ready) {
360 367 /* unbind pages */
361 368 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
362 369 }
363 370 rdev->gart.ready = false;
364 371 vfree(rdev->gart.pages);
365 372 vfree(rdev->gart.pages_addr);
  373 + vfree(rdev->gart.pages_entry);
366 374 rdev->gart.pages = NULL;
367 375 rdev->gart.pages_addr = NULL;
  376 + rdev->gart.pages_entry = NULL;
368 377  
369 378 radeon_dummy_page_fini(rdev);
370 379 }
drivers/gpu/drm/radeon/rs400.c
... ... @@ -212,11 +212,9 @@
212 212 #define RS400_PTE_WRITEABLE (1 << 2)
213 213 #define RS400_PTE_READABLE (1 << 3)
214 214  
215   -void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
216   - uint64_t addr, uint32_t flags)
  215 +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
217 216 {
218 217 uint32_t entry;
219   - u32 *gtt = rdev->gart.ptr;
220 218  
221 219 entry = (lower_32_bits(addr) & PAGE_MASK) |
222 220 ((upper_32_bits(addr) & 0xff) << 4);
... ... @@ -226,8 +224,14 @@
226 224 entry |= RS400_PTE_WRITEABLE;
227 225 if (!(flags & RADEON_GART_PAGE_SNOOP))
228 226 entry |= RS400_PTE_UNSNOOPED;
229   - entry = cpu_to_le32(entry);
230   - gtt[i] = entry;
  227 + return entry;
  228 +}
  229 +
  230 +void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
  231 + uint64_t entry)
  232 +{
  233 + u32 *gtt = rdev->gart.ptr;
  234 + gtt[i] = cpu_to_le32(lower_32_bits(entry));
231 235 }
232 236  
233 237 int rs400_mc_wait_for_idle(struct radeon_device *rdev)
drivers/gpu/drm/radeon/rs600.c
... ... @@ -625,11 +625,8 @@
625 625 radeon_gart_table_vram_free(rdev);
626 626 }
627 627  
628   -void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
629   - uint64_t addr, uint32_t flags)
  628 +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
630 629 {
631   - void __iomem *ptr = (void *)rdev->gart.ptr;
632   -
633 630 addr = addr & 0xFFFFFFFFFFFFF000ULL;
634 631 addr |= R600_PTE_SYSTEM;
635 632 if (flags & RADEON_GART_PAGE_VALID)
... ... @@ -640,7 +637,14 @@
640 637 addr |= R600_PTE_WRITEABLE;
641 638 if (flags & RADEON_GART_PAGE_SNOOP)
642 639 addr |= R600_PTE_SNOOPED;
643   - writeq(addr, ptr + (i * 8));
  640 + return addr;
  641 +}
  642 +
  643 +void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
  644 + uint64_t entry)
  645 +{
  646 + void __iomem *ptr = (void *)rdev->gart.ptr;
  647 + writeq(entry, ptr + (i * 8));
644 648 }
645 649  
646 650 int rs600_irq_set(struct radeon_device *rdev)