Commit 7bdda6209f224aa784a036df54b22cb338d2e859

Authored by Paul Mundt
1 parent 49f3bfe933

sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.

Both the store queue API and the PMB remapping take unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem since the cache attribute
bits that we care about are all in the lower 32-bits, but we do it just
to be safe. The store queue remapping on the other hand depends on the
extended prot bits for enabling userspace access to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Showing 6 changed files with 18 additions and 13 deletions (inline diff view)

arch/sh/include/asm/mmu.h
1 #ifndef __MMU_H 1 #ifndef __MMU_H
2 #define __MMU_H 2 #define __MMU_H
3 3
4 /* 4 /*
5 * Privileged Space Mapping Buffer (PMB) definitions 5 * Privileged Space Mapping Buffer (PMB) definitions
6 */ 6 */
7 #define PMB_PASCR 0xff000070 7 #define PMB_PASCR 0xff000070
8 #define PMB_IRMCR 0xff000078 8 #define PMB_IRMCR 0xff000078
9 9
10 #define PASCR_SE 0x80000000 10 #define PASCR_SE 0x80000000
11 11
12 #define PMB_ADDR 0xf6100000 12 #define PMB_ADDR 0xf6100000
13 #define PMB_DATA 0xf7100000 13 #define PMB_DATA 0xf7100000
14 #define PMB_ENTRY_MAX 16 14 #define PMB_ENTRY_MAX 16
15 #define PMB_E_MASK 0x0000000f 15 #define PMB_E_MASK 0x0000000f
16 #define PMB_E_SHIFT 8 16 #define PMB_E_SHIFT 8
17 17
18 #define PMB_PFN_MASK 0xff000000 18 #define PMB_PFN_MASK 0xff000000
19 19
20 #define PMB_SZ_16M 0x00000000 20 #define PMB_SZ_16M 0x00000000
21 #define PMB_SZ_64M 0x00000010 21 #define PMB_SZ_64M 0x00000010
22 #define PMB_SZ_128M 0x00000080 22 #define PMB_SZ_128M 0x00000080
23 #define PMB_SZ_512M 0x00000090 23 #define PMB_SZ_512M 0x00000090
24 #define PMB_SZ_MASK PMB_SZ_512M 24 #define PMB_SZ_MASK PMB_SZ_512M
25 #define PMB_C 0x00000008 25 #define PMB_C 0x00000008
26 #define PMB_WT 0x00000001 26 #define PMB_WT 0x00000001
27 #define PMB_UB 0x00000200 27 #define PMB_UB 0x00000200
28 #define PMB_CACHE_MASK (PMB_C | PMB_WT | PMB_UB) 28 #define PMB_CACHE_MASK (PMB_C | PMB_WT | PMB_UB)
29 #define PMB_V 0x00000100 29 #define PMB_V 0x00000100
30 30
31 #define PMB_NO_ENTRY (-1) 31 #define PMB_NO_ENTRY (-1)
32 32
33 #ifndef __ASSEMBLY__ 33 #ifndef __ASSEMBLY__
34 #include <linux/errno.h> 34 #include <linux/errno.h>
35 #include <linux/threads.h> 35 #include <linux/threads.h>
36 #include <asm/page.h>
36 37
37 /* Default "unsigned long" context */ 38 /* Default "unsigned long" context */
38 typedef unsigned long mm_context_id_t[NR_CPUS]; 39 typedef unsigned long mm_context_id_t[NR_CPUS];
39 40
40 typedef struct { 41 typedef struct {
41 #ifdef CONFIG_MMU 42 #ifdef CONFIG_MMU
42 mm_context_id_t id; 43 mm_context_id_t id;
43 void *vdso; 44 void *vdso;
44 #else 45 #else
45 unsigned long end_brk; 46 unsigned long end_brk;
46 #endif 47 #endif
47 #ifdef CONFIG_BINFMT_ELF_FDPIC 48 #ifdef CONFIG_BINFMT_ELF_FDPIC
48 unsigned long exec_fdpic_loadmap; 49 unsigned long exec_fdpic_loadmap;
49 unsigned long interp_fdpic_loadmap; 50 unsigned long interp_fdpic_loadmap;
50 #endif 51 #endif
51 } mm_context_t; 52 } mm_context_t;
52 53
53 struct pmb_entry; 54 struct pmb_entry;
54 55
55 struct pmb_entry { 56 struct pmb_entry {
56 unsigned long vpn; 57 unsigned long vpn;
57 unsigned long ppn; 58 unsigned long ppn;
58 unsigned long flags; 59 unsigned long flags;
59 60
60 /* 61 /*
61 * 0 .. NR_PMB_ENTRIES for specific entry selection, or 62 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
62 * PMB_NO_ENTRY to search for a free one 63 * PMB_NO_ENTRY to search for a free one
63 */ 64 */
64 int entry; 65 int entry;
65 66
66 struct pmb_entry *next; 67 struct pmb_entry *next;
67 /* Adjacent entry link for contiguous multi-entry mappings */ 68 /* Adjacent entry link for contiguous multi-entry mappings */
68 struct pmb_entry *link; 69 struct pmb_entry *link;
69 }; 70 };
70 71
71 #ifdef CONFIG_PMB 72 #ifdef CONFIG_PMB
72 /* arch/sh/mm/pmb.c */ 73 /* arch/sh/mm/pmb.c */
73 long pmb_remap(unsigned long virt, unsigned long phys, 74 long pmb_remap(unsigned long virt, unsigned long phys,
74 unsigned long size, unsigned long flags); 75 unsigned long size, pgprot_t prot);
75 void pmb_unmap(unsigned long addr); 76 void pmb_unmap(unsigned long addr);
76 int pmb_init(void); 77 int pmb_init(void);
77 bool __in_29bit_mode(void); 78 bool __in_29bit_mode(void);
78 #else 79 #else
79 static inline long pmb_remap(unsigned long virt, unsigned long phys, 80 static inline long pmb_remap(unsigned long virt, unsigned long phys,
80 unsigned long size, unsigned long flags) 81 unsigned long size, pgprot_t prot)
81 { 82 {
82 return -EINVAL; 83 return -EINVAL;
83 } 84 }
84 85
85 static inline void pmb_unmap(unsigned long addr) 86 static inline void pmb_unmap(unsigned long addr)
86 { 87 {
87 } 88 }
88 89
89 static inline int pmb_init(void) 90 static inline int pmb_init(void)
90 { 91 {
91 return -ENODEV; 92 return -ENODEV;
92 } 93 }
93 94
94 #ifdef CONFIG_29BIT 95 #ifdef CONFIG_29BIT
95 #define __in_29bit_mode() (1) 96 #define __in_29bit_mode() (1)
96 #else 97 #else
97 #define __in_29bit_mode() (0) 98 #define __in_29bit_mode() (0)
98 #endif 99 #endif
99 100
100 #endif /* CONFIG_PMB */ 101 #endif /* CONFIG_PMB */
101 #endif /* __ASSEMBLY__ */ 102 #endif /* __ASSEMBLY__ */
102 103
103 #endif /* __MMU_H */ 104 #endif /* __MMU_H */
104 105
arch/sh/include/cpu-sh4/cpu/sq.h
1 /* 1 /*
2 * include/asm-sh/cpu-sh4/sq.h 2 * include/asm-sh/cpu-sh4/sq.h
3 * 3 *
4 * Copyright (C) 2001, 2002, 2003 Paul Mundt 4 * Copyright (C) 2001, 2002, 2003 Paul Mundt
5 * Copyright (C) 2001, 2002 M. R. Brown 5 * Copyright (C) 2001, 2002 M. R. Brown
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General Public 7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive 8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details. 9 * for more details.
10 */ 10 */
11 #ifndef __ASM_CPU_SH4_SQ_H 11 #ifndef __ASM_CPU_SH4_SQ_H
12 #define __ASM_CPU_SH4_SQ_H 12 #define __ASM_CPU_SH4_SQ_H
13 13
14 #include <asm/addrspace.h> 14 #include <asm/addrspace.h>
15 #include <asm/page.h>
15 16
16 /* 17 /*
17 * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be 18 * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
18 * mapped to any physical address space. Since data is written (and aligned) 19 * mapped to any physical address space. Since data is written (and aligned)
19 * to 32-byte boundaries, we need to be sure that all allocations are aligned. 20 * to 32-byte boundaries, we need to be sure that all allocations are aligned.
20 */ 21 */
21 #define SQ_SIZE 32 22 #define SQ_SIZE 32
22 #define SQ_ALIGN_MASK (~(SQ_SIZE - 1)) 23 #define SQ_ALIGN_MASK (~(SQ_SIZE - 1))
23 #define SQ_ALIGN(addr) (((addr)+SQ_SIZE-1) & SQ_ALIGN_MASK) 24 #define SQ_ALIGN(addr) (((addr)+SQ_SIZE-1) & SQ_ALIGN_MASK)
24 25
25 #define SQ_QACR0 (P4SEG_REG_BASE + 0x38) 26 #define SQ_QACR0 (P4SEG_REG_BASE + 0x38)
26 #define SQ_QACR1 (P4SEG_REG_BASE + 0x3c) 27 #define SQ_QACR1 (P4SEG_REG_BASE + 0x3c)
27 #define SQ_ADDRMAX (P4SEG_STORE_QUE + 0x04000000) 28 #define SQ_ADDRMAX (P4SEG_STORE_QUE + 0x04000000)
28 29
29 /* arch/sh/kernel/cpu/sh4/sq.c */ 30 /* arch/sh/kernel/cpu/sh4/sq.c */
30 unsigned long sq_remap(unsigned long phys, unsigned int size, 31 unsigned long sq_remap(unsigned long phys, unsigned int size,
31 const char *name, unsigned long flags); 32 const char *name, pgprot_t prot);
32 void sq_unmap(unsigned long vaddr); 33 void sq_unmap(unsigned long vaddr);
33 void sq_flush_range(unsigned long start, unsigned int len); 34 void sq_flush_range(unsigned long start, unsigned int len);
34 35
35 #endif /* __ASM_CPU_SH4_SQ_H */ 36 #endif /* __ASM_CPU_SH4_SQ_H */
36 37
arch/sh/kernel/cpu/sh4/sq.c
1 /* 1 /*
2 * arch/sh/kernel/cpu/sh4/sq.c 2 * arch/sh/kernel/cpu/sh4/sq.c
3 * 3 *
4 * General management API for SH-4 integrated Store Queues 4 * General management API for SH-4 integrated Store Queues
5 * 5 *
6 * Copyright (C) 2001 - 2006 Paul Mundt 6 * Copyright (C) 2001 - 2006 Paul Mundt
7 * Copyright (C) 2001, 2002 M. R. Brown 7 * Copyright (C) 2001, 2002 M. R. Brown
8 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive 10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details. 11 * for more details.
12 */ 12 */
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/cpu.h> 14 #include <linux/cpu.h>
15 #include <linux/bitmap.h> 15 #include <linux/bitmap.h>
16 #include <linux/sysdev.h> 16 #include <linux/sysdev.h>
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/slab.h> 19 #include <linux/slab.h>
20 #include <linux/vmalloc.h> 20 #include <linux/vmalloc.h>
21 #include <linux/mm.h> 21 #include <linux/mm.h>
22 #include <linux/io.h> 22 #include <linux/io.h>
23 #include <asm/page.h> 23 #include <asm/page.h>
24 #include <asm/cacheflush.h> 24 #include <asm/cacheflush.h>
25 #include <cpu/sq.h> 25 #include <cpu/sq.h>
26 26
27 struct sq_mapping; 27 struct sq_mapping;
28 28
29 struct sq_mapping { 29 struct sq_mapping {
30 const char *name; 30 const char *name;
31 31
32 unsigned long sq_addr; 32 unsigned long sq_addr;
33 unsigned long addr; 33 unsigned long addr;
34 unsigned int size; 34 unsigned int size;
35 35
36 struct sq_mapping *next; 36 struct sq_mapping *next;
37 }; 37 };
38 38
39 static struct sq_mapping *sq_mapping_list; 39 static struct sq_mapping *sq_mapping_list;
40 static DEFINE_SPINLOCK(sq_mapping_lock); 40 static DEFINE_SPINLOCK(sq_mapping_lock);
41 static struct kmem_cache *sq_cache; 41 static struct kmem_cache *sq_cache;
42 static unsigned long *sq_bitmap; 42 static unsigned long *sq_bitmap;
43 43
44 #define store_queue_barrier() \ 44 #define store_queue_barrier() \
45 do { \ 45 do { \
46 (void)__raw_readl(P4SEG_STORE_QUE); \ 46 (void)__raw_readl(P4SEG_STORE_QUE); \
47 __raw_writel(0, P4SEG_STORE_QUE + 0); \ 47 __raw_writel(0, P4SEG_STORE_QUE + 0); \
48 __raw_writel(0, P4SEG_STORE_QUE + 8); \ 48 __raw_writel(0, P4SEG_STORE_QUE + 8); \
49 } while (0); 49 } while (0);
50 50
51 /** 51 /**
52 * sq_flush_range - Flush (prefetch) a specific SQ range 52 * sq_flush_range - Flush (prefetch) a specific SQ range
53 * @start: the store queue address to start flushing from 53 * @start: the store queue address to start flushing from
54 * @len: the length to flush 54 * @len: the length to flush
55 * 55 *
56 * Flushes the store queue cache from @start to @start + @len in a 56 * Flushes the store queue cache from @start to @start + @len in a
57 * linear fashion. 57 * linear fashion.
58 */ 58 */
59 void sq_flush_range(unsigned long start, unsigned int len) 59 void sq_flush_range(unsigned long start, unsigned int len)
60 { 60 {
61 unsigned long *sq = (unsigned long *)start; 61 unsigned long *sq = (unsigned long *)start;
62 62
63 /* Flush the queues */ 63 /* Flush the queues */
64 for (len >>= 5; len--; sq += 8) 64 for (len >>= 5; len--; sq += 8)
65 prefetchw(sq); 65 prefetchw(sq);
66 66
67 /* Wait for completion */ 67 /* Wait for completion */
68 store_queue_barrier(); 68 store_queue_barrier();
69 } 69 }
70 EXPORT_SYMBOL(sq_flush_range); 70 EXPORT_SYMBOL(sq_flush_range);
71 71
72 static inline void sq_mapping_list_add(struct sq_mapping *map) 72 static inline void sq_mapping_list_add(struct sq_mapping *map)
73 { 73 {
74 struct sq_mapping **p, *tmp; 74 struct sq_mapping **p, *tmp;
75 75
76 spin_lock_irq(&sq_mapping_lock); 76 spin_lock_irq(&sq_mapping_lock);
77 77
78 p = &sq_mapping_list; 78 p = &sq_mapping_list;
79 while ((tmp = *p) != NULL) 79 while ((tmp = *p) != NULL)
80 p = &tmp->next; 80 p = &tmp->next;
81 81
82 map->next = tmp; 82 map->next = tmp;
83 *p = map; 83 *p = map;
84 84
85 spin_unlock_irq(&sq_mapping_lock); 85 spin_unlock_irq(&sq_mapping_lock);
86 } 86 }
87 87
88 static inline void sq_mapping_list_del(struct sq_mapping *map) 88 static inline void sq_mapping_list_del(struct sq_mapping *map)
89 { 89 {
90 struct sq_mapping **p, *tmp; 90 struct sq_mapping **p, *tmp;
91 91
92 spin_lock_irq(&sq_mapping_lock); 92 spin_lock_irq(&sq_mapping_lock);
93 93
94 for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next) 94 for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
95 if (tmp == map) { 95 if (tmp == map) {
96 *p = tmp->next; 96 *p = tmp->next;
97 break; 97 break;
98 } 98 }
99 99
100 spin_unlock_irq(&sq_mapping_lock); 100 spin_unlock_irq(&sq_mapping_lock);
101 } 101 }
102 102
103 static int __sq_remap(struct sq_mapping *map, unsigned long flags) 103 static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
104 { 104 {
105 #if defined(CONFIG_MMU) 105 #if defined(CONFIG_MMU)
106 struct vm_struct *vma; 106 struct vm_struct *vma;
107 107
108 vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX); 108 vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
109 if (!vma) 109 if (!vma)
110 return -ENOMEM; 110 return -ENOMEM;
111 111
112 vma->phys_addr = map->addr; 112 vma->phys_addr = map->addr;
113 113
114 if (ioremap_page_range((unsigned long)vma->addr, 114 if (ioremap_page_range((unsigned long)vma->addr,
115 (unsigned long)vma->addr + map->size, 115 (unsigned long)vma->addr + map->size,
116 vma->phys_addr, __pgprot(flags))) { 116 vma->phys_addr, prot)) {
117 vunmap(vma->addr); 117 vunmap(vma->addr);
118 return -EAGAIN; 118 return -EAGAIN;
119 } 119 }
120 #else 120 #else
121 /* 121 /*
122 * Without an MMU (or with it turned off), this is much more 122 * Without an MMU (or with it turned off), this is much more
123 * straightforward, as we can just load up each queue's QACR with 123 * straightforward, as we can just load up each queue's QACR with
124 * the physical address appropriately masked. 124 * the physical address appropriately masked.
125 */ 125 */
126 __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); 126 __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
127 __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); 127 __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
128 #endif 128 #endif
129 129
130 return 0; 130 return 0;
131 } 131 }
132 132
133 /** 133 /**
134 * sq_remap - Map a physical address through the Store Queues 134 * sq_remap - Map a physical address through the Store Queues
135 * @phys: Physical address of mapping. 135 * @phys: Physical address of mapping.
136 * @size: Length of mapping. 136 * @size: Length of mapping.
137 * @name: User invoking mapping. 137 * @name: User invoking mapping.
138 * @flags: Protection flags. 138 * @prot: Protection bits.
139 * 139 *
140 * Remaps the physical address @phys through the next available store queue 140 * Remaps the physical address @phys through the next available store queue
141 * address of @size length. @name is logged at boot time as well as through 141 * address of @size length. @name is logged at boot time as well as through
142 * the sysfs interface. 142 * the sysfs interface.
143 */ 143 */
144 unsigned long sq_remap(unsigned long phys, unsigned int size, 144 unsigned long sq_remap(unsigned long phys, unsigned int size,
145 const char *name, unsigned long flags) 145 const char *name, pgprot_t prot)
146 { 146 {
147 struct sq_mapping *map; 147 struct sq_mapping *map;
148 unsigned long end; 148 unsigned long end;
149 unsigned int psz; 149 unsigned int psz;
150 int ret, page; 150 int ret, page;
151 151
152 /* Don't allow wraparound or zero size */ 152 /* Don't allow wraparound or zero size */
153 end = phys + size - 1; 153 end = phys + size - 1;
154 if (unlikely(!size || end < phys)) 154 if (unlikely(!size || end < phys))
155 return -EINVAL; 155 return -EINVAL;
156 /* Don't allow anyone to remap normal memory.. */ 156 /* Don't allow anyone to remap normal memory.. */
157 if (unlikely(phys < virt_to_phys(high_memory))) 157 if (unlikely(phys < virt_to_phys(high_memory)))
158 return -EINVAL; 158 return -EINVAL;
159 159
160 phys &= PAGE_MASK; 160 phys &= PAGE_MASK;
161 size = PAGE_ALIGN(end + 1) - phys; 161 size = PAGE_ALIGN(end + 1) - phys;
162 162
163 map = kmem_cache_alloc(sq_cache, GFP_KERNEL); 163 map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
164 if (unlikely(!map)) 164 if (unlikely(!map))
165 return -ENOMEM; 165 return -ENOMEM;
166 166
167 map->addr = phys; 167 map->addr = phys;
168 map->size = size; 168 map->size = size;
169 map->name = name; 169 map->name = name;
170 170
171 page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT, 171 page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
172 get_order(map->size)); 172 get_order(map->size));
173 if (unlikely(page < 0)) { 173 if (unlikely(page < 0)) {
174 ret = -ENOSPC; 174 ret = -ENOSPC;
175 goto out; 175 goto out;
176 } 176 }
177 177
178 map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); 178 map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
179 179
180 ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags); 180 ret = __sq_remap(map, prot);
181 if (unlikely(ret != 0)) 181 if (unlikely(ret != 0))
182 goto out; 182 goto out;
183 183
184 psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 184 psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
185 pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n", 185 pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
186 likely(map->name) ? map->name : "???", 186 likely(map->name) ? map->name : "???",
187 psz, psz == 1 ? " " : "s", 187 psz, psz == 1 ? " " : "s",
188 map->sq_addr, map->addr); 188 map->sq_addr, map->addr);
189 189
190 sq_mapping_list_add(map); 190 sq_mapping_list_add(map);
191 191
192 return map->sq_addr; 192 return map->sq_addr;
193 193
194 out: 194 out:
195 kmem_cache_free(sq_cache, map); 195 kmem_cache_free(sq_cache, map);
196 return ret; 196 return ret;
197 } 197 }
198 EXPORT_SYMBOL(sq_remap); 198 EXPORT_SYMBOL(sq_remap);
199 199
200 /** 200 /**
201 * sq_unmap - Unmap a Store Queue allocation 201 * sq_unmap - Unmap a Store Queue allocation
202 * @vaddr: Pre-allocated Store Queue mapping. 202 * @vaddr: Pre-allocated Store Queue mapping.
203 * 203 *
204 * Unmaps the store queue allocation @map that was previously created by 204 * Unmaps the store queue allocation @map that was previously created by
205 * sq_remap(). Also frees up the pte that was previously inserted into 205 * sq_remap(). Also frees up the pte that was previously inserted into
206 * the kernel page table and discards the UTLB translation. 206 * the kernel page table and discards the UTLB translation.
207 */ 207 */
208 void sq_unmap(unsigned long vaddr) 208 void sq_unmap(unsigned long vaddr)
209 { 209 {
210 struct sq_mapping **p, *map; 210 struct sq_mapping **p, *map;
211 int page; 211 int page;
212 212
213 for (p = &sq_mapping_list; (map = *p); p = &map->next) 213 for (p = &sq_mapping_list; (map = *p); p = &map->next)
214 if (map->sq_addr == vaddr) 214 if (map->sq_addr == vaddr)
215 break; 215 break;
216 216
217 if (unlikely(!map)) { 217 if (unlikely(!map)) {
218 printk("%s: bad store queue address 0x%08lx\n", 218 printk("%s: bad store queue address 0x%08lx\n",
219 __func__, vaddr); 219 __func__, vaddr);
220 return; 220 return;
221 } 221 }
222 222
223 page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT; 223 page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
224 bitmap_release_region(sq_bitmap, page, get_order(map->size)); 224 bitmap_release_region(sq_bitmap, page, get_order(map->size));
225 225
226 #ifdef CONFIG_MMU 226 #ifdef CONFIG_MMU
227 { 227 {
228 /* 228 /*
229 * Tear down the VMA in the MMU case. 229 * Tear down the VMA in the MMU case.
230 */ 230 */
231 struct vm_struct *vma; 231 struct vm_struct *vma;
232 232
233 vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK)); 233 vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
234 if (!vma) { 234 if (!vma) {
235 printk(KERN_ERR "%s: bad address 0x%08lx\n", 235 printk(KERN_ERR "%s: bad address 0x%08lx\n",
236 __func__, map->sq_addr); 236 __func__, map->sq_addr);
237 return; 237 return;
238 } 238 }
239 } 239 }
240 #endif 240 #endif
241 241
242 sq_mapping_list_del(map); 242 sq_mapping_list_del(map);
243 243
244 kmem_cache_free(sq_cache, map); 244 kmem_cache_free(sq_cache, map);
245 } 245 }
246 EXPORT_SYMBOL(sq_unmap); 246 EXPORT_SYMBOL(sq_unmap);
247 247
248 /* 248 /*
249 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like 249 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
250 * there is any other easy way to add things on a per-cpu basis without 250 * there is any other easy way to add things on a per-cpu basis without
251 * putting the directory entries somewhere stupid and having to create 251 * putting the directory entries somewhere stupid and having to create
252 * links in sysfs by hand back in to the per-cpu directories. 252 * links in sysfs by hand back in to the per-cpu directories.
253 * 253 *
254 * Some day we may want to have an additional abstraction per store 254 * Some day we may want to have an additional abstraction per store
255 * queue, but considering the kobject hell we already have to deal with, 255 * queue, but considering the kobject hell we already have to deal with,
256 * it's simply not worth the trouble. 256 * it's simply not worth the trouble.
257 */ 257 */
258 static struct kobject *sq_kobject[NR_CPUS]; 258 static struct kobject *sq_kobject[NR_CPUS];
259 259
260 struct sq_sysfs_attr { 260 struct sq_sysfs_attr {
261 struct attribute attr; 261 struct attribute attr;
262 ssize_t (*show)(char *buf); 262 ssize_t (*show)(char *buf);
263 ssize_t (*store)(const char *buf, size_t count); 263 ssize_t (*store)(const char *buf, size_t count);
264 }; 264 };
265 265
266 #define to_sq_sysfs_attr(a) container_of(a, struct sq_sysfs_attr, attr) 266 #define to_sq_sysfs_attr(a) container_of(a, struct sq_sysfs_attr, attr)
267 267
268 static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr, 268 static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
269 char *buf) 269 char *buf)
270 { 270 {
271 struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr); 271 struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);
272 272
273 if (likely(sattr->show)) 273 if (likely(sattr->show))
274 return sattr->show(buf); 274 return sattr->show(buf);
275 275
276 return -EIO; 276 return -EIO;
277 } 277 }
278 278
279 static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr, 279 static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
280 const char *buf, size_t count) 280 const char *buf, size_t count)
281 { 281 {
282 struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr); 282 struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);
283 283
284 if (likely(sattr->store)) 284 if (likely(sattr->store))
285 return sattr->store(buf, count); 285 return sattr->store(buf, count);
286 286
287 return -EIO; 287 return -EIO;
288 } 288 }
289 289
290 static ssize_t mapping_show(char *buf) 290 static ssize_t mapping_show(char *buf)
291 { 291 {
292 struct sq_mapping **list, *entry; 292 struct sq_mapping **list, *entry;
293 char *p = buf; 293 char *p = buf;
294 294
295 for (list = &sq_mapping_list; (entry = *list); list = &entry->next) 295 for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
296 p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", 296 p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
297 entry->sq_addr, entry->sq_addr + entry->size, 297 entry->sq_addr, entry->sq_addr + entry->size,
298 entry->addr, entry->name); 298 entry->addr, entry->name);
299 299
300 return p - buf; 300 return p - buf;
301 } 301 }
302 302
303 static ssize_t mapping_store(const char *buf, size_t count) 303 static ssize_t mapping_store(const char *buf, size_t count)
304 { 304 {
305 unsigned long base = 0, len = 0; 305 unsigned long base = 0, len = 0;
306 306
307 sscanf(buf, "%lx %lx", &base, &len); 307 sscanf(buf, "%lx %lx", &base, &len);
308 if (!base) 308 if (!base)
309 return -EIO; 309 return -EIO;
310 310
311 if (likely(len)) { 311 if (likely(len)) {
312 int ret = sq_remap(base, len, "Userspace", 312 int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
313 pgprot_val(PAGE_SHARED));
314 if (ret < 0) 313 if (ret < 0)
315 return ret; 314 return ret;
316 } else 315 } else
317 sq_unmap(base); 316 sq_unmap(base);
318 317
319 return count; 318 return count;
320 } 319 }
321 320
322 static struct sq_sysfs_attr mapping_attr = 321 static struct sq_sysfs_attr mapping_attr =
323 __ATTR(mapping, 0644, mapping_show, mapping_store); 322 __ATTR(mapping, 0644, mapping_show, mapping_store);
324 323
325 static struct attribute *sq_sysfs_attrs[] = { 324 static struct attribute *sq_sysfs_attrs[] = {
326 &mapping_attr.attr, 325 &mapping_attr.attr,
327 NULL, 326 NULL,
328 }; 327 };
329 328
330 static struct sysfs_ops sq_sysfs_ops = { 329 static struct sysfs_ops sq_sysfs_ops = {
331 .show = sq_sysfs_show, 330 .show = sq_sysfs_show,
332 .store = sq_sysfs_store, 331 .store = sq_sysfs_store,
333 }; 332 };
334 333
335 static struct kobj_type ktype_percpu_entry = { 334 static struct kobj_type ktype_percpu_entry = {
336 .sysfs_ops = &sq_sysfs_ops, 335 .sysfs_ops = &sq_sysfs_ops,
337 .default_attrs = sq_sysfs_attrs, 336 .default_attrs = sq_sysfs_attrs,
338 }; 337 };
339 338
340 static int __devinit sq_sysdev_add(struct sys_device *sysdev) 339 static int __devinit sq_sysdev_add(struct sys_device *sysdev)
341 { 340 {
342 unsigned int cpu = sysdev->id; 341 unsigned int cpu = sysdev->id;
343 struct kobject *kobj; 342 struct kobject *kobj;
344 int error; 343 int error;
345 344
346 sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL); 345 sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
347 if (unlikely(!sq_kobject[cpu])) 346 if (unlikely(!sq_kobject[cpu]))
348 return -ENOMEM; 347 return -ENOMEM;
349 348
350 kobj = sq_kobject[cpu]; 349 kobj = sq_kobject[cpu];
351 error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj, 350 error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
352 "%s", "sq"); 351 "%s", "sq");
353 if (!error) 352 if (!error)
354 kobject_uevent(kobj, KOBJ_ADD); 353 kobject_uevent(kobj, KOBJ_ADD);
355 return error; 354 return error;
356 } 355 }
357 356
358 static int __devexit sq_sysdev_remove(struct sys_device *sysdev) 357 static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
359 { 358 {
360 unsigned int cpu = sysdev->id; 359 unsigned int cpu = sysdev->id;
361 struct kobject *kobj = sq_kobject[cpu]; 360 struct kobject *kobj = sq_kobject[cpu];
362 361
363 kobject_put(kobj); 362 kobject_put(kobj);
364 return 0; 363 return 0;
365 } 364 }
366 365
367 static struct sysdev_driver sq_sysdev_driver = { 366 static struct sysdev_driver sq_sysdev_driver = {
368 .add = sq_sysdev_add, 367 .add = sq_sysdev_add,
369 .remove = __devexit_p(sq_sysdev_remove), 368 .remove = __devexit_p(sq_sysdev_remove),
370 }; 369 };
371 370
372 static int __init sq_api_init(void) 371 static int __init sq_api_init(void)
373 { 372 {
374 unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT; 373 unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
375 unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG; 374 unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;
376 int ret = -ENOMEM; 375 int ret = -ENOMEM;
377 376
378 printk(KERN_NOTICE "sq: Registering store queue API.\n"); 377 printk(KERN_NOTICE "sq: Registering store queue API.\n");
379 378
380 sq_cache = kmem_cache_create("store_queue_cache", 379 sq_cache = kmem_cache_create("store_queue_cache",
381 sizeof(struct sq_mapping), 0, 0, NULL); 380 sizeof(struct sq_mapping), 0, 0, NULL);
382 if (unlikely(!sq_cache)) 381 if (unlikely(!sq_cache))
383 return ret; 382 return ret;
384 383
385 sq_bitmap = kzalloc(size, GFP_KERNEL); 384 sq_bitmap = kzalloc(size, GFP_KERNEL);
386 if (unlikely(!sq_bitmap)) 385 if (unlikely(!sq_bitmap))
387 goto out; 386 goto out;
388 387
389 ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver); 388 ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
390 if (unlikely(ret != 0)) 389 if (unlikely(ret != 0))
391 goto out; 390 goto out;
392 391
393 return 0; 392 return 0;
394 393
395 out: 394 out:
396 kfree(sq_bitmap); 395 kfree(sq_bitmap);
397 kmem_cache_destroy(sq_cache); 396 kmem_cache_destroy(sq_cache);
398 397
399 return ret; 398 return ret;
400 } 399 }
401 400
402 static void __exit sq_api_exit(void) 401 static void __exit sq_api_exit(void)
403 { 402 {
404 sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver); 403 sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
405 kfree(sq_bitmap); 404 kfree(sq_bitmap);
406 kmem_cache_destroy(sq_cache); 405 kmem_cache_destroy(sq_cache);
407 } 406 }
408 407
409 module_init(sq_api_init); 408 module_init(sq_api_init);
410 module_exit(sq_api_exit); 409 module_exit(sq_api_exit);
411 410
412 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>"); 411 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
413 MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues"); 412 MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
414 MODULE_LICENSE("GPL"); 413 MODULE_LICENSE("GPL");
415 414
arch/sh/mm/ioremap.c
1 /* 1 /*
2 * arch/sh/mm/ioremap.c 2 * arch/sh/mm/ioremap.c
3 * 3 *
4 * (C) Copyright 1995 1996 Linus Torvalds 4 * (C) Copyright 1995 1996 Linus Torvalds
5 * (C) Copyright 2005 - 2010 Paul Mundt 5 * (C) Copyright 2005 - 2010 Paul Mundt
6 * 6 *
7 * Re-map IO memory to kernel address space so that we can access it. 7 * Re-map IO memory to kernel address space so that we can access it.
8 * This is needed for high PCI addresses that aren't mapped in the 8 * This is needed for high PCI addresses that aren't mapped in the
9 * 640k-1MB IO memory area on PC's 9 * 640k-1MB IO memory area on PC's
10 * 10 *
11 * This file is subject to the terms and conditions of the GNU General 11 * This file is subject to the terms and conditions of the GNU General
12 * Public License. See the file "COPYING" in the main directory of this 12 * Public License. See the file "COPYING" in the main directory of this
13 * archive for more details. 13 * archive for more details.
14 */ 14 */
15 #include <linux/vmalloc.h> 15 #include <linux/vmalloc.h>
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/mm.h> 17 #include <linux/mm.h>
18 #include <linux/pci.h> 18 #include <linux/pci.h>
19 #include <linux/io.h> 19 #include <linux/io.h>
20 #include <asm/page.h> 20 #include <asm/page.h>
21 #include <asm/pgalloc.h> 21 #include <asm/pgalloc.h>
22 #include <asm/addrspace.h> 22 #include <asm/addrspace.h>
23 #include <asm/cacheflush.h> 23 #include <asm/cacheflush.h>
24 #include <asm/tlbflush.h> 24 #include <asm/tlbflush.h>
25 #include <asm/mmu.h> 25 #include <asm/mmu.h>
26 26
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __init_refok
__ioremap_caller(unsigned long phys_addr, unsigned long size,
		 pgprot_t pgprot, void *caller)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned; remember the sub-page offset
	 * so it can be re-applied to the returned cookie at the end.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * If we can't yet use the regular approach, go the fixmap route.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, offset, size, pgprot);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_PMB
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 *
	 * The full pgprot_t is handed down (rather than pgprot_val()
	 * truncated to unsigned long) so that 64-bit SH-X2 extended
	 * protection bits are preserved.
	 */
	if (unlikely(phys_addr >= P1SEG)) {
		unsigned long mapped;

		mapped = pmb_remap(addr, phys_addr, size, pgprot);
		if (likely(mapped)) {
			/* Advance past whatever the PMB covered. */
			addr += mapped;
			phys_addr += mapped;
			size -= mapped;
		}
	}
#endif

	/* Map any remainder (or everything, sans PMB) via page tables. */
	if (likely(size))
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	/* Restore the sub-page offset stripped off above. */
	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap_caller);
/*
 * Simple checks for non-translatable mappings.
 *
 * Returns 1 when @offset falls inside a region that the hardware maps
 * identically without TLB/page-table translation, 0 otherwise.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	return 0;
}
118 118
/*
 * Tear down a mapping established by __ioremap_caller(), handling in
 * turn the non-translatable, early-fixmap, PMB, and regular VMA cases.
 */
void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed(addr) == 0)
		return;

#ifdef CONFIG_PMB
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *	-- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
163 163
1 /* 1 /*
2 * arch/sh/mm/pmb.c 2 * arch/sh/mm/pmb.c
3 * 3 *
4 * Privileged Space Mapping Buffer (PMB) Support. 4 * Privileged Space Mapping Buffer (PMB) Support.
5 * 5 *
6 * Copyright (C) 2005 - 2010 Paul Mundt 6 * Copyright (C) 2005 - 2010 Paul Mundt
7 * Copyright (C) 2010 Matt Fleming 7 * Copyright (C) 2010 Matt Fleming
8 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive 10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details. 11 * for more details.
12 */ 12 */
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/kernel.h> 14 #include <linux/kernel.h>
15 #include <linux/sysdev.h> 15 #include <linux/sysdev.h>
16 #include <linux/cpu.h> 16 #include <linux/cpu.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/slab.h> 18 #include <linux/slab.h>
19 #include <linux/bitops.h> 19 #include <linux/bitops.h>
20 #include <linux/debugfs.h> 20 #include <linux/debugfs.h>
21 #include <linux/fs.h> 21 #include <linux/fs.h>
22 #include <linux/seq_file.h> 22 #include <linux/seq_file.h>
23 #include <linux/err.h> 23 #include <linux/err.h>
24 #include <asm/system.h> 24 #include <asm/system.h>
25 #include <asm/uaccess.h> 25 #include <asm/uaccess.h>
26 #include <asm/pgtable.h> 26 #include <asm/pgtable.h>
27 #include <asm/page.h>
27 #include <asm/mmu.h> 28 #include <asm/mmu.h>
28 #include <asm/io.h> 29 #include <asm/io.h>
29 #include <asm/mmu_context.h> 30 #include <asm/mmu_context.h>
30 31
/* Number of hardware PMB slots (matches PMB_ENTRY_MAX). */
#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

/* Software shadow of the hardware PMB slots. */
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
/* Allocation bitmap: bit i set => pmb_entry_list[i] is in use. */
static unsigned long pmb_map;
37 38
38 static inline unsigned long mk_pmb_entry(unsigned int entry) 39 static inline unsigned long mk_pmb_entry(unsigned int entry)
39 { 40 {
40 return (entry & PMB_E_MASK) << PMB_E_SHIFT; 41 return (entry & PMB_E_MASK) << PMB_E_SHIFT;
41 } 42 }
42 43
43 static inline unsigned long mk_pmb_addr(unsigned int entry) 44 static inline unsigned long mk_pmb_addr(unsigned int entry)
44 { 45 {
45 return mk_pmb_entry(entry) | PMB_ADDR; 46 return mk_pmb_entry(entry) | PMB_ADDR;
46 } 47 }
47 48
48 static inline unsigned long mk_pmb_data(unsigned int entry) 49 static inline unsigned long mk_pmb_data(unsigned int entry)
49 { 50 {
50 return mk_pmb_entry(entry) | PMB_DATA; 51 return mk_pmb_entry(entry) | PMB_DATA;
51 } 52 }
52 53
53 static int pmb_alloc_entry(void) 54 static int pmb_alloc_entry(void)
54 { 55 {
55 unsigned int pos; 56 unsigned int pos;
56 57
57 repeat: 58 repeat:
58 pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); 59 pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
59 60
60 if (unlikely(pos > NR_PMB_ENTRIES)) 61 if (unlikely(pos > NR_PMB_ENTRIES))
61 return -ENOSPC; 62 return -ENOSPC;
62 63
63 if (test_and_set_bit(pos, &pmb_map)) 64 if (test_and_set_bit(pos, &pmb_map))
64 goto repeat; 65 goto repeat;
65 66
66 return pos; 67 return pos;
67 } 68 }
68 69
69 static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, 70 static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
70 unsigned long flags, int entry) 71 unsigned long flags, int entry)
71 { 72 {
72 struct pmb_entry *pmbe; 73 struct pmb_entry *pmbe;
73 int pos; 74 int pos;
74 75
75 if (entry == PMB_NO_ENTRY) { 76 if (entry == PMB_NO_ENTRY) {
76 pos = pmb_alloc_entry(); 77 pos = pmb_alloc_entry();
77 if (pos < 0) 78 if (pos < 0)
78 return ERR_PTR(pos); 79 return ERR_PTR(pos);
79 } else { 80 } else {
80 if (test_and_set_bit(entry, &pmb_map)) 81 if (test_and_set_bit(entry, &pmb_map))
81 return ERR_PTR(-ENOSPC); 82 return ERR_PTR(-ENOSPC);
82 pos = entry; 83 pos = entry;
83 } 84 }
84 85
85 pmbe = &pmb_entry_list[pos]; 86 pmbe = &pmb_entry_list[pos];
86 if (!pmbe) 87 if (!pmbe)
87 return ERR_PTR(-ENOMEM); 88 return ERR_PTR(-ENOMEM);
88 89
89 pmbe->vpn = vpn; 90 pmbe->vpn = vpn;
90 pmbe->ppn = ppn; 91 pmbe->ppn = ppn;
91 pmbe->flags = flags; 92 pmbe->flags = flags;
92 pmbe->entry = pos; 93 pmbe->entry = pos;
93 94
94 return pmbe; 95 return pmbe;
95 } 96 }
96 97
97 static void pmb_free(struct pmb_entry *pmbe) 98 static void pmb_free(struct pmb_entry *pmbe)
98 { 99 {
99 int pos = pmbe->entry; 100 int pos = pmbe->entry;
100 101
101 pmbe->vpn = 0; 102 pmbe->vpn = 0;
102 pmbe->ppn = 0; 103 pmbe->ppn = 0;
103 pmbe->flags = 0; 104 pmbe->flags = 0;
104 pmbe->entry = 0; 105 pmbe->entry = 0;
105 106
106 clear_bit(pos, &pmb_map); 107 clear_bit(pos, &pmb_map);
107 } 108 }
108 109
/*
 * Program one translation into the hardware PMB.
 *
 * Must be in P2 for __set_pmb_entry(): callers run this from the
 * uncached mapping (see set_pmb_entry()) since it rewrites the
 * translations used to reach cached addresses.
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	/* Latch the virtual page number and mark the slot valid. */
	__raw_writel(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	/* Physical page number plus attribute flags, valid bit set. */
	__raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos));
}
129 130
/*
 * Program a software PMB entry into hardware, bracketed by the jump to
 * the uncached mapping that __set_pmb_entry() requires.
 */
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}
136 137
/*
 * Invalidate a PMB slot in hardware by clearing the valid bit in both
 * the address and data arrays. Runs from the uncached mapping.
 */
static void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/* Guard against a corrupted/out-of-range slot number. */
	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	back_to_cached();
}
156 157
157 158
/*
 * Supported PMB mapping sizes, largest first so that pmb_remap()
 * greedily covers a request with the biggest entries that still fit.
 */
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M, },
	{ .size = 0x01000000, .flag = PMB_SZ_16M, },
};
167 168
/*
 * Map [vaddr, vaddr + size) to @phys through the PMB.
 *
 * @prot is taken as a full pgprot_t rather than an unsigned long so
 * that 64-bit SH-X2 pgprots are not truncated on the way in; only the
 * cache-attribute bits in the low word are consumed here, but we widen
 * to u64 for safety.
 *
 * Returns the number of bytes actually mapped -- which may be less
 * than @size when the remainder is smaller than the smallest (16MB)
 * PMB entry -- or a negative error code, after tearing down any
 * partially established chain.
 */
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	/* Greedy fit: pmb_sizes[] is sorted largest-first. */
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys += pmb_sizes[i].size;
		vaddr += pmb_sizes[i].size;
		size -= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	/* 0x1000000 is the smallest PMB entry size (16MB). */
	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	/* Unwind everything mapped so far before reporting failure. */
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}
234 238
235 void pmb_unmap(unsigned long addr) 239 void pmb_unmap(unsigned long addr)
236 { 240 {
237 struct pmb_entry *pmbe = NULL; 241 struct pmb_entry *pmbe = NULL;
238 int i; 242 int i;
239 243
240 for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { 244 for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
241 if (test_bit(i, &pmb_map)) { 245 if (test_bit(i, &pmb_map)) {
242 pmbe = &pmb_entry_list[i]; 246 pmbe = &pmb_entry_list[i];
243 if (pmbe->vpn == addr) 247 if (pmbe->vpn == addr)
244 break; 248 break;
245 } 249 }
246 } 250 }
247 251
248 if (unlikely(!pmbe)) 252 if (unlikely(!pmbe))
249 return; 253 return;
250 254
251 __pmb_unmap(pmbe); 255 __pmb_unmap(pmbe);
252 } 256 }
253 257
/*
 * Release a (possibly linked) chain of PMB entries: invalidate each in
 * hardware and hand its slot back to the allocator.
 */
static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		/* Grab the successor before pmb_free() wipes ->link's owner. */
		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}
278 282
279 static inline void 283 static inline void
280 pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn) 284 pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn)
281 { 285 {
282 unsigned int size; 286 unsigned int size;
283 const char *sz_str; 287 const char *sz_str;
284 288
285 size = data_val & PMB_SZ_MASK; 289 size = data_val & PMB_SZ_MASK;
286 290
287 sz_str = (size == PMB_SZ_16M) ? " 16MB": 291 sz_str = (size == PMB_SZ_16M) ? " 16MB":
288 (size == PMB_SZ_64M) ? " 64MB": 292 (size == PMB_SZ_64M) ? " 64MB":
289 (size == PMB_SZ_128M) ? "128MB": 293 (size == PMB_SZ_128M) ? "128MB":
290 "512MB"; 294 "512MB";
291 295
292 pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n", 296 pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
293 vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str, 297 vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
294 (data_val & PMB_C) ? "" : "un"); 298 (data_val & PMB_C) ? "" : "un");
295 } 299 }
296 300
297 static inline unsigned int pmb_ppn_in_range(unsigned long ppn) 301 static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
298 { 302 {
299 return ppn >= __pa(memory_start) && ppn < __pa(memory_end); 303 return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
300 } 304 }
301 305
/*
 * Import the PMB mappings left in hardware by the boot loader (or very
 * early kernel) into our software state, discarding anything outside
 * the kernel's physical memory range.
 *
 * Return value has inverted sense: 1 when NO mapping was preserved,
 * 0 when at least one was -- see the consumer in pmb_init().
 */
static int pmb_synchronize_mappings(void)
{
	unsigned int applied = 0;
	int i;

	pr_info("PMB: boot mappings:\n");

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary, to bring
		 * the boot loader's choice in line with the kernel's
		 * configured cache mode.
		 */
		if (data_val & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data_val |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data_val &= ~PMB_WT;
#else
			data_val &= ~(PMB_C | PMB_WT);
#endif
			__raw_writel(data_val, data);
		}

		flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK);

		/* Mirror this hardware slot into the software list. */
		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		pmb_log_mapping(data_val, vpn, ppn);

		applied++;
	}

	return (applied == 0);
}
387 391
/*
 * One-time PMB bring-up. Runs from the uncached mapping since it may
 * rewrite translations in use.
 */
int pmb_init(void)
{
	int ret;

	jump_to_uncached();

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	ret = pmb_synchronize_mappings();
	if (unlikely(ret == 0)) {
		/*
		 * ret == 0 means mappings WERE applied (the helper's
		 * return sense is inverted); leave IRMCR and the TLB
		 * untouched in that case.
		 */
		back_to_cached();
		return 0;
	}

	__raw_writel(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

	back_to_cached();

	return 0;
}
414 418
415 bool __in_29bit_mode(void) 419 bool __in_29bit_mode(void)
416 { 420 {
417 return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0; 421 return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
418 } 422 }
419 423
/* seq_file show routine: dump every hardware PMB slot as a table row. */
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M) ? " 16MB":
			 (size == PMB_SZ_64M) ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
453 457
/* debugfs open: bind the single-shot seq_file show routine. */
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}
458 462
/* Read-only file operations for the "pmb" debugfs dump. */
static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
466 470
/* Create the "pmb" dump file under the SH debugfs root. */
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	/* debugfs_create_file() may also hand back an ERR_PTR. */
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);
481 485
482 #ifdef CONFIG_PM 486 #ifdef CONFIG_PM
/*
 * PM callback: re-program every in-use PMB entry when coming back up
 * from hibernation (detected as a FREEZE -> ON event transition).
 */
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Remembers the previous event so the resume edge can be seen. */
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}
502 506
/* Resume hook: reuse the suspend handler with a synthetic ON event. */
static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}
507 511
/* sysdev driver hooking PMB save/restore into CPU PM transitions. */
static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};
512 516
/* Register the PMB PM hooks against the CPU sysdev class. */
static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
518 #endif 522 #endif
519 523
drivers/video/pvr2fb.c
1 /* 1 /*
2 * drivers/video/pvr2fb.c 2 * drivers/video/pvr2fb.c
3 * 3 *
4 * Frame buffer and fbcon support for the NEC PowerVR2 found within the Sega 4 * Frame buffer and fbcon support for the NEC PowerVR2 found within the Sega
5 * Dreamcast. 5 * Dreamcast.
6 * 6 *
7 * Copyright (c) 2001 M. R. Brown <mrbrown@0xd6.org> 7 * Copyright (c) 2001 M. R. Brown <mrbrown@0xd6.org>
8 * Copyright (c) 2001 - 2008 Paul Mundt <lethal@linux-sh.org> 8 * Copyright (c) 2001 - 2008 Paul Mundt <lethal@linux-sh.org>
9 * 9 *
10 * This driver is mostly based on the excellent amifb and vfb sources. It uses 10 * This driver is mostly based on the excellent amifb and vfb sources. It uses
11 * an odd scheme for converting hardware values to/from framebuffer values, 11 * an odd scheme for converting hardware values to/from framebuffer values,
12 * here are some hacked-up formulas: 12 * here are some hacked-up formulas:
13 * 13 *
14 * The Dreamcast has screen offsets from each side of its four borders and 14 * The Dreamcast has screen offsets from each side of its four borders and
15 * the start offsets of the display window. I used these values to calculate 15 * the start offsets of the display window. I used these values to calculate
16 * 'pseudo' values (think of them as placeholders) for the fb video mode, so 16 * 'pseudo' values (think of them as placeholders) for the fb video mode, so
17 * that when it came time to convert these values back into their hardware 17 * that when it came time to convert these values back into their hardware
18 * values, I could just add mode- specific offsets to get the correct mode 18 * values, I could just add mode- specific offsets to get the correct mode
19 * settings: 19 * settings:
20 * 20 *
21 * left_margin = diwstart_h - borderstart_h; 21 * left_margin = diwstart_h - borderstart_h;
22 * right_margin = borderstop_h - (diwstart_h + xres); 22 * right_margin = borderstop_h - (diwstart_h + xres);
23 * upper_margin = diwstart_v - borderstart_v; 23 * upper_margin = diwstart_v - borderstart_v;
24 * lower_margin = borderstop_v - (diwstart_h + yres); 24 * lower_margin = borderstop_v - (diwstart_h + yres);
25 * 25 *
26 * hsync_len = borderstart_h + (hsync_total - borderstop_h); 26 * hsync_len = borderstart_h + (hsync_total - borderstop_h);
27 * vsync_len = borderstart_v + (vsync_total - borderstop_v); 27 * vsync_len = borderstart_v + (vsync_total - borderstop_v);
28 * 28 *
29 * Then, when it's time to convert back to hardware settings, the only 29 * Then, when it's time to convert back to hardware settings, the only
30 * constants are the borderstart_* offsets, all other values are derived from 30 * constants are the borderstart_* offsets, all other values are derived from
31 * the fb video mode: 31 * the fb video mode:
32 * 32 *
33 * // PAL 33 * // PAL
34 * borderstart_h = 116; 34 * borderstart_h = 116;
35 * borderstart_v = 44; 35 * borderstart_v = 44;
36 * ... 36 * ...
37 * borderstop_h = borderstart_h + hsync_total - hsync_len; 37 * borderstop_h = borderstart_h + hsync_total - hsync_len;
38 * ... 38 * ...
39 * diwstart_v = borderstart_v - upper_margin; 39 * diwstart_v = borderstart_v - upper_margin;
40 * 40 *
41 * However, in the current implementation, the borderstart values haven't had 41 * However, in the current implementation, the borderstart values haven't had
42 * the benefit of being fully researched, so some modes may be broken. 42 * the benefit of being fully researched, so some modes may be broken.
43 */ 43 */
44 44
45 #undef DEBUG 45 #undef DEBUG
46 46
47 #include <linux/module.h> 47 #include <linux/module.h>
48 #include <linux/kernel.h> 48 #include <linux/kernel.h>
49 #include <linux/errno.h> 49 #include <linux/errno.h>
50 #include <linux/string.h> 50 #include <linux/string.h>
51 #include <linux/mm.h> 51 #include <linux/mm.h>
52 #include <linux/slab.h> 52 #include <linux/slab.h>
53 #include <linux/delay.h> 53 #include <linux/delay.h>
54 #include <linux/interrupt.h> 54 #include <linux/interrupt.h>
55 #include <linux/fb.h> 55 #include <linux/fb.h>
56 #include <linux/init.h> 56 #include <linux/init.h>
57 #include <linux/pci.h> 57 #include <linux/pci.h>
58 58
59 #ifdef CONFIG_SH_DREAMCAST 59 #ifdef CONFIG_SH_DREAMCAST
60 #include <asm/machvec.h> 60 #include <asm/machvec.h>
61 #include <mach-dreamcast/mach/sysasic.h> 61 #include <mach-dreamcast/mach/sysasic.h>
62 #endif 62 #endif
63 63
64 #ifdef CONFIG_PVR2_DMA 64 #ifdef CONFIG_PVR2_DMA
65 #include <linux/pagemap.h> 65 #include <linux/pagemap.h>
66 #include <mach/dma.h> 66 #include <mach/dma.h>
67 #include <asm/dma.h> 67 #include <asm/dma.h>
68 #endif 68 #endif
69 69
70 #ifdef CONFIG_SH_STORE_QUEUES 70 #ifdef CONFIG_SH_STORE_QUEUES
71 #include <linux/uaccess.h> 71 #include <linux/uaccess.h>
72 #include <cpu/sq.h> 72 #include <cpu/sq.h>
73 #endif 73 #endif
74 74
75 #ifndef PCI_DEVICE_ID_NEC_NEON250 75 #ifndef PCI_DEVICE_ID_NEC_NEON250
76 # define PCI_DEVICE_ID_NEC_NEON250 0x0067 76 # define PCI_DEVICE_ID_NEC_NEON250 0x0067
77 #endif 77 #endif
78 78
79 /* 2D video registers */ 79 /* 2D video registers */
80 #define DISP_BASE par->mmio_base 80 #define DISP_BASE par->mmio_base
81 #define DISP_BRDRCOLR (DISP_BASE + 0x40) 81 #define DISP_BRDRCOLR (DISP_BASE + 0x40)
82 #define DISP_DIWMODE (DISP_BASE + 0x44) 82 #define DISP_DIWMODE (DISP_BASE + 0x44)
83 #define DISP_DIWADDRL (DISP_BASE + 0x50) 83 #define DISP_DIWADDRL (DISP_BASE + 0x50)
84 #define DISP_DIWADDRS (DISP_BASE + 0x54) 84 #define DISP_DIWADDRS (DISP_BASE + 0x54)
85 #define DISP_DIWSIZE (DISP_BASE + 0x5c) 85 #define DISP_DIWSIZE (DISP_BASE + 0x5c)
86 #define DISP_SYNCCONF (DISP_BASE + 0xd0) 86 #define DISP_SYNCCONF (DISP_BASE + 0xd0)
87 #define DISP_BRDRHORZ (DISP_BASE + 0xd4) 87 #define DISP_BRDRHORZ (DISP_BASE + 0xd4)
88 #define DISP_SYNCSIZE (DISP_BASE + 0xd8) 88 #define DISP_SYNCSIZE (DISP_BASE + 0xd8)
89 #define DISP_BRDRVERT (DISP_BASE + 0xdc) 89 #define DISP_BRDRVERT (DISP_BASE + 0xdc)
90 #define DISP_DIWCONF (DISP_BASE + 0xe8) 90 #define DISP_DIWCONF (DISP_BASE + 0xe8)
91 #define DISP_DIWHSTRT (DISP_BASE + 0xec) 91 #define DISP_DIWHSTRT (DISP_BASE + 0xec)
92 #define DISP_DIWVSTRT (DISP_BASE + 0xf0) 92 #define DISP_DIWVSTRT (DISP_BASE + 0xf0)
93 #define DISP_PIXDEPTH (DISP_BASE + 0x108) 93 #define DISP_PIXDEPTH (DISP_BASE + 0x108)
94 94
95 /* Pixel clocks, one for TV output, doubled for VGA output */ 95 /* Pixel clocks, one for TV output, doubled for VGA output */
96 #define TV_CLK 74239 96 #define TV_CLK 74239
97 #define VGA_CLK 37119 97 #define VGA_CLK 37119
98 98
99 /* This is for 60Hz - the VTOTAL is doubled for interlaced modes */ 99 /* This is for 60Hz - the VTOTAL is doubled for interlaced modes */
100 #define PAL_HTOTAL 863 100 #define PAL_HTOTAL 863
101 #define PAL_VTOTAL 312 101 #define PAL_VTOTAL 312
102 #define NTSC_HTOTAL 857 102 #define NTSC_HTOTAL 857
103 #define NTSC_VTOTAL 262 103 #define NTSC_VTOTAL 262
104 104
105 /* Supported cable types */ 105 /* Supported cable types */
106 enum { CT_VGA, CT_NONE, CT_RGB, CT_COMPOSITE }; 106 enum { CT_VGA, CT_NONE, CT_RGB, CT_COMPOSITE };
107 107
108 /* Supported video output types */ 108 /* Supported video output types */
109 enum { VO_PAL, VO_NTSC, VO_VGA }; 109 enum { VO_PAL, VO_NTSC, VO_VGA };
110 110
111 /* Supported palette types */ 111 /* Supported palette types */
112 enum { PAL_ARGB1555, PAL_RGB565, PAL_ARGB4444, PAL_ARGB8888 }; 112 enum { PAL_ARGB1555, PAL_RGB565, PAL_ARGB4444, PAL_ARGB8888 };
113 113
114 struct pvr2_params { unsigned int val; char *name; }; 114 struct pvr2_params { unsigned int val; char *name; };
115 static struct pvr2_params cables[] __devinitdata = { 115 static struct pvr2_params cables[] __devinitdata = {
116 { CT_VGA, "VGA" }, { CT_RGB, "RGB" }, { CT_COMPOSITE, "COMPOSITE" }, 116 { CT_VGA, "VGA" }, { CT_RGB, "RGB" }, { CT_COMPOSITE, "COMPOSITE" },
117 }; 117 };
118 118
119 static struct pvr2_params outputs[] __devinitdata = { 119 static struct pvr2_params outputs[] __devinitdata = {
120 { VO_PAL, "PAL" }, { VO_NTSC, "NTSC" }, { VO_VGA, "VGA" }, 120 { VO_PAL, "PAL" }, { VO_NTSC, "NTSC" }, { VO_VGA, "VGA" },
121 }; 121 };
122 122
123 /* 123 /*
124 * This describes the current video mode 124 * This describes the current video mode
125 */ 125 */
126 126
127 static struct pvr2fb_par { 127 static struct pvr2fb_par {
128 unsigned int hsync_total; /* Clocks/line */ 128 unsigned int hsync_total; /* Clocks/line */
129 unsigned int vsync_total; /* Lines/field */ 129 unsigned int vsync_total; /* Lines/field */
130 unsigned int borderstart_h; 130 unsigned int borderstart_h;
131 unsigned int borderstop_h; 131 unsigned int borderstop_h;
132 unsigned int borderstart_v; 132 unsigned int borderstart_v;
133 unsigned int borderstop_v; 133 unsigned int borderstop_v;
134 unsigned int diwstart_h; /* Horizontal offset of the display field */ 134 unsigned int diwstart_h; /* Horizontal offset of the display field */
135 unsigned int diwstart_v; /* Vertical offset of the display field, for 135 unsigned int diwstart_v; /* Vertical offset of the display field, for
136 interlaced modes, this is the long field */ 136 interlaced modes, this is the long field */
137 unsigned long disp_start; /* Address of image within VRAM */ 137 unsigned long disp_start; /* Address of image within VRAM */
138 unsigned char is_interlaced; /* Is the display interlaced? */ 138 unsigned char is_interlaced; /* Is the display interlaced? */
139 unsigned char is_doublescan; /* Are scanlines output twice? (doublescan) */ 139 unsigned char is_doublescan; /* Are scanlines output twice? (doublescan) */
140 unsigned char is_lowres; /* Is horizontal pixel-doubling enabled? */ 140 unsigned char is_lowres; /* Is horizontal pixel-doubling enabled? */
141 141
142 unsigned long mmio_base; /* MMIO base */ 142 unsigned long mmio_base; /* MMIO base */
143 u32 palette[16]; 143 u32 palette[16];
144 } *currentpar; 144 } *currentpar;
145 145
146 static struct fb_info *fb_info; 146 static struct fb_info *fb_info;
147 147
148 static struct fb_fix_screeninfo pvr2_fix __devinitdata = { 148 static struct fb_fix_screeninfo pvr2_fix __devinitdata = {
149 .id = "NEC PowerVR2", 149 .id = "NEC PowerVR2",
150 .type = FB_TYPE_PACKED_PIXELS, 150 .type = FB_TYPE_PACKED_PIXELS,
151 .visual = FB_VISUAL_TRUECOLOR, 151 .visual = FB_VISUAL_TRUECOLOR,
152 .ypanstep = 1, 152 .ypanstep = 1,
153 .ywrapstep = 1, 153 .ywrapstep = 1,
154 .accel = FB_ACCEL_NONE, 154 .accel = FB_ACCEL_NONE,
155 }; 155 };
156 156
157 static struct fb_var_screeninfo pvr2_var __devinitdata = { 157 static struct fb_var_screeninfo pvr2_var __devinitdata = {
158 .xres = 640, 158 .xres = 640,
159 .yres = 480, 159 .yres = 480,
160 .xres_virtual = 640, 160 .xres_virtual = 640,
161 .yres_virtual = 480, 161 .yres_virtual = 480,
162 .bits_per_pixel =16, 162 .bits_per_pixel =16,
163 .red = { 11, 5, 0 }, 163 .red = { 11, 5, 0 },
164 .green = { 5, 6, 0 }, 164 .green = { 5, 6, 0 },
165 .blue = { 0, 5, 0 }, 165 .blue = { 0, 5, 0 },
166 .activate = FB_ACTIVATE_NOW, 166 .activate = FB_ACTIVATE_NOW,
167 .height = -1, 167 .height = -1,
168 .width = -1, 168 .width = -1,
169 .vmode = FB_VMODE_NONINTERLACED, 169 .vmode = FB_VMODE_NONINTERLACED,
170 }; 170 };
171 171
172 static int cable_type = CT_VGA; 172 static int cable_type = CT_VGA;
173 static int video_output = VO_VGA; 173 static int video_output = VO_VGA;
174 174
175 static int nopan = 0; 175 static int nopan = 0;
176 static int nowrap = 1; 176 static int nowrap = 1;
177 177
178 /* 178 /*
179 * We do all updating, blanking, etc. during the vertical retrace period 179 * We do all updating, blanking, etc. during the vertical retrace period
180 */ 180 */
181 static unsigned int do_vmode_full = 0; /* Change the video mode */ 181 static unsigned int do_vmode_full = 0; /* Change the video mode */
182 static unsigned int do_vmode_pan = 0; /* Update the video mode */ 182 static unsigned int do_vmode_pan = 0; /* Update the video mode */
183 static short do_blank = 0; /* (Un)Blank the screen */ 183 static short do_blank = 0; /* (Un)Blank the screen */
184 184
185 static unsigned int is_blanked = 0; /* Is the screen blanked? */ 185 static unsigned int is_blanked = 0; /* Is the screen blanked? */
186 186
187 #ifdef CONFIG_SH_STORE_QUEUES 187 #ifdef CONFIG_SH_STORE_QUEUES
188 static unsigned long pvr2fb_map; 188 static unsigned long pvr2fb_map;
189 #endif 189 #endif
190 190
191 #ifdef CONFIG_PVR2_DMA 191 #ifdef CONFIG_PVR2_DMA
192 static unsigned int shdma = PVR2_CASCADE_CHAN; 192 static unsigned int shdma = PVR2_CASCADE_CHAN;
193 static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS; 193 static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS;
194 #endif 194 #endif
195 195
196 static int pvr2fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, 196 static int pvr2fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue,
197 unsigned int transp, struct fb_info *info); 197 unsigned int transp, struct fb_info *info);
198 static int pvr2fb_blank(int blank, struct fb_info *info); 198 static int pvr2fb_blank(int blank, struct fb_info *info);
199 static unsigned long get_line_length(int xres_virtual, int bpp); 199 static unsigned long get_line_length(int xres_virtual, int bpp);
200 static void set_color_bitfields(struct fb_var_screeninfo *var); 200 static void set_color_bitfields(struct fb_var_screeninfo *var);
201 static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); 201 static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
202 static int pvr2fb_set_par(struct fb_info *info); 202 static int pvr2fb_set_par(struct fb_info *info);
203 static void pvr2_update_display(struct fb_info *info); 203 static void pvr2_update_display(struct fb_info *info);
204 static void pvr2_init_display(struct fb_info *info); 204 static void pvr2_init_display(struct fb_info *info);
205 static void pvr2_do_blank(void); 205 static void pvr2_do_blank(void);
206 static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id); 206 static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id);
207 static int pvr2_init_cable(void); 207 static int pvr2_init_cable(void);
208 static int pvr2_get_param(const struct pvr2_params *p, const char *s, 208 static int pvr2_get_param(const struct pvr2_params *p, const char *s,
209 int val, int size); 209 int val, int size);
210 #ifdef CONFIG_PVR2_DMA 210 #ifdef CONFIG_PVR2_DMA
211 static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, 211 static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
212 size_t count, loff_t *ppos); 212 size_t count, loff_t *ppos);
213 #endif 213 #endif
214 214
215 static struct fb_ops pvr2fb_ops = { 215 static struct fb_ops pvr2fb_ops = {
216 .owner = THIS_MODULE, 216 .owner = THIS_MODULE,
217 .fb_setcolreg = pvr2fb_setcolreg, 217 .fb_setcolreg = pvr2fb_setcolreg,
218 .fb_blank = pvr2fb_blank, 218 .fb_blank = pvr2fb_blank,
219 .fb_check_var = pvr2fb_check_var, 219 .fb_check_var = pvr2fb_check_var,
220 .fb_set_par = pvr2fb_set_par, 220 .fb_set_par = pvr2fb_set_par,
221 #ifdef CONFIG_PVR2_DMA 221 #ifdef CONFIG_PVR2_DMA
222 .fb_write = pvr2fb_write, 222 .fb_write = pvr2fb_write,
223 #endif 223 #endif
224 .fb_fillrect = cfb_fillrect, 224 .fb_fillrect = cfb_fillrect,
225 .fb_copyarea = cfb_copyarea, 225 .fb_copyarea = cfb_copyarea,
226 .fb_imageblit = cfb_imageblit, 226 .fb_imageblit = cfb_imageblit,
227 }; 227 };
228 228
229 static struct fb_videomode pvr2_modedb[] __devinitdata = { 229 static struct fb_videomode pvr2_modedb[] __devinitdata = {
230 /* 230 /*
231 * Broadcast video modes (PAL and NTSC). I'm unfamiliar with 231 * Broadcast video modes (PAL and NTSC). I'm unfamiliar with
232 * PAL-M and PAL-N, but from what I've read both modes parallel PAL and 232 * PAL-M and PAL-N, but from what I've read both modes parallel PAL and
233 * NTSC, so it shouldn't be a problem (I hope). 233 * NTSC, so it shouldn't be a problem (I hope).
234 */ 234 */
235 235
236 { 236 {
237 /* 640x480 @ 60Hz interlaced (NTSC) */ 237 /* 640x480 @ 60Hz interlaced (NTSC) */
238 "ntsc_640x480i", 60, 640, 480, TV_CLK, 38, 33, 0, 18, 146, 26, 238 "ntsc_640x480i", 60, 640, 480, TV_CLK, 38, 33, 0, 18, 146, 26,
239 FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP 239 FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
240 }, { 240 }, {
241 /* 640x240 @ 60Hz (NTSC) */ 241 /* 640x240 @ 60Hz (NTSC) */
242 /* XXX: Broken! Don't use... */ 242 /* XXX: Broken! Don't use... */
243 "ntsc_640x240", 60, 640, 240, TV_CLK, 38, 33, 0, 0, 146, 22, 243 "ntsc_640x240", 60, 640, 240, TV_CLK, 38, 33, 0, 0, 146, 22,
244 FB_SYNC_BROADCAST, FB_VMODE_YWRAP 244 FB_SYNC_BROADCAST, FB_VMODE_YWRAP
245 }, { 245 }, {
246 /* 640x480 @ 60hz (VGA) */ 246 /* 640x480 @ 60hz (VGA) */
247 "vga_640x480", 60, 640, 480, VGA_CLK, 38, 33, 0, 18, 146, 26, 247 "vga_640x480", 60, 640, 480, VGA_CLK, 38, 33, 0, 18, 146, 26,
248 0, FB_VMODE_YWRAP 248 0, FB_VMODE_YWRAP
249 }, 249 },
250 }; 250 };
251 251
252 #define NUM_TOTAL_MODES ARRAY_SIZE(pvr2_modedb) 252 #define NUM_TOTAL_MODES ARRAY_SIZE(pvr2_modedb)
253 253
254 #define DEFMODE_NTSC 0 254 #define DEFMODE_NTSC 0
255 #define DEFMODE_PAL 0 255 #define DEFMODE_PAL 0
256 #define DEFMODE_VGA 2 256 #define DEFMODE_VGA 2
257 257
258 static int defmode = DEFMODE_NTSC; 258 static int defmode = DEFMODE_NTSC;
259 static char *mode_option __devinitdata = NULL; 259 static char *mode_option __devinitdata = NULL;
260 260
261 static inline void pvr2fb_set_pal_type(unsigned int type) 261 static inline void pvr2fb_set_pal_type(unsigned int type)
262 { 262 {
263 struct pvr2fb_par *par = (struct pvr2fb_par *)fb_info->par; 263 struct pvr2fb_par *par = (struct pvr2fb_par *)fb_info->par;
264 264
265 fb_writel(type, par->mmio_base + 0x108); 265 fb_writel(type, par->mmio_base + 0x108);
266 } 266 }
267 267
268 static inline void pvr2fb_set_pal_entry(struct pvr2fb_par *par, 268 static inline void pvr2fb_set_pal_entry(struct pvr2fb_par *par,
269 unsigned int regno, 269 unsigned int regno,
270 unsigned int val) 270 unsigned int val)
271 { 271 {
272 fb_writel(val, par->mmio_base + 0x1000 + (4 * regno)); 272 fb_writel(val, par->mmio_base + 0x1000 + (4 * regno));
273 } 273 }
274 274
275 static int pvr2fb_blank(int blank, struct fb_info *info) 275 static int pvr2fb_blank(int blank, struct fb_info *info)
276 { 276 {
277 do_blank = blank ? blank : -1; 277 do_blank = blank ? blank : -1;
278 return 0; 278 return 0;
279 } 279 }
280 280
281 static inline unsigned long get_line_length(int xres_virtual, int bpp) 281 static inline unsigned long get_line_length(int xres_virtual, int bpp)
282 { 282 {
283 return (unsigned long)((((xres_virtual*bpp)+31)&~31) >> 3); 283 return (unsigned long)((((xres_virtual*bpp)+31)&~31) >> 3);
284 } 284 }
285 285
286 static void set_color_bitfields(struct fb_var_screeninfo *var) 286 static void set_color_bitfields(struct fb_var_screeninfo *var)
287 { 287 {
288 switch (var->bits_per_pixel) { 288 switch (var->bits_per_pixel) {
289 case 16: /* RGB 565 */ 289 case 16: /* RGB 565 */
290 pvr2fb_set_pal_type(PAL_RGB565); 290 pvr2fb_set_pal_type(PAL_RGB565);
291 var->red.offset = 11; var->red.length = 5; 291 var->red.offset = 11; var->red.length = 5;
292 var->green.offset = 5; var->green.length = 6; 292 var->green.offset = 5; var->green.length = 6;
293 var->blue.offset = 0; var->blue.length = 5; 293 var->blue.offset = 0; var->blue.length = 5;
294 var->transp.offset = 0; var->transp.length = 0; 294 var->transp.offset = 0; var->transp.length = 0;
295 break; 295 break;
296 case 24: /* RGB 888 */ 296 case 24: /* RGB 888 */
297 var->red.offset = 16; var->red.length = 8; 297 var->red.offset = 16; var->red.length = 8;
298 var->green.offset = 8; var->green.length = 8; 298 var->green.offset = 8; var->green.length = 8;
299 var->blue.offset = 0; var->blue.length = 8; 299 var->blue.offset = 0; var->blue.length = 8;
300 var->transp.offset = 0; var->transp.length = 0; 300 var->transp.offset = 0; var->transp.length = 0;
301 break; 301 break;
302 case 32: /* ARGB 8888 */ 302 case 32: /* ARGB 8888 */
303 pvr2fb_set_pal_type(PAL_ARGB8888); 303 pvr2fb_set_pal_type(PAL_ARGB8888);
304 var->red.offset = 16; var->red.length = 8; 304 var->red.offset = 16; var->red.length = 8;
305 var->green.offset = 8; var->green.length = 8; 305 var->green.offset = 8; var->green.length = 8;
306 var->blue.offset = 0; var->blue.length = 8; 306 var->blue.offset = 0; var->blue.length = 8;
307 var->transp.offset = 24; var->transp.length = 8; 307 var->transp.offset = 24; var->transp.length = 8;
308 break; 308 break;
309 } 309 }
310 } 310 }
311 311
312 static int pvr2fb_setcolreg(unsigned int regno, unsigned int red, 312 static int pvr2fb_setcolreg(unsigned int regno, unsigned int red,
313 unsigned int green, unsigned int blue, 313 unsigned int green, unsigned int blue,
314 unsigned int transp, struct fb_info *info) 314 unsigned int transp, struct fb_info *info)
315 { 315 {
316 struct pvr2fb_par *par = (struct pvr2fb_par *)info->par; 316 struct pvr2fb_par *par = (struct pvr2fb_par *)info->par;
317 unsigned int tmp; 317 unsigned int tmp;
318 318
319 if (regno > info->cmap.len) 319 if (regno > info->cmap.len)
320 return 1; 320 return 1;
321 321
322 /* 322 /*
323 * We only support the hardware palette for 16 and 32bpp. It's also 323 * We only support the hardware palette for 16 and 32bpp. It's also
324 * expected that the palette format has been set by the time we get 324 * expected that the palette format has been set by the time we get
325 * here, so we don't waste time setting it again. 325 * here, so we don't waste time setting it again.
326 */ 326 */
327 switch (info->var.bits_per_pixel) { 327 switch (info->var.bits_per_pixel) {
328 case 16: /* RGB 565 */ 328 case 16: /* RGB 565 */
329 tmp = (red & 0xf800) | 329 tmp = (red & 0xf800) |
330 ((green & 0xfc00) >> 5) | 330 ((green & 0xfc00) >> 5) |
331 ((blue & 0xf800) >> 11); 331 ((blue & 0xf800) >> 11);
332 332
333 pvr2fb_set_pal_entry(par, regno, tmp); 333 pvr2fb_set_pal_entry(par, regno, tmp);
334 break; 334 break;
335 case 24: /* RGB 888 */ 335 case 24: /* RGB 888 */
336 red >>= 8; green >>= 8; blue >>= 8; 336 red >>= 8; green >>= 8; blue >>= 8;
337 tmp = (red << 16) | (green << 8) | blue; 337 tmp = (red << 16) | (green << 8) | blue;
338 break; 338 break;
339 case 32: /* ARGB 8888 */ 339 case 32: /* ARGB 8888 */
340 red >>= 8; green >>= 8; blue >>= 8; 340 red >>= 8; green >>= 8; blue >>= 8;
341 tmp = (transp << 24) | (red << 16) | (green << 8) | blue; 341 tmp = (transp << 24) | (red << 16) | (green << 8) | blue;
342 342
343 pvr2fb_set_pal_entry(par, regno, tmp); 343 pvr2fb_set_pal_entry(par, regno, tmp);
344 break; 344 break;
345 default: 345 default:
346 pr_debug("Invalid bit depth %d?!?\n", info->var.bits_per_pixel); 346 pr_debug("Invalid bit depth %d?!?\n", info->var.bits_per_pixel);
347 return 1; 347 return 1;
348 } 348 }
349 349
350 if (regno < 16) 350 if (regno < 16)
351 ((u32*)(info->pseudo_palette))[regno] = tmp; 351 ((u32*)(info->pseudo_palette))[regno] = tmp;
352 352
353 return 0; 353 return 0;
354 } 354 }
355 355
356 static int pvr2fb_set_par(struct fb_info *info) 356 static int pvr2fb_set_par(struct fb_info *info)
357 { 357 {
358 struct pvr2fb_par *par = (struct pvr2fb_par *)info->par; 358 struct pvr2fb_par *par = (struct pvr2fb_par *)info->par;
359 struct fb_var_screeninfo *var = &info->var; 359 struct fb_var_screeninfo *var = &info->var;
360 unsigned long line_length; 360 unsigned long line_length;
361 unsigned int vtotal; 361 unsigned int vtotal;
362 362
363 /* 363 /*
364 * XXX: It's possible that a user could use a VGA box, change the cable 364 * XXX: It's possible that a user could use a VGA box, change the cable
365 * type in hardware (i.e. switch from VGA<->composite), then change 365 * type in hardware (i.e. switch from VGA<->composite), then change
366 * modes (i.e. switching to another VT). If that happens we should 366 * modes (i.e. switching to another VT). If that happens we should
367 * automagically change the output format to cope, but currently I 367 * automagically change the output format to cope, but currently I
368 * don't have a VGA box to make sure this works properly. 368 * don't have a VGA box to make sure this works properly.
369 */ 369 */
370 cable_type = pvr2_init_cable(); 370 cable_type = pvr2_init_cable();
371 if (cable_type == CT_VGA && video_output != VO_VGA) 371 if (cable_type == CT_VGA && video_output != VO_VGA)
372 video_output = VO_VGA; 372 video_output = VO_VGA;
373 373
374 var->vmode &= FB_VMODE_MASK; 374 var->vmode &= FB_VMODE_MASK;
375 if (var->vmode & FB_VMODE_INTERLACED && video_output != VO_VGA) 375 if (var->vmode & FB_VMODE_INTERLACED && video_output != VO_VGA)
376 par->is_interlaced = 1; 376 par->is_interlaced = 1;
377 /* 377 /*
378 * XXX: Need to be more creative with this (i.e. allow doublecan for 378 * XXX: Need to be more creative with this (i.e. allow doublecan for
379 * PAL/NTSC output). 379 * PAL/NTSC output).
380 */ 380 */
381 if (var->vmode & FB_VMODE_DOUBLE && video_output == VO_VGA) 381 if (var->vmode & FB_VMODE_DOUBLE && video_output == VO_VGA)
382 par->is_doublescan = 1; 382 par->is_doublescan = 1;
383 383
384 par->hsync_total = var->left_margin + var->xres + var->right_margin + 384 par->hsync_total = var->left_margin + var->xres + var->right_margin +
385 var->hsync_len; 385 var->hsync_len;
386 par->vsync_total = var->upper_margin + var->yres + var->lower_margin + 386 par->vsync_total = var->upper_margin + var->yres + var->lower_margin +
387 var->vsync_len; 387 var->vsync_len;
388 388
389 if (var->sync & FB_SYNC_BROADCAST) { 389 if (var->sync & FB_SYNC_BROADCAST) {
390 vtotal = par->vsync_total; 390 vtotal = par->vsync_total;
391 if (par->is_interlaced) 391 if (par->is_interlaced)
392 vtotal /= 2; 392 vtotal /= 2;
393 if (vtotal > (PAL_VTOTAL + NTSC_VTOTAL)/2) { 393 if (vtotal > (PAL_VTOTAL + NTSC_VTOTAL)/2) {
394 /* XXX: Check for start values here... */ 394 /* XXX: Check for start values here... */
395 /* XXX: Check hardware for PAL-compatibility */ 395 /* XXX: Check hardware for PAL-compatibility */
396 par->borderstart_h = 116; 396 par->borderstart_h = 116;
397 par->borderstart_v = 44; 397 par->borderstart_v = 44;
398 } else { 398 } else {
399 /* NTSC video output */ 399 /* NTSC video output */
400 par->borderstart_h = 126; 400 par->borderstart_h = 126;
401 par->borderstart_v = 18; 401 par->borderstart_v = 18;
402 } 402 }
403 } else { 403 } else {
404 /* VGA mode */ 404 /* VGA mode */
405 /* XXX: What else needs to be checked? */ 405 /* XXX: What else needs to be checked? */
406 /* 406 /*
407 * XXX: We have a little freedom in VGA modes, what ranges 407 * XXX: We have a little freedom in VGA modes, what ranges
408 * should be here (i.e. hsync/vsync totals, etc.)? 408 * should be here (i.e. hsync/vsync totals, etc.)?
409 */ 409 */
410 par->borderstart_h = 126; 410 par->borderstart_h = 126;
411 par->borderstart_v = 40; 411 par->borderstart_v = 40;
412 } 412 }
413 413
414 /* Calculate the remainding offsets */ 414 /* Calculate the remainding offsets */
415 par->diwstart_h = par->borderstart_h + var->left_margin; 415 par->diwstart_h = par->borderstart_h + var->left_margin;
416 par->diwstart_v = par->borderstart_v + var->upper_margin; 416 par->diwstart_v = par->borderstart_v + var->upper_margin;
417 par->borderstop_h = par->diwstart_h + var->xres + 417 par->borderstop_h = par->diwstart_h + var->xres +
418 var->right_margin; 418 var->right_margin;
419 par->borderstop_v = par->diwstart_v + var->yres + 419 par->borderstop_v = par->diwstart_v + var->yres +
420 var->lower_margin; 420 var->lower_margin;
421 421
422 if (!par->is_interlaced) 422 if (!par->is_interlaced)
423 par->borderstop_v /= 2; 423 par->borderstop_v /= 2;
424 if (info->var.xres < 640) 424 if (info->var.xres < 640)
425 par->is_lowres = 1; 425 par->is_lowres = 1;
426 426
427 line_length = get_line_length(var->xres_virtual, var->bits_per_pixel); 427 line_length = get_line_length(var->xres_virtual, var->bits_per_pixel);
428 par->disp_start = info->fix.smem_start + (line_length * var->yoffset) * line_length; 428 par->disp_start = info->fix.smem_start + (line_length * var->yoffset) * line_length;
429 info->fix.line_length = line_length; 429 info->fix.line_length = line_length;
430 return 0; 430 return 0;
431 } 431 }
432 432
433 static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 433 static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
434 { 434 {
435 struct pvr2fb_par *par = (struct pvr2fb_par *)info->par; 435 struct pvr2fb_par *par = (struct pvr2fb_par *)info->par;
436 unsigned int vtotal, hsync_total; 436 unsigned int vtotal, hsync_total;
437 unsigned long line_length; 437 unsigned long line_length;
438 438
439 if (var->pixclock != TV_CLK && var->pixclock != VGA_CLK) { 439 if (var->pixclock != TV_CLK && var->pixclock != VGA_CLK) {
440 pr_debug("Invalid pixclock value %d\n", var->pixclock); 440 pr_debug("Invalid pixclock value %d\n", var->pixclock);
441 return -EINVAL; 441 return -EINVAL;
442 } 442 }
443 443
444 if (var->xres < 320) 444 if (var->xres < 320)
445 var->xres = 320; 445 var->xres = 320;
446 if (var->yres < 240) 446 if (var->yres < 240)
447 var->yres = 240; 447 var->yres = 240;
448 if (var->xres_virtual < var->xres) 448 if (var->xres_virtual < var->xres)
449 var->xres_virtual = var->xres; 449 var->xres_virtual = var->xres;
450 if (var->yres_virtual < var->yres) 450 if (var->yres_virtual < var->yres)
451 var->yres_virtual = var->yres; 451 var->yres_virtual = var->yres;
452 452
453 if (var->bits_per_pixel <= 16) 453 if (var->bits_per_pixel <= 16)
454 var->bits_per_pixel = 16; 454 var->bits_per_pixel = 16;
455 else if (var->bits_per_pixel <= 24) 455 else if (var->bits_per_pixel <= 24)
456 var->bits_per_pixel = 24; 456 var->bits_per_pixel = 24;
457 else if (var->bits_per_pixel <= 32) 457 else if (var->bits_per_pixel <= 32)
458 var->bits_per_pixel = 32; 458 var->bits_per_pixel = 32;
459 459
460 set_color_bitfields(var); 460 set_color_bitfields(var);
461 461
462 if (var->vmode & FB_VMODE_YWRAP) { 462 if (var->vmode & FB_VMODE_YWRAP) {
463 if (var->xoffset || var->yoffset < 0 || 463 if (var->xoffset || var->yoffset < 0 ||
464 var->yoffset >= var->yres_virtual) { 464 var->yoffset >= var->yres_virtual) {
465 var->xoffset = var->yoffset = 0; 465 var->xoffset = var->yoffset = 0;
466 } else { 466 } else {
467 if (var->xoffset > var->xres_virtual - var->xres || 467 if (var->xoffset > var->xres_virtual - var->xres ||
468 var->yoffset > var->yres_virtual - var->yres || 468 var->yoffset > var->yres_virtual - var->yres ||
469 var->xoffset < 0 || var->yoffset < 0) 469 var->xoffset < 0 || var->yoffset < 0)
470 var->xoffset = var->yoffset = 0; 470 var->xoffset = var->yoffset = 0;
471 } 471 }
472 } else { 472 } else {
473 var->xoffset = var->yoffset = 0; 473 var->xoffset = var->yoffset = 0;
474 } 474 }
475 475
476 /* 476 /*
477 * XXX: Need to be more creative with this (i.e. allow doublecan for 477 * XXX: Need to be more creative with this (i.e. allow doublecan for
478 * PAL/NTSC output). 478 * PAL/NTSC output).
479 */ 479 */
480 if (var->yres < 480 && video_output == VO_VGA) 480 if (var->yres < 480 && video_output == VO_VGA)
481 var->vmode |= FB_VMODE_DOUBLE; 481 var->vmode |= FB_VMODE_DOUBLE;
482 482
483 if (video_output != VO_VGA) { 483 if (video_output != VO_VGA) {
484 var->sync |= FB_SYNC_BROADCAST; 484 var->sync |= FB_SYNC_BROADCAST;
485 var->vmode |= FB_VMODE_INTERLACED; 485 var->vmode |= FB_VMODE_INTERLACED;
486 } else { 486 } else {
487 var->sync &= ~FB_SYNC_BROADCAST; 487 var->sync &= ~FB_SYNC_BROADCAST;
488 var->vmode &= ~FB_VMODE_INTERLACED; 488 var->vmode &= ~FB_VMODE_INTERLACED;
489 var->vmode |= FB_VMODE_NONINTERLACED; 489 var->vmode |= FB_VMODE_NONINTERLACED;
490 } 490 }
491 491
492 if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_TEST) { 492 if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_TEST) {
493 var->right_margin = par->borderstop_h - 493 var->right_margin = par->borderstop_h -
494 (par->diwstart_h + var->xres); 494 (par->diwstart_h + var->xres);
495 var->left_margin = par->diwstart_h - par->borderstart_h; 495 var->left_margin = par->diwstart_h - par->borderstart_h;
496 var->hsync_len = par->borderstart_h + 496 var->hsync_len = par->borderstart_h +
497 (par->hsync_total - par->borderstop_h); 497 (par->hsync_total - par->borderstop_h);
498 498
499 var->upper_margin = par->diwstart_v - par->borderstart_v; 499 var->upper_margin = par->diwstart_v - par->borderstart_v;
500 var->lower_margin = par->borderstop_v - 500 var->lower_margin = par->borderstop_v -
501 (par->diwstart_v + var->yres); 501 (par->diwstart_v + var->yres);
502 var->vsync_len = par->borderstop_v + 502 var->vsync_len = par->borderstop_v +
503 (par->vsync_total - par->borderstop_v); 503 (par->vsync_total - par->borderstop_v);
504 } 504 }
505 505
506 hsync_total = var->left_margin + var->xres + var->right_margin + 506 hsync_total = var->left_margin + var->xres + var->right_margin +
507 var->hsync_len; 507 var->hsync_len;
508 vtotal = var->upper_margin + var->yres + var->lower_margin + 508 vtotal = var->upper_margin + var->yres + var->lower_margin +
509 var->vsync_len; 509 var->vsync_len;
510 510
511 if (var->sync & FB_SYNC_BROADCAST) { 511 if (var->sync & FB_SYNC_BROADCAST) {
512 if (var->vmode & FB_VMODE_INTERLACED) 512 if (var->vmode & FB_VMODE_INTERLACED)
513 vtotal /= 2; 513 vtotal /= 2;
514 if (vtotal > (PAL_VTOTAL + NTSC_VTOTAL)/2) { 514 if (vtotal > (PAL_VTOTAL + NTSC_VTOTAL)/2) {
515 /* PAL video output */ 515 /* PAL video output */
516 /* XXX: Should be using a range here ... ? */ 516 /* XXX: Should be using a range here ... ? */
517 if (hsync_total != PAL_HTOTAL) { 517 if (hsync_total != PAL_HTOTAL) {
518 pr_debug("invalid hsync total for PAL\n"); 518 pr_debug("invalid hsync total for PAL\n");
519 return -EINVAL; 519 return -EINVAL;
520 } 520 }
521 } else { 521 } else {
522 /* NTSC video output */ 522 /* NTSC video output */
523 if (hsync_total != NTSC_HTOTAL) { 523 if (hsync_total != NTSC_HTOTAL) {
524 pr_debug("invalid hsync total for NTSC\n"); 524 pr_debug("invalid hsync total for NTSC\n");
525 return -EINVAL; 525 return -EINVAL;
526 } 526 }
527 } 527 }
528 } 528 }
529 529
530 /* Check memory sizes */ 530 /* Check memory sizes */
531 line_length = get_line_length(var->xres_virtual, var->bits_per_pixel); 531 line_length = get_line_length(var->xres_virtual, var->bits_per_pixel);
532 if (line_length * var->yres_virtual > info->fix.smem_len) 532 if (line_length * var->yres_virtual > info->fix.smem_len)
533 return -ENOMEM; 533 return -ENOMEM;
534 534
535 return 0; 535 return 0;
536 } 536 }
537 537
538 static void pvr2_update_display(struct fb_info *info) 538 static void pvr2_update_display(struct fb_info *info)
539 { 539 {
540 struct pvr2fb_par *par = (struct pvr2fb_par *) info->par; 540 struct pvr2fb_par *par = (struct pvr2fb_par *) info->par;
541 struct fb_var_screeninfo *var = &info->var; 541 struct fb_var_screeninfo *var = &info->var;
542 542
543 /* Update the start address of the display image */ 543 /* Update the start address of the display image */
544 fb_writel(par->disp_start, DISP_DIWADDRL); 544 fb_writel(par->disp_start, DISP_DIWADDRL);
545 fb_writel(par->disp_start + 545 fb_writel(par->disp_start +
546 get_line_length(var->xoffset+var->xres, var->bits_per_pixel), 546 get_line_length(var->xoffset+var->xres, var->bits_per_pixel),
547 DISP_DIWADDRS); 547 DISP_DIWADDRS);
548 } 548 }
549 549
/*
 * Program the PVR2 display controller from the current var/par state.
 *
 * Initialize the video mode. Currently, the 16bpp and 24bpp modes aren't
 * very stable. It's probably due to the fact that a lot of the 2D video
 * registers are still undocumented.
 */

static void pvr2_init_display(struct fb_info *info)
{
	/* NOTE(review): 'par' also appears to be referenced implicitly by
	 * the DISP_* register macros (defined elsewhere in this file) —
	 * confirm before touching this declaration. */
	struct pvr2fb_par *par = (struct pvr2fb_par *) info->par;
	struct fb_var_screeninfo *var = &info->var;
	unsigned int diw_height, diw_width, diw_modulo = 1;
	unsigned int bytesperpixel = var->bits_per_pixel >> 3;

	/* hsync and vsync totals */
	fb_writel((par->vsync_total << 16) | par->hsync_total, DISP_SYNCSIZE);

	/* column height, modulo, row width */
	/* since we're "panning" within vram, we need to offset things based
	 * on the offset from the virtual x start to our real gfx. */
	if (video_output != VO_VGA && par->is_interlaced)
		diw_modulo += info->fix.line_length / 4;
	diw_height = (par->is_interlaced ? var->yres / 2 : var->yres);
	diw_width = get_line_length(var->xres, var->bits_per_pixel) / 4;
	/* The size fields hold (value - 1), hence the pre-decrements. */
	fb_writel((diw_modulo << 20) | (--diw_height << 10) | --diw_width,
		  DISP_DIWSIZE);

	/* display address, long and short fields */
	fb_writel(par->disp_start, DISP_DIWADDRL);
	fb_writel(par->disp_start +
		  get_line_length(var->xoffset+var->xres, var->bits_per_pixel),
		  DISP_DIWADDRS);

	/* border horizontal, border vertical, border color */
	fb_writel((par->borderstart_h << 16) | par->borderstop_h, DISP_BRDRHORZ);
	fb_writel((par->borderstart_v << 16) | par->borderstop_v, DISP_BRDRVERT);
	fb_writel(0, DISP_BRDRCOLR);

	/* display window start position */
	fb_writel(par->diwstart_h, DISP_DIWHSTRT);
	fb_writel((par->diwstart_v << 16) | par->diwstart_v, DISP_DIWVSTRT);

	/* misc. settings */
	fb_writel((0x16 << 16) | par->is_lowres, DISP_DIWCONF);

	/* clock doubler (for VGA), scan doubler, display enable */
	fb_writel(((video_output == VO_VGA) << 23) |
		  (par->is_doublescan << 1) | 1, DISP_DIWMODE);

	/* bits per pixel: encoded as (bytes per pixel - 1) in bits 3:2 */
	fb_writel(fb_readl(DISP_DIWMODE) | (--bytesperpixel << 2), DISP_DIWMODE);
	fb_writel(bytesperpixel << 2, DISP_PIXDEPTH);

	/* video enable, color sync, interlace,
	 * hsync and vsync polarity (currently unused) */
	fb_writel(0x100 | ((par->is_interlaced /*|4*/) << 4), DISP_SYNCCONF);
}
606 606
/* Simulate blanking by making the border cover the entire screen */

#define BLANK_BIT (1<<3)

static void pvr2_do_blank(void)
{
	/* NOTE(review): 'par' looks unused, but the DISP_* register macros
	 * (defined elsewhere in this file) may expand to use it — confirm
	 * before removing. */
	struct pvr2fb_par *par = currentpar;
	unsigned long diwconf;

	diwconf = fb_readl(DISP_DIWCONF);
	if (do_blank > 0)
		/* Blank: extend the border over the whole display window. */
		fb_writel(diwconf | BLANK_BIT, DISP_DIWCONF);
	else
		fb_writel(diwconf & ~BLANK_BIT, DISP_DIWCONF);

	/* Remember the current blanking level (0 == unblanked). */
	is_blanked = do_blank > 0 ? do_blank : 0;
}
624 624
/*
 * VBL interrupt handler: applies deferred pan/mode/blank requests during
 * the vertical blanking interval so the hardware is reprogrammed safely.
 *
 * The do_* flags are set asynchronously elsewhere; each one is checked
 * and then cleared, in this order, deliberately — do not reorder.
 */
static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id)
{
	struct fb_info *info = dev_id;

	/* A pending pan or full mode change both require the display
	 * start addresses to be refreshed. */
	if (do_vmode_pan || do_vmode_full)
		pvr2_update_display(info);
	if (do_vmode_full)
		pvr2_init_display(info);
	if (do_vmode_pan)
		do_vmode_pan = 0;
	if (do_vmode_full)
		do_vmode_full = 0;
	if (do_blank) {
		pvr2_do_blank();
		do_blank = 0;
	}
	return IRQ_HANDLED;
}
643 643
644 /* 644 /*
645 * Determine the cable type and initialize the cable output format. Don't do 645 * Determine the cable type and initialize the cable output format. Don't do
646 * anything if the cable type has been overidden (via "cable:XX"). 646 * anything if the cable type has been overidden (via "cable:XX").
647 */ 647 */
648 648
649 #define PCTRA 0xff80002c 649 #define PCTRA 0xff80002c
650 #define PDTRA 0xff800030 650 #define PDTRA 0xff800030
651 #define VOUTC 0xa0702c00 651 #define VOUTC 0xa0702c00
652 652
653 static int pvr2_init_cable(void) 653 static int pvr2_init_cable(void)
654 { 654 {
655 if (cable_type < 0) { 655 if (cable_type < 0) {
656 fb_writel((fb_readl(PCTRA) & 0xfff0ffff) | 0x000a0000, 656 fb_writel((fb_readl(PCTRA) & 0xfff0ffff) | 0x000a0000,
657 PCTRA); 657 PCTRA);
658 cable_type = (fb_readw(PDTRA) >> 8) & 3; 658 cable_type = (fb_readw(PDTRA) >> 8) & 3;
659 } 659 }
660 660
661 /* Now select the output format (either composite or other) */ 661 /* Now select the output format (either composite or other) */
662 /* XXX: Save the previous val first, as this reg is also AICA 662 /* XXX: Save the previous val first, as this reg is also AICA
663 related */ 663 related */
664 if (cable_type == CT_COMPOSITE) 664 if (cable_type == CT_COMPOSITE)
665 fb_writel(3 << 8, VOUTC); 665 fb_writel(3 << 8, VOUTC);
666 else if (cable_type == CT_RGB) 666 else if (cable_type == CT_RGB)
667 fb_writel(1 << 9, VOUTC); 667 fb_writel(1 << 9, VOUTC);
668 else 668 else
669 fb_writel(0, VOUTC); 669 fb_writel(0, VOUTC);
670 670
671 return cable_type; 671 return cable_type;
672 } 672 }
673 673
674 #ifdef CONFIG_PVR2_DMA 674 #ifdef CONFIG_PVR2_DMA
675 static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, 675 static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
676 size_t count, loff_t *ppos) 676 size_t count, loff_t *ppos)
677 { 677 {
678 unsigned long dst, start, end, len; 678 unsigned long dst, start, end, len;
679 unsigned int nr_pages; 679 unsigned int nr_pages;
680 struct page **pages; 680 struct page **pages;
681 int ret, i; 681 int ret, i;
682 682
683 nr_pages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT; 683 nr_pages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT;
684 684
685 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); 685 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
686 if (!pages) 686 if (!pages)
687 return -ENOMEM; 687 return -ENOMEM;
688 688
689 down_read(&current->mm->mmap_sem); 689 down_read(&current->mm->mmap_sem);
690 ret = get_user_pages(current, current->mm, (unsigned long)buf, 690 ret = get_user_pages(current, current->mm, (unsigned long)buf,
691 nr_pages, WRITE, 0, pages, NULL); 691 nr_pages, WRITE, 0, pages, NULL);
692 up_read(&current->mm->mmap_sem); 692 up_read(&current->mm->mmap_sem);
693 693
694 if (ret < nr_pages) { 694 if (ret < nr_pages) {
695 nr_pages = ret; 695 nr_pages = ret;
696 ret = -EINVAL; 696 ret = -EINVAL;
697 goto out_unmap; 697 goto out_unmap;
698 } 698 }
699 699
700 dma_configure_channel(shdma, 0x12c1); 700 dma_configure_channel(shdma, 0x12c1);
701 701
702 dst = (unsigned long)fb_info->screen_base + *ppos; 702 dst = (unsigned long)fb_info->screen_base + *ppos;
703 start = (unsigned long)page_address(pages[0]); 703 start = (unsigned long)page_address(pages[0]);
704 end = (unsigned long)page_address(pages[nr_pages]); 704 end = (unsigned long)page_address(pages[nr_pages]);
705 len = nr_pages << PAGE_SHIFT; 705 len = nr_pages << PAGE_SHIFT;
706 706
707 /* Half-assed contig check */ 707 /* Half-assed contig check */
708 if (start + len == end) { 708 if (start + len == end) {
709 /* As we do this in one shot, it's either all or nothing.. */ 709 /* As we do this in one shot, it's either all or nothing.. */
710 if ((*ppos + len) > fb_info->fix.smem_len) { 710 if ((*ppos + len) > fb_info->fix.smem_len) {
711 ret = -ENOSPC; 711 ret = -ENOSPC;
712 goto out_unmap; 712 goto out_unmap;
713 } 713 }
714 714
715 dma_write(shdma, start, 0, len); 715 dma_write(shdma, start, 0, len);
716 dma_write(pvr2dma, 0, dst, len); 716 dma_write(pvr2dma, 0, dst, len);
717 dma_wait_for_completion(pvr2dma); 717 dma_wait_for_completion(pvr2dma);
718 718
719 goto out; 719 goto out;
720 } 720 }
721 721
722 /* Not contiguous, writeout per-page instead.. */ 722 /* Not contiguous, writeout per-page instead.. */
723 for (i = 0; i < nr_pages; i++, dst += PAGE_SIZE) { 723 for (i = 0; i < nr_pages; i++, dst += PAGE_SIZE) {
724 if ((*ppos + (i << PAGE_SHIFT)) > fb_info->fix.smem_len) { 724 if ((*ppos + (i << PAGE_SHIFT)) > fb_info->fix.smem_len) {
725 ret = -ENOSPC; 725 ret = -ENOSPC;
726 goto out_unmap; 726 goto out_unmap;
727 } 727 }
728 728
729 dma_write_page(shdma, (unsigned long)page_address(pages[i]), 0); 729 dma_write_page(shdma, (unsigned long)page_address(pages[i]), 0);
730 dma_write_page(pvr2dma, 0, dst); 730 dma_write_page(pvr2dma, 0, dst);
731 dma_wait_for_completion(pvr2dma); 731 dma_wait_for_completion(pvr2dma);
732 } 732 }
733 733
734 out: 734 out:
735 *ppos += count; 735 *ppos += count;
736 ret = count; 736 ret = count;
737 737
738 out_unmap: 738 out_unmap:
739 for (i = 0; i < nr_pages; i++) 739 for (i = 0; i < nr_pages; i++)
740 page_cache_release(pages[i]); 740 page_cache_release(pages[i]);
741 741
742 kfree(pages); 742 kfree(pages);
743 743
744 return ret; 744 return ret;
745 } 745 }
746 #endif /* CONFIG_PVR2_DMA */ 746 #endif /* CONFIG_PVR2_DMA */
747 747
748 /** 748 /**
749 * pvr2fb_common_init 749 * pvr2fb_common_init
750 * 750 *
751 * Common init code for the PVR2 chips. 751 * Common init code for the PVR2 chips.
752 * 752 *
753 * This mostly takes care of the common aspects of the fb setup and 753 * This mostly takes care of the common aspects of the fb setup and
754 * registration. It's expected that the board-specific init code has 754 * registration. It's expected that the board-specific init code has
755 * already setup pvr2_fix with something meaningful at this point. 755 * already setup pvr2_fix with something meaningful at this point.
756 * 756 *
757 * Device info reporting is also done here, as well as picking a sane 757 * Device info reporting is also done here, as well as picking a sane
758 * default from the modedb. For board-specific modelines, simply define 758 * default from the modedb. For board-specific modelines, simply define
759 * a per-board modedb. 759 * a per-board modedb.
760 * 760 *
761 * Also worth noting is that the cable and video output types are likely 761 * Also worth noting is that the cable and video output types are likely
762 * always going to be VGA for the PCI-based PVR2 boards, but we leave this 762 * always going to be VGA for the PCI-based PVR2 boards, but we leave this
763 * in for flexibility anyways. Who knows, maybe someone has tv-out on a 763 * in for flexibility anyways. Who knows, maybe someone has tv-out on a
764 * PCI-based version of these things ;-) 764 * PCI-based version of these things ;-)
765 */ 765 */
static int __devinit pvr2fb_common_init(void)
{
	struct pvr2fb_par *par = currentpar;
	unsigned long modememused, rev;

	/* Map the framebuffer memory uncached; the board-specific init
	 * code has already filled in pvr2_fix.smem_start/smem_len. */
	fb_info->screen_base = ioremap_nocache(pvr2_fix.smem_start,
					       pvr2_fix.smem_len);

	if (!fb_info->screen_base) {
		printk(KERN_ERR "pvr2fb: Failed to remap smem space\n");
		goto out_err;
	}

	/* Map the display controller registers. */
	par->mmio_base = (unsigned long)ioremap_nocache(pvr2_fix.mmio_start,
							pvr2_fix.mmio_len);
	if (!par->mmio_base) {
		printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n");
		goto out_err;
	}

	/* Clear video memory so stale contents are never displayed. */
	fb_memset(fb_info->screen_base, 0, pvr2_fix.smem_len);

	pvr2_fix.ypanstep = nopan ? 0 : 1;
	pvr2_fix.ywrapstep = nowrap ? 0 : 1;

	fb_info->fbops = &pvr2fb_ops;
	fb_info->fix = pvr2_fix;
	fb_info->par = currentpar;
	fb_info->pseudo_palette = currentpar->palette;
	fb_info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;

	if (video_output == VO_VGA)
		defmode = DEFMODE_VGA;

	if (!mode_option)
		mode_option = "640x480@60";

	/* Fall back to the built-in default var if no mode matches. */
	if (!fb_find_mode(&fb_info->var, fb_info, mode_option, pvr2_modedb,
			  NUM_TOTAL_MODES, &pvr2_modedb[defmode], 16))
		fb_info->var = pvr2_var;

	fb_alloc_cmap(&fb_info->cmap, 256, 0);

	if (register_framebuffer(fb_info) < 0)
		goto out_err;
	/*Must write PIXDEPTH to register before anything is displayed - so force init */
	pvr2_init_display(fb_info);

	modememused = get_line_length(fb_info->var.xres_virtual,
				      fb_info->var.bits_per_pixel);
	modememused *= fb_info->var.yres_virtual;

	/* Chip revision register, reported below. */
	rev = fb_readl(par->mmio_base + 0x04);

	printk("fb%d: %s (rev %ld.%ld) frame buffer device, using %ldk/%ldk of video memory\n",
	       fb_info->node, fb_info->fix.id, (rev >> 4) & 0x0f, rev & 0x0f,
	       modememused >> 10, (unsigned long)(fb_info->fix.smem_len >> 10));
	printk("fb%d: Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
	       fb_info->node, fb_info->var.xres, fb_info->var.yres,
	       fb_info->var.bits_per_pixel,
	       get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
	       (char *)pvr2_get_param(cables, NULL, cable_type, 3),
	       (char *)pvr2_get_param(outputs, NULL, video_output, 3));

#ifdef CONFIG_SH_STORE_QUEUES
	printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node);

	/* Remap video memory through the store queues; PAGE_SHARED (a full
	 * pgprot_t) is passed so the extended protection bits that enable
	 * userspace access survive on SH-X2. */
	pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
			      fb_info->fix.id, PAGE_SHARED);

	printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n",
	       fb_info->node, pvr2fb_map);
#endif

	return 0;

out_err:
	/* par is zero-initialized by framebuffer_alloc(), so these checks
	 * are safe even when we bail before the second ioremap. */
	if (fb_info->screen_base)
		iounmap(fb_info->screen_base);
	if (par->mmio_base)
		iounmap((void *)par->mmio_base);

	return -ENXIO;
}
850 850
851 #ifdef CONFIG_SH_DREAMCAST 851 #ifdef CONFIG_SH_DREAMCAST
852 static int __init pvr2fb_dc_init(void) 852 static int __init pvr2fb_dc_init(void)
853 { 853 {
854 if (!mach_is_dreamcast()) 854 if (!mach_is_dreamcast())
855 return -ENXIO; 855 return -ENXIO;
856 856
857 /* Make a guess at the monitor based on the attached cable */ 857 /* Make a guess at the monitor based on the attached cable */
858 if (pvr2_init_cable() == CT_VGA) { 858 if (pvr2_init_cable() == CT_VGA) {
859 fb_info->monspecs.hfmin = 30000; 859 fb_info->monspecs.hfmin = 30000;
860 fb_info->monspecs.hfmax = 70000; 860 fb_info->monspecs.hfmax = 70000;
861 fb_info->monspecs.vfmin = 60; 861 fb_info->monspecs.vfmin = 60;
862 fb_info->monspecs.vfmax = 60; 862 fb_info->monspecs.vfmax = 60;
863 } else { 863 } else {
864 /* Not VGA, using a TV (taken from acornfb) */ 864 /* Not VGA, using a TV (taken from acornfb) */
865 fb_info->monspecs.hfmin = 15469; 865 fb_info->monspecs.hfmin = 15469;
866 fb_info->monspecs.hfmax = 15781; 866 fb_info->monspecs.hfmax = 15781;
867 fb_info->monspecs.vfmin = 49; 867 fb_info->monspecs.vfmin = 49;
868 fb_info->monspecs.vfmax = 51; 868 fb_info->monspecs.vfmax = 51;
869 } 869 }
870 870
871 /* 871 /*
872 * XXX: This needs to pull default video output via BIOS or other means 872 * XXX: This needs to pull default video output via BIOS or other means
873 */ 873 */
874 if (video_output < 0) { 874 if (video_output < 0) {
875 if (cable_type == CT_VGA) { 875 if (cable_type == CT_VGA) {
876 video_output = VO_VGA; 876 video_output = VO_VGA;
877 } else { 877 } else {
878 video_output = VO_NTSC; 878 video_output = VO_NTSC;
879 } 879 }
880 } 880 }
881 881
882 /* 882 /*
883 * Nothing exciting about the DC PVR2 .. only a measly 8MiB. 883 * Nothing exciting about the DC PVR2 .. only a measly 8MiB.
884 */ 884 */
885 pvr2_fix.smem_start = 0xa5000000; /* RAM starts here */ 885 pvr2_fix.smem_start = 0xa5000000; /* RAM starts here */
886 pvr2_fix.smem_len = 8 << 20; 886 pvr2_fix.smem_len = 8 << 20;
887 887
888 pvr2_fix.mmio_start = 0xa05f8000; /* registers start here */ 888 pvr2_fix.mmio_start = 0xa05f8000; /* registers start here */
889 pvr2_fix.mmio_len = 0x2000; 889 pvr2_fix.mmio_len = 0x2000;
890 890
891 if (request_irq(HW_EVENT_VSYNC, pvr2fb_interrupt, IRQF_SHARED, 891 if (request_irq(HW_EVENT_VSYNC, pvr2fb_interrupt, IRQF_SHARED,
892 "pvr2 VBL handler", fb_info)) { 892 "pvr2 VBL handler", fb_info)) {
893 return -EBUSY; 893 return -EBUSY;
894 } 894 }
895 895
896 #ifdef CONFIG_PVR2_DMA 896 #ifdef CONFIG_PVR2_DMA
897 if (request_dma(pvr2dma, "pvr2") != 0) { 897 if (request_dma(pvr2dma, "pvr2") != 0) {
898 free_irq(HW_EVENT_VSYNC, 0); 898 free_irq(HW_EVENT_VSYNC, 0);
899 return -EBUSY; 899 return -EBUSY;
900 } 900 }
901 #endif 901 #endif
902 902
903 return pvr2fb_common_init(); 903 return pvr2fb_common_init();
904 } 904 }
905 905
906 static void __exit pvr2fb_dc_exit(void) 906 static void __exit pvr2fb_dc_exit(void)
907 { 907 {
908 if (fb_info->screen_base) { 908 if (fb_info->screen_base) {
909 iounmap(fb_info->screen_base); 909 iounmap(fb_info->screen_base);
910 fb_info->screen_base = NULL; 910 fb_info->screen_base = NULL;
911 } 911 }
912 if (currentpar->mmio_base) { 912 if (currentpar->mmio_base) {
913 iounmap((void *)currentpar->mmio_base); 913 iounmap((void *)currentpar->mmio_base);
914 currentpar->mmio_base = 0; 914 currentpar->mmio_base = 0;
915 } 915 }
916 916
917 free_irq(HW_EVENT_VSYNC, 0); 917 free_irq(HW_EVENT_VSYNC, 0);
918 #ifdef CONFIG_PVR2_DMA 918 #ifdef CONFIG_PVR2_DMA
919 free_dma(pvr2dma); 919 free_dma(pvr2dma);
920 #endif 920 #endif
921 } 921 }
922 #endif /* CONFIG_SH_DREAMCAST */ 922 #endif /* CONFIG_SH_DREAMCAST */
923 923
924 #ifdef CONFIG_PCI 924 #ifdef CONFIG_PCI
925 static int __devinit pvr2fb_pci_probe(struct pci_dev *pdev, 925 static int __devinit pvr2fb_pci_probe(struct pci_dev *pdev,
926 const struct pci_device_id *ent) 926 const struct pci_device_id *ent)
927 { 927 {
928 int ret; 928 int ret;
929 929
930 ret = pci_enable_device(pdev); 930 ret = pci_enable_device(pdev);
931 if (ret) { 931 if (ret) {
932 printk(KERN_ERR "pvr2fb: PCI enable failed\n"); 932 printk(KERN_ERR "pvr2fb: PCI enable failed\n");
933 return ret; 933 return ret;
934 } 934 }
935 935
936 ret = pci_request_regions(pdev, "pvr2fb"); 936 ret = pci_request_regions(pdev, "pvr2fb");
937 if (ret) { 937 if (ret) {
938 printk(KERN_ERR "pvr2fb: PCI request regions failed\n"); 938 printk(KERN_ERR "pvr2fb: PCI request regions failed\n");
939 return ret; 939 return ret;
940 } 940 }
941 941
942 /* 942 /*
943 * Slightly more exciting than the DC PVR2 .. 16MiB! 943 * Slightly more exciting than the DC PVR2 .. 16MiB!
944 */ 944 */
945 pvr2_fix.smem_start = pci_resource_start(pdev, 0); 945 pvr2_fix.smem_start = pci_resource_start(pdev, 0);
946 pvr2_fix.smem_len = pci_resource_len(pdev, 0); 946 pvr2_fix.smem_len = pci_resource_len(pdev, 0);
947 947
948 pvr2_fix.mmio_start = pci_resource_start(pdev, 1); 948 pvr2_fix.mmio_start = pci_resource_start(pdev, 1);
949 pvr2_fix.mmio_len = pci_resource_len(pdev, 1); 949 pvr2_fix.mmio_len = pci_resource_len(pdev, 1);
950 950
951 fb_info->device = &pdev->dev; 951 fb_info->device = &pdev->dev;
952 952
953 return pvr2fb_common_init(); 953 return pvr2fb_common_init();
954 } 954 }
955 955
956 static void __devexit pvr2fb_pci_remove(struct pci_dev *pdev) 956 static void __devexit pvr2fb_pci_remove(struct pci_dev *pdev)
957 { 957 {
958 if (fb_info->screen_base) { 958 if (fb_info->screen_base) {
959 iounmap(fb_info->screen_base); 959 iounmap(fb_info->screen_base);
960 fb_info->screen_base = NULL; 960 fb_info->screen_base = NULL;
961 } 961 }
962 if (currentpar->mmio_base) { 962 if (currentpar->mmio_base) {
963 iounmap((void *)currentpar->mmio_base); 963 iounmap((void *)currentpar->mmio_base);
964 currentpar->mmio_base = 0; 964 currentpar->mmio_base = 0;
965 } 965 }
966 966
967 pci_release_regions(pdev); 967 pci_release_regions(pdev);
968 } 968 }
969 969
/* PCI IDs handled by this driver: the PVR2-based NEC Neon 250. */
static struct pci_device_id pvr2fb_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NEON250,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, },
};
975 975
976 MODULE_DEVICE_TABLE(pci, pvr2fb_pci_tbl); 976 MODULE_DEVICE_TABLE(pci, pvr2fb_pci_tbl);
977 977
/* PCI glue: probe/remove hooks for the PCI (Neon 250) variant. */
static struct pci_driver pvr2fb_pci_driver = {
	.name = "pvr2fb",
	.id_table = pvr2fb_pci_tbl,
	.probe = pvr2fb_pci_probe,
	.remove = __devexit_p(pvr2fb_pci_remove),
};
984 984
/* Register the PCI driver (board_driver init hook for the PCI PVR2). */
static int __init pvr2fb_pci_init(void)
{
	return pci_register_driver(&pvr2fb_pci_driver);
}
989 989
/* Unregister the PCI driver (board_driver exit hook for the PCI PVR2). */
static void __exit pvr2fb_pci_exit(void)
{
	pci_unregister_driver(&pvr2fb_pci_driver);
}
994 #endif /* CONFIG_PCI */ 994 #endif /* CONFIG_PCI */
995 995
/*
 * Look up an entry in a pvr2_params table.
 *
 * If @s is non-NULL, match by name (case-insensitive, prefix match of
 * strlen(s) characters) and return the entry's value.  Otherwise match
 * by @val and return the entry's name pointer cast to int.
 *
 * NOTE(review): returning a pointer through an int truncates on 64-bit
 * targets; harmless on 32-bit SH, but worth fixing (together with the
 * callers that cast the result back to char *) if this is ever reused.
 *
 * Returns -1 if no entry matches.
 */
static int __devinit pvr2_get_param(const struct pvr2_params *p, const char *s,
				    int val, int size)
{
	int i;

	for (i = 0 ; i < size ; i++ ) {
		if (s != NULL) {
			if (!strnicmp(p[i].name, s, strlen(s)))
				return p[i].val;
		} else {
			if (p[i].val == val)
				return (int)p[i].name;
		}
	}
	return -1;
}
1012 1012
1013 /* 1013 /*
1014 * Parse command arguments. Supported arguments are: 1014 * Parse command arguments. Supported arguments are:
1015 * inverse Use inverse color maps 1015 * inverse Use inverse color maps
1016 * cable:composite|rgb|vga Override the video cable type 1016 * cable:composite|rgb|vga Override the video cable type
1017 * output:NTSC|PAL|VGA Override the video output format 1017 * output:NTSC|PAL|VGA Override the video output format
1018 * 1018 *
1019 * <xres>x<yres>[-<bpp>][@<refresh>] or, 1019 * <xres>x<yres>[-<bpp>][@<refresh>] or,
1020 * <name>[-<bpp>][@<refresh>] Startup using this video mode 1020 * <name>[-<bpp>][@<refresh>] Startup using this video mode
1021 */ 1021 */
1022 1022
1023 #ifndef MODULE 1023 #ifndef MODULE
1024 static int __init pvr2fb_setup(char *options) 1024 static int __init pvr2fb_setup(char *options)
1025 { 1025 {
1026 char *this_opt; 1026 char *this_opt;
1027 char cable_arg[80]; 1027 char cable_arg[80];
1028 char output_arg[80]; 1028 char output_arg[80];
1029 1029
1030 if (!options || !*options) 1030 if (!options || !*options)
1031 return 0; 1031 return 0;
1032 1032
1033 while ((this_opt = strsep(&options, ","))) { 1033 while ((this_opt = strsep(&options, ","))) {
1034 if (!*this_opt) 1034 if (!*this_opt)
1035 continue; 1035 continue;
1036 if (!strcmp(this_opt, "inverse")) { 1036 if (!strcmp(this_opt, "inverse")) {
1037 fb_invert_cmaps(); 1037 fb_invert_cmaps();
1038 } else if (!strncmp(this_opt, "cable:", 6)) { 1038 } else if (!strncmp(this_opt, "cable:", 6)) {
1039 strcpy(cable_arg, this_opt + 6); 1039 strcpy(cable_arg, this_opt + 6);
1040 } else if (!strncmp(this_opt, "output:", 7)) { 1040 } else if (!strncmp(this_opt, "output:", 7)) {
1041 strcpy(output_arg, this_opt + 7); 1041 strcpy(output_arg, this_opt + 7);
1042 } else if (!strncmp(this_opt, "nopan", 5)) { 1042 } else if (!strncmp(this_opt, "nopan", 5)) {
1043 nopan = 1; 1043 nopan = 1;
1044 } else if (!strncmp(this_opt, "nowrap", 6)) { 1044 } else if (!strncmp(this_opt, "nowrap", 6)) {
1045 nowrap = 1; 1045 nowrap = 1;
1046 } else { 1046 } else {
1047 mode_option = this_opt; 1047 mode_option = this_opt;
1048 } 1048 }
1049 } 1049 }
1050 1050
1051 if (*cable_arg) 1051 if (*cable_arg)
1052 cable_type = pvr2_get_param(cables, cable_arg, 0, 3); 1052 cable_type = pvr2_get_param(cables, cable_arg, 0, 3);
1053 if (*output_arg) 1053 if (*output_arg)
1054 video_output = pvr2_get_param(outputs, output_arg, 0, 3); 1054 video_output = pvr2_get_param(outputs, output_arg, 0, 3);
1055 1055
1056 return 0; 1056 return 0;
1057 } 1057 }
1058 #endif 1058 #endif
1059 1059
/*
 * Table of supported PVR2 board back-ends. pvr2fb_init() walks this
 * table calling each ->init hook; pvr2fb_exit() calls each ->exit hook.
 * Entries are compiled in per platform; a zeroed sentinel ends the list.
 */
static struct pvr2_board {
	int (*init)(void);	/* probe/attach; returns 0 on success */
	void (*exit)(void);	/* teardown counterpart to ->init */
	char name[16];		/* human-readable board name for logging */
} board_driver[] = {
#ifdef CONFIG_SH_DREAMCAST
	{ pvr2fb_dc_init, pvr2fb_dc_exit, "Sega DC PVR2" },
#endif
#ifdef CONFIG_PCI
	{ pvr2fb_pci_init, pvr2fb_pci_exit, "PCI PVR2" },
#endif
	{ 0, },
};
1073 1073
1074 static int __init pvr2fb_init(void) 1074 static int __init pvr2fb_init(void)
1075 { 1075 {
1076 int i, ret = -ENODEV; 1076 int i, ret = -ENODEV;
1077 int size; 1077 int size;
1078 1078
1079 #ifndef MODULE 1079 #ifndef MODULE
1080 char *option = NULL; 1080 char *option = NULL;
1081 1081
1082 if (fb_get_options("pvr2fb", &option)) 1082 if (fb_get_options("pvr2fb", &option))
1083 return -ENODEV; 1083 return -ENODEV;
1084 pvr2fb_setup(option); 1084 pvr2fb_setup(option);
1085 #endif 1085 #endif
1086 size = sizeof(struct fb_info) + sizeof(struct pvr2fb_par) + 16 * sizeof(u32); 1086 size = sizeof(struct fb_info) + sizeof(struct pvr2fb_par) + 16 * sizeof(u32);
1087 1087
1088 fb_info = framebuffer_alloc(sizeof(struct pvr2fb_par), NULL); 1088 fb_info = framebuffer_alloc(sizeof(struct pvr2fb_par), NULL);
1089 1089
1090 if (!fb_info) { 1090 if (!fb_info) {
1091 printk(KERN_ERR "Failed to allocate memory for fb_info\n"); 1091 printk(KERN_ERR "Failed to allocate memory for fb_info\n");
1092 return -ENOMEM; 1092 return -ENOMEM;
1093 } 1093 }
1094 1094
1095 1095
1096 currentpar = fb_info->par; 1096 currentpar = fb_info->par;
1097 1097
1098 for (i = 0; i < ARRAY_SIZE(board_driver); i++) { 1098 for (i = 0; i < ARRAY_SIZE(board_driver); i++) {
1099 struct pvr2_board *pvr_board = board_driver + i; 1099 struct pvr2_board *pvr_board = board_driver + i;
1100 1100
1101 if (!pvr_board->init) 1101 if (!pvr_board->init)
1102 continue; 1102 continue;
1103 1103
1104 ret = pvr_board->init(); 1104 ret = pvr_board->init();
1105 1105
1106 if (ret != 0) { 1106 if (ret != 0) {
1107 printk(KERN_ERR "pvr2fb: Failed init of %s device\n", 1107 printk(KERN_ERR "pvr2fb: Failed init of %s device\n",
1108 pvr_board->name); 1108 pvr_board->name);
1109 framebuffer_release(fb_info); 1109 framebuffer_release(fb_info);
1110 break; 1110 break;
1111 } 1111 }
1112 } 1112 }
1113 1113
1114 return ret; 1114 return ret;
1115 } 1115 }
1116 1116
1117 static void __exit pvr2fb_exit(void) 1117 static void __exit pvr2fb_exit(void)
1118 { 1118 {
1119 int i; 1119 int i;
1120 1120
1121 for (i = 0; i < ARRAY_SIZE(board_driver); i++) { 1121 for (i = 0; i < ARRAY_SIZE(board_driver); i++) {
1122 struct pvr2_board *pvr_board = board_driver + i; 1122 struct pvr2_board *pvr_board = board_driver + i;
1123 1123
1124 if (pvr_board->exit) 1124 if (pvr_board->exit)
1125 pvr_board->exit(); 1125 pvr_board->exit();
1126 } 1126 }
1127 1127
1128 #ifdef CONFIG_SH_STORE_QUEUES 1128 #ifdef CONFIG_SH_STORE_QUEUES
1129 sq_unmap(pvr2fb_map); 1129 sq_unmap(pvr2fb_map);
1130 #endif 1130 #endif
1131 1131
1132 unregister_framebuffer(fb_info); 1132 unregister_framebuffer(fb_info);
1133 framebuffer_release(fb_info); 1133 framebuffer_release(fb_info);
1134 } 1134 }
1135 1135
/* Register the module entry/exit points and module metadata. */
module_init(pvr2fb_init);
module_exit(pvr2fb_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Framebuffer driver for NEC PowerVR 2 based graphics boards");
MODULE_LICENSE("GPL");
1142 1142