Commit bc5f4523f772cc7629c5c5a46cf4f2a07a5500b8

Authored by Dave Airlie
Committed by Dave Airlie
1 parent 8562b3f25d

drm: run cleanfile across drm tree

Signed-off-by: Dave Airlie <airlied@linux.ie>

Showing 36 changed files with 362 additions and 375 deletions
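
Note: every hunk below is whitespace-only cleanup (trailing spaces and tabs removed), as produced by the kernel's scripts/cleanfile helper. The actual command line is not recorded in the commit; a hypothetical invocation over the DRM directory would look something like:

    find drivers/char/drm -type f \( -name '*.[ch]' -o -name Kconfig \) \
        -exec ./scripts/cleanfile {} \;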

drivers/char/drm/Kconfig
... ... @@ -38,7 +38,7 @@
38 38 Choose this option if you have an ATI Radeon graphics card. There
39 39 are both PCI and AGP versions. You don't need to choose this to
40 40 run the Radeon in plain VGA mode.
41   -
  41 +
42 42 If M is selected, the module will be called radeon.
43 43  
44 44 config DRM_I810
45 45  
... ... @@ -71,9 +71,9 @@
71 71 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
72 72 module will be called i915. AGP support is required for this driver
73 73 to work. This driver is used by the Intel driver in X.org 6.8 and
74   - XFree86 4.4 and above. If unsure, build this and i830 as modules and
  74 + XFree86 4.4 and above. If unsure, build this and i830 as modules and
75 75 the X server will load the correct one.
76   -
  76 +
77 77 endchoice
78 78  
79 79 config DRM_MGA
... ... @@ -88,7 +88,7 @@
88 88 tristate "SiS video cards"
89 89 depends on DRM && AGP
90 90 help
91   - Choose this option if you have a SiS 630 or compatible video
  91 + Choose this option if you have a SiS 630 or compatible video
92 92 chipset. If M is selected the module will be called sis. AGP
93 93 support is required for this driver to work.
94 94  
drivers/char/drm/drm_bufs.c
... ... @@ -184,7 +184,7 @@
184 184 return -ENOMEM;
185 185 }
186 186 }
187   -
  187 +
188 188 break;
189 189 case _DRM_SHM:
190 190 list = drm_find_matching_map(dev, map);
191 191  
... ... @@ -814,9 +814,9 @@
814 814 page_count = 0;
815 815  
816 816 while (entry->buf_count < count) {
817   -
  817 +
818 818 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
819   -
  819 +
820 820 if (!dmah) {
821 821 /* Set count correctly so we free the proper amount. */
822 822 entry->buf_count = count;
drivers/char/drm/drm_context.c
... ... @@ -159,7 +159,7 @@
159 159 request->handle = NULL;
160 160 list_for_each_entry(_entry, &dev->maplist, head) {
161 161 if (_entry->map == map) {
162   - request->handle =
  162 + request->handle =
163 163 (void *)(unsigned long)_entry->user_token;
164 164 break;
165 165 }
drivers/char/drm/drm_hashtab.c
... ... @@ -80,7 +80,7 @@
80 80 }
81 81 }
82 82  
83   -static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
  83 +static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
84 84 unsigned long key)
85 85 {
86 86 struct drm_hash_item *entry;
... ... @@ -129,7 +129,7 @@
129 129 }
130 130  
131 131 /*
132   - * Just insert an item and return any "bits" bit key that hasn't been
  132 + * Just insert an item and return any "bits" bit key that hasn't been
133 133 * used before.
134 134 */
135 135 int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
drivers/char/drm/drm_ioctl.c
... ... @@ -234,7 +234,7 @@
234 234  
235 235 idx = client->idx;
236 236 mutex_lock(&dev->struct_mutex);
237   -
  237 +
238 238 if (list_empty(&dev->filelist)) {
239 239 mutex_unlock(&dev->struct_mutex);
240 240 return -EINVAL;
drivers/char/drm/drm_os_linux.h
... ... @@ -69,9 +69,9 @@
69 69 #define DRM_COPY_TO_USER(arg1, arg2, arg3) \
70 70 copy_to_user(arg1, arg2, arg3)
71 71 /* Macros for copyfrom user, but checking readability only once */
72   -#define DRM_VERIFYAREA_READ( uaddr, size ) \
  72 +#define DRM_VERIFYAREA_READ( uaddr, size ) \
73 73 (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
74   -#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
  74 +#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
75 75 __copy_from_user(arg1, arg2, arg3)
76 76 #define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
77 77 __copy_to_user(arg1, arg2, arg3)
drivers/char/drm/drm_sarea.h
... ... @@ -45,7 +45,7 @@
45 45 #endif
46 46  
47 47 /** Maximum number of drawables in the SAREA */
48   -#define SAREA_MAX_DRAWABLES 256
  48 +#define SAREA_MAX_DRAWABLES 256
49 49  
50 50 #define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
51 51  
drivers/char/drm/drm_stub.c
... ... @@ -224,7 +224,7 @@
224 224 }
225 225 if ((ret = drm_get_head(dev, &dev->primary)))
226 226 goto err_g2;
227   -
  227 +
228 228 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
229 229 driver->name, driver->major, driver->minor, driver->patchlevel,
230 230 driver->date, dev->primary.minor);
drivers/char/drm/i810_dma.c
... ... @@ -40,7 +40,7 @@
40 40  
41 41 #define I810_BUF_FREE 2
42 42 #define I810_BUF_CLIENT 1
43   -#define I810_BUF_HARDWARE 0
  43 +#define I810_BUF_HARDWARE 0
44 44  
45 45 #define I810_BUF_UNMAPPED 0
46 46 #define I810_BUF_MAPPED 1
... ... @@ -848,7 +848,7 @@
848 848 drm_i810_private_t *dev_priv = dev->dev_private;
849 849 RING_LOCALS;
850 850  
851   -/* printk("%s\n", __FUNCTION__); */
  851 +/* printk("%s\n", __FUNCTION__); */
852 852  
853 853 i810_kernel_lost_context(dev);
854 854  
... ... @@ -869,7 +869,7 @@
869 869 int i, ret = 0;
870 870 RING_LOCALS;
871 871  
872   -/* printk("%s\n", __FUNCTION__); */
  872 +/* printk("%s\n", __FUNCTION__); */
873 873  
874 874 i810_kernel_lost_context(dev);
875 875  
drivers/char/drm/i810_drv.h
... ... @@ -25,7 +25,7 @@
25 25 * DEALINGS IN THE SOFTWARE.
26 26 *
27 27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28   - * Jeff Hartmann <jhartmann@valinux.com>
  28 + * Jeff Hartmann <jhartmann@valinux.com>
29 29 *
30 30 */
31 31  
... ... @@ -134,7 +134,7 @@
134 134 #define I810_ADDR(reg) (I810_BASE(reg) + reg)
135 135 #define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
136 136 #define I810_READ(reg) I810_DEREF(reg)
137   -#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
  137 +#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
138 138 #define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
139 139 #define I810_READ16(reg) I810_DEREF16(reg)
140 140 #define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
... ... @@ -155,19 +155,19 @@
155 155 } while (0)
156 156  
157 157 #define ADVANCE_LP_RING() do { \
158   - if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
159   - dev_priv->ring.tail = outring; \
  158 + if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
  159 + dev_priv->ring.tail = outring; \
160 160 I810_WRITE(LP_RING + RING_TAIL, outring); \
161 161 } while(0)
162 162  
163   -#define OUT_RING(n) do { \
  163 +#define OUT_RING(n) do { \
164 164 if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
165 165 *(volatile unsigned int *)(virt + outring) = n; \
166 166 outring += 4; \
167 167 outring &= ringmask; \
168 168 } while (0)
169 169  
170   -#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
  170 +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
171 171 #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
172 172 #define CMD_REPORT_HEAD (7<<23)
173 173 #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
... ... @@ -184,28 +184,28 @@
184 184  
185 185 #define I810REG_HWSTAM 0x02098
186 186 #define I810REG_INT_IDENTITY_R 0x020a4
187   -#define I810REG_INT_MASK_R 0x020a8
  187 +#define I810REG_INT_MASK_R 0x020a8
188 188 #define I810REG_INT_ENABLE_R 0x020a0
189 189  
190   -#define LP_RING 0x2030
191   -#define HP_RING 0x2040
192   -#define RING_TAIL 0x00
  190 +#define LP_RING 0x2030
  191 +#define HP_RING 0x2040
  192 +#define RING_TAIL 0x00
193 193 #define TAIL_ADDR 0x000FFFF8
194   -#define RING_HEAD 0x04
195   -#define HEAD_WRAP_COUNT 0xFFE00000
196   -#define HEAD_WRAP_ONE 0x00200000
197   -#define HEAD_ADDR 0x001FFFFC
198   -#define RING_START 0x08
199   -#define START_ADDR 0x00FFFFF8
200   -#define RING_LEN 0x0C
201   -#define RING_NR_PAGES 0x000FF000
202   -#define RING_REPORT_MASK 0x00000006
203   -#define RING_REPORT_64K 0x00000002
204   -#define RING_REPORT_128K 0x00000004
205   -#define RING_NO_REPORT 0x00000000
206   -#define RING_VALID_MASK 0x00000001
207   -#define RING_VALID 0x00000001
208   -#define RING_INVALID 0x00000000
  194 +#define RING_HEAD 0x04
  195 +#define HEAD_WRAP_COUNT 0xFFE00000
  196 +#define HEAD_WRAP_ONE 0x00200000
  197 +#define HEAD_ADDR 0x001FFFFC
  198 +#define RING_START 0x08
  199 +#define START_ADDR 0x00FFFFF8
  200 +#define RING_LEN 0x0C
  201 +#define RING_NR_PAGES 0x000FF000
  202 +#define RING_REPORT_MASK 0x00000006
  203 +#define RING_REPORT_64K 0x00000002
  204 +#define RING_REPORT_128K 0x00000004
  205 +#define RING_NO_REPORT 0x00000000
  206 +#define RING_VALID_MASK 0x00000001
  207 +#define RING_VALID 0x00000001
  208 +#define RING_INVALID 0x00000000
209 209  
210 210 #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
211 211 #define SC_UPDATE_SCISSOR (0x1<<1)
drivers/char/drm/i830_dma.c
... ... @@ -42,7 +42,7 @@
42 42  
43 43 #define I830_BUF_FREE 2
44 44 #define I830_BUF_CLIENT 1
45   -#define I830_BUF_HARDWARE 0
  45 +#define I830_BUF_HARDWARE 0
46 46  
47 47 #define I830_BUF_UNMAPPED 0
48 48 #define I830_BUF_MAPPED 1
drivers/char/drm/i830_drm.h
... ... @@ -12,9 +12,9 @@
12 12 #define _I830_DEFINES_
13 13  
14 14 #define I830_DMA_BUF_ORDER 12
15   -#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
16   -#define I830_DMA_BUF_NR 256
17   -#define I830_NR_SAREA_CLIPRECTS 8
  15 +#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
  16 +#define I830_DMA_BUF_NR 256
  17 +#define I830_NR_SAREA_CLIPRECTS 8
18 18  
19 19 /* Each region is a minimum of 64k, and there are at most 64 of them.
20 20 */
... ... @@ -58,7 +58,7 @@
58 58 #define I830_UPLOAD_TEXBLEND_MASK 0xf00000
59 59 #define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
60 60 #define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
61   -#define I830_UPLOAD_STIPPLE 0x8000000
  61 +#define I830_UPLOAD_STIPPLE 0x8000000
62 62  
63 63 /* Indices into buf.Setup where various bits of state are mirrored per
64 64 * context and per buffer. These can be fired at the card as a unit,
drivers/char/drm/i830_drv.h
... ... @@ -25,7 +25,7 @@
25 25 * DEALINGS IN THE SOFTWARE.
26 26 *
27 27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28   - * Jeff Hartmann <jhartmann@valinux.com>
  28 + * Jeff Hartmann <jhartmann@valinux.com>
29 29 *
30 30 */
31 31  
... ... @@ -183,7 +183,7 @@
183 183  
184 184 extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
185 185  
186   -#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
  186 +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
187 187 #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
188 188 #define CMD_REPORT_HEAD (7<<23)
189 189 #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
... ... @@ -203,30 +203,30 @@
203 203  
204 204 #define I830REG_HWSTAM 0x02098
205 205 #define I830REG_INT_IDENTITY_R 0x020a4
206   -#define I830REG_INT_MASK_R 0x020a8
  206 +#define I830REG_INT_MASK_R 0x020a8
207 207 #define I830REG_INT_ENABLE_R 0x020a0
208 208  
209 209 #define I830_IRQ_RESERVED ((1<<13)|(3<<2))
210 210  
211   -#define LP_RING 0x2030
212   -#define HP_RING 0x2040
213   -#define RING_TAIL 0x00
  211 +#define LP_RING 0x2030
  212 +#define HP_RING 0x2040
  213 +#define RING_TAIL 0x00
214 214 #define TAIL_ADDR 0x001FFFF8
215   -#define RING_HEAD 0x04
216   -#define HEAD_WRAP_COUNT 0xFFE00000
217   -#define HEAD_WRAP_ONE 0x00200000
218   -#define HEAD_ADDR 0x001FFFFC
219   -#define RING_START 0x08
220   -#define START_ADDR 0x0xFFFFF000
221   -#define RING_LEN 0x0C
222   -#define RING_NR_PAGES 0x001FF000
223   -#define RING_REPORT_MASK 0x00000006
224   -#define RING_REPORT_64K 0x00000002
225   -#define RING_REPORT_128K 0x00000004
226   -#define RING_NO_REPORT 0x00000000
227   -#define RING_VALID_MASK 0x00000001
228   -#define RING_VALID 0x00000001
229   -#define RING_INVALID 0x00000000
  215 +#define RING_HEAD 0x04
  216 +#define HEAD_WRAP_COUNT 0xFFE00000
  217 +#define HEAD_WRAP_ONE 0x00200000
  218 +#define HEAD_ADDR 0x001FFFFC
  219 +#define RING_START 0x08
  220 +#define START_ADDR 0x0xFFFFF000
  221 +#define RING_LEN 0x0C
  222 +#define RING_NR_PAGES 0x001FF000
  223 +#define RING_REPORT_MASK 0x00000006
  224 +#define RING_REPORT_64K 0x00000002
  225 +#define RING_REPORT_128K 0x00000004
  226 +#define RING_NO_REPORT 0x00000000
  227 +#define RING_VALID_MASK 0x00000001
  228 +#define RING_VALID 0x00000001
  229 +#define RING_INVALID 0x00000000
230 230  
231 231 #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
232 232 #define SC_UPDATE_SCISSOR (0x1<<1)
... ... @@ -279,9 +279,9 @@
279 279 #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
280 280 #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
281 281  
282   -#define MI_BATCH_BUFFER ((0x30<<23)|1)
283   -#define MI_BATCH_BUFFER_START (0x31<<23)
284   -#define MI_BATCH_BUFFER_END (0xA<<23)
  282 +#define MI_BATCH_BUFFER ((0x30<<23)|1)
  283 +#define MI_BATCH_BUFFER_START (0x31<<23)
  284 +#define MI_BATCH_BUFFER_END (0xA<<23)
285 285 #define MI_BATCH_NON_SECURE (1)
286 286  
287 287 #define MI_WAIT_FOR_EVENT ((0x3<<23))
drivers/char/drm/i830_irq.c
... ... @@ -144,7 +144,7 @@
144 144 struct drm_file *file_priv)
145 145 {
146 146 drm_i830_private_t *dev_priv = dev->dev_private;
147   - drm_i830_irq_wait_t *irqwait = data;
  147 + drm_i830_irq_wait_t *irqwait = data;
148 148  
149 149 if (!dev_priv) {
150 150 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
drivers/char/drm/i915_dma.c
... ... @@ -351,7 +351,7 @@
351 351 {
352 352 int ret = do_validate_cmd(cmd);
353 353  
354   -/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
  354 +/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
355 355  
356 356 return ret;
357 357 }
drivers/char/drm/i915_drv.c
... ... @@ -77,7 +77,7 @@
77 77 .name = DRIVER_NAME,
78 78 .id_table = pciidlist,
79 79 },
80   -
  80 +
81 81 .name = DRIVER_NAME,
82 82 .desc = DRIVER_DESC,
83 83 .date = DRIVER_DATE,
drivers/char/drm/i915_drv.h
... ... @@ -163,7 +163,7 @@
163 163  
164 164 #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
165 165 #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
166   -#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
  166 +#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
167 167 #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
168 168  
169 169 #define I915_VERBOSE 0
... ... @@ -200,7 +200,7 @@
200 200  
201 201 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
202 202  
203   -#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
  203 +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
204 204 #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
205 205 #define CMD_REPORT_HEAD (7<<23)
206 206 #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
... ... @@ -217,7 +217,7 @@
217 217  
218 218 #define I915REG_HWSTAM 0x02098
219 219 #define I915REG_INT_IDENTITY_R 0x020a4
220   -#define I915REG_INT_MASK_R 0x020a8
  220 +#define I915REG_INT_MASK_R 0x020a8
221 221 #define I915REG_INT_ENABLE_R 0x020a0
222 222  
223 223 #define I915REG_PIPEASTAT 0x70024
... ... @@ -229,7 +229,7 @@
229 229 #define SRX_INDEX 0x3c4
230 230 #define SRX_DATA 0x3c5
231 231 #define SR01 1
232   -#define SR01_SCREEN_OFF (1<<5)
  232 +#define SR01_SCREEN_OFF (1<<5)
233 233  
234 234 #define PPCR 0x61204
235 235 #define PPCR_ON (1<<0)
236 236  
... ... @@ -249,25 +249,25 @@
249 249 #define ADPA_DPMS_OFF (3<<10)
250 250  
251 251 #define NOPID 0x2094
252   -#define LP_RING 0x2030
253   -#define HP_RING 0x2040
254   -#define RING_TAIL 0x00
  252 +#define LP_RING 0x2030
  253 +#define HP_RING 0x2040
  254 +#define RING_TAIL 0x00
255 255 #define TAIL_ADDR 0x001FFFF8
256   -#define RING_HEAD 0x04
257   -#define HEAD_WRAP_COUNT 0xFFE00000
258   -#define HEAD_WRAP_ONE 0x00200000
259   -#define HEAD_ADDR 0x001FFFFC
260   -#define RING_START 0x08
261   -#define START_ADDR 0x0xFFFFF000
262   -#define RING_LEN 0x0C
263   -#define RING_NR_PAGES 0x001FF000
264   -#define RING_REPORT_MASK 0x00000006
265   -#define RING_REPORT_64K 0x00000002
266   -#define RING_REPORT_128K 0x00000004
267   -#define RING_NO_REPORT 0x00000000
268   -#define RING_VALID_MASK 0x00000001
269   -#define RING_VALID 0x00000001
270   -#define RING_INVALID 0x00000000
  256 +#define RING_HEAD 0x04
  257 +#define HEAD_WRAP_COUNT 0xFFE00000
  258 +#define HEAD_WRAP_ONE 0x00200000
  259 +#define HEAD_ADDR 0x001FFFFC
  260 +#define RING_START 0x08
  261 +#define START_ADDR 0x0xFFFFF000
  262 +#define RING_LEN 0x0C
  263 +#define RING_NR_PAGES 0x001FF000
  264 +#define RING_REPORT_MASK 0x00000006
  265 +#define RING_REPORT_64K 0x00000002
  266 +#define RING_REPORT_128K 0x00000004
  267 +#define RING_NO_REPORT 0x00000000
  268 +#define RING_VALID_MASK 0x00000001
  269 +#define RING_VALID 0x00000001
  270 +#define RING_INVALID 0x00000000
271 271  
272 272 #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
273 273 #define SC_UPDATE_SCISSOR (0x1<<1)
... ... @@ -294,9 +294,9 @@
294 294 #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
295 295 #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
296 296  
297   -#define MI_BATCH_BUFFER ((0x30<<23)|1)
298   -#define MI_BATCH_BUFFER_START (0x31<<23)
299   -#define MI_BATCH_BUFFER_END (0xA<<23)
  297 +#define MI_BATCH_BUFFER ((0x30<<23)|1)
  298 +#define MI_BATCH_BUFFER_START (0x31<<23)
  299 +#define MI_BATCH_BUFFER_END (0xA<<23)
300 300 #define MI_BATCH_NON_SECURE (1)
301 301 #define MI_BATCH_NON_SECURE_I965 (1<<8)
302 302  
drivers/char/drm/i915_irq.c
... ... @@ -291,7 +291,7 @@
291 291 OUT_RING(0);
292 292 OUT_RING(GFX_OP_USER_INTERRUPT);
293 293 ADVANCE_LP_RING();
294   -
  294 +
295 295 return dev_priv->counter;
296 296 }
297 297  
... ... @@ -336,7 +336,7 @@
336 336 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
337 337 (((cur_vblank = atomic_read(counter))
338 338 - *sequence) <= (1<<23)));
339   -
  339 +
340 340 *sequence = cur_vblank;
341 341  
342 342 return ret;
... ... @@ -423,7 +423,7 @@
423 423 }
424 424  
425 425 if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
426   - DRM_ERROR("%s called with invalid pipe 0x%x\n",
  426 + DRM_ERROR("%s called with invalid pipe 0x%x\n",
427 427 __FUNCTION__, pipe->pipe);
428 428 return -EINVAL;
429 429 }
drivers/char/drm/i915_mem.c
... ... @@ -375,7 +375,7 @@
375 375 DRM_ERROR("get_heap failed");
376 376 return -EFAULT;
377 377 }
378   -
  378 +
379 379 if (!*heap) {
380 380 DRM_ERROR("heap not initialized?");
381 381 return -EFAULT;
drivers/char/drm/mga_dma.c
... ... @@ -493,7 +493,7 @@
493 493 dma_bs->agp_size);
494 494 return err;
495 495 }
496   -
  496 +
497 497 dev_priv->agp_size = agp_size;
498 498 dev_priv->agp_handle = agp_req.handle;
499 499  
... ... @@ -550,7 +550,7 @@
550 550 {
551 551 struct drm_map_list *_entry;
552 552 unsigned long agp_token = 0;
553   -
  553 +
554 554 list_for_each_entry(_entry, &dev->maplist, head) {
555 555 if (_entry->map == dev->agp_buffer_map)
556 556 agp_token = _entry->user_token;
... ... @@ -964,7 +964,7 @@
964 964  
965 965 free_req.handle = dev_priv->agp_handle;
966 966 drm_agp_free(dev, &free_req);
967   -
  967 +
968 968 dev_priv->agp_textures = NULL;
969 969 dev_priv->agp_size = 0;
970 970 dev_priv->agp_handle = 0;
drivers/char/drm/mga_drv.h
... ... @@ -216,8 +216,8 @@
216 216 #define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
217 217 #endif
218 218  
219   -#define DWGREG0 0x1c00
220   -#define DWGREG0_END 0x1dff
  219 +#define DWGREG0 0x1c00
  220 +#define DWGREG0_END 0x1dff
221 221 #define DWGREG1 0x2c00
222 222 #define DWGREG1_END 0x2dff
223 223  
... ... @@ -394,22 +394,22 @@
394 394 #define MGA_VINTCLR (1 << 4)
395 395 #define MGA_VINTEN (1 << 5)
396 396  
397   -#define MGA_ALPHACTRL 0x2c7c
398   -#define MGA_AR0 0x1c60
399   -#define MGA_AR1 0x1c64
400   -#define MGA_AR2 0x1c68
401   -#define MGA_AR3 0x1c6c
402   -#define MGA_AR4 0x1c70
403   -#define MGA_AR5 0x1c74
404   -#define MGA_AR6 0x1c78
  397 +#define MGA_ALPHACTRL 0x2c7c
  398 +#define MGA_AR0 0x1c60
  399 +#define MGA_AR1 0x1c64
  400 +#define MGA_AR2 0x1c68
  401 +#define MGA_AR3 0x1c6c
  402 +#define MGA_AR4 0x1c70
  403 +#define MGA_AR5 0x1c74
  404 +#define MGA_AR6 0x1c78
405 405  
406 406 #define MGA_CXBNDRY 0x1c80
407   -#define MGA_CXLEFT 0x1ca0
  407 +#define MGA_CXLEFT 0x1ca0
408 408 #define MGA_CXRIGHT 0x1ca4
409 409  
410   -#define MGA_DMAPAD 0x1c54
411   -#define MGA_DSTORG 0x2cb8
412   -#define MGA_DWGCTL 0x1c00
  410 +#define MGA_DMAPAD 0x1c54
  411 +#define MGA_DSTORG 0x2cb8
  412 +#define MGA_DWGCTL 0x1c00
413 413 # define MGA_OPCOD_MASK (15 << 0)
414 414 # define MGA_OPCOD_TRAP (4 << 0)
415 415 # define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
... ... @@ -455,27 +455,27 @@
455 455 # define MGA_CLIPDIS (1 << 31)
456 456 #define MGA_DWGSYNC 0x2c4c
457 457  
458   -#define MGA_FCOL 0x1c24
459   -#define MGA_FIFOSTATUS 0x1e10
460   -#define MGA_FOGCOL 0x1cf4
  458 +#define MGA_FCOL 0x1c24
  459 +#define MGA_FIFOSTATUS 0x1e10
  460 +#define MGA_FOGCOL 0x1cf4
461 461 #define MGA_FXBNDRY 0x1c84
462   -#define MGA_FXLEFT 0x1ca8
  462 +#define MGA_FXLEFT 0x1ca8
463 463 #define MGA_FXRIGHT 0x1cac
464 464  
465   -#define MGA_ICLEAR 0x1e18
  465 +#define MGA_ICLEAR 0x1e18
466 466 # define MGA_SOFTRAPICLR (1 << 0)
467 467 # define MGA_VLINEICLR (1 << 5)
468   -#define MGA_IEN 0x1e1c
  468 +#define MGA_IEN 0x1e1c
469 469 # define MGA_SOFTRAPIEN (1 << 0)
470 470 # define MGA_VLINEIEN (1 << 5)
471 471  
472   -#define MGA_LEN 0x1c5c
  472 +#define MGA_LEN 0x1c5c
473 473  
474 474 #define MGA_MACCESS 0x1c04
475 475  
476   -#define MGA_PITCH 0x1c8c
477   -#define MGA_PLNWT 0x1c1c
478   -#define MGA_PRIMADDRESS 0x1e58
  476 +#define MGA_PITCH 0x1c8c
  477 +#define MGA_PLNWT 0x1c1c
  478 +#define MGA_PRIMADDRESS 0x1e58
479 479 # define MGA_DMA_GENERAL (0 << 0)
480 480 # define MGA_DMA_BLIT (1 << 0)
481 481 # define MGA_DMA_VECTOR (2 << 0)
... ... @@ -487,43 +487,43 @@
487 487 # define MGA_PRIMPTREN0 (1 << 0)
488 488 # define MGA_PRIMPTREN1 (1 << 1)
489 489  
490   -#define MGA_RST 0x1e40
  490 +#define MGA_RST 0x1e40
491 491 # define MGA_SOFTRESET (1 << 0)
492 492 # define MGA_SOFTEXTRST (1 << 1)
493 493  
494   -#define MGA_SECADDRESS 0x2c40
495   -#define MGA_SECEND 0x2c44
496   -#define MGA_SETUPADDRESS 0x2cd0
497   -#define MGA_SETUPEND 0x2cd4
  494 +#define MGA_SECADDRESS 0x2c40
  495 +#define MGA_SECEND 0x2c44
  496 +#define MGA_SETUPADDRESS 0x2cd0
  497 +#define MGA_SETUPEND 0x2cd4
498 498 #define MGA_SGN 0x1c58
499 499 #define MGA_SOFTRAP 0x2c48
500   -#define MGA_SRCORG 0x2cb4
  500 +#define MGA_SRCORG 0x2cb4
501 501 # define MGA_SRMMAP_MASK (1 << 0)
502 502 # define MGA_SRCMAP_FB (0 << 0)
503 503 # define MGA_SRCMAP_SYSMEM (1 << 0)
504 504 # define MGA_SRCACC_MASK (1 << 1)
505 505 # define MGA_SRCACC_PCI (0 << 1)
506 506 # define MGA_SRCACC_AGP (1 << 1)
507   -#define MGA_STATUS 0x1e14
  507 +#define MGA_STATUS 0x1e14
508 508 # define MGA_SOFTRAPEN (1 << 0)
509 509 # define MGA_VSYNCPEN (1 << 4)
510 510 # define MGA_VLINEPEN (1 << 5)
511 511 # define MGA_DWGENGSTS (1 << 16)
512 512 # define MGA_ENDPRDMASTS (1 << 17)
513 513 #define MGA_STENCIL 0x2cc8
514   -#define MGA_STENCILCTL 0x2ccc
  514 +#define MGA_STENCILCTL 0x2ccc
515 515  
516   -#define MGA_TDUALSTAGE0 0x2cf8
517   -#define MGA_TDUALSTAGE1 0x2cfc
518   -#define MGA_TEXBORDERCOL 0x2c5c
519   -#define MGA_TEXCTL 0x2c30
  516 +#define MGA_TDUALSTAGE0 0x2cf8
  517 +#define MGA_TDUALSTAGE1 0x2cfc
  518 +#define MGA_TEXBORDERCOL 0x2c5c
  519 +#define MGA_TEXCTL 0x2c30
520 520 #define MGA_TEXCTL2 0x2c3c
521 521 # define MGA_DUALTEX (1 << 7)
522 522 # define MGA_G400_TC2_MAGIC (1 << 15)
523 523 # define MGA_MAP1_ENABLE (1 << 31)
524   -#define MGA_TEXFILTER 0x2c58
525   -#define MGA_TEXHEIGHT 0x2c2c
526   -#define MGA_TEXORG 0x2c24
  524 +#define MGA_TEXFILTER 0x2c58
  525 +#define MGA_TEXHEIGHT 0x2c2c
  526 +#define MGA_TEXORG 0x2c24
527 527 # define MGA_TEXORGMAP_MASK (1 << 0)
528 528 # define MGA_TEXORGMAP_FB (0 << 0)
529 529 # define MGA_TEXORGMAP_SYSMEM (1 << 0)
... ... @@ -534,45 +534,45 @@
534 534 #define MGA_TEXORG2 0x2ca8
535 535 #define MGA_TEXORG3 0x2cac
536 536 #define MGA_TEXORG4 0x2cb0
537   -#define MGA_TEXTRANS 0x2c34
538   -#define MGA_TEXTRANSHIGH 0x2c38
539   -#define MGA_TEXWIDTH 0x2c28
  537 +#define MGA_TEXTRANS 0x2c34
  538 +#define MGA_TEXTRANSHIGH 0x2c38
  539 +#define MGA_TEXWIDTH 0x2c28
540 540  
541   -#define MGA_WACCEPTSEQ 0x1dd4
542   -#define MGA_WCODEADDR 0x1e6c
543   -#define MGA_WFLAG 0x1dc4
544   -#define MGA_WFLAG1 0x1de0
  541 +#define MGA_WACCEPTSEQ 0x1dd4
  542 +#define MGA_WCODEADDR 0x1e6c
  543 +#define MGA_WFLAG 0x1dc4
  544 +#define MGA_WFLAG1 0x1de0
545 545 #define MGA_WFLAGNB 0x1e64
546   -#define MGA_WFLAGNB1 0x1e08
  546 +#define MGA_WFLAGNB1 0x1e08
547 547 #define MGA_WGETMSB 0x1dc8
548   -#define MGA_WIADDR 0x1dc0
  548 +#define MGA_WIADDR 0x1dc0
549 549 #define MGA_WIADDR2 0x1dd8
550 550 # define MGA_WMODE_SUSPEND (0 << 0)
551 551 # define MGA_WMODE_RESUME (1 << 0)
552 552 # define MGA_WMODE_JUMP (2 << 0)
553 553 # define MGA_WMODE_START (3 << 0)
554 554 # define MGA_WAGP_ENABLE (1 << 2)
555   -#define MGA_WMISC 0x1e70
  555 +#define MGA_WMISC 0x1e70
556 556 # define MGA_WUCODECACHE_ENABLE (1 << 0)
557 557 # define MGA_WMASTER_ENABLE (1 << 1)
558 558 # define MGA_WCACHEFLUSH_ENABLE (1 << 3)
559 559 #define MGA_WVRTXSZ 0x1dcc
560 560  
561   -#define MGA_YBOT 0x1c9c
562   -#define MGA_YDST 0x1c90
  561 +#define MGA_YBOT 0x1c9c
  562 +#define MGA_YDST 0x1c90
563 563 #define MGA_YDSTLEN 0x1c88
564 564 #define MGA_YDSTORG 0x1c94
565   -#define MGA_YTOP 0x1c98
  565 +#define MGA_YTOP 0x1c98
566 566  
567   -#define MGA_ZORG 0x1c0c
  567 +#define MGA_ZORG 0x1c0c
568 568  
569 569 /* This finishes the current batch of commands
570 570 */
571   -#define MGA_EXEC 0x0100
  571 +#define MGA_EXEC 0x0100
572 572  
573 573 /* AGP PLL encoding (for G200 only).
574 574 */
575   -#define MGA_AGP_PLL 0x1e4c
  575 +#define MGA_AGP_PLL 0x1e4c
576 576 # define MGA_AGP2XPLL_DISABLE (0 << 0)
577 577 # define MGA_AGP2XPLL_ENABLE (1 << 0)
578 578  
drivers/char/drm/mga_state.c
... ... @@ -150,8 +150,8 @@
150 150 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
151 151 DMA_LOCALS;
152 152  
153   -/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
154   -/* tex->texctl, tex->texctl2); */
  153 +/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
  154 +/* tex->texctl, tex->texctl2); */
155 155  
156 156 BEGIN_DMA(6);
157 157  
... ... @@ -190,8 +190,8 @@
190 190 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
191 191 DMA_LOCALS;
192 192  
193   -/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
194   -/* tex->texctl, tex->texctl2); */
  193 +/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
  194 +/* tex->texctl, tex->texctl2); */
195 195  
196 196 BEGIN_DMA(5);
197 197  
... ... @@ -256,7 +256,7 @@
256 256 unsigned int pipe = sarea_priv->warp_pipe;
257 257 DMA_LOCALS;
258 258  
259   -/* printk("mga_g400_emit_pipe %x\n", pipe); */
  259 +/* printk("mga_g400_emit_pipe %x\n", pipe); */
260 260  
261 261 BEGIN_DMA(10);
262 262  
drivers/char/drm/r128_cce.c
1   -/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
  1 +/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
2 2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
3 3 */
4 4 /*
drivers/char/drm/r128_drv.h
... ... @@ -493,7 +493,7 @@
493 493 write * sizeof(u32) ); \
494 494 } \
495 495 if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
496   - DRM_ERROR( \
  496 + DRM_ERROR( \
497 497 "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
498 498 ((dev_priv->ring.tail + _nr) & tail_mask), \
499 499 write, __LINE__); \
drivers/char/drm/r300_cmdbuf.c
... ... @@ -486,7 +486,7 @@
486 486 if (cmd[0] & 0x8000) {
487 487 u32 offset;
488 488  
489   - if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
  489 + if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
490 490 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
491 491 offset = cmd[2] << 10;
492 492 ret = !radeon_check_offset(dev_priv, offset);
... ... @@ -504,7 +504,7 @@
504 504 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
505 505 return -EINVAL;
506 506 }
507   -
  507 +
508 508 }
509 509 }
510 510  
... ... @@ -723,54 +723,54 @@
723 723 u32 *ref_age_base;
724 724 u32 i, buf_idx, h_pending;
725 725 RING_LOCALS;
726   -
727   - if (cmdbuf->bufsz <
  726 +
  727 + if (cmdbuf->bufsz <
728 728 (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
729 729 return -EINVAL;
730 730 }
731   -
  731 +
732 732 if (header.scratch.reg >= 5) {
733 733 return -EINVAL;
734 734 }
735   -
  735 +
736 736 dev_priv->scratch_ages[header.scratch.reg]++;
737   -
  737 +
738 738 ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);
739   -
  739 +
740 740 cmdbuf->buf += sizeof(u64);
741 741 cmdbuf->bufsz -= sizeof(u64);
742   -
  742 +
743 743 for (i=0; i < header.scratch.n_bufs; i++) {
744 744 buf_idx = *(u32 *)cmdbuf->buf;
745 745 buf_idx *= 2; /* 8 bytes per buf */
746   -
  746 +
747 747 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
748 748 return -EINVAL;
749 749 }
750   -
  750 +
751 751 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
752 752 return -EINVAL;
753 753 }
754   -
  754 +
755 755 if (h_pending == 0) {
756 756 return -EINVAL;
757 757 }
758   -
  758 +
759 759 h_pending--;
760   -
  760 +
761 761 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
762 762 return -EINVAL;
763 763 }
764   -
  764 +
765 765 cmdbuf->buf += sizeof(buf_idx);
766 766 cmdbuf->bufsz -= sizeof(buf_idx);
767 767 }
768   -
  768 +
769 769 BEGIN_RING(2);
770 770 OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
771 771 OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
772 772 ADVANCE_RING();
773   -
  773 +
774 774 return 0;
775 775 }
776 776  
... ... @@ -919,7 +919,7 @@
919 919 goto cleanup;
920 920 }
921 921 break;
922   -
  922 +
923 923 default:
924 924 DRM_ERROR("bad cmd_type %i at %p\n",
925 925 header.header.cmd_type,
drivers/char/drm/r300_reg.h
... ... @@ -853,13 +853,13 @@
853 853 # define R300_TX_FORMAT_W8Z8Y8X8 0xC
854 854 # define R300_TX_FORMAT_W2Z10Y10X10 0xD
855 855 # define R300_TX_FORMAT_W16Z16Y16X16 0xE
856   -# define R300_TX_FORMAT_DXT1 0xF
857   -# define R300_TX_FORMAT_DXT3 0x10
858   -# define R300_TX_FORMAT_DXT5 0x11
  856 +# define R300_TX_FORMAT_DXT1 0xF
  857 +# define R300_TX_FORMAT_DXT3 0x10
  858 +# define R300_TX_FORMAT_DXT5 0x11
859 859 # define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
860   -# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
861   -# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
862   -# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
  860 +# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
  861 +# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
  862 +# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
863 863 /* 0x16 - some 16 bit green format.. ?? */
864 864 # define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
865 865 # define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
... ... @@ -867,19 +867,19 @@
867 867 /* gap */
868 868 /* Floating point formats */
869 869 /* Note - hardware supports both 16 and 32 bit floating point */
870   -# define R300_TX_FORMAT_FL_I16 0x18
871   -# define R300_TX_FORMAT_FL_I16A16 0x19
  870 +# define R300_TX_FORMAT_FL_I16 0x18
  871 +# define R300_TX_FORMAT_FL_I16A16 0x19
872 872 # define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
873   -# define R300_TX_FORMAT_FL_I32 0x1B
874   -# define R300_TX_FORMAT_FL_I32A32 0x1C
  873 +# define R300_TX_FORMAT_FL_I32 0x1B
  874 +# define R300_TX_FORMAT_FL_I32A32 0x1C
875 875 # define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
876 876 /* alpha modes, convenience mostly */
877 877 /* if you have alpha, pick constant appropriate to the
878 878 number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
879   -# define R300_TX_FORMAT_ALPHA_1CH 0x000
880   -# define R300_TX_FORMAT_ALPHA_2CH 0x200
881   -# define R300_TX_FORMAT_ALPHA_4CH 0x600
882   -# define R300_TX_FORMAT_ALPHA_NONE 0xA00
  879 +# define R300_TX_FORMAT_ALPHA_1CH 0x000
  880 +# define R300_TX_FORMAT_ALPHA_2CH 0x200
  881 +# define R300_TX_FORMAT_ALPHA_4CH 0x600
  882 +# define R300_TX_FORMAT_ALPHA_NONE 0xA00
883 883 /* Swizzling */
884 884 /* constants */
885 885 # define R300_TX_FORMAT_X 0
886 886  
... ... @@ -1360,11 +1360,11 @@
1360 1360 # define R300_RB3D_Z_DISABLED_2 0x00000014
1361 1361 # define R300_RB3D_Z_TEST 0x00000012
1362 1362 # define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
1363   -# define R300_RB3D_Z_WRITE_ONLY 0x00000006
  1363 +# define R300_RB3D_Z_WRITE_ONLY 0x00000006
1364 1364  
1365 1365 # define R300_RB3D_Z_TEST 0x00000012
1366 1366 # define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
1367   -# define R300_RB3D_Z_WRITE_ONLY 0x00000006
  1367 +# define R300_RB3D_Z_WRITE_ONLY 0x00000006
1368 1368 # define R300_RB3D_STENCIL_ENABLE 0x00000001
1369 1369  
1370 1370 #define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
drivers/char/drm/radeon_cp.c
... ... @@ -1127,7 +1127,7 @@
1127 1127 {
1128 1128 u32 ring_start, cur_read_ptr;
1129 1129 u32 tmp;
1130   -
  1130 +
1131 1131 /* Initialize the memory controller. With new memory map, the fb location
1132 1132 * is not changed, it should have been properly initialized already. Part
1133 1133 * of the problem is that the code below is bogus, assuming the GART is
... ... @@ -1358,7 +1358,7 @@
1358 1358 return;
1359 1359 }
1360 1360  
1361   - tmp = RADEON_READ(RADEON_AIC_CNTL);
  1361 + tmp = RADEON_READ(RADEON_AIC_CNTL);
1362 1362  
1363 1363 if (on) {
1364 1364 RADEON_WRITE(RADEON_AIC_CNTL,
... ... @@ -1583,7 +1583,7 @@
1583 1583  
1584 1584 dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION)
1585 1585 & 0xffff) << 16;
1586   - dev_priv->fb_size =
  1586 + dev_priv->fb_size =
1587 1587 ((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000)
1588 1588 - dev_priv->fb_location;
1589 1589  
... ... @@ -1630,7 +1630,7 @@
1630 1630 ((base + dev_priv->gart_size) & 0xfffffffful) < base)
1631 1631 base = dev_priv->fb_location
1632 1632 - dev_priv->gart_size;
1633   - }
  1633 + }
1634 1634 dev_priv->gart_vm_start = base & 0xffc00000u;
1635 1635 if (dev_priv->gart_vm_start != base)
1636 1636 DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
drivers/char/drm/radeon_drm.h
... ... @@ -223,10 +223,10 @@
223 223 #define R300_CMD_CP_DELAY 5
224 224 #define R300_CMD_DMA_DISCARD 6
225 225 #define R300_CMD_WAIT 7
226   -# define R300_WAIT_2D 0x1
227   -# define R300_WAIT_3D 0x2
228   -# define R300_WAIT_2D_CLEAN 0x3
229   -# define R300_WAIT_3D_CLEAN 0x4
  226 +# define R300_WAIT_2D 0x1
  227 +# define R300_WAIT_3D 0x2
  228 +# define R300_WAIT_2D_CLEAN 0x3
  229 +# define R300_WAIT_3D_CLEAN 0x4
230 230 #define R300_CMD_SCRATCH 8
231 231  
232 232 typedef union {
... ... @@ -722,8 +722,8 @@
722 722 unsigned int address;
723 723 } drm_radeon_surface_free_t;
724 724  
725   -#define DRM_RADEON_VBLANK_CRTC1 1
726   -#define DRM_RADEON_VBLANK_CRTC2 2
  725 +#define DRM_RADEON_VBLANK_CRTC1 1
  726 +#define DRM_RADEON_VBLANK_CRTC2 2
727 727  
728 728 #endif
drivers/char/drm/radeon_drv.h
... ... @@ -429,7 +429,7 @@
429 429 #define RADEON_PCIE_INDEX 0x0030
430 430 #define RADEON_PCIE_DATA 0x0034
431 431 #define RADEON_PCIE_TX_GART_CNTL 0x10
432   -# define RADEON_PCIE_TX_GART_EN (1 << 0)
  432 +# define RADEON_PCIE_TX_GART_EN (1 << 0)
433 433 # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1)
434 434 # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1<<1)
435 435 # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3<<1)
... ... @@ -439,7 +439,7 @@
439 439 # define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1<<8)
440 440 #define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
441 441 #define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
442   -#define RADEON_PCIE_TX_GART_BASE 0x13
  442 +#define RADEON_PCIE_TX_GART_BASE 0x13
443 443 #define RADEON_PCIE_TX_GART_START_LO 0x14
444 444 #define RADEON_PCIE_TX_GART_START_HI 0x15
445 445 #define RADEON_PCIE_TX_GART_END_LO 0x16
... ... @@ -512,12 +512,12 @@
512 512  
513 513 #define RADEON_GEN_INT_STATUS 0x0044
514 514 # define RADEON_CRTC_VBLANK_STAT (1 << 0)
515   -# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
  515 +# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
516 516 # define RADEON_CRTC2_VBLANK_STAT (1 << 9)
517   -# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
  517 +# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
518 518 # define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
519 519 # define RADEON_SW_INT_TEST (1 << 25)
520   -# define RADEON_SW_INT_TEST_ACK (1 << 25)
  520 +# define RADEON_SW_INT_TEST_ACK (1 << 25)
521 521 # define RADEON_SW_INT_FIRE (1 << 26)
522 522  
523 523 #define RADEON_HOST_PATH_CNTL 0x0130
... ... @@ -1133,7 +1133,7 @@
1133 1133 write, dev_priv->ring.tail ); \
1134 1134 } \
1135 1135 if (((dev_priv->ring.tail + _nr) & mask) != write) { \
1136   - DRM_ERROR( \
  1136 + DRM_ERROR( \
1137 1137 "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
1138 1138 ((dev_priv->ring.tail + _nr) & mask), \
1139 1139 write, __LINE__); \
drivers/char/drm/savage_state.c
... ... @@ -512,7 +512,7 @@
512 512 DMA_DRAW_PRIMITIVE(count, prim, skip);
513 513  
514 514 if (vb_stride == vtx_size) {
515   - DMA_COPY(&vtxbuf[vb_stride * start],
  515 + DMA_COPY(&vtxbuf[vb_stride * start],
516 516 vtx_size * count);
517 517 } else {
518 518 for (i = start; i < start + count; ++i) {
... ... @@ -742,7 +742,7 @@
742 742 while (n != 0) {
743 743 /* Can emit up to 255 vertices (85 triangles) at once. */
744 744 unsigned int count = n > 255 ? 255 : n;
745   -
  745 +
746 746 /* Check indices */
747 747 for (i = 0; i < count; ++i) {
748 748 if (idx[i] > vb_size / (vb_stride * 4)) {
... ... @@ -933,7 +933,7 @@
933 933 /* j was check in savage_bci_cmdbuf */
934 934 ret = savage_dispatch_vb_idx(dev_priv,
935 935 &cmd_header, (const uint16_t *)cmdbuf,
936   - (const uint32_t *)vtxbuf, vb_size,
  936 + (const uint32_t *)vtxbuf, vb_size,
937 937 vb_stride);
938 938 cmdbuf += j;
939 939 break;
drivers/char/drm/sis_mm.c
... ... @@ -249,7 +249,7 @@
249 249 return 0;
250 250 }
251 251 }
252   -
  252 +
253 253 /*
254 254 * Implement a device switch here if needed
255 255 */
drivers/char/drm/via_dmablit.c
1 1 /* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
2   - *
  2 + *
3 3 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
4 4 *
5 5 * Permission is hereby granted, free of charge, to any person obtaining a
... ... @@ -16,22 +16,22 @@
16 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19   - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20   - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21   - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  20 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  21 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 23 *
24   - * Authors:
  24 + * Authors:
25 25 * Thomas Hellstrom.
26 26 * Partially based on code obtained from Digeo Inc.
27 27 */
28 28  
29 29  
30 30 /*
31   - * Unmaps the DMA mappings.
32   - * FIXME: Is this a NoOp on x86? Also
33   - * FIXME: What happens if this one is called and a pending blit has previously done
34   - * the same DMA mappings?
  31 + * Unmaps the DMA mappings.
  32 + * FIXME: Is this a NoOp on x86? Also
  33 + * FIXME: What happens if this one is called and a pending blit has previously done
  34 + * the same DMA mappings?
35 35 */
36 36  
37 37 #include "drmP.h"
... ... @@ -65,7 +65,7 @@
65 65 int num_desc = vsg->num_desc;
66 66 unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
67 67 unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
68   - drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
  68 + drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
69 69 descriptor_this_page;
70 70 dma_addr_t next = vsg->chain_start;
71 71  
... ... @@ -73,7 +73,7 @@
73 73 if (descriptor_this_page-- == 0) {
74 74 cur_descriptor_page--;
75 75 descriptor_this_page = vsg->descriptors_per_page - 1;
76   - desc_ptr = vsg->desc_pages[cur_descriptor_page] +
  76 + desc_ptr = vsg->desc_pages[cur_descriptor_page] +
77 77 descriptor_this_page;
78 78 }
79 79 dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
... ... @@ -93,7 +93,7 @@
93 93 static void
94 94 via_map_blit_for_device(struct pci_dev *pdev,
95 95 const drm_via_dmablit_t *xfer,
96   - drm_via_sg_info_t *vsg,
  96 + drm_via_sg_info_t *vsg,
97 97 int mode)
98 98 {
99 99 unsigned cur_descriptor_page = 0;
... ... @@ -110,7 +110,7 @@
110 110 dma_addr_t next = 0 | VIA_DMA_DPR_EC;
111 111 drm_via_descriptor_t *desc_ptr = NULL;
112 112  
113   - if (mode == 1)
  113 + if (mode == 1)
114 114 desc_ptr = vsg->desc_pages[cur_descriptor_page];
115 115  
116 116 for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
... ... @@ -118,24 +118,24 @@
118 118 line_len = xfer->line_length;
119 119 cur_fb = fb_addr;
120 120 cur_mem = mem_addr;
121   -
  121 +
122 122 while (line_len > 0) {
123 123  
124 124 remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
125 125 line_len -= remaining_len;
126 126  
127 127 if (mode == 1) {
128   - desc_ptr->mem_addr =
129   - dma_map_page(&pdev->dev,
130   - vsg->pages[VIA_PFN(cur_mem) -
  128 + desc_ptr->mem_addr =
  129 + dma_map_page(&pdev->dev,
  130 + vsg->pages[VIA_PFN(cur_mem) -
131 131 VIA_PFN(first_addr)],
132   - VIA_PGOFF(cur_mem), remaining_len,
  132 + VIA_PGOFF(cur_mem), remaining_len,
133 133 vsg->direction);
134 134 desc_ptr->dev_addr = cur_fb;
135   -
  135 +
136 136 desc_ptr->size = remaining_len;
137 137 desc_ptr->next = (uint32_t) next;
138   - next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
  138 + next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
139 139 DMA_TO_DEVICE);
140 140 desc_ptr++;
141 141 if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
142 142  
... ... @@ -143,12 +143,12 @@
143 143 desc_ptr = vsg->desc_pages[++cur_descriptor_page];
144 144 }
145 145 }
146   -
  146 +
147 147 num_desc++;
148 148 cur_mem += remaining_len;
149 149 cur_fb += remaining_len;
150 150 }
151   -
  151 +
152 152 mem_addr += xfer->mem_stride;
153 153 fb_addr += xfer->fb_stride;
154 154 }
155 155  
... ... @@ -161,14 +161,14 @@
161 161 }
162 162  
163 163 /*
164   - * Function that frees up all resources for a blit. It is usable even if the
  164 + * Function that frees up all resources for a blit. It is usable even if the
165 165 * blit info has only been partially built as long as the status enum is consistent
166 166 * with the actual status of the used resources.
167 167 */
168 168  
169 169  
170 170 static void
171   -via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
  171 +via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
172 172 {
173 173 struct page *page;
174 174 int i;
... ... @@ -185,7 +185,7 @@
185 185 case dr_via_pages_locked:
186 186 for (i=0; i<vsg->num_pages; ++i) {
187 187 if ( NULL != (page = vsg->pages[i])) {
188   - if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
  188 + if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
189 189 SetPageDirty(page);
190 190 page_cache_release(page);
191 191 }
... ... @@ -200,7 +200,7 @@
200 200 vsg->bounce_buffer = NULL;
201 201 }
202 202 vsg->free_on_sequence = 0;
203   -}
  203 +}
204 204  
205 205 /*
206 206 * Fire a blit engine.
... ... @@ -213,7 +213,7 @@
213 213  
214 214 VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
215 215 VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
216   - VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
  216 + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
217 217 VIA_DMA_CSR_DE);
218 218 VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
219 219 VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
220 220  
... ... @@ -233,9 +233,9 @@
233 233 {
234 234 int ret;
235 235 unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
236   - vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
  236 + vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
237 237 first_pfn + 1;
238   -
  238 +
239 239 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
240 240 return -ENOMEM;
241 241 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
... ... @@ -248,7 +248,7 @@
248 248  
249 249 up_read(&current->mm->mmap_sem);
250 250 if (ret != vsg->num_pages) {
251   - if (ret < 0)
  251 + if (ret < 0)
252 252 return ret;
253 253 vsg->state = dr_via_pages_locked;
254 254 return -EINVAL;
... ... @@ -264,21 +264,21 @@
264 264 * quite large for some blits, and pages don't need to be contingous.
265 265 */
266 266  
267   -static int
  267 +static int
268 268 via_alloc_desc_pages(drm_via_sg_info_t *vsg)
269 269 {
270 270 int i;
271   -
  271 +
272 272 vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
273   - vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
  273 + vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
274 274 vsg->descriptors_per_page;
275 275  
276 276 if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
277 277 return -ENOMEM;
278   -
  278 +
279 279 vsg->state = dr_via_desc_pages_alloc;
280 280 for (i=0; i<vsg->num_desc_pages; ++i) {
281   - if (NULL == (vsg->desc_pages[i] =
  281 + if (NULL == (vsg->desc_pages[i] =
282 282 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
283 283 return -ENOMEM;
284 284 }
... ... @@ -286,7 +286,7 @@
286 286 vsg->num_desc);
287 287 return 0;
288 288 }
289   -
  289 +
290 290 static void
291 291 via_abort_dmablit(struct drm_device *dev, int engine)
292 292 {
... ... @@ -300,7 +300,7 @@
300 300 {
301 301 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
302 302  
303   - VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
  303 + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
304 304 }
305 305  
306 306  
... ... @@ -311,7 +311,7 @@
311 311 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
312 312 * the workqueue task takes care of processing associated with the old blit.
313 313 */
314   -
  314 +
315 315 void
316 316 via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
317 317 {
... ... @@ -331,19 +331,19 @@
331 331 spin_lock_irqsave(&blitq->blit_lock, irqsave);
332 332 }
333 333  
334   - done_transfer = blitq->is_active &&
  334 + done_transfer = blitq->is_active &&
335 335 (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
336   - done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
  336 + done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
337 337  
338 338 cur = blitq->cur;
339 339 if (done_transfer) {
340 340  
341 341 blitq->blits[cur]->aborted = blitq->aborting;
342 342 blitq->done_blit_handle++;
343   - DRM_WAKEUP(blitq->blit_queue + cur);
  343 + DRM_WAKEUP(blitq->blit_queue + cur);
344 344  
345 345 cur++;
346   - if (cur >= VIA_NUM_BLIT_SLOTS)
  346 + if (cur >= VIA_NUM_BLIT_SLOTS)
347 347 cur = 0;
348 348 blitq->cur = cur;
349 349  
... ... @@ -355,7 +355,7 @@
355 355  
356 356 blitq->is_active = 0;
357 357 blitq->aborting = 0;
358   - schedule_work(&blitq->wq);
  358 + schedule_work(&blitq->wq);
359 359  
360 360 } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
361 361  
... ... @@ -367,7 +367,7 @@
367 367 blitq->aborting = 1;
368 368 blitq->end = jiffies + DRM_HZ;
369 369 }
370   -
  370 +
371 371 if (!blitq->is_active) {
372 372 if (blitq->num_outstanding) {
373 373 via_fire_dmablit(dev, blitq->blits[cur], engine);
374 374  
... ... @@ -383,14 +383,14 @@
383 383 }
384 384 via_dmablit_engine_off(dev, engine);
385 385 }
386   - }
  386 + }
387 387  
388 388 if (from_irq) {
389 389 spin_unlock(&blitq->blit_lock);
390 390 } else {
391 391 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
392 392 }
393   -}
  393 +}
394 394  
395 395  
396 396  
397 397  
... ... @@ -426,13 +426,13 @@
426 426  
427 427 return active;
428 428 }
429   -
  429 +
430 430 /*
431 431 * Sync. Wait for at least three seconds for the blit to be performed.
432 432 */
433 433  
434 434 static int
435   -via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
  435 +via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
436 436 {
437 437  
438 438 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
439 439  
... ... @@ -441,12 +441,12 @@
441 441 int ret = 0;
442 442  
443 443 if (via_dmablit_active(blitq, engine, handle, &queue)) {
444   - DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
  444 + DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
445 445 !via_dmablit_active(blitq, engine, handle, NULL));
446 446 }
447 447 DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
448 448 handle, engine, ret);
449   -
  449 +
450 450 return ret;
451 451 }
452 452  
453 453  
... ... @@ -468,12 +468,12 @@
468 468 struct drm_device *dev = blitq->dev;
469 469 int engine = (int)
470 470 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
471   -
472   - DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
  471 +
  472 + DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
473 473 (unsigned long) jiffies);
474 474  
475 475 via_dmablit_handler(dev, engine, 0);
476   -
  476 +
477 477 if (!timer_pending(&blitq->poll_timer)) {
478 478 mod_timer(&blitq->poll_timer, jiffies + 1);
479 479  
... ... @@ -497,7 +497,7 @@
497 497 */
498 498  
499 499  
500   -static void
  500 +static void
501 501 via_dmablit_workqueue(struct work_struct *work)
502 502 {
503 503 drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
... ... @@ -505,39 +505,39 @@
505 505 unsigned long irqsave;
506 506 drm_via_sg_info_t *cur_sg;
507 507 int cur_released;
508   -
509   -
510   - DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
  508 +
  509 +
  510 + DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
511 511 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
512 512  
513 513 spin_lock_irqsave(&blitq->blit_lock, irqsave);
514   -
  514 +
515 515 while(blitq->serviced != blitq->cur) {
516 516  
517 517 cur_released = blitq->serviced++;
518 518  
519 519 DRM_DEBUG("Releasing blit slot %d\n", cur_released);
520 520  
521   - if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
  521 + if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
522 522 blitq->serviced = 0;
523   -
  523 +
524 524 cur_sg = blitq->blits[cur_released];
525 525 blitq->num_free++;
526   -
  526 +
527 527 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
528   -
  528 +
529 529 DRM_WAKEUP(&blitq->busy_queue);
530   -
  530 +
531 531 via_free_sg_info(dev->pdev, cur_sg);
532 532 kfree(cur_sg);
533   -
  533 +
534 534 spin_lock_irqsave(&blitq->blit_lock, irqsave);
535 535 }
536 536  
537 537 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
538 538 }
539   -
540 539  
  540 +
541 541 /*
542 542 * Init all blit engines. Currently we use two, but some hardware have 4.
543 543 */
... ... @@ -550,8 +550,8 @@
550 550 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
551 551 drm_via_blitq_t *blitq;
552 552  
553   - pci_set_master(dev->pdev);
554   -
  553 + pci_set_master(dev->pdev);
  554 +
555 555 for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
556 556 blitq = dev_priv->blit_queues + i;
557 557 blitq->dev = dev;
... ... @@ -572,20 +572,20 @@
572 572 INIT_WORK(&blitq->wq, via_dmablit_workqueue);
573 573 setup_timer(&blitq->poll_timer, via_dmablit_timer,
574 574 (unsigned long)blitq);
575   - }
  575 + }
576 576 }
577 577  
578 578 /*
579 579 * Build all info and do all mappings required for a blit.
580 580 */
581   -
582 581  
  582 +
583 583 static int
584 584 via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
585 585 {
586 586 int draw = xfer->to_fb;
587 587 int ret = 0;
588   -
  588 +
589 589 vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
590 590 vsg->bounce_buffer = NULL;
591 591  
... ... @@ -599,7 +599,7 @@
599 599 /*
600 600 * Below check is a driver limitation, not a hardware one. We
601 601 * don't want to lock unused pages, and don't want to incoporate the
602   - * extra logic of avoiding them. Make sure there are no.
  602 + * extra logic of avoiding them. Make sure there are no.
603 603 * (Not a big limitation anyway.)
604 604 */
605 605  
... ... @@ -625,11 +625,11 @@
625 625 if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
626 626 DRM_ERROR("Too large PCI DMA bitblt.\n");
627 627 return -EINVAL;
628   - }
  628 + }
629 629  
630   - /*
  630 + /*
631 631 * we allow a negative fb stride to allow flipping of images in
632   - * transfer.
  632 + * transfer.
633 633 */
634 634  
635 635 if (xfer->mem_stride < xfer->line_length ||
636 636  
... ... @@ -653,11 +653,11 @@
653 653 #else
654 654 if ((((unsigned long)xfer->mem_addr & 15) ||
655 655 ((unsigned long)xfer->fb_addr & 3)) ||
656   - ((xfer->num_lines > 1) &&
  656 + ((xfer->num_lines > 1) &&
657 657 ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
658 658 DRM_ERROR("Invalid DRM bitblt alignment.\n");
659 659 return -EINVAL;
660   - }
  660 + }
661 661 #endif
662 662  
663 663 if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
... ... @@ -673,17 +673,17 @@
673 673 return ret;
674 674 }
675 675 via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
676   -
  676 +
677 677 return 0;
678 678 }
679   -
680 679  
  680 +
681 681 /*
682 682 * Reserve one free slot in the blit queue. Will wait for one second for one
683 683 * to become available. Otherwise -EBUSY is returned.
684 684 */
685 685  
686   -static int
  686 +static int
687 687 via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
688 688 {
689 689 int ret=0;
690 690  
... ... @@ -698,10 +698,10 @@
698 698 if (ret) {
699 699 return (-EINTR == ret) ? -EAGAIN : ret;
700 700 }
701   -
  701 +
702 702 spin_lock_irqsave(&blitq->blit_lock, irqsave);
703 703 }
704   -
  704 +
705 705 blitq->num_free--;
706 706 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
707 707  
... ... @@ -712,7 +712,7 @@
712 712 * Hand back a free slot if we changed our mind.
713 713 */
714 714  
715   -static void
  715 +static void
716 716 via_dmablit_release_slot(drm_via_blitq_t *blitq)
717 717 {
718 718 unsigned long irqsave;
... ... @@ -728,8 +728,8 @@
728 728 */
729 729  
730 730  
731   -static int
732   -via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
  731 +static int
  732 +via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
733 733 {
734 734 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
735 735 drm_via_sg_info_t *vsg;
... ... @@ -760,15 +760,15 @@
760 760 spin_lock_irqsave(&blitq->blit_lock, irqsave);
761 761  
762 762 blitq->blits[blitq->head++] = vsg;
763   - if (blitq->head >= VIA_NUM_BLIT_SLOTS)
  763 + if (blitq->head >= VIA_NUM_BLIT_SLOTS)
764 764 blitq->head = 0;
765 765 blitq->num_outstanding++;
766   - xfer->sync.sync_handle = ++blitq->cur_blit_handle;
  766 + xfer->sync.sync_handle = ++blitq->cur_blit_handle;
767 767  
768 768 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
769 769 xfer->sync.engine = engine;
770 770  
771   - via_dmablit_handler(dev, engine, 0);
  771 + via_dmablit_handler(dev, engine, 0);
772 772  
773 773 return 0;
774 774 }
... ... @@ -776,7 +776,7 @@
776 776 /*
777 777 * Sync on a previously submitted blit. Note that the X server use signals extensively, and
778 778 * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
779   - * case it returns with -EAGAIN for the signal to be delivered.
  779 + * case it returns with -EAGAIN for the signal to be delivered.
780 780 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
781 781 */
782 782  
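
Since the IOCTL deliberately returns -EAGAIN when a signal interrupts the wait, a userspace caller is expected to simply reissue it. A hedged sketch of such a retry loop; the request value and argument are placeholders, not definitions taken from this driver:

    /* Userspace retry-on-EAGAIN sketch (request number is a placeholder). */
    #include <errno.h>
    #include <sys/ioctl.h>

    static int blit_ioctl_retry(int fd, unsigned long request, void *arg)
    {
            int ret;

            do {
                    ret = ioctl(fd, request, arg);
            } while (ret != 0 && (errno == EAGAIN || errno == EINTR));

            return ret;
    }
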
... ... @@ -786,7 +786,7 @@
786 786 drm_via_blitsync_t *sync = data;
787 787 int err;
788 788  
789   - if (sync->engine >= VIA_NUM_BLIT_ENGINES)
  789 + if (sync->engine >= VIA_NUM_BLIT_ENGINES)
790 790 return -EINVAL;
791 791  
792 792 err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
793 793  
794 794  
795 795  
... ... @@ -796,15 +796,15 @@
796 796  
797 797 return err;
798 798 }
799   -
800 799  
  800 +
801 801 /*
802 802 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
803   - * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
  803 + * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
804 804 * be reissued. See the above IOCTL code.
805 805 */
806 806  
807   -int
  807 +int
808 808 via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
809 809 {
810 810 drm_via_dmablit_t *xfer = data;
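
Taken together, the two IOCTLs above give the usual submit-then-wait flow: queuing a blit fills in xfer->sync with a handle and engine number, and those are later handed to the sync IOCTL, which is retried on -EAGAIN. An outline of that flow from the caller's side; queue_blit() and wait_for_blit() are hypothetical wrappers around the two IOCTLs, not functions from this driver:

    /* Submit-then-wait outline (hypothetical wrapper functions). */
    static int do_blit(int fd, drm_via_dmablit_t *xfer)
    {
            int ret = queue_blit(fd, xfer);         /* fills xfer->sync */

            if (ret)
                    return ret;
            /* xfer->sync now carries the handle and engine to wait on. */
            return wait_for_blit(fd, &xfer->sync);  /* retries on -EAGAIN */
    }
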
drivers/char/drm/via_dmablit.h
1 1 /* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
2   - *
  2 + *
3 3 * Copyright 2005 Thomas Hellstrom.
4 4 * All Rights Reserved.
5 5 *
6 6  
... ... @@ -17,12 +17,12 @@
17 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20   - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
21   - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22   - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  20 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  21 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  22 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 24 *
25   - * Authors:
  25 + * Authors:
26 26 * Thomas Hellstrom.
27 27 * Register info from Digeo Inc.
28 28 */
... ... @@ -67,7 +67,7 @@
67 67 unsigned cur;
68 68 unsigned num_free;
69 69 unsigned num_outstanding;
70   - unsigned long end;
  70 + unsigned long end;
71 71 int aborting;
72 72 int is_active;
73 73 drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
74 74  
75 75  
76 76  
77 77  
78 78  
79 79  
80 80  
81 81  
82 82  
... ... @@ -77,46 +77,46 @@
77 77 struct work_struct wq;
78 78 struct timer_list poll_timer;
79 79 } drm_via_blitq_t;
80   -
81 80  
82   -/*
  81 +
  82 +/*
83 83 * PCI DMA Registers
84 84 * Channels 2 & 3 don't seem to be implemented in hardware.
85 85 */
86   -
87   -#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
88   -#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
89   -#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
90   -#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
91 86  
92   -#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
93   -#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
94   -#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
95   -#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
  87 +#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
  88 +#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
  89 +#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
  90 +#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
96 91  
97   -#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
98   -#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
99   -#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
100   -#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
  92 +#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
  93 +#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
  94 +#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
  95 +#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
101 96  
102   -#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
103   -#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
104   -#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
105   -#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
  97 +#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
  98 +#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
  99 +#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
  100 +#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
106 101  
107   -#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
108   -#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
109   -#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
110   -#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
  102 +#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
  103 +#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
  104 +#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
  105 +#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
111 106  
112   -#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
113   -#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
114   -#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
115   -#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
  107 +#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
  108 +#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
  109 +#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
  110 +#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
116 111  
117   -#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
  112 +#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
  113 +#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
  114 +#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
  115 +#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
118 116  
119   -/* Define for DMA engine */
  117 +#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
  118 +
  119 +/* Define for DMA engine */
120 120 /* DPR */
121 121 #define VIA_DMA_DPR_EC (1<<1) /* end of chain */
122 122 #define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
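
The channel register blocks above are laid out at a fixed stride: each channel's MAR/DAR/BCR/DPR group starts 0x10 bytes after the previous one, and the mode and command/status registers are packed 4 bytes apart. The same offsets could equivalently be computed; a sketch only, since the driver keeps the explicit per-channel defines:

    /* Computed-offset equivalent of the defines above (sketch only). */
    #define VIA_PCI_DMA_MAR(ch)  (0xE40 + 0x10 * (ch))  /* Memory Address     */
    #define VIA_PCI_DMA_DAR(ch)  (0xE44 + 0x10 * (ch))  /* Device Address     */
    #define VIA_PCI_DMA_BCR(ch)  (0xE48 + 0x10 * (ch))  /* Byte Count         */
    #define VIA_PCI_DMA_DPR(ch)  (0xE4C + 0x10 * (ch))  /* Descriptor Pointer */
    #define VIA_PCI_DMA_MR(ch)   (0xE80 + 0x04 * (ch))  /* Mode               */
    #define VIA_PCI_DMA_CSR(ch)  (0xE90 + 0x04 * (ch))  /* Command/Status     */
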
drivers/char/drm/via_drm.h
... ... @@ -35,7 +35,7 @@
35 35 #include "via_drmclient.h"
36 36 #endif
37 37  
38   -#define VIA_NR_SAREA_CLIPRECTS 8
  38 +#define VIA_NR_SAREA_CLIPRECTS 8
39 39 #define VIA_NR_XVMC_PORTS 10
40 40 #define VIA_NR_XVMC_LOCKS 5
41 41 #define VIA_MAX_CACHELINE_SIZE 64
... ... @@ -259,7 +259,7 @@
259 259 typedef struct drm_via_dmablit {
260 260 uint32_t num_lines;
261 261 uint32_t line_length;
262   -
  262 +
263 263 uint32_t fb_addr;
264 264 uint32_t fb_stride;
265 265  
drivers/char/drm/via_drv.c
... ... @@ -71,7 +71,7 @@
71 71 .name = DRIVER_NAME,
72 72 .id_table = pciidlist,
73 73 },
74   -
  74 +
75 75 .name = DRIVER_NAME,
76 76 .desc = DRIVER_DESC,
77 77 .date = DRIVER_DATE,
drivers/char/drm/via_mm.c
... ... @@ -113,7 +113,7 @@
113 113 dev_priv->vram_initialized = 0;
114 114 dev_priv->agp_initialized = 0;
115 115 mutex_unlock(&dev->struct_mutex);
116   -}
  116 +}
117 117  
118 118 int via_mem_alloc(struct drm_device *dev, void *data,
119 119 struct drm_file *file_priv)