Commit 3c8a63e22a0802fd56380f6ab305b419f18eb6f5
Committed by Florian Tobias Schandinat
1 parent 4a47a0e09c
Exists in master and in 6 other branches
Add support for SMSC UFX6000/7000 USB display adapters
This patch adds framebuffer support for SMSC's UFX6000 (USB 2.0) and UFX7000 (USB 3.0) display adapters.

Signed-off-by: Steve Glendinning <steve.glendinning@smsc.com>
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Signed-off-by: Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
Showing 4 changed files with 2017 additions and 0 deletions
MAINTAINERS
... | ... | @@ -5920,6 +5920,12 @@ |
5920 | 5920 | S: Supported |
5921 | 5921 | F: drivers/net/smsc9420.* |
5922 | 5922 | |
5923 | +SMSC UFX6000 and UFX7000 USB to VGA DRIVER | |
5924 | +M: Steve Glendinning <steve.glendinning@smsc.com> | |
5925 | +L: linux-fbdev@vger.kernel.org | |
5926 | +S: Supported | |
5927 | +F: drivers/video/smscufx.c | |
5928 | + | |
5923 | 5929 | SN-IA64 (Itanium) SUB-PLATFORM |
5924 | 5930 | M: Jes Sorensen <jes@sgi.com> |
5925 | 5931 | L: linux-altix@sgi.com |
drivers/video/Kconfig
... | ... | @@ -2120,6 +2120,22 @@ |
2120 | 2120 | |
2121 | 2121 | If unsure, say N. |
2122 | 2122 | |
2123 | +config FB_SMSCUFX | |
2124 | + tristate "SMSC UFX6000/7000 USB Framebuffer support" | |
2125 | + depends on FB && USB | |
2126 | + select FB_MODE_HELPERS | |
2127 | + select FB_SYS_FILLRECT | |
2128 | + select FB_SYS_COPYAREA | |
2129 | + select FB_SYS_IMAGEBLIT | |
2130 | + select FB_SYS_FOPS | |
2131 | + select FB_DEFERRED_IO | |
2132 | + ---help--- | |
2133 | + This is a kernel framebuffer driver for SMSC UFX USB devices. | |
2134 | + Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and | |
2135 | + mplayer -vo fbdev. Supports both UFX6000 (USB 2.0) and UFX7000 | |
2136 | + (USB 3.0) devices. | |
2137 | + To compile as a module, choose M here: the module name is smscufx. | |
2138 | + | |
2123 | 2139 | config FB_UDL |
2124 | 2140 | tristate "Displaylink USB Framebuffer support" |
2125 | 2141 | depends on FB && USB |
drivers/video/Makefile
... | ... | @@ -128,6 +128,7 @@ |
128 | 128 | obj-$(CONFIG_FB_PS3) += ps3fb.o |
129 | 129 | obj-$(CONFIG_FB_SM501) += sm501fb.o |
130 | 130 | obj-$(CONFIG_FB_UDL) += udlfb.o |
131 | +obj-$(CONFIG_FB_SMSCUFX) += smscufx.o | |
131 | 132 | obj-$(CONFIG_FB_XILINX) += xilinxfb.o |
132 | 133 | obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o |
133 | 134 | obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o |
drivers/video/smscufx.c
1 | +/* | |
2 | + * smscufx.c -- Framebuffer driver for SMSC UFX USB controller | |
3 | + * | |
4 | + * Copyright (C) 2011 Steve Glendinning <steve.glendinning@smsc.com> | |
5 | + * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | |
6 | + * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | |
7 | + * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | |
8 | + * | |
9 | + * This file is subject to the terms and conditions of the GNU General Public | |
10 | + * License v2. See the file COPYING in the main directory of this archive for | |
11 | + * more details. | |
12 | + * | |
13 | + * Based on udlfb, with work from Florian Echtler, Henrik Bjerregaard Pedersen, | |
14 | + * and others. | |
15 | + * | |
16 | + * Works well with Bernie Thompson's X DAMAGE patch to xf86-video-fbdev | |
17 | + * available from http://git.plugable.com | |
18 | + * | |
19 | + * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven, | |
20 | + * usb-skeleton by GregKH. | |
21 | + */ | |
22 | + | |
23 | +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
24 | + | |
25 | +#include <linux/module.h> | |
26 | +#include <linux/kernel.h> | |
27 | +#include <linux/init.h> | |
28 | +#include <linux/usb.h> | |
29 | +#include <linux/uaccess.h> | |
30 | +#include <linux/mm.h> | |
31 | +#include <linux/fb.h> | |
32 | +#include <linux/vmalloc.h> | |
33 | +#include <linux/slab.h> | |
34 | +#include <linux/delay.h> | |
35 | +#include "edid.h" | |
36 | + | |
37 | +#define check_warn(status, fmt, args...) \ | |
38 | + ({ if (status < 0) pr_warn(fmt, ##args); }) | |
39 | + | |
40 | +#define check_warn_return(status, fmt, args...) \ | |
41 | + ({ if (status < 0) { pr_warn(fmt, ##args); return status; } }) | |
42 | + | |
43 | +#define check_warn_goto_error(status, fmt, args...) \ | |
44 | + ({ if (status < 0) { pr_warn(fmt, ##args); goto error; } }) | |
45 | + | |
46 | +#define all_bits_set(x, bits) (((x) & (bits)) == (bits)) | |
47 | + | |
48 | +#define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0 | |
49 | +#define USB_VENDOR_REQUEST_READ_REGISTER 0xA1 | |
50 | + | |
51 | +/* | |
52 | + * TODO: Propose standard fb.h ioctl for reporting damage, | |
53 | + * using _IOWR() and one of the existing area structs from fb.h | |
54 | + * Consider these ioctls deprecated, but they're still used by the | |
55 | + * DisplayLink X server as yet - need both to be modified in tandem | |
56 | + * when new ioctl(s) are ready. | |
57 | + */ | |
58 | +#define UFX_IOCTL_RETURN_EDID (0xAD) | |
59 | +#define UFX_IOCTL_REPORT_DAMAGE (0xAA) | |
60 | + | |
61 | +/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */ | |
62 | +#define BULK_SIZE (512) | |
63 | +#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE) | |
64 | +#define WRITES_IN_FLIGHT (4) | |
65 | + | |
66 | +#define GET_URB_TIMEOUT (HZ) | |
67 | +#define FREE_URB_TIMEOUT (HZ*2) | |
68 | + | |
69 | +#define BPP 2 | |
70 | + | |
71 | +#define UFX_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */ | |
72 | +#define UFX_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */ | |
73 | + | |
74 | +struct dloarea { | |
75 | + int x, y; | |
76 | + int w, h; | |
77 | +}; | |
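These two ioctls (UFX_IOCTL_RETURN_EDID / UFX_IOCTL_REPORT_DAMAGE above) together with the dloarea layout are what a damage-aware client such as the patched X server drives. A minimal userspace sketch, for illustration only — the /dev/fb1 node and rectangle values are assumptions, not part of this patch; the driver casts arg straight to a struct dloarea, so the layout must match:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define UFX_IOCTL_REPORT_DAMAGE 0xAA

struct dloarea { int x, y, w, h; };	/* must match the driver's struct */

int main(void)
{
	struct dloarea area = { .x = 0, .y = 0, .w = 1024, .h = 768 };
	int fd = open("/dev/fb1", O_RDWR);	/* hypothetical fb node for the adapter */

	if (fd < 0)
		return 1;

	/* after drawing into the mmap()ed framebuffer, report the dirty rectangle */
	if (ioctl(fd, UFX_IOCTL_REPORT_DAMAGE, &area) < 0)
		perror("UFX_IOCTL_REPORT_DAMAGE");

	close(fd);
	return 0;
}

As a side effect, the first such ioctl stretches the fb_defio delay to UFX_DEFIO_WRITE_DISABLE, effectively turning page-fault tracking off for damage-aware clients (see ufx_ops_ioctl below).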
78 | + | |
79 | +struct urb_node { | |
80 | + struct list_head entry; | |
81 | + struct ufx_data *dev; | |
82 | + struct delayed_work release_urb_work; | |
83 | + struct urb *urb; | |
84 | +}; | |
85 | + | |
86 | +struct urb_list { | |
87 | + struct list_head list; | |
88 | + spinlock_t lock; | |
89 | + struct semaphore limit_sem; | |
90 | + int available; | |
91 | + int count; | |
92 | + size_t size; | |
93 | +}; | |
94 | + | |
95 | +struct ufx_data { | |
96 | + struct usb_device *udev; | |
97 | + struct device *gdev; /* &udev->dev */ | |
98 | + struct fb_info *info; | |
99 | + struct urb_list urbs; | |
100 | + struct kref kref; | |
101 | + int fb_count; | |
102 | + bool virtualized; /* true when physical usb device not present */ | |
103 | + struct delayed_work free_framebuffer_work; | |
104 | + atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */ | |
105 | + atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */ | |
106 | + char *edid; /* null until we read edid from hw or get from sysfs */ | |
107 | + size_t edid_size; | |
108 | + u32 pseudo_palette[256]; | |
109 | +}; | |
110 | + | |
111 | +static struct fb_fix_screeninfo ufx_fix = { | |
112 | + .id = "smscufx", | |
113 | + .type = FB_TYPE_PACKED_PIXELS, | |
114 | + .visual = FB_VISUAL_TRUECOLOR, | |
115 | + .xpanstep = 0, | |
116 | + .ypanstep = 0, | |
117 | + .ywrapstep = 0, | |
118 | + .accel = FB_ACCEL_NONE, | |
119 | +}; | |
120 | + | |
121 | +static const u32 smscufx_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST | | |
122 | + FBINFO_VIRTFB | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT | | |
123 | + FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR; | |
124 | + | |
125 | +static struct usb_device_id id_table[] = { | |
126 | + {USB_DEVICE(0x0424, 0x9d00),}, | |
127 | + {USB_DEVICE(0x0424, 0x9d01),}, | |
128 | + {}, | |
129 | +}; | |
130 | +MODULE_DEVICE_TABLE(usb, id_table); | |
131 | + | |
132 | +/* module options */ | |
133 | +static int console; /* Optionally allow fbcon to consume first framebuffer */ | |
134 | +static int fb_defio = true; /* Optionally enable fb_defio mmap support */ | |
135 | + | |
136 | +/* ufx keeps a list of urbs for efficient bulk transfers */ | |
137 | +static void ufx_urb_completion(struct urb *urb); | |
138 | +static struct urb *ufx_get_urb(struct ufx_data *dev); | |
139 | +static int ufx_submit_urb(struct ufx_data *dev, struct urb * urb, size_t len); | |
140 | +static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size); | |
141 | +static void ufx_free_urb_list(struct ufx_data *dev); | |
142 | + | |
143 | +/* reads a control register */ | |
144 | +static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data) | |
145 | +{ | |
146 | + u32 *buf = kmalloc(4, GFP_KERNEL); | |
147 | + int ret; | |
148 | + | |
149 | + BUG_ON(!dev); | |
150 | + | |
151 | + if (!buf) | |
152 | + return -ENOMEM; | |
153 | + | |
154 | + ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), | |
155 | + USB_VENDOR_REQUEST_READ_REGISTER, | |
156 | + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | |
157 | + 00, index, buf, 4, USB_CTRL_GET_TIMEOUT); | |
158 | + | |
159 | + le32_to_cpus(buf); | |
160 | + *data = *buf; | |
161 | + kfree(buf); | |
162 | + | |
163 | + if (unlikely(ret < 0)) | |
164 | + pr_warn("Failed to read register index 0x%08x\n", index); | |
165 | + | |
166 | + return ret; | |
167 | +} | |
168 | + | |
169 | +/* writes a control register */ | |
170 | +static int ufx_reg_write(struct ufx_data *dev, u32 index, u32 data) | |
171 | +{ | |
172 | + u32 *buf = kmalloc(4, GFP_KERNEL); | |
173 | + int ret; | |
174 | + | |
175 | + BUG_ON(!dev); | |
176 | + | |
177 | + if (!buf) | |
178 | + return -ENOMEM; | |
179 | + | |
180 | + *buf = data; | |
181 | + cpu_to_le32s(buf); | |
182 | + | |
183 | + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | |
184 | + USB_VENDOR_REQUEST_WRITE_REGISTER, | |
185 | + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | |
186 | + 00, index, buf, 4, USB_CTRL_SET_TIMEOUT); | |
187 | + | |
188 | + kfree(buf); | |
189 | + | |
190 | + if (unlikely(ret < 0)) | |
191 | + pr_warn("Failed to write register index 0x%08x with value " | |
192 | + "0x%08x\n", index, data); | |
193 | + | |
194 | + return ret; | |
195 | +} | |
196 | + | |
197 | +static int ufx_reg_clear_and_set_bits(struct ufx_data *dev, u32 index, | |
198 | + u32 bits_to_clear, u32 bits_to_set) | |
199 | +{ | |
200 | + u32 data; | |
201 | + int status = ufx_reg_read(dev, index, &data); | |
202 | + check_warn_return(status, "ufx_reg_clear_and_set_bits error reading " | |
203 | + "0x%x", index); | |
204 | + | |
205 | + data &= (~bits_to_clear); | |
206 | + data |= bits_to_set; | |
207 | + | |
208 | + status = ufx_reg_write(dev, index, data); | |
209 | + check_warn_return(status, "ufx_reg_clear_and_set_bits error writing " | |
210 | + "0x%x", index); | |
211 | + | |
212 | + return 0; | |
213 | +} | |
214 | + | |
215 | +static int ufx_reg_set_bits(struct ufx_data *dev, u32 index, u32 bits) | |
216 | +{ | |
217 | + return ufx_reg_clear_and_set_bits(dev, index, 0, bits); | |
218 | +} | |
219 | + | |
220 | +static int ufx_reg_clear_bits(struct ufx_data *dev, u32 index, u32 bits) | |
221 | +{ | |
222 | + return ufx_reg_clear_and_set_bits(dev, index, bits, 0); | |
223 | +} | |
224 | + | |
225 | +static int ufx_lite_reset(struct ufx_data *dev) | |
226 | +{ | |
227 | + int status; | |
228 | + u32 value; | |
229 | + | |
230 | + status = ufx_reg_write(dev, 0x3008, 0x00000001); | |
231 | + check_warn_return(status, "ufx_lite_reset error writing 0x3008"); | |
232 | + | |
233 | + status = ufx_reg_read(dev, 0x3008, &value); | |
234 | + check_warn_return(status, "ufx_lite_reset error reading 0x3008"); | |
235 | + | |
236 | + return (value == 0) ? 0 : -EIO; | |
237 | +} | |
238 | + | |
239 | +/* If display is unblanked, then blank it */ | |
240 | +static int ufx_blank(struct ufx_data *dev, bool wait) | |
241 | +{ | |
242 | + u32 dc_ctrl, dc_sts; | |
243 | + int i; | |
244 | + | |
245 | + int status = ufx_reg_read(dev, 0x2004, &dc_sts); | |
246 | + check_warn_return(status, "ufx_blank error reading 0x2004"); | |
247 | + | |
248 | + status = ufx_reg_read(dev, 0x2000, &dc_ctrl); | |
249 | + check_warn_return(status, "ufx_blank error reading 0x2000"); | |
250 | + | |
251 | + /* return success if display is already blanked */ | |
252 | + if ((dc_sts & 0x00000100) || (dc_ctrl & 0x00000100)) | |
253 | + return 0; | |
254 | + | |
255 | + /* request the DC to blank the display */ | |
256 | + dc_ctrl |= 0x00000100; | |
257 | + status = ufx_reg_write(dev, 0x2000, dc_ctrl); | |
258 | + check_warn_return(status, "ufx_blank error writing 0x2000"); | |
259 | + | |
260 | + /* return success immediately if we don't have to wait */ | |
261 | + if (!wait) | |
262 | + return 0; | |
263 | + | |
264 | + for (i = 0; i < 250; i++) { | |
265 | + status = ufx_reg_read(dev, 0x2004, &dc_sts); | |
266 | + check_warn_return(status, "ufx_blank error reading 0x2004"); | |
267 | + | |
268 | + if (dc_sts & 0x00000100) | |
269 | + return 0; | |
270 | + } | |
271 | + | |
272 | + /* timed out waiting for display to blank */ | |
273 | + return -EIO; | |
274 | +} | |
275 | + | |
276 | +/* If display is blanked, then unblank it */ | |
277 | +static int ufx_unblank(struct ufx_data *dev, bool wait) | |
278 | +{ | |
279 | + u32 dc_ctrl, dc_sts; | |
280 | + int i; | |
281 | + | |
282 | + int status = ufx_reg_read(dev, 0x2004, &dc_sts); | |
283 | + check_warn_return(status, "ufx_unblank error reading 0x2004"); | |
284 | + | |
285 | + status = ufx_reg_read(dev, 0x2000, &dc_ctrl); | |
286 | + check_warn_return(status, "ufx_unblank error reading 0x2000"); | |
287 | + | |
288 | + /* return success if display is already unblanked */ | |
289 | + if (((dc_sts & 0x00000100) == 0) || ((dc_ctrl & 0x00000100) == 0)) | |
290 | + return 0; | |
291 | + | |
292 | + /* request the DC to unblank the display */ | |
293 | + dc_ctrl &= ~0x00000100; | |
294 | + status = ufx_reg_write(dev, 0x2000, dc_ctrl); | |
295 | + check_warn_return(status, "ufx_unblank error writing 0x2000"); | |
296 | + | |
297 | + /* return success immediately if we don't have to wait */ | |
298 | + if (!wait) | |
299 | + return 0; | |
300 | + | |
301 | + for (i = 0; i < 250; i++) { | |
302 | + status = ufx_reg_read(dev, 0x2004, &dc_sts); | |
303 | + check_warn_return(status, "ufx_unblank error reading 0x2004"); | |
304 | + | |
305 | + if ((dc_sts & 0x00000100) == 0) | |
306 | + return 0; | |
307 | + } | |
308 | + | |
309 | + /* timed out waiting for display to unblank */ | |
310 | + return -EIO; | |
311 | +} | |
312 | + | |
313 | +/* If display is enabled, then disable it */ | |
314 | +static int ufx_disable(struct ufx_data *dev, bool wait) | |
315 | +{ | |
316 | + u32 dc_ctrl, dc_sts; | |
317 | + int i; | |
318 | + | |
319 | + int status = ufx_reg_read(dev, 0x2004, &dc_sts); | |
320 | + check_warn_return(status, "ufx_disable error reading 0x2004"); | |
321 | + | |
322 | + status = ufx_reg_read(dev, 0x2000, &dc_ctrl); | |
323 | + check_warn_return(status, "ufx_disable error reading 0x2000"); | |
324 | + | |
325 | + /* return success if display is already disabled */ | |
326 | + if (((dc_sts & 0x00000001) == 0) || ((dc_ctrl & 0x00000001) == 0)) | |
327 | + return 0; | |
328 | + | |
329 | + /* request the DC to disable the display */ | |
330 | + dc_ctrl &= ~(0x00000001); | |
331 | + status = ufx_reg_write(dev, 0x2000, dc_ctrl); | |
332 | + check_warn_return(status, "ufx_disable error writing 0x2000"); | |
333 | + | |
334 | + /* return success immediately if we don't have to wait */ | |
335 | + if (!wait) | |
336 | + return 0; | |
337 | + | |
338 | + for (i = 0; i < 250; i++) { | |
339 | + status = ufx_reg_read(dev, 0x2004, &dc_sts); | |
340 | + check_warn_return(status, "ufx_disable error reading 0x2004"); | |
341 | + | |
342 | + if ((dc_sts & 0x00000001) == 0) | |
343 | + return 0; | |
344 | + } | |
345 | + | |
346 | + /* timed out waiting for display to disable */ | |
347 | + return -EIO; | |
348 | +} | |
349 | + | |
350 | +/* If display is disabled, then enable it */ | |
351 | +static int ufx_enable(struct ufx_data *dev, bool wait) | |
352 | +{ | |
353 | + u32 dc_ctrl, dc_sts; | |
354 | + int i; | |
355 | + | |
356 | + int status = ufx_reg_read(dev, 0x2004, &dc_sts); | |
357 | + check_warn_return(status, "ufx_enable error reading 0x2004"); | |
358 | + | |
359 | + status = ufx_reg_read(dev, 0x2000, &dc_ctrl); | |
360 | + check_warn_return(status, "ufx_enable error reading 0x2000"); | |
361 | + | |
362 | + /* return success if display is already enabled */ | |
363 | + if ((dc_sts & 0x00000001) || (dc_ctrl & 0x00000001)) | |
364 | + return 0; | |
365 | + | |
366 | + /* request the DC to enable the display */ | |
367 | + dc_ctrl |= 0x00000001; | |
368 | + status = ufx_reg_write(dev, 0x2000, dc_ctrl); | |
369 | + check_warn_return(status, "ufx_enable error writing 0x2000"); | |
370 | + | |
371 | + /* return success immediately if we don't have to wait */ | |
372 | + if (!wait) | |
373 | + return 0; | |
374 | + | |
375 | + for (i = 0; i < 250; i++) { | |
376 | + status = ufx_reg_read(dev, 0x2004, &dc_sts); | |
377 | + check_warn_return(status, "ufx_enable error reading 0x2004"); | |
378 | + | |
379 | + if (dc_sts & 0x00000001) | |
380 | + return 0; | |
381 | + } | |
382 | + | |
383 | + /* timed out waiting for display to enable */ | |
384 | + return -EIO; | |
385 | +} | |
386 | + | |
387 | +static int ufx_config_sys_clk(struct ufx_data *dev) | |
388 | +{ | |
389 | + int status = ufx_reg_write(dev, 0x700C, 0x8000000F); | |
390 | + check_warn_return(status, "error writing 0x700C"); | |
391 | + | |
392 | + status = ufx_reg_write(dev, 0x7014, 0x0010024F); | |
393 | + check_warn_return(status, "error writing 0x7014"); | |
394 | + | |
395 | + status = ufx_reg_write(dev, 0x7010, 0x00000000); | |
396 | + check_warn_return(status, "error writing 0x7010"); | |
397 | + | |
398 | + status = ufx_reg_clear_bits(dev, 0x700C, 0x0000000A); | |
399 | + check_warn_return(status, "error clearing PLL1 bypass in 0x700C"); | |
400 | + msleep(1); | |
401 | + | |
402 | + status = ufx_reg_clear_bits(dev, 0x700C, 0x80000000); | |
403 | + check_warn_return(status, "error clearing output gate in 0x700C"); | |
404 | + | |
405 | + return 0; | |
406 | +} | |
407 | + | |
408 | +static int ufx_config_ddr2(struct ufx_data *dev) | |
409 | +{ | |
410 | + int status, i = 0; | |
411 | + u32 tmp; | |
412 | + | |
413 | + status = ufx_reg_write(dev, 0x0004, 0x001F0F77); | |
414 | + check_warn_return(status, "error writing 0x0004"); | |
415 | + | |
416 | + status = ufx_reg_write(dev, 0x0008, 0xFFF00000); | |
417 | + check_warn_return(status, "error writing 0x0008"); | |
418 | + | |
419 | + status = ufx_reg_write(dev, 0x000C, 0x0FFF2222); | |
420 | + check_warn_return(status, "error writing 0x000C"); | |
421 | + | |
422 | + status = ufx_reg_write(dev, 0x0010, 0x00030814); | |
423 | + check_warn_return(status, "error writing 0x0010"); | |
424 | + | |
425 | + status = ufx_reg_write(dev, 0x0014, 0x00500019); | |
426 | + check_warn_return(status, "error writing 0x0014"); | |
427 | + | |
428 | + status = ufx_reg_write(dev, 0x0018, 0x020D0F15); | |
429 | + check_warn_return(status, "error writing 0x0018"); | |
430 | + | |
431 | + status = ufx_reg_write(dev, 0x001C, 0x02532305); | |
432 | + check_warn_return(status, "error writing 0x001C"); | |
433 | + | |
434 | + status = ufx_reg_write(dev, 0x0020, 0x0B030905); | |
435 | + check_warn_return(status, "error writing 0x0020"); | |
436 | + | |
437 | + status = ufx_reg_write(dev, 0x0024, 0x00000827); | |
438 | + check_warn_return(status, "error writing 0x0024"); | |
439 | + | |
440 | + status = ufx_reg_write(dev, 0x0028, 0x00000000); | |
441 | + check_warn_return(status, "error writing 0x0028"); | |
442 | + | |
443 | + status = ufx_reg_write(dev, 0x002C, 0x00000042); | |
444 | + check_warn_return(status, "error writing 0x002C"); | |
445 | + | |
446 | + status = ufx_reg_write(dev, 0x0030, 0x09520000); | |
447 | + check_warn_return(status, "error writing 0x0030"); | |
448 | + | |
449 | + status = ufx_reg_write(dev, 0x0034, 0x02223314); | |
450 | + check_warn_return(status, "error writing 0x0034"); | |
451 | + | |
452 | + status = ufx_reg_write(dev, 0x0038, 0x00430043); | |
453 | + check_warn_return(status, "error writing 0x0038"); | |
454 | + | |
455 | + status = ufx_reg_write(dev, 0x003C, 0xF00F000F); | |
456 | + check_warn_return(status, "error writing 0x003C"); | |
457 | + | |
458 | + status = ufx_reg_write(dev, 0x0040, 0xF380F00F); | |
459 | + check_warn_return(status, "error writing 0x0040"); | |
460 | + | |
461 | + status = ufx_reg_write(dev, 0x0044, 0xF00F0496); | |
462 | + check_warn_return(status, "error writing 0x0044"); | |
463 | + | |
464 | + status = ufx_reg_write(dev, 0x0048, 0x03080406); | |
465 | + check_warn_return(status, "error writing 0x0048"); | |
466 | + | |
467 | + status = ufx_reg_write(dev, 0x004C, 0x00001000); | |
468 | + check_warn_return(status, "error writing 0x004C"); | |
469 | + | |
470 | + status = ufx_reg_write(dev, 0x005C, 0x00000007); | |
471 | + check_warn_return(status, "error writing 0x005C"); | |
472 | + | |
473 | + status = ufx_reg_write(dev, 0x0100, 0x54F00012); | |
474 | + check_warn_return(status, "error writing 0x0100"); | |
475 | + | |
476 | + status = ufx_reg_write(dev, 0x0104, 0x00004012); | |
477 | + check_warn_return(status, "error writing 0x0104"); | |
478 | + | |
479 | + status = ufx_reg_write(dev, 0x0118, 0x40404040); | |
480 | + check_warn_return(status, "error writing 0x0118"); | |
481 | + | |
482 | + status = ufx_reg_write(dev, 0x0000, 0x00000001); | |
483 | + check_warn_return(status, "error writing 0x0000"); | |
484 | + | |
485 | + while (i++ < 500) { | |
486 | + status = ufx_reg_read(dev, 0x0000, &tmp); | |
487 | + check_warn_return(status, "error reading 0x0000"); | |
488 | + | |
489 | + if (all_bits_set(tmp, 0xC0000000)) | |
490 | + return 0; | |
491 | + } | |
492 | + | |
493 | + pr_err("DDR2 initialisation timed out, reg 0x0000=0x%08x", tmp); | |
494 | + return -ETIMEDOUT; | |
495 | +} | |
496 | + | |
497 | +struct pll_values { | |
498 | + u32 div_r0; | |
499 | + u32 div_f0; | |
500 | + u32 div_q0; | |
501 | + u32 range0; | |
502 | + u32 div_r1; | |
503 | + u32 div_f1; | |
504 | + u32 div_q1; | |
505 | + u32 range1; | |
506 | +}; | |
507 | + | |
508 | +static u32 ufx_calc_range(u32 ref_freq) | |
509 | +{ | |
510 | + if (ref_freq >= 88000000) | |
511 | + return 7; | |
512 | + | |
513 | + if (ref_freq >= 54000000) | |
514 | + return 6; | |
515 | + | |
516 | + if (ref_freq >= 34000000) | |
517 | + return 5; | |
518 | + | |
519 | + if (ref_freq >= 21000000) | |
520 | + return 4; | |
521 | + | |
522 | + if (ref_freq >= 13000000) | |
523 | + return 3; | |
524 | + | |
525 | + if (ref_freq >= 8000000) | |
526 | + return 2; | |
527 | + | |
528 | + return 1; | |
529 | +} | |
530 | + | |
531 | +/* calculates PLL divider settings for a desired target frequency */ | |
532 | +static void ufx_calc_pll_values(const u32 clk_pixel_pll, struct pll_values *asic_pll) | |
533 | +{ | |
534 | + const u32 ref_clk = 25000000; | |
535 | + u32 div_r0, div_f0, div_q0, div_r1, div_f1, div_q1; | |
536 | + u32 min_error = clk_pixel_pll; | |
537 | + | |
538 | + for (div_r0 = 1; div_r0 <= 32; div_r0++) { | |
539 | + u32 ref_freq0 = ref_clk / div_r0; | |
540 | + if (ref_freq0 < 5000000) | |
541 | + break; | |
542 | + | |
543 | + if (ref_freq0 > 200000000) | |
544 | + continue; | |
545 | + | |
546 | + for (div_f0 = 1; div_f0 <= 256; div_f0++) { | |
547 | + u32 vco_freq0 = ref_freq0 * div_f0; | |
548 | + | |
549 | + if (vco_freq0 < 350000000) | |
550 | + continue; | |
551 | + | |
552 | + if (vco_freq0 > 700000000) | |
553 | + break; | |
554 | + | |
555 | + for (div_q0 = 0; div_q0 < 7; div_q0++) { | |
556 | + u32 pllout_freq0 = vco_freq0 / (1 << div_q0); | |
557 | + | |
558 | + if (pllout_freq0 < 5000000) | |
559 | + break; | |
560 | + | |
561 | + if (pllout_freq0 > 200000000) | |
562 | + continue; | |
563 | + | |
564 | + for (div_r1 = 1; div_r1 <= 32; div_r1++) { | |
565 | + u32 ref_freq1 = pllout_freq0 / div_r1; | |
566 | + | |
567 | + if (ref_freq1 < 5000000) | |
568 | + break; | |
569 | + | |
570 | + for (div_f1 = 1; div_f1 <= 256; div_f1++) { | |
571 | + u32 vco_freq1 = ref_freq1 * div_f1; | |
572 | + | |
573 | + if (vco_freq1 < 350000000) | |
574 | + continue; | |
575 | + | |
576 | + if (vco_freq1 > 700000000) | |
577 | + break; | |
578 | + | |
579 | + for (div_q1 = 0; div_q1 < 7; div_q1++) { | |
580 | + u32 pllout_freq1 = vco_freq1 / (1 << div_q1); | |
581 | + int error = abs(pllout_freq1 - clk_pixel_pll); | |
582 | + | |
583 | + if (pllout_freq1 < 5000000) | |
584 | + break; | |
585 | + | |
586 | + if (pllout_freq1 > 700000000) | |
587 | + continue; | |
588 | + | |
589 | + if (error < min_error) { | |
590 | + min_error = error; | |
591 | + | |
592 | + /* final returned value is equal to calculated value - 1 | |
593 | + * because a value of 0 = divide by 1 */ | |
594 | + asic_pll->div_r0 = div_r0 - 1; | |
595 | + asic_pll->div_f0 = div_f0 - 1; | |
596 | + asic_pll->div_q0 = div_q0; | |
597 | + asic_pll->div_r1 = div_r1 - 1; | |
598 | + asic_pll->div_f1 = div_f1 - 1; | |
599 | + asic_pll->div_q1 = div_q1; | |
600 | + | |
601 | + asic_pll->range0 = ufx_calc_range(ref_freq0); | |
602 | + asic_pll->range1 = ufx_calc_range(ref_freq1); | |
603 | + | |
604 | + if (min_error == 0) | |
605 | + return; | |
606 | + } | |
607 | + } | |
608 | + } | |
609 | + } | |
610 | + } | |
611 | + } | |
612 | + } | |
613 | +} | |
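To make the nested search concrete: for a 65 MHz pixel clock the caller below doubles the target to clk_pixel_pll = 130 MHz, and with the fixed 25 MHz reference one exact (zero-error) combination is shown here. This is a worked example for illustration only; the loops may settle on a different, equally exact combination depending on iteration order.

/* stage 0: div_r0 = 1  -> ref_freq0    = 25 MHz    (range0 = 4)
 *          div_f0 = 16 -> vco_freq0    = 400 MHz
 *          div_q0 = 2  -> pllout_freq0 = 100 MHz
 * stage 1: div_r1 = 5  -> ref_freq1    = 20 MHz    (range1 = 3)
 *          div_f1 = 26 -> vco_freq1    = 520 MHz
 *          div_q1 = 2  -> pllout_freq1 = 130 MHz   (error = 0)
 * after the "value - 1" adjustment the returned struct would hold:
 */
static const struct pll_values example_130mhz = {
	.div_r0 = 0, .div_f0 = 15, .div_q0 = 2, .range0 = 4,
	.div_r1 = 4, .div_f1 = 25, .div_q1 = 2, .range1 = 3,
};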
614 | + | |
615 | +/* sets analog bit PLL configuration values */ | |
616 | +static int ufx_config_pix_clk(struct ufx_data *dev, u32 pixclock) | |
617 | +{ | |
618 | + struct pll_values asic_pll = {0}; | |
619 | + u32 value, clk_pixel, clk_pixel_pll; | |
620 | + int status; | |
621 | + | |
622 | + /* convert pixclock (in ps) to frequency (in Hz) */ | |
623 | + clk_pixel = PICOS2KHZ(pixclock) * 1000; | |
624 | + pr_debug("pixclock %d ps = clk_pixel %d Hz", pixclock, clk_pixel); | |
625 | + | |
626 | + /* clk_pixel = 1/2 clk_pixel_pll */ | |
627 | + clk_pixel_pll = clk_pixel * 2; | |
628 | + | |
629 | + ufx_calc_pll_values(clk_pixel_pll, &asic_pll); | |
630 | + | |
631 | + /* Keep BYPASS and RESET signals asserted until configured */ | |
632 | + status = ufx_reg_write(dev, 0x7000, 0x8000000F); | |
633 | + check_warn_return(status, "error writing 0x7000"); | |
634 | + | |
635 | + value = (asic_pll.div_f1 | (asic_pll.div_r1 << 8) | | |
636 | + (asic_pll.div_q1 << 16) | (asic_pll.range1 << 20)); | |
637 | + status = ufx_reg_write(dev, 0x7008, value); | |
638 | + check_warn_return(status, "error writing 0x7008"); | |
639 | + | |
640 | + value = (asic_pll.div_f0 | (asic_pll.div_r0 << 8) | | |
641 | + (asic_pll.div_q0 << 16) | (asic_pll.range0 << 20)); | |
642 | + status = ufx_reg_write(dev, 0x7004, value); | |
643 | + check_warn_return(status, "error writing 0x7004"); | |
644 | + | |
645 | + status = ufx_reg_clear_bits(dev, 0x7000, 0x00000005); | |
646 | + check_warn_return(status, | |
647 | + "error clearing PLL0 bypass bits in 0x7000"); | |
648 | + msleep(1); | |
649 | + | |
650 | + status = ufx_reg_clear_bits(dev, 0x7000, 0x0000000A); | |
651 | + check_warn_return(status, | |
652 | + "error clearing PLL1 bypass bits in 0x7000"); | |
653 | + msleep(1); | |
654 | + | |
655 | + status = ufx_reg_clear_bits(dev, 0x7000, 0x80000000); | |
656 | + check_warn_return(status, "error clearing gate bits in 0x7000"); | |
657 | + | |
658 | + return 0; | |
659 | +} | |
660 | + | |
661 | +static int ufx_set_vid_mode(struct ufx_data *dev, struct fb_var_screeninfo *var) | |
662 | +{ | |
663 | + u32 temp; | |
664 | + u16 h_total, h_active, h_blank_start, h_blank_end, h_sync_start, h_sync_end; | |
665 | + u16 v_total, v_active, v_blank_start, v_blank_end, v_sync_start, v_sync_end; | |
666 | + | |
667 | + int status = ufx_reg_write(dev, 0x8028, 0); | |
668 | + check_warn_return(status, "ufx_set_vid_mode error disabling RGB pad"); | |
669 | + | |
670 | + status = ufx_reg_write(dev, 0x8024, 0); | |
671 | + check_warn_return(status, "ufx_set_vid_mode error disabling VDAC"); | |
672 | + | |
673 | + /* shut everything down before changing timing */ | |
674 | + status = ufx_blank(dev, true); | |
675 | + check_warn_return(status, "ufx_set_vid_mode error blanking display"); | |
676 | + | |
677 | + status = ufx_disable(dev, true); | |
678 | + check_warn_return(status, "ufx_set_vid_mode error disabling display"); | |
679 | + | |
680 | + status = ufx_config_pix_clk(dev, var->pixclock); | |
681 | + check_warn_return(status, "ufx_set_vid_mode error configuring pixclock"); | |
682 | + | |
683 | + status = ufx_reg_write(dev, 0x2000, 0x00000104); | |
684 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2000"); | |
685 | + | |
686 | + /* set horizontal timings */ | |
687 | + h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin; | |
688 | + h_active = var->xres; | |
689 | + h_blank_start = var->xres + var->right_margin; | |
690 | + h_blank_end = var->xres + var->right_margin + var->hsync_len; | |
691 | + h_sync_start = var->xres + var->right_margin; | |
692 | + h_sync_end = var->xres + var->right_margin + var->hsync_len; | |
693 | + | |
694 | + temp = ((h_total - 1) << 16) | (h_active - 1); | |
695 | + status = ufx_reg_write(dev, 0x2008, temp); | |
696 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2008"); | |
697 | + | |
698 | + temp = ((h_blank_start - 1) << 16) | (h_blank_end - 1); | |
699 | + status = ufx_reg_write(dev, 0x200C, temp); | |
700 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x200C"); | |
701 | + | |
702 | + temp = ((h_sync_start - 1) << 16) | (h_sync_end - 1); | |
703 | + status = ufx_reg_write(dev, 0x2010, temp); | |
704 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2010"); | |
705 | + | |
706 | + /* set vertical timings */ | |
707 | + v_total = var->upper_margin + var->yres + var->lower_margin + var->vsync_len; | |
708 | + v_active = var->yres; | |
709 | + v_blank_start = var->yres + var->lower_margin; | |
710 | + v_blank_end = var->yres + var->lower_margin + var->vsync_len; | |
711 | + v_sync_start = var->yres + var->lower_margin; | |
712 | + v_sync_end = var->yres + var->lower_margin + var->vsync_len; | |
713 | + | |
714 | + temp = ((v_total - 1) << 16) | (v_active - 1); | |
715 | + status = ufx_reg_write(dev, 0x2014, temp); | |
716 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2014"); | |
717 | + | |
718 | + temp = ((v_blank_start - 1) << 16) | (v_blank_end - 1); | |
719 | + status = ufx_reg_write(dev, 0x2018, temp); | |
720 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2018"); | |
721 | + | |
722 | + temp = ((v_sync_start - 1) << 16) | (v_sync_end - 1); | |
723 | + status = ufx_reg_write(dev, 0x201C, temp); | |
724 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x201C"); | |
725 | + | |
726 | + status = ufx_reg_write(dev, 0x2020, 0x00000000); | |
727 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2020"); | |
728 | + | |
729 | + status = ufx_reg_write(dev, 0x2024, 0x00000000); | |
730 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2024"); | |
731 | + | |
732 | + /* Set the frame length register (#pix * 2 bytes/pixel) */ | |
733 | + temp = var->xres * var->yres * 2; | |
734 | + temp = (temp + 7) & (~0x7); | |
735 | + status = ufx_reg_write(dev, 0x2028, temp); | |
736 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2028"); | |
737 | + | |
738 | + /* enable desired output interface & disable others */ | |
739 | + status = ufx_reg_write(dev, 0x2040, 0); | |
740 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2040"); | |
741 | + | |
742 | + status = ufx_reg_write(dev, 0x2044, 0); | |
743 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2044"); | |
744 | + | |
745 | + status = ufx_reg_write(dev, 0x2048, 0); | |
746 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2048"); | |
747 | + | |
748 | + /* set the sync polarities & enable bit */ | |
749 | + temp = 0x00000001; | |
750 | + if (var->sync & FB_SYNC_HOR_HIGH_ACT) | |
751 | + temp |= 0x00000010; | |
752 | + | |
753 | + if (var->sync & FB_SYNC_VERT_HIGH_ACT) | |
754 | + temp |= 0x00000008; | |
755 | + | |
756 | + status = ufx_reg_write(dev, 0x2040, temp); | |
757 | + check_warn_return(status, "ufx_set_vid_mode error writing 0x2040"); | |
758 | + | |
759 | + /* start everything back up */ | |
760 | + status = ufx_enable(dev, true); | |
761 | + check_warn_return(status, "ufx_set_vid_mode error enabling display"); | |
762 | + | |
763 | + /* Unblank the display */ | |
764 | + status = ufx_unblank(dev, true); | |
765 | + check_warn_return(status, "ufx_set_vid_mode error unblanking display"); | |
766 | + | |
767 | + /* enable RGB pad */ | |
768 | + status = ufx_reg_write(dev, 0x8028, 0x00000003); | |
769 | + check_warn_return(status, "ufx_set_vid_mode error enabling RGB pad"); | |
770 | + | |
771 | + /* enable VDAC */ | |
772 | + status = ufx_reg_write(dev, 0x8024, 0x00000007); | |
773 | + check_warn_return(status, "ufx_set_vid_mode error enabling VDAC"); | |
774 | + | |
775 | + return 0; | |
776 | +} | |
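As a concrete illustration of the timing-register packing, take standard VESA 1024x768@60 values (xres = 1024, right_margin = 24, hsync_len = 136, left_margin = 160; yres = 768, lower_margin = 3, vsync_len = 6, upper_margin = 29 — example figures, not taken from this patch):

/* h_total = 1024 + 24 + 136 + 160 = 1344, h_active = 1024
 *   reg 0x2008 = ((1344 - 1) << 16) | (1024 - 1) = 0x053f03ff
 * h_blank_start = h_sync_start = 1048, h_blank_end = h_sync_end = 1184
 *   regs 0x200C and 0x2010 = ((1048 - 1) << 16) | (1184 - 1) = 0x0417049f
 * v_total = 29 + 768 + 3 + 6 = 806, v_active = 768
 *   reg 0x2014 = ((806 - 1) << 16) | (768 - 1) = 0x032502ff
 * frame length reg 0x2028 = 1024 * 768 * 2 = 0x180000 (already 8-byte aligned)
 */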
777 | + | |
778 | +static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma) | |
779 | +{ | |
780 | + unsigned long start = vma->vm_start; | |
781 | + unsigned long size = vma->vm_end - vma->vm_start; | |
782 | + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | |
783 | + unsigned long page, pos; | |
784 | + | |
785 | + if (offset + size > info->fix.smem_len) | |
786 | + return -EINVAL; | |
787 | + | |
788 | + pos = (unsigned long)info->fix.smem_start + offset; | |
789 | + | |
790 | + pr_debug("mmap() framebuffer addr:%lu size:%lu\n", | |
791 | + pos, size); | |
792 | + | |
793 | + while (size > 0) { | |
794 | + page = vmalloc_to_pfn((void *)pos); | |
795 | + if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) | |
796 | + return -EAGAIN; | |
797 | + | |
798 | + start += PAGE_SIZE; | |
799 | + pos += PAGE_SIZE; | |
800 | + if (size > PAGE_SIZE) | |
801 | + size -= PAGE_SIZE; | |
802 | + else | |
803 | + size = 0; | |
804 | + } | |
805 | + | |
806 | + vma->vm_flags |= VM_RESERVED; /* prevent this VMA from being swapped out */ | 
807 | + return 0; | |
808 | +} | |
809 | + | |
810 | +static void ufx_raw_rect(struct ufx_data *dev, char *cmd, int x, int y, | |
811 | + int width, int height) | |
812 | +{ | |
813 | + size_t packed_line_len = ALIGN((width * 2), 4); | |
814 | + size_t packed_rect_len = packed_line_len * height; | |
815 | + int line; | |
816 | + | |
817 | + BUG_ON(!dev); | |
818 | + BUG_ON(!dev->info); | |
819 | + | |
820 | + /* command word */ | |
821 | + *((u32 *)&cmd[0]) = cpu_to_le32(0x01); | |
822 | + | |
823 | + /* length word */ | |
824 | + *((u32 *)&cmd[4]) = cpu_to_le32(packed_rect_len + 16); | |
825 | + | |
826 | + *((u16 *)&cmd[8]) = cpu_to_le16(x); | |
827 | + *((u16 *)&cmd[10]) = cpu_to_le16(y); | |
828 | + *((u16 *)&cmd[12]) = cpu_to_le16(width); | |
829 | + *((u16 *)&cmd[14]) = cpu_to_le16(height); | |
830 | + | |
831 | + /* frame base address */ | |
832 | + *((u32 *)&cmd[16]) = cpu_to_le32(0 & 0xffffff80); | |
833 | + | |
834 | + /* color mode and horizontal resolution */ | |
835 | + *((u16 *)&cmd[20]) = cpu_to_le16(0x4000 | dev->info->var.xres); | |
836 | + | |
837 | + /* vertical resolution */ | |
838 | + *((u16 *)&cmd[22]) = cpu_to_le16(dev->info->var.yres); | |
839 | + | |
840 | + /* packed data */ | |
841 | + for (line = 0; line < height; line++) { | |
842 | + const int line_offset = dev->info->fix.line_length * (y + line); | |
843 | + const int byte_offset = line_offset + (x * BPP); | |
844 | + memcpy(&cmd[24 + (packed_line_len * line)], | |
845 | + (char *)dev->info->fix.smem_start + byte_offset, width * BPP); | |
846 | + } | |
847 | +} | |
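For reference, the 24-byte header that ufx_raw_rect builds works out as follows for a small update — an 8x2 rectangle at (0, 0) on a 1024x768 mode (example numbers, not from the patch):

/* width = 8, height = 2, BPP = 2:
 *   packed_line_len = ALIGN(8 * 2, 4) = 16, packed_rect_len = 16 * 2 = 32
 *   cmd[0..3]   = 0x00000001                  command word (LE)
 *   cmd[4..7]   = 32 + 16 = 48                length word
 *   cmd[8..15]  = x = 0, y = 0, w = 8, h = 2  (one u16 each, LE)
 *   cmd[16..19] = 0x00000000                  frame base address
 *   cmd[20..21] = 0x4000 | 1024 = 0x4400      color mode + horizontal resolution
 *   cmd[22..23] = 768                         vertical resolution
 *   cmd[24..55] = two 16-byte lines copied from the shadow framebuffer
 */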
848 | + | |
849 | +int ufx_handle_damage(struct ufx_data *dev, int x, int y, | |
850 | + int width, int height) | |
851 | +{ | |
852 | + size_t packed_line_len = ALIGN((width * 2), 4); | |
853 | + int len, status, urb_lines, start_line = 0; | |
854 | + | |
855 | + if ((width <= 0) || (height <= 0) || | |
856 | + (x + width > dev->info->var.xres) || | |
857 | + (y + height > dev->info->var.yres)) | |
858 | + return -EINVAL; | |
859 | + | |
860 | + if (!atomic_read(&dev->usb_active)) | |
861 | + return 0; | |
862 | + | |
863 | + while (start_line < height) { | |
864 | + struct urb *urb = ufx_get_urb(dev); | |
865 | + if (!urb) { | |
866 | + pr_warn("ufx_handle_damage unable to get urb"); | |
867 | + return 0; | |
868 | + } | |
869 | + | |
870 | + /* assume we have enough space to transfer at least one line */ | |
871 | + BUG_ON(urb->transfer_buffer_length < (24 + (width * 2))); | |
872 | + | |
873 | + /* calculate the maximum number of lines we could fit in */ | |
874 | + urb_lines = (urb->transfer_buffer_length - 24) / packed_line_len; | |
875 | + | |
876 | + /* but we might not need this many */ | |
877 | + urb_lines = min(urb_lines, (height - start_line)); | |
878 | + | |
879 | + memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); | |
880 | + | |
881 | + ufx_raw_rect(dev, urb->transfer_buffer, x, (y + start_line), width, urb_lines); | |
882 | + len = 24 + (packed_line_len * urb_lines); | |
883 | + | |
884 | + status = ufx_submit_urb(dev, urb, len); | |
885 | + check_warn_return(status, "Error submitting URB"); | |
886 | + | |
887 | + start_line += urb_lines; | |
888 | + } | |
889 | + | |
890 | + return 0; | |
891 | +} | |
892 | + | |
893 | +/* Path triggered by usermode clients who write to filesystem | |
894 | + * e.g. cat filename > /dev/fb1 | |
895 | + * Not used by X Windows or text-mode console. But useful for testing. | |
896 | + * Slow because of extra copy and we must assume all pixels dirty. */ | |
897 | +static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf, | |
898 | + size_t count, loff_t *ppos) | |
899 | +{ | |
900 | + ssize_t result; | |
901 | + struct ufx_data *dev = info->par; | |
902 | + u32 offset = (u32) *ppos; | |
903 | + | |
904 | + result = fb_sys_write(info, buf, count, ppos); | |
905 | + | |
906 | + if (result > 0) { | |
907 | + int start = max((int)(offset / info->fix.line_length) - 1, 0); | |
908 | + int lines = min((u32)((result / info->fix.line_length) + 1), | |
909 | + (u32)info->var.yres); | |
910 | + | |
911 | + ufx_handle_damage(dev, 0, start, info->var.xres, lines); | |
912 | + } | |
913 | + | |
914 | + return result; | |
915 | +} | |
916 | + | |
917 | +static void ufx_ops_copyarea(struct fb_info *info, | |
918 | + const struct fb_copyarea *area) | |
919 | +{ | |
920 | + | |
921 | + struct ufx_data *dev = info->par; | |
922 | + | |
923 | + sys_copyarea(info, area); | |
924 | + | |
925 | + ufx_handle_damage(dev, area->dx, area->dy, | |
926 | + area->width, area->height); | |
927 | +} | |
928 | + | |
929 | +static void ufx_ops_imageblit(struct fb_info *info, | |
930 | + const struct fb_image *image) | |
931 | +{ | |
932 | + struct ufx_data *dev = info->par; | |
933 | + | |
934 | + sys_imageblit(info, image); | |
935 | + | |
936 | + ufx_handle_damage(dev, image->dx, image->dy, | |
937 | + image->width, image->height); | |
938 | +} | |
939 | + | |
940 | +static void ufx_ops_fillrect(struct fb_info *info, | |
941 | + const struct fb_fillrect *rect) | |
942 | +{ | |
943 | + struct ufx_data *dev = info->par; | |
944 | + | |
945 | + sys_fillrect(info, rect); | |
946 | + | |
947 | + ufx_handle_damage(dev, rect->dx, rect->dy, rect->width, | |
948 | + rect->height); | |
949 | +} | |
950 | + | |
951 | +/* NOTE: fb_defio.c is holding info->fbdefio.mutex | |
952 | + * Touching ANY framebuffer memory that triggers a page fault | |
953 | + * in fb_defio will cause a deadlock, when it also tries to | |
954 | + * grab the same mutex. */ | |
955 | +static void ufx_dpy_deferred_io(struct fb_info *info, | |
956 | + struct list_head *pagelist) | |
957 | +{ | |
958 | + struct page *cur; | |
959 | + struct fb_deferred_io *fbdefio = info->fbdefio; | |
960 | + struct ufx_data *dev = info->par; | |
961 | + | |
962 | + if (!fb_defio) | |
963 | + return; | |
964 | + | |
965 | + if (!atomic_read(&dev->usb_active)) | |
966 | + return; | |
967 | + | |
968 | + /* walk the written page list and render each to device */ | |
969 | + list_for_each_entry(cur, &fbdefio->pagelist, lru) { | |
970 | + /* create a rectangle of full screen width that encloses the | |
971 | + * entire dirty framebuffer page */ | |
972 | + const int x = 0; | |
973 | + const int width = dev->info->var.xres; | |
974 | + const int y = (cur->index << PAGE_SHIFT) / (width * 2); | |
975 | + int height = (PAGE_SIZE / (width * 2)) + 1; | |
976 | + height = min(height, (int)(dev->info->var.yres - y)); | |
977 | + | |
978 | + BUG_ON(y >= dev->info->var.yres); | |
979 | + BUG_ON((y + height) > dev->info->var.yres); | |
980 | + | |
981 | + ufx_handle_damage(dev, x, y, width, height); | |
982 | + } | |
983 | +} | |
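The page-to-rectangle math is easier to follow with numbers. Assuming a 1024x768, 16 bpp mode and 4 KiB pages (illustrative values only):

/* one line = 1024 * 2 = 2048 bytes, so each 4096-byte page spans 2 lines
 *   page index 10: y = (10 << 12) / 2048 = 20
 *   height = (4096 / 2048) + 1 = 3   (the +1 covers pages that straddle a line)
 * giving a damage rectangle of x = 0, y = 20, width = 1024, height = 3,
 * clamped so that y + height never exceeds yres.
 */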
984 | + | |
985 | +static int ufx_ops_ioctl(struct fb_info *info, unsigned int cmd, | |
986 | + unsigned long arg) | |
987 | +{ | |
988 | + struct ufx_data *dev = info->par; | |
989 | + struct dloarea *area = NULL; | |
990 | + | |
991 | + if (!atomic_read(&dev->usb_active)) | |
992 | + return 0; | |
993 | + | |
994 | + /* TODO: Update X server to get this from sysfs instead */ | |
995 | + if (cmd == UFX_IOCTL_RETURN_EDID) { | |
996 | + char *edid = (char *)arg; | |
997 | + if (copy_to_user(edid, dev->edid, dev->edid_size)) | |
998 | + return -EFAULT; | |
999 | + return 0; | |
1000 | + } | |
1001 | + | |
1002 | + /* TODO: Help propose a standard fb.h ioctl to report mmap damage */ | |
1003 | + if (cmd == UFX_IOCTL_REPORT_DAMAGE) { | |
1004 | + /* If we have a damage-aware client, turn fb_defio "off" | |
1005 | + * to avoid the perf impact of unnecessary page fault handling. | 
1006 | + * Done by resetting the delay for this fb_info to a very | |
1007 | + * long period. Pages will become writable and stay that way. | |
1008 | + * Reset to normal value when all clients have closed this fb. | |
1009 | + */ | |
1010 | + if (info->fbdefio) | |
1011 | + info->fbdefio->delay = UFX_DEFIO_WRITE_DISABLE; | |
1012 | + | |
1013 | + area = (struct dloarea *)arg; | |
1014 | + | |
1015 | + if (area->x < 0) | |
1016 | + area->x = 0; | |
1017 | + | |
1018 | + if (area->x > info->var.xres) | |
1019 | + area->x = info->var.xres; | |
1020 | + | |
1021 | + if (area->y < 0) | |
1022 | + area->y = 0; | |
1023 | + | |
1024 | + if (area->y > info->var.yres) | |
1025 | + area->y = info->var.yres; | |
1026 | + | |
1027 | + ufx_handle_damage(dev, area->x, area->y, area->w, area->h); | |
1028 | + } | |
1029 | + | |
1030 | + return 0; | |
1031 | +} | |
1032 | + | |
1033 | +/* taken from vesafb */ | |
1034 | +static int | |
1035 | +ufx_ops_setcolreg(unsigned regno, unsigned red, unsigned green, | |
1036 | + unsigned blue, unsigned transp, struct fb_info *info) | |
1037 | +{ | |
1038 | + int err = 0; | |
1039 | + | |
1040 | + if (regno >= info->cmap.len) | |
1041 | + return 1; | |
1042 | + | |
1043 | + if (regno < 16) { | |
1044 | + if (info->var.red.offset == 10) { | |
1045 | + /* 1:5:5:5 */ | |
1046 | + ((u32 *) (info->pseudo_palette))[regno] = | |
1047 | + ((red & 0xf800) >> 1) | | |
1048 | + ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11); | |
1049 | + } else { | |
1050 | + /* 0:5:6:5 */ | |
1051 | + ((u32 *) (info->pseudo_palette))[regno] = | |
1052 | + ((red & 0xf800)) | | |
1053 | + ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11); | |
1054 | + } | |
1055 | + } | |
1056 | + | |
1057 | + return err; | |
1058 | +} | |
1059 | + | |
1060 | +/* It's common for several clients to have framebuffer open simultaneously. | |
1061 | + * e.g. both fbcon and X. Makes things interesting. | |
1062 | + * Assumes caller is holding info->lock (for open and release at least) */ | |
1063 | +static int ufx_ops_open(struct fb_info *info, int user) | |
1064 | +{ | |
1065 | + struct ufx_data *dev = info->par; | |
1066 | + | |
1067 | + /* fbcon aggressively connects to first framebuffer it finds, | |
1068 | + * preventing other clients (X) from working properly. Usually | |
1069 | + * not what the user wants. Fail by default with option to enable. */ | |
1070 | + if (user == 0 && !console) | |
1071 | + return -EBUSY; | |
1072 | + | |
1073 | + /* If the USB device is gone, we don't accept new opens */ | |
1074 | + if (dev->virtualized) | |
1075 | + return -ENODEV; | |
1076 | + | |
1077 | + dev->fb_count++; | |
1078 | + | |
1079 | + kref_get(&dev->kref); | |
1080 | + | |
1081 | + if (fb_defio && (info->fbdefio == NULL)) { | |
1082 | + /* enable defio at last moment if not disabled by client */ | |
1083 | + | |
1084 | + struct fb_deferred_io *fbdefio; | |
1085 | + | |
1086 | + fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); | |
1087 | + | |
1088 | + if (fbdefio) { | |
1089 | + fbdefio->delay = UFX_DEFIO_WRITE_DELAY; | |
1090 | + fbdefio->deferred_io = ufx_dpy_deferred_io; | |
1091 | + } | |
1092 | + | |
1093 | + info->fbdefio = fbdefio; | |
1094 | + fb_deferred_io_init(info); | |
1095 | + } | |
1096 | + | |
1097 | + pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d", | |
1098 | + info->node, user, info, dev->fb_count); | |
1099 | + | |
1100 | + return 0; | |
1101 | +} | |
1102 | + | |
1103 | +/* | |
1104 | + * Called when all client interfaces to start transactions have been disabled, | |
1105 | + * and all references to our device instance (ufx_data) are released. | |
1106 | + * Every transaction must have a reference, so we know we are fully spun down | 
1107 | + */ | |
1108 | +static void ufx_free(struct kref *kref) | |
1109 | +{ | |
1110 | + struct ufx_data *dev = container_of(kref, struct ufx_data, kref); | |
1111 | + | |
1112 | + /* this function will wait for all in-flight urbs to complete */ | |
1113 | + if (dev->urbs.count > 0) | |
1114 | + ufx_free_urb_list(dev); | |
1115 | + | |
1116 | + pr_debug("freeing ufx_data %p", dev); | |
1117 | + | |
1118 | + kfree(dev); | |
1119 | +} | |
1120 | + | |
1121 | +static void ufx_release_urb_work(struct work_struct *work) | |
1122 | +{ | |
1123 | + struct urb_node *unode = container_of(work, struct urb_node, | |
1124 | + release_urb_work.work); | |
1125 | + | |
1126 | + up(&unode->dev->urbs.limit_sem); | |
1127 | +} | |
1128 | + | |
1129 | +static void ufx_free_framebuffer_work(struct work_struct *work) | |
1130 | +{ | |
1131 | + struct ufx_data *dev = container_of(work, struct ufx_data, | |
1132 | + free_framebuffer_work.work); | |
1133 | + struct fb_info *info = dev->info; | |
1134 | + int node = info->node; | |
1135 | + | |
1136 | + unregister_framebuffer(info); | |
1137 | + | |
1138 | + if (info->cmap.len != 0) | |
1139 | + fb_dealloc_cmap(&info->cmap); | |
1140 | + if (info->monspecs.modedb) | |
1141 | + fb_destroy_modedb(info->monspecs.modedb); | |
1142 | + if (info->screen_base) | |
1143 | + vfree(info->screen_base); | |
1144 | + | |
1145 | + fb_destroy_modelist(&info->modelist); | |
1146 | + | |
1147 | + dev->info = 0; | |
1148 | + | |
1149 | + /* Assume info structure is freed after this point */ | |
1150 | + framebuffer_release(info); | |
1151 | + | |
1152 | + pr_debug("fb_info for /dev/fb%d has been freed", node); | |
1153 | + | |
1154 | + /* ref taken in probe() as part of registering framebuffer */ | 
1155 | + kref_put(&dev->kref, ufx_free); | |
1156 | +} | |
1157 | + | |
1158 | +/* | |
1159 | + * Assumes caller is holding info->lock mutex (for open and release at least) | |
1160 | + */ | |
1161 | +static int ufx_ops_release(struct fb_info *info, int user) | |
1162 | +{ | |
1163 | + struct ufx_data *dev = info->par; | |
1164 | + | |
1165 | + dev->fb_count--; | |
1166 | + | |
1167 | + /* We can't free fb_info here - fbmem will touch it when we return */ | |
1168 | + if (dev->virtualized && (dev->fb_count == 0)) | |
1169 | + schedule_delayed_work(&dev->free_framebuffer_work, HZ); | |
1170 | + | |
1171 | + if ((dev->fb_count == 0) && (info->fbdefio)) { | |
1172 | + fb_deferred_io_cleanup(info); | |
1173 | + kfree(info->fbdefio); | |
1174 | + info->fbdefio = NULL; | |
1175 | + info->fbops->fb_mmap = ufx_ops_mmap; | |
1176 | + } | |
1177 | + | |
1178 | + pr_debug("released /dev/fb%d user=%d count=%d", | |
1179 | + info->node, user, dev->fb_count); | |
1180 | + | |
1181 | + kref_put(&dev->kref, ufx_free); | |
1182 | + | |
1183 | + return 0; | |
1184 | +} | |
1185 | + | |
1186 | +/* Check whether a video mode is supported by the chip | |
1187 | + * We start from monitor's modes, so don't need to filter that here */ | |
1188 | +static int ufx_is_valid_mode(struct fb_videomode *mode, | |
1189 | + struct fb_info *info) | |
1190 | +{ | |
1191 | + if ((mode->xres * mode->yres) > (2048 * 1152)) { | |
1192 | + pr_debug("%dx%d too many pixels", | |
1193 | + mode->xres, mode->yres); | |
1194 | + return 0; | |
1195 | + } | |
1196 | + | |
1197 | + if (mode->pixclock < 5000) { | |
1198 | + pr_debug("%dx%d %dps pixel clock too fast", | |
1199 | + mode->xres, mode->yres, mode->pixclock); | |
1200 | + return 0; | |
1201 | + } | |
1202 | + | |
1203 | + pr_debug("%dx%d (pixclk %dps %dMHz) valid mode", mode->xres, mode->yres, | |
1204 | + mode->pixclock, (1000000 / mode->pixclock)); | |
1205 | + return 1; | |
1206 | +} | |
1207 | + | |
1208 | +static void ufx_var_color_format(struct fb_var_screeninfo *var) | |
1209 | +{ | |
1210 | + const struct fb_bitfield red = { 11, 5, 0 }; | |
1211 | + const struct fb_bitfield green = { 5, 6, 0 }; | |
1212 | + const struct fb_bitfield blue = { 0, 5, 0 }; | |
1213 | + | |
1214 | + var->bits_per_pixel = 16; | |
1215 | + var->red = red; | |
1216 | + var->green = green; | |
1217 | + var->blue = blue; | |
1218 | +} | |
1219 | + | |
1220 | +static int ufx_ops_check_var(struct fb_var_screeninfo *var, | |
1221 | + struct fb_info *info) | |
1222 | +{ | |
1223 | + struct fb_videomode mode; | |
1224 | + | |
1225 | + /* TODO: support dynamically changing framebuffer size */ | |
1226 | + if ((var->xres * var->yres * 2) > info->fix.smem_len) | |
1227 | + return -EINVAL; | |
1228 | + | |
1229 | + /* set device-specific elements of var unrelated to mode */ | |
1230 | + ufx_var_color_format(var); | |
1231 | + | |
1232 | + fb_var_to_videomode(&mode, var); | |
1233 | + | |
1234 | + if (!ufx_is_valid_mode(&mode, info)) | |
1235 | + return -EINVAL; | |
1236 | + | |
1237 | + return 0; | |
1238 | +} | |
1239 | + | |
1240 | +static int ufx_ops_set_par(struct fb_info *info) | |
1241 | +{ | |
1242 | + struct ufx_data *dev = info->par; | |
1243 | + int result; | |
1244 | + u16 *pix_framebuffer; | |
1245 | + int i; | |
1246 | + | |
1247 | + pr_debug("set_par mode %dx%d", info->var.xres, info->var.yres); | |
1248 | + result = ufx_set_vid_mode(dev, &info->var); | |
1249 | + | |
1250 | + if ((result == 0) && (dev->fb_count == 0)) { | |
1251 | + /* paint greenscreen */ | |
1252 | + pix_framebuffer = (u16 *) info->screen_base; | |
1253 | + for (i = 0; i < info->fix.smem_len / 2; i++) | |
1254 | + pix_framebuffer[i] = 0x37e6; | |
1255 | + | |
1256 | + ufx_handle_damage(dev, 0, 0, info->var.xres, info->var.yres); | |
1257 | + } | |
1258 | + | |
1259 | + /* re-enable defio if previously disabled by damage tracking */ | |
1260 | + if (info->fbdefio) | |
1261 | + info->fbdefio->delay = UFX_DEFIO_WRITE_DELAY; | |
1262 | + | |
1263 | + return result; | |
1264 | +} | |
1265 | + | |
1266 | +/* In order to come back from full DPMS off, we need to set the mode again */ | |
1267 | +static int ufx_ops_blank(int blank_mode, struct fb_info *info) | |
1268 | +{ | |
1269 | + struct ufx_data *dev = info->par; | |
1270 | + ufx_set_vid_mode(dev, &info->var); | |
1271 | + return 0; | |
1272 | +} | |
1273 | + | |
1274 | +static struct fb_ops ufx_ops = { | |
1275 | + .owner = THIS_MODULE, | |
1276 | + .fb_read = fb_sys_read, | |
1277 | + .fb_write = ufx_ops_write, | |
1278 | + .fb_setcolreg = ufx_ops_setcolreg, | |
1279 | + .fb_fillrect = ufx_ops_fillrect, | |
1280 | + .fb_copyarea = ufx_ops_copyarea, | |
1281 | + .fb_imageblit = ufx_ops_imageblit, | |
1282 | + .fb_mmap = ufx_ops_mmap, | |
1283 | + .fb_ioctl = ufx_ops_ioctl, | |
1284 | + .fb_open = ufx_ops_open, | |
1285 | + .fb_release = ufx_ops_release, | |
1286 | + .fb_blank = ufx_ops_blank, | |
1287 | + .fb_check_var = ufx_ops_check_var, | |
1288 | + .fb_set_par = ufx_ops_set_par, | |
1289 | +}; | |
1290 | + | |
1291 | +/* Assumes &info->lock held by caller | |
1292 | + * Assumes no active clients have framebuffer open */ | |
1293 | +static int ufx_realloc_framebuffer(struct ufx_data *dev, struct fb_info *info) | |
1294 | +{ | |
1295 | + int retval = -ENOMEM; | |
1296 | + int old_len = info->fix.smem_len; | |
1297 | + int new_len; | |
1298 | + unsigned char *old_fb = info->screen_base; | |
1299 | + unsigned char *new_fb; | |
1300 | + | |
1301 | + pr_debug("Reallocating framebuffer. Addresses will change!"); | |
1302 | + | |
1303 | + new_len = info->fix.line_length * info->var.yres; | |
1304 | + | |
1305 | + if (PAGE_ALIGN(new_len) > old_len) { | |
1306 | + /* | |
1307 | + * Alloc system memory for virtual framebuffer | |
1308 | + */ | |
1309 | + new_fb = vmalloc(new_len); | |
1310 | + if (!new_fb) { | |
1311 | + pr_err("Virtual framebuffer alloc failed"); | |
1312 | + goto error; | |
1313 | + } | |
1314 | + | |
1315 | + if (info->screen_base) { | |
1316 | + memcpy(new_fb, old_fb, old_len); | |
1317 | + vfree(info->screen_base); | |
1318 | + } | |
1319 | + | |
1320 | + info->screen_base = new_fb; | |
1321 | + info->fix.smem_len = PAGE_ALIGN(new_len); | |
1322 | + info->fix.smem_start = (unsigned long) new_fb; | |
1323 | + info->flags = smscufx_info_flags; | |
1324 | + } | |
1325 | + | |
1326 | + retval = 0; | |
1327 | + | |
1328 | +error: | |
1329 | + return retval; | |
1330 | +} | |
1331 | + | |
1332 | +/* sets up I2C Controller for 100 Kbps, std. speed, 7-bit addr, master, | |
1333 | + * restart enabled, but no start byte, enable controller */ | |
1334 | +static int ufx_i2c_init(struct ufx_data *dev) | |
1335 | +{ | |
1336 | + u32 tmp; | |
1337 | + | |
1338 | + /* disable the controller before it can be reprogrammed */ | |
1339 | + int status = ufx_reg_write(dev, 0x106C, 0x00); | |
1340 | + check_warn_return(status, "failed to disable I2C"); | |
1341 | + | |
1342 | + /* Setup the clock count registers | |
1343 | + * (12+1) = 13 clks @ 2.5 MHz = 5.2 uS */ | |
1344 | + status = ufx_reg_write(dev, 0x1018, 12); | |
1345 | + check_warn_return(status, "error writing 0x1018"); | |
1346 | + | |
1347 | + /* (6+8) = 14 clks @ 2.5 MHz = 5.6 uS */ | |
1348 | + status = ufx_reg_write(dev, 0x1014, 6); | |
1349 | + check_warn_return(status, "error writing 0x1014"); | |
1350 | + | |
1351 | + status = ufx_reg_read(dev, 0x1000, &tmp); | |
1352 | + check_warn_return(status, "error reading 0x1000"); | |
1353 | + | |
1354 | + /* set speed to std mode */ | |
1355 | + tmp &= ~(0x06); | |
1356 | + tmp |= 0x02; | |
1357 | + | |
1358 | + /* 7-bit (not 10-bit) addressing */ | |
1359 | + tmp &= ~(0x10); | |
1360 | + | |
1361 | + /* enable restart conditions and master mode */ | |
1362 | + tmp |= 0x21; | |
1363 | + | |
1364 | + status = ufx_reg_write(dev, 0x1000, tmp); | |
1365 | + check_warn_return(status, "error writing 0x1000"); | |
1366 | + | |
1367 | + /* Set normal tx using target address 0 */ | |
1368 | + status = ufx_reg_clear_and_set_bits(dev, 0x1004, 0xC00, 0x000); | |
1369 | + check_warn_return(status, "error setting TX mode bits in 0x1004"); | |
1370 | + | |
1371 | + /* Enable the controller */ | |
1372 | + status = ufx_reg_write(dev, 0x106C, 0x01); | |
1373 | + check_warn_return(status, "failed to enable I2C"); | |
1374 | + | |
1375 | + return 0; | |
1376 | +} | |
1377 | + | |
1378 | +/* sets the I2C port mux and target address */ | |
1379 | +static int ufx_i2c_configure(struct ufx_data *dev) | |
1380 | +{ | |
1381 | + int status = ufx_reg_write(dev, 0x106C, 0x00); | |
1382 | + check_warn_return(status, "failed to disable I2C"); | |
1383 | + | |
1384 | + status = ufx_reg_write(dev, 0x3010, 0x00000000); | |
1385 | + check_warn_return(status, "failed to write 0x3010"); | |
1386 | + | |
1387 | + /* A0h is std for any EDID, right shifted by one */ | |
1388 | + status = ufx_reg_clear_and_set_bits(dev, 0x1004, 0x3FF, (0xA0 >> 1)); | |
1389 | + check_warn_return(status, "failed to set TAR bits in 0x1004"); | |
1390 | + | |
1391 | + status = ufx_reg_write(dev, 0x106C, 0x01); | |
1392 | + check_warn_return(status, "failed to enable I2C"); | |
1393 | + | |
1394 | + return 0; | |
1395 | +} | |
1396 | + | |
1397 | +/* wait for BUSY to clear, with a timeout of 50ms with 10ms sleeps. if no | |
1398 | + * monitor is connected, there is no error except for timeout */ | |
1399 | +static int ufx_i2c_wait_busy(struct ufx_data *dev) | |
1400 | +{ | |
1401 | + u32 tmp; | |
1402 | + int i, status; | |
1403 | + | |
1404 | + for (i = 0; i < 15; i++) { | |
1405 | + status = ufx_reg_read(dev, 0x1100, &tmp); | |
1406 | + check_warn_return(status, "0x1100 read failed"); | |
1407 | + | |
1408 | + /* if BUSY is clear, check for error */ | |
1409 | + if ((tmp & 0x80000000) == 0) { | |
1410 | + if (tmp & 0x20000000) { | |
1411 | + pr_warn("I2C read failed, 0x1100=0x%08x", tmp); | |
1412 | + return -EIO; | |
1413 | + } | |
1414 | + | |
1415 | + return 0; | |
1416 | + } | |
1417 | + | |
1418 | + /* perform the first 10 retries without delay */ | |
1419 | + if (i >= 10) | |
1420 | + msleep(10); | |
1421 | + } | |
1422 | + | |
1423 | + pr_warn("I2C access timed out, resetting I2C hardware"); | |
1424 | + status = ufx_reg_write(dev, 0x1100, 0x40000000); | |
1425 | + check_warn_return(status, "0x1100 write failed"); | |
1426 | + | |
1427 | + return -ETIMEDOUT; | |
1428 | +} | |
1429 | + | |
1430 | +/* reads a 128-byte EDID block from the currently selected port and TAR */ | |
1431 | +static int ufx_read_edid(struct ufx_data *dev, char *edid, int edid_len) | |
1432 | +{ | |
1433 | + int i, j, status; | |
1434 | + u32 *edid_u32 = (u32 *)edid; | |
1435 | + | |
1436 | + BUG_ON(edid_len != EDID_LENGTH); | |
1437 | + | |
1438 | + status = ufx_i2c_configure(dev); | |
1439 | + if (status < 0) { | |
1440 | + pr_err("ufx_i2c_configure failed"); | |
1441 | + return status; | |
1442 | + } | |
1443 | + | |
1444 | + memset(edid, 0xff, EDID_LENGTH); | |
1445 | + | |
1446 | + /* Read the 128-byte EDID as 2 bursts of 64 bytes */ | |
1447 | + for (i = 0; i < 2; i++) { | |
1448 | + u32 temp = 0x28070000 | (63 << 20) | (((u32)(i * 64)) << 8); | |
1449 | + status = ufx_reg_write(dev, 0x1100, temp); | |
1450 | + check_warn_return(status, "Failed to write 0x1100"); | |
1451 | + | |
1452 | + temp |= 0x80000000; | |
1453 | + status = ufx_reg_write(dev, 0x1100, temp); | |
1454 | + check_warn_return(status, "Failed to write 0x1100"); | |
1455 | + | |
1456 | + status = ufx_i2c_wait_busy(dev); | |
1457 | + check_warn_return(status, "Timeout waiting for I2C BUSY to clear"); | |
1458 | + | |
1459 | + for (j = 0; j < 16; j++) { | |
1460 | + u32 data_reg_addr = 0x1110 + (j * 4); | |
1461 | + status = ufx_reg_read(dev, data_reg_addr, edid_u32++); | |
1462 | + check_warn_return(status, "Error reading i2c data"); | |
1463 | + } | |
1464 | + } | |
1465 | + | |
1466 | + /* all 0xFF in the first 16 bytes indicates no monitor is connected */ | |
1467 | + for (i = 0; i < 16; i++) { | |
1468 | + if (edid[i] != 0xFF) { | |
1469 | + pr_debug("edid data read successfully"); | |
1470 | + return EDID_LENGTH; | |
1471 | + } | |
1472 | + } | |
1473 | + | |
1474 | + pr_warn("edid data contains all 0xff"); | |
1475 | + return -ETIMEDOUT; | |
1476 | +} | |
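The all-0xFF test above is only a "nothing connected" heuristic; full validation of the block is left to fb_edid_to_monspecs() in ufx_setup_modes() below. For illustration only (not part of the patch), a caller could additionally verify the fixed 8-byte EDID header before trusting the data; example_edid_header_ok is a hypothetical helper:

	/* Sketch: every valid EDID block begins with 00 FF FF FF FF FF FF 00 */
	static bool example_edid_header_ok(const u8 *edid)
	{
		static const u8 header[8] = {
			0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
		};

		return memcmp(edid, header, sizeof(header)) == 0;
	}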
1477 | + | |
1478 | +/* 1) Read EDID from the device, falling back to a software default | |
1479 | + * 2) Parse into various fb_info structs | |
1480 | + * 3) Allocate virtual framebuffer memory to back highest res mode | |
1481 | + * | |
1482 | + * Parses EDID into three places used by various parts of fbdev: | |
1483 | + * fb_var_screeninfo contains the timing of the monitor's preferred mode | |
1484 | + * fb_info.monspecs is full parsed EDID info, including monspecs.modedb | |
1485 | + * fb_info.modelist is a linked list of all monitor & VESA modes which work | |
1486 | + * | |
1487 | + * If EDID is not readable/valid, then modelist is all VESA modes, | |
1488 | + * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode | |
1489 | + * Returns 0 if successful */ | |
1490 | +static int ufx_setup_modes(struct ufx_data *dev, struct fb_info *info, | |
1491 | + char *default_edid, size_t default_edid_size) | |
1492 | +{ | |
1493 | + const struct fb_videomode *default_vmode = NULL; | |
1494 | + char *edid; | |
1495 | + int i, result = 0, tries = 3; | |
1496 | + | |
1497 | + if (info->dev) /* only use mutex if info has been registered */ | |
1498 | + mutex_lock(&info->lock); | |
1499 | + | |
1500 | + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); | |
1501 | + if (!edid) { | |
1502 | + result = -ENOMEM; | |
1503 | + goto error; | |
1504 | + } | |
1505 | + | |
1506 | + fb_destroy_modelist(&info->modelist); | |
1507 | + memset(&info->monspecs, 0, sizeof(info->monspecs)); | |
1508 | + | |
1509 | + /* Try to (re)read EDID from hardware first | |
1510 | + * EDID data may be returned yet fail to parse as valid. | |
1511 | + * Try again a few times, in case of e.g. analog cable noise */ | |
1512 | + while (tries--) { | |
1513 | + i = ufx_read_edid(dev, edid, EDID_LENGTH); | |
1514 | + | |
1515 | + if (i >= EDID_LENGTH) | |
1516 | + fb_edid_to_monspecs(edid, &info->monspecs); | |
1517 | + | |
1518 | + if (info->monspecs.modedb_len > 0) { | |
1519 | + dev->edid = edid; | |
1520 | + dev->edid_size = i; | |
1521 | + break; | |
1522 | + } | |
1523 | + } | |
1524 | + | |
1525 | + /* If that fails, use a previously returned EDID if available */ | |
1526 | + if (info->monspecs.modedb_len == 0) { | |
1527 | + pr_err("Unable to get valid EDID from device/display\n"); | |
1528 | + | |
1529 | + if (dev->edid) { | |
1530 | + fb_edid_to_monspecs(dev->edid, &info->monspecs); | |
1531 | + if (info->monspecs.modedb_len > 0) | |
1532 | + pr_err("Using previously queried EDID\n"); | |
1533 | + } | |
1534 | + } | |
1535 | + | |
1536 | + /* If that fails, use the default EDID we were handed */ | |
1537 | + if (info->monspecs.modedb_len == 0) { | |
1538 | + if (default_edid_size >= EDID_LENGTH) { | |
1539 | + fb_edid_to_monspecs(default_edid, &info->monspecs); | |
1540 | + if (info->monspecs.modedb_len > 0) { | |
1541 | + memcpy(edid, default_edid, EDID_LENGTH); | |
1542 | + dev->edid = edid; | |
1543 | + dev->edid_size = EDID_LENGTH; | |
1544 | + pr_err("Using default/backup EDID\n"); | |
1545 | + } | |
1546 | + } | |
1547 | + } | |
1548 | + | |
1549 | + /* If we've got modes, let's pick a best default mode */ | |
1550 | + if (info->monspecs.modedb_len > 0) { | |
1551 | + | |
1552 | + for (i = 0; i < info->monspecs.modedb_len; i++) { | |
1553 | + if (ufx_is_valid_mode(&info->monspecs.modedb[i], info)) | |
1554 | + fb_add_videomode(&info->monspecs.modedb[i], | |
1555 | + &info->modelist); | |
1556 | + else /* if we've removed top/best mode */ | |
1557 | + info->monspecs.misc &= ~FB_MISC_1ST_DETAIL; | |
1558 | + } | |
1559 | + | |
1560 | + default_vmode = fb_find_best_display(&info->monspecs, | |
1561 | + &info->modelist); | |
1562 | + } | |
1563 | + | |
1564 | + /* If everything else has failed, fall back to safe default mode */ | |
1565 | + if (default_vmode == NULL) { | |
1566 | + | |
1567 | + struct fb_videomode fb_vmode = {0}; | |
1568 | + | |
1569 | + /* Add the standard VESA modes to our modelist | |
1570 | + * Since we don't have EDID, there may be modes that | |
1571 | + * overspec the monitor and/or have the wrong aspect ratio, etc. | |
1572 | + * But at least the user has a chance to choose | |
1573 | + */ | |
1574 | + for (i = 0; i < VESA_MODEDB_SIZE; i++) { | |
1575 | + if (ufx_is_valid_mode((struct fb_videomode *) | |
1576 | + &vesa_modes[i], info)) | |
1577 | + fb_add_videomode(&vesa_modes[i], | |
1578 | + &info->modelist); | |
1579 | + } | |
1580 | + | |
1581 | + /* default to resolution safe for projectors | |
1582 | + * (since they are the most common case without EDID) | |
1583 | + */ | |
1584 | + fb_vmode.xres = 800; | |
1585 | + fb_vmode.yres = 600; | |
1586 | + fb_vmode.refresh = 60; | |
1587 | + default_vmode = fb_find_nearest_mode(&fb_vmode, | |
1588 | + &info->modelist); | |
1589 | + } | |
1590 | + | |
1591 | + /* If we have good mode and no active clients */ | |
1592 | + if ((default_vmode != NULL) && (dev->fb_count == 0)) { | |
1593 | + | |
1594 | + fb_videomode_to_var(&info->var, default_vmode); | |
1595 | + ufx_var_color_format(&info->var); | |
1596 | + | |
1597 | + /* with mode size info, we can now alloc our framebuffer */ | |
1598 | + memcpy(&info->fix, &ufx_fix, sizeof(ufx_fix)); | |
1599 | + info->fix.line_length = info->var.xres * | |
1600 | + (info->var.bits_per_pixel / 8); | |
1601 | + | |
1602 | + result = ufx_realloc_framebuffer(dev, info); | |
1603 | + | |
1604 | + } else | |
1605 | + result = -EINVAL; | |
1606 | + | |
1607 | +error: | |
1608 | + if (edid && (dev->edid != edid)) | |
1609 | + kfree(edid); | |
1610 | + | |
1611 | + if (info->dev) | |
1612 | + mutex_unlock(&info->lock); | |
1613 | + | |
1614 | + return result; | |
1615 | +} | |
1616 | + | |
1617 | +static int ufx_usb_probe(struct usb_interface *interface, | |
1618 | + const struct usb_device_id *id) | |
1619 | +{ | |
1620 | + struct usb_device *usbdev; | |
1621 | + struct ufx_data *dev; | |
1622 | + struct fb_info *info = NULL; | |
1623 | + int retval = -ENOMEM; | |
1624 | + u32 id_rev, fpga_rev; | |
1625 | + | |
1626 | + /* usb initialization */ | |
1627 | + usbdev = interface_to_usbdev(interface); | |
1628 | + BUG_ON(!usbdev); | |
1629 | + | |
1630 | + dev = kzalloc(sizeof(*dev), GFP_KERNEL); | |
1631 | + if (dev == NULL) { | |
1632 | + dev_err(&usbdev->dev, "ufx_usb_probe: failed alloc of dev struct\n"); | |
1633 | + goto error; | |
1634 | + } | |
1635 | + | |
1636 | + /* we need to wait for both usb and fbdev to spin down on disconnect */ | |
1637 | + kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */ | |
1638 | + kref_get(&dev->kref); /* matching kref_put in free_framebuffer_work */ | |
1639 | + | |
1640 | + dev->udev = usbdev; | |
1641 | + dev->gdev = &usbdev->dev; /* our generic struct device * */ | |
1642 | + usb_set_intfdata(interface, dev); | |
1643 | + | |
1644 | + dev_dbg(dev->gdev, "%s %s - serial #%s\n", | |
1645 | + usbdev->manufacturer, usbdev->product, usbdev->serial); | |
1646 | + dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", | |
1647 | + usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, | |
1648 | + usbdev->descriptor.bcdDevice, dev); | |
1649 | + dev_dbg(dev->gdev, "console enable=%d\n", console); | |
1650 | + dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); | |
1651 | + | |
1652 | + if (!ufx_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { | |
1653 | + retval = -ENOMEM; | |
1654 | + dev_err(dev->gdev, "ufx_alloc_urb_list failed\n"); | |
1655 | + goto error; | |
1656 | + } | |
1657 | + | |
1658 | + /* We don't register a new USB class. Our client interface is fbdev */ | |
1659 | + | |
1660 | + /* allocates framebuffer driver structure, not framebuffer memory */ | |
1661 | + info = framebuffer_alloc(0, &usbdev->dev); | |
1662 | + if (!info) { | |
1663 | + retval = -ENOMEM; | |
1664 | + dev_err(dev->gdev, "framebuffer_alloc failed\n"); | |
1665 | + goto error; | |
1666 | + } | |
1667 | + | |
1668 | + dev->info = info; | |
1669 | + info->par = dev; | |
1670 | + info->pseudo_palette = dev->pseudo_palette; | |
1671 | + info->fbops = &ufx_ops; | |
1672 | + | |
1673 | + retval = fb_alloc_cmap(&info->cmap, 256, 0); | |
1674 | + if (retval < 0) { | |
1675 | + dev_err(dev->gdev, "fb_alloc_cmap failed %x\n", retval); | |
1676 | + goto error; | |
1677 | + } | |
1678 | + | |
1679 | + INIT_DELAYED_WORK(&dev->free_framebuffer_work, | |
1680 | + ufx_free_framebuffer_work); | |
1681 | + | |
1682 | + INIT_LIST_HEAD(&info->modelist); | |
1683 | + | |
1684 | + retval = ufx_reg_read(dev, 0x3000, &id_rev); | |
1685 | + check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval); | |
1686 | + dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev); | |
1687 | + | |
1688 | + retval = ufx_reg_read(dev, 0x3004, &fpga_rev); | |
1689 | + check_warn_goto_error(retval, "error %d reading 0x3004 register from device", retval); | |
1690 | + dev_dbg(dev->gdev, "FPGA_REV register value 0x%08x", fpga_rev); | |
1691 | + | |
1692 | + dev_dbg(dev->gdev, "resetting device"); | |
1693 | + retval = ufx_lite_reset(dev); | |
1694 | + check_warn_goto_error(retval, "error %d resetting device", retval); | |
1695 | + | |
1696 | + dev_dbg(dev->gdev, "configuring system clock"); | |
1697 | + retval = ufx_config_sys_clk(dev); | |
1698 | + check_warn_goto_error(retval, "error %d configuring system clock", retval); | |
1699 | + | |
1700 | + dev_dbg(dev->gdev, "configuring DDR2 controller"); | |
1701 | + retval = ufx_config_ddr2(dev); | |
1702 | + check_warn_goto_error(retval, "error %d initialising DDR2 controller", retval); | |
1703 | + | |
1704 | + dev_dbg(dev->gdev, "configuring I2C controller"); | |
1705 | + retval = ufx_i2c_init(dev); | |
1706 | + check_warn_goto_error(retval, "error %d initialising I2C controller", retval); | |
1707 | + | |
1708 | + dev_dbg(dev->gdev, "selecting display mode"); | |
1709 | + retval = ufx_setup_modes(dev, info, NULL, 0); | |
1710 | + check_warn_goto_error(retval, "unable to find common mode for display and adapter"); | |
1711 | + | |
1712 | + retval = ufx_reg_set_bits(dev, 0x4000, 0x00000001); | |
1713 | + check_warn_goto_error(retval, "error %d enabling graphics engine", retval); | |
1714 | + | |
1715 | + /* ready to begin using device */ | |
1716 | + atomic_set(&dev->usb_active, 1); | |
1717 | + | |
1718 | + dev_dbg(dev->gdev, "checking var"); | |
1719 | + retval = ufx_ops_check_var(&info->var, info); | |
1720 | + check_warn_goto_error(retval, "error %d ufx_ops_check_var", retval); | |
1721 | + | |
1722 | + dev_dbg(dev->gdev, "setting par"); | |
1723 | + retval = ufx_ops_set_par(info); | |
1724 | + check_warn_goto_error(retval, "error %d ufx_ops_set_par", retval); | |
1725 | + | |
1726 | + dev_dbg(dev->gdev, "registering framebuffer"); | |
1727 | + retval = register_framebuffer(info); | |
1728 | + check_warn_goto_error(retval, "error %d register_framebuffer", retval); | |
1729 | + | |
1730 | + dev_info(dev->gdev, "SMSC UFX USB device /dev/fb%d attached. %dx%d resolution." | |
1731 | + " Using %dK framebuffer memory\n", info->node, | |
1732 | + info->var.xres, info->var.yres, info->fix.smem_len >> 10); | |
1733 | + | |
1734 | + return 0; | |
1735 | + | |
1736 | +error: | |
1737 | + if (dev) { | |
1738 | + if (info) { | |
1739 | + if (info->cmap.len != 0) | |
1740 | + fb_dealloc_cmap(&info->cmap); | |
1741 | + if (info->monspecs.modedb) | |
1742 | + fb_destroy_modedb(info->monspecs.modedb); | |
1743 | + if (info->screen_base) | |
1744 | + vfree(info->screen_base); | |
1745 | + | |
1746 | + fb_destroy_modelist(&info->modelist); | |
1747 | + | |
1748 | + framebuffer_release(info); | |
1749 | + } | |
1750 | + | |
1751 | + kref_put(&dev->kref, ufx_free); /* ref for framebuffer */ | |
1752 | + kref_put(&dev->kref, ufx_free); /* last ref from kref_init */ | |
1753 | + | |
1754 | + /* dev has been deallocated. Do not dereference */ | |
1755 | + } | |
1756 | + | |
1757 | + return retval; | |
1758 | +} | |
1759 | + | |
1760 | +static void ufx_usb_disconnect(struct usb_interface *interface) | |
1761 | +{ | |
1762 | + struct ufx_data *dev; | |
1763 | + struct fb_info *info; | |
1764 | + | |
1765 | + dev = usb_get_intfdata(interface); | |
1766 | + info = dev->info; | |
1767 | + | |
1768 | + pr_debug("USB disconnect starting\n"); | |
1769 | + | |
1770 | + /* we virtualize until all fb clients release. Then we free */ | |
1771 | + dev->virtualized = true; | |
1772 | + | |
1773 | + /* When non-active we'll update virtual framebuffer, but no new urbs */ | |
1774 | + atomic_set(&dev->usb_active, 0); | |
1775 | + | |
1776 | + usb_set_intfdata(interface, NULL); | |
1777 | + | |
1778 | + /* if clients still have us open, will be freed on last close */ | |
1779 | + if (dev->fb_count == 0) | |
1780 | + schedule_delayed_work(&dev->free_framebuffer_work, 0); | |
1781 | + | |
1782 | + /* release reference taken by kref_init in probe() */ | |
1783 | + kref_put(&dev->kref, ufx_free); | |
1784 | + | |
1785 | + /* consider ufx_data freed */ | |
1786 | +} | |
1787 | + | |
1788 | +static struct usb_driver ufx_driver = { | |
1789 | + .name = "smscufx", | |
1790 | + .probe = ufx_usb_probe, | |
1791 | + .disconnect = ufx_usb_disconnect, | |
1792 | + .id_table = id_table, | |
1793 | +}; | |
1794 | + | |
1795 | +static int __init ufx_module_init(void) | |
1796 | +{ | |
1797 | + int res; | |
1798 | + | |
1799 | + res = usb_register(&ufx_driver); | |
1800 | + if (res) | |
1801 | + err("usb_register failed. Error number %d", res); | |
1802 | + | |
1803 | + return res; | |
1804 | +} | |
1805 | + | |
1806 | +static void __exit ufx_module_exit(void) | |
1807 | +{ | |
1808 | + usb_deregister(&ufx_driver); | |
1809 | +} | |
1810 | + | |
1811 | +module_init(ufx_module_init); | |
1812 | +module_exit(ufx_module_exit); | |
1813 | + | |
1814 | +static void ufx_urb_completion(struct urb *urb) | |
1815 | +{ | |
1816 | + struct urb_node *unode = urb->context; | |
1817 | + struct ufx_data *dev = unode->dev; | |
1818 | + unsigned long flags; | |
1819 | + | |
1820 | + /* sync/async unlink faults aren't errors */ | |
1821 | + if (urb->status) { | |
1822 | + if (!(urb->status == -ENOENT || | |
1823 | + urb->status == -ECONNRESET || | |
1824 | + urb->status == -ESHUTDOWN)) { | |
1825 | + pr_err("%s - nonzero write bulk status received: %d\n", | |
1826 | + __func__, urb->status); | |
1827 | + atomic_set(&dev->lost_pixels, 1); | |
1828 | + } | |
1829 | + } | |
1830 | + | |
1831 | + urb->transfer_buffer_length = dev->urbs.size; /* reset to full buffer size */ | |
1832 | + | |
1833 | + spin_lock_irqsave(&dev->urbs.lock, flags); | |
1834 | + list_add_tail(&unode->entry, &dev->urbs.list); | |
1835 | + dev->urbs.available++; | |
1836 | + spin_unlock_irqrestore(&dev->urbs.lock, flags); | |
1837 | + | |
1838 | + /* When using fb_defio, we deadlock if up() is called | |
1839 | + * while another is waiting, so defer the up() to a workqueue */ | |
1840 | + if (fb_defio) | |
1841 | + schedule_delayed_work(&unode->release_urb_work, 0); | |
1842 | + else | |
1843 | + up(&dev->urbs.limit_sem); | |
1844 | +} | |
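The fb_defio branch above avoids calling up() directly from URB-completion context by deferring it to ufx_release_urb_work (defined earlier in this file). A rough sketch of what such a deferred release typically amounts to, assuming the handler only has to release the in-flight slot:

	/* Sketch only: release the in-flight URB slot from process context */
	static void example_release_urb_work(struct work_struct *work)
	{
		struct urb_node *unode = container_of(work, struct urb_node,
						      release_urb_work.work);

		up(&unode->dev->urbs.limit_sem);
	}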
1845 | + | |
1846 | +static void ufx_free_urb_list(struct ufx_data *dev) | |
1847 | +{ | |
1848 | + int count = dev->urbs.count; | |
1849 | + struct list_head *node; | |
1850 | + struct urb_node *unode; | |
1851 | + struct urb *urb; | |
1852 | + int ret; | |
1853 | + unsigned long flags; | |
1854 | + | |
1855 | + pr_debug("Waiting for completions and freeing all render urbs\n"); | |
1856 | + | |
1857 | + /* keep waiting and freeing, until we've got 'em all */ | |
1858 | + while (count--) { | |
1859 | + /* Getting interrupted means we leak, but that's ok at shutdown */ | |
1860 | + ret = down_interruptible(&dev->urbs.limit_sem); | |
1861 | + if (ret) | |
1862 | + break; | |
1863 | + | |
1864 | + spin_lock_irqsave(&dev->urbs.lock, flags); | |
1865 | + | |
1866 | + node = dev->urbs.list.next; /* have reserved one with sem */ | |
1867 | + list_del_init(node); | |
1868 | + | |
1869 | + spin_unlock_irqrestore(&dev->urbs.lock, flags); | |
1870 | + | |
1871 | + unode = list_entry(node, struct urb_node, entry); | |
1872 | + urb = unode->urb; | |
1873 | + | |
1874 | + /* Free each separately allocated piece */ | |
1875 | + usb_free_coherent(urb->dev, dev->urbs.size, | |
1876 | + urb->transfer_buffer, urb->transfer_dma); | |
1877 | + usb_free_urb(urb); | |
1878 | + kfree(unode); | |
1879 | + } | |
1880 | +} | |
1881 | + | |
1882 | +static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size) | |
1883 | +{ | |
1884 | + int i = 0; | |
1885 | + struct urb *urb; | |
1886 | + struct urb_node *unode; | |
1887 | + char *buf; | |
1888 | + | |
1889 | + spin_lock_init(&dev->urbs.lock); | |
1890 | + | |
1891 | + dev->urbs.size = size; | |
1892 | + INIT_LIST_HEAD(&dev->urbs.list); | |
1893 | + | |
1894 | + while (i < count) { | |
1895 | + unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); | |
1896 | + if (!unode) | |
1897 | + break; | |
1898 | + unode->dev = dev; | |
1899 | + | |
1900 | + INIT_DELAYED_WORK(&unode->release_urb_work, | |
1901 | + ufx_release_urb_work); | |
1902 | + | |
1903 | + urb = usb_alloc_urb(0, GFP_KERNEL); | |
1904 | + if (!urb) { | |
1905 | + kfree(unode); | |
1906 | + break; | |
1907 | + } | |
1908 | + unode->urb = urb; | |
1909 | + | |
1910 | + buf = usb_alloc_coherent(dev->udev, size, GFP_KERNEL, | |
1911 | + &urb->transfer_dma); | |
1912 | + if (!buf) { | |
1913 | + kfree(unode); | |
1914 | + usb_free_urb(urb); | |
1915 | + break; | |
1916 | + } | |
1917 | + | |
1918 | + /* urb->transfer_buffer_length set to actual before submit */ | |
1919 | + usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1), | |
1920 | + buf, size, ufx_urb_completion, unode); | |
1921 | + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | |
1922 | + | |
1923 | + list_add_tail(&unode->entry, &dev->urbs.list); | |
1924 | + | |
1925 | + i++; | |
1926 | + } | |
1927 | + | |
1928 | + sema_init(&dev->urbs.limit_sem, i); | |
1929 | + dev->urbs.count = i; | |
1930 | + dev->urbs.available = i; | |
1931 | + | |
1932 | + pr_debug("allocated %d %d byte urbs\n", i, (int) size); | |
1933 | + | |
1934 | + return i; | |
1935 | +} | |
1936 | + | |
1937 | +static struct urb *ufx_get_urb(struct ufx_data *dev) | |
1938 | +{ | |
1939 | + int ret = 0; | |
1940 | + struct list_head *entry; | |
1941 | + struct urb_node *unode; | |
1942 | + struct urb *urb = NULL; | |
1943 | + unsigned long flags; | |
1944 | + | |
1945 | + /* Wait for an in-flight buffer to complete and get re-queued */ | |
1946 | + ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT); | |
1947 | + if (ret) { | |
1948 | + atomic_set(&dev->lost_pixels, 1); | |
1949 | + pr_warn("wait for urb interrupted: %x available: %d\n", | |
1950 | + ret, dev->urbs.available); | |
1951 | + goto error; | |
1952 | + } | |
1953 | + | |
1954 | + spin_lock_irqsave(&dev->urbs.lock, flags); | |
1955 | + | |
1956 | + BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */ | |
1957 | + entry = dev->urbs.list.next; | |
1958 | + list_del_init(entry); | |
1959 | + dev->urbs.available--; | |
1960 | + | |
1961 | + spin_unlock_irqrestore(&dev->urbs.lock, flags); | |
1962 | + | |
1963 | + unode = list_entry(entry, struct urb_node, entry); | |
1964 | + urb = unode->urb; | |
1965 | + | |
1966 | +error: | |
1967 | + return urb; | |
1968 | +} | |
1969 | + | |
1970 | +static int ufx_submit_urb(struct ufx_data *dev, struct urb *urb, size_t len) | |
1971 | +{ | |
1972 | + int ret; | |
1973 | + | |
1974 | + BUG_ON(len > dev->urbs.size); | |
1975 | + | |
1976 | + urb->transfer_buffer_length = len; /* set to actual payload len */ | |
1977 | + ret = usb_submit_urb(urb, GFP_KERNEL); | |
1978 | + if (ret) { | |
1979 | + ufx_urb_completion(urb); /* because no one else will */ | |
1980 | + atomic_set(&dev->lost_pixels, 1); | |
1981 | + pr_err("usb_submit_urb error %x\n", ret); | |
1982 | + } | |
1983 | + return ret; | |
1984 | +} | |
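Together, ufx_get_urb() and ufx_submit_urb() form the pattern the rendering paths elsewhere in this driver follow: block until one of the pre-allocated transfer buffers is free, fill it, and submit it, with ufx_urb_completion() requeueing the buffer once the hardware is done. A minimal usage sketch (example_send is a hypothetical caller, error handling reduced to essentials):

	/* Sketch only: push one buffer of data through the URB pool */
	static int example_send(struct ufx_data *dev, const u8 *data, size_t len)
	{
		struct urb *urb;

		if (len > dev->urbs.size)
			return -EINVAL;

		urb = ufx_get_urb(dev);
		if (!urb)
			return -EAGAIN; /* timed out waiting for a free buffer */

		memcpy(urb->transfer_buffer, data, len);
		return ufx_submit_urb(dev, urb, len);
	}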
1985 | + | |
1986 | +module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); | |
1987 | +MODULE_PARM_DESC(console, "Allow fbcon to be used on this display"); | |
1988 | + | |
1989 | +module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); | |
1990 | +MODULE_PARM_DESC(fb_defio, "Enable fb_defio mmap support"); | |
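Both parameters are booleans that can be set at load time, for example "modprobe smscufx fb_defio=0" to disable deferred I/O; with the permission bits above they also appear under /sys/module/smscufx/parameters/ once the module is loaded.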
1991 | + | |
1992 | +MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>"); | |
1993 | +MODULE_DESCRIPTION("SMSC UFX kernel framebuffer driver"); | |
1994 | +MODULE_LICENSE("GPL"); |