Commit 3f10462f292e5d8f3bb2a19fa62f442bd8c4df5a

Authored by Jesper Nilsson
1 parent 80d6170a28

CRISv32: Rewrite of synchronous serial port driver

Make it possible to load the driver as a module and improve
the locking.

Signed-off-by: Jesper Nilsson <jesper.nilsson@axis.com>

Showing 1 changed file with 791 additions and 639 deletions

arch/cris/arch-v32/drivers/sync_serial.c
1 /* 1 /*
2 * Simple synchronous serial port driver for ETRAX FS and Artpec-3. 2 * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3.
3 * 3 *
4 * Copyright (c) 2005 Axis Communications AB 4 * Copyright (c) 2005, 2008 Axis Communications AB
5 *
6 * Author: Mikael Starvik 5 * Author: Mikael Starvik
7 * 6 *
8 */ 7 */
9 8
10 #include <linux/module.h> 9 #include <linux/module.h>
11 #include <linux/kernel.h> 10 #include <linux/kernel.h>
12 #include <linux/types.h> 11 #include <linux/types.h>
13 #include <linux/errno.h> 12 #include <linux/errno.h>
14 #include <linux/major.h> 13 #include <linux/major.h>
15 #include <linux/sched.h> 14 #include <linux/sched.h>
16 #include <linux/mutex.h> 15 #include <linux/mutex.h>
17 #include <linux/interrupt.h> 16 #include <linux/interrupt.h>
18 #include <linux/poll.h> 17 #include <linux/poll.h>
19 #include <linux/init.h> 18 #include <linux/fs.h>
20 #include <linux/timer.h> 19 #include <linux/cdev.h>
21 #include <linux/spinlock.h> 20 #include <linux/device.h>
22 #include <linux/wait.h> 21 #include <linux/wait.h>
23 22
24 #include <asm/io.h> 23 #include <asm/io.h>
25 #include <dma.h> 24 #include <mach/dma.h>
26 #include <pinmux.h> 25 #include <pinmux.h>
27 #include <hwregs/reg_rdwr.h> 26 #include <hwregs/reg_rdwr.h>
28 #include <hwregs/sser_defs.h> 27 #include <hwregs/sser_defs.h>
28 #include <hwregs/timer_defs.h>
29 #include <hwregs/dma_defs.h> 29 #include <hwregs/dma_defs.h>
30 #include <hwregs/dma.h> 30 #include <hwregs/dma.h>
31 #include <hwregs/intr_vect_defs.h> 31 #include <hwregs/intr_vect_defs.h>
32 #include <hwregs/intr_vect.h> 32 #include <hwregs/intr_vect.h>
33 #include <hwregs/reg_map.h> 33 #include <hwregs/reg_map.h>
34 #include <asm/sync_serial.h> 34 #include <asm/sync_serial.h>
35 35
36 36
37 /* The receiver is a bit tricky because of the continuous stream of data.*/ 37 /* The receiver is a bit tricky because of the continuous stream of data.*/
38 /* */ 38 /* */
39 /* Three DMA descriptors are linked together. Each DMA descriptor is */ 39 /* Three DMA descriptors are linked together. Each DMA descriptor is */
40 /* responsible for port->bufchunk of a common buffer. */ 40 /* responsible for port->bufchunk of a common buffer. */
41 /* */ 41 /* */
42 /* +---------------------------------------------+ */ 42 /* +---------------------------------------------+ */
43 /* | +----------+ +----------+ +----------+ | */ 43 /* | +----------+ +----------+ +----------+ | */
44 /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */ 44 /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
45 /* +----------+ +----------+ +----------+ */ 45 /* +----------+ +----------+ +----------+ */
46 /* | | | */ 46 /* | | | */
47 /* v v v */ 47 /* v v v */
48 /* +-------------------------------------+ */ 48 /* +-------------------------------------+ */
49 /* | BUFFER | */ 49 /* | BUFFER | */
50 /* +-------------------------------------+ */ 50 /* +-------------------------------------+ */
51 /* |<- data_avail ->| */ 51 /* |<- data_avail ->| */
52 /* readp writep */ 52 /* readp writep */
53 /* */ 53 /* */
54 /* If the application keeps up the pace readp will be right after writep.*/ 54 /* If the application keeps up the pace readp will be right after writep.*/
55 /* If the application can't keep the pace we have to throw away data. */ 55 /* If the application can't keep the pace we have to throw away data. */
56 /* The idea is that readp should be ready with the data pointed out by */ 56 /* The idea is that readp should be ready with the data pointed out by */
57 /* Descr[i] when the DMA has filled in Descr[i+1]. */ 57 /* Descr[i] when the DMA has filled in Descr[i+1]. */
58 /* Otherwise we will discard */ 58 /* Otherwise we will discard */
59 /* the rest of the data pointed out by Descr1 and set readp to the start */ 59 /* the rest of the data pointed out by Descr1 and set readp to the start */
60 /* of Descr2 */ 60 /* of Descr2 */
61 61
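Editor's note (not part of the commit): a minimal sketch of the readp/writep arithmetic the diagram above describes, mirroring what the driver's sync_data_avail() computes further down; the helper name is hypothetical.

	/* Unread bytes between readp and writep in the circular flip buffer. */
	static inline int flip_data_avail(const unsigned char *readp,
					  const unsigned char *writep,
					  int in_buffer_size)
	{
		if (writep >= readp)
			return writep - readp;		/* no wrap */
		/* writep has wrapped: data runs from readp to the end of the
		 * buffer, then from the start of the buffer up to writep. */
		return in_buffer_size - (readp - writep);
	}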
62 #define SYNC_SERIAL_MAJOR 125
63
64 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */ 62 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
65 /* words can be handled */ 63 /* words can be handled */
66 #define IN_BUFFER_SIZE 12288 64 #define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE
67 #define IN_DESCR_SIZE 256 65 #define NBR_IN_DESCR (8*6)
68 #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE) 66 #define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR)
69 67
70 #define OUT_BUFFER_SIZE 1024*8
71 #define NBR_OUT_DESCR 8 68 #define NBR_OUT_DESCR 8
69 #define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR)
72 70
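Editor's note, as a quick check of the new sizing: the old layout fixed IN_BUFFER_SIZE at 12288 and derived 12288 / 256 = 48 descriptors; the new one fixes NBR_IN_DESCR at 8*6 = 48 and derives the buffer size from the chunk size instead. Assuming SSP_INPUT_CHUNK_SIZE is 256 (the old IN_DESCR_SIZE), the total stays 256 * 48 = 12288 bytes, and since 48 is itself a multiple of 6 the "multiple of 6" requirement for 24-bit words now holds for any chunk size. OUT_BUFFER_SIZE is likewise unchanged at 1024 * 8 = 8192 bytes, just expressed via NBR_OUT_DESCR.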
73 #define DEFAULT_FRAME_RATE 0 71 #define DEFAULT_FRAME_RATE 0
74 #define DEFAULT_WORD_RATE 7 72 #define DEFAULT_WORD_RATE 7
75 73
74 /* To be removed when we move to pure udev. */
75 #define SYNC_SERIAL_MAJOR 125
76
76 /* NOTE: Enabling some debug will likely cause overrun or underrun, 77 /* NOTE: Enabling some debug will likely cause overrun or underrun,
77 * especially if manual mode is use. 78 * especially if manual mode is used.
78 */ 79 */
79 #define DEBUG(x) 80 #define DEBUG(x)
80 #define DEBUGREAD(x) 81 #define DEBUGREAD(x)
81 #define DEBUGWRITE(x) 82 #define DEBUGWRITE(x)
82 #define DEBUGPOLL(x) 83 #define DEBUGPOLL(x)
83 #define DEBUGRXINT(x) 84 #define DEBUGRXINT(x)
84 #define DEBUGTXINT(x) 85 #define DEBUGTXINT(x)
85 #define DEBUGTRDMA(x) 86 #define DEBUGTRDMA(x)
86 #define DEBUGOUTBUF(x) 87 #define DEBUGOUTBUF(x)
87 88
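Editor's note: these macros compile their argument away entirely; to enable one category, define it to expand to its argument instead, mindful of the overrun/underrun warning above, e.g.:

	#define DEBUGRXINT(x) x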
88 typedef struct sync_port 89 enum syncser_irq_setup {
89 { 90 no_irq_setup = 0,
90 reg_scope_instances regi_sser; 91 dma_irq_setup = 1,
91 reg_scope_instances regi_dmain; 92 manual_irq_setup = 2,
92 reg_scope_instances regi_dmaout; 93 };
93 94
95 struct sync_port {
96 unsigned long regi_sser;
97 unsigned long regi_dmain;
98 unsigned long regi_dmaout;
99
100 /* Interrupt vectors. */
101 unsigned long dma_in_intr_vect; /* Used for DMA in. */
102 unsigned long dma_out_intr_vect; /* Used for DMA out. */
103 unsigned long syncser_intr_vect; /* Used when no DMA. */
104
105 /* DMA number for in and out. */
106 unsigned int dma_in_nbr;
107 unsigned int dma_out_nbr;
108
109 /* DMA owner. */
110 enum dma_owner req_dma;
111
94 char started; /* 1 if port has been started */ 112 char started; /* 1 if port has been started */
95 char port_nbr; /* Port 0 or 1 */ 113 char port_nbr; /* Port 0 or 1 */
96 char busy; /* 1 if port is busy */ 114 char busy; /* 1 if port is busy */
97 115
98 char enabled; /* 1 if port is enabled */ 116 char enabled; /* 1 if port is enabled */
99 char use_dma; /* 1 if port uses dma */ 117 char use_dma; /* 1 if port uses dma */
100 char tr_running; 118 char tr_running;
101 119
102 char init_irqs; 120 enum syncser_irq_setup init_irqs;
103 int output; 121 int output;
104 int input; 122 int input;
105 123
106 /* Next byte to be read by application */ 124 /* Next byte to be read by application */
107 volatile unsigned char *volatile readp; 125 unsigned char *readp;
108 /* Next byte to be written by etrax */ 126 /* Next byte to be written by etrax */
109 volatile unsigned char *volatile writep; 127 unsigned char *writep;
110 128
111 unsigned int in_buffer_size; 129 unsigned int in_buffer_size;
130 unsigned int in_buffer_len;
112 unsigned int inbufchunk; 131 unsigned int inbufchunk;
113 unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32))); 132 /* Data buffers for in and output. */
114 unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32))); 133 unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32);
115 unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32))); 134 unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32);
116 struct dma_descr_data* next_rx_desc; 135 unsigned char flip[IN_BUFFER_SIZE] __aligned(32);
117 struct dma_descr_data* prev_rx_desc; 136 struct timespec timestamp[NBR_IN_DESCR];
137 struct dma_descr_data *next_rx_desc;
138 struct dma_descr_data *prev_rx_desc;
118 139
140 struct timeval last_timestamp;
141 int read_ts_idx;
142 int write_ts_idx;
143
119 /* Pointer to the first available descriptor in the ring, 144 /* Pointer to the first available descriptor in the ring,
120 * unless active_tr_descr == catch_tr_descr and a dma 145 * unless active_tr_descr == catch_tr_descr and a dma
121 * transfer is active */ 146 * transfer is active */
122 struct dma_descr_data *active_tr_descr; 147 struct dma_descr_data *active_tr_descr;
123 148
124 /* Pointer to the first allocated descriptor in the ring */ 149 /* Pointer to the first allocated descriptor in the ring */
125 struct dma_descr_data *catch_tr_descr; 150 struct dma_descr_data *catch_tr_descr;
126 151
127 /* Pointer to the descriptor with the current end-of-list */ 152 /* Pointer to the descriptor with the current end-of-list */
128 struct dma_descr_data *prev_tr_descr; 153 struct dma_descr_data *prev_tr_descr;
129 int full; 154 int full;
130 155
131 /* Pointer to the first byte being read by DMA 156 /* Pointer to the first byte being read by DMA
132 * or current position in out_buffer if not using DMA. */ 157 * or current position in out_buffer if not using DMA. */
133 unsigned char *out_rd_ptr; 158 unsigned char *out_rd_ptr;
134 159
135 /* Number of bytes currently locked for being read by DMA */ 160 /* Number of bytes currently locked for being read by DMA */
136 int out_buf_count; 161 int out_buf_count;
137 162
138 dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16))); 163 dma_descr_context in_context __aligned(32);
139 dma_descr_context in_context __attribute__ ((__aligned__(32))); 164 dma_descr_context out_context __aligned(32);
140 dma_descr_data out_descr[NBR_OUT_DESCR] 165 dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16);
141 __attribute__ ((__aligned__(16))); 166 dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16);
142 dma_descr_context out_context __attribute__ ((__aligned__(32))); 167
143 wait_queue_head_t out_wait_q; 168 wait_queue_head_t out_wait_q;
144 wait_queue_head_t in_wait_q; 169 wait_queue_head_t in_wait_q;
145 170
146 spinlock_t lock; 171 spinlock_t lock;
147 } sync_port; 172 };
148 173
149 static DEFINE_MUTEX(sync_serial_mutex); 174 static DEFINE_MUTEX(sync_serial_mutex);
150 static int etrax_sync_serial_init(void); 175 static int etrax_sync_serial_init(void);
151 static void initialize_port(int portnbr); 176 static void initialize_port(int portnbr);
152 static inline int sync_data_avail(struct sync_port *port); 177 static inline int sync_data_avail(struct sync_port *port);
153 178
154 static int sync_serial_open(struct inode *, struct file*); 179 static int sync_serial_open(struct inode *, struct file *);
155 static int sync_serial_release(struct inode*, struct file*); 180 static int sync_serial_release(struct inode *, struct file *);
156 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait); 181 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
157 182
158 static int sync_serial_ioctl(struct file *, 183 static long sync_serial_ioctl(struct file *file,
159 unsigned int cmd, unsigned long arg); 184 unsigned int cmd, unsigned long arg);
160 static ssize_t sync_serial_write(struct file * file, const char * buf, 185 static int sync_serial_ioctl_unlocked(struct file *file,
186 unsigned int cmd, unsigned long arg);
187 static ssize_t sync_serial_write(struct file *file, const char __user *buf,
161 size_t count, loff_t *ppos); 188 size_t count, loff_t *ppos);
162 static ssize_t sync_serial_read(struct file *file, char *buf, 189 static ssize_t sync_serial_read(struct file *file, char __user *buf,
163 size_t count, loff_t *ppos); 190 size_t count, loff_t *ppos);
164 191
165 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ 192 #if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
166 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ 193 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
167 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ 194 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
168 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)) 195 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)))
169 #define SYNC_SER_DMA 196 #define SYNC_SER_DMA
197 #else
198 #define SYNC_SER_MANUAL
170 #endif 199 #endif
171 200
172 static void send_word(sync_port* port);
173 static void start_dma_out(struct sync_port *port, const char *data, int count);
174 static void start_dma_in(sync_port* port);
175 #ifdef SYNC_SER_DMA 201 #ifdef SYNC_SER_DMA
202 static void start_dma_out(struct sync_port *port, const char *data, int count);
203 static void start_dma_in(struct sync_port *port);
176 static irqreturn_t tr_interrupt(int irq, void *dev_id); 204 static irqreturn_t tr_interrupt(int irq, void *dev_id);
177 static irqreturn_t rx_interrupt(int irq, void *dev_id); 205 static irqreturn_t rx_interrupt(int irq, void *dev_id);
178 #endif 206 #endif
179
180 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
181 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
182 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
183 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
184 #define SYNC_SER_MANUAL
185 #endif
186 #ifdef SYNC_SER_MANUAL 207 #ifdef SYNC_SER_MANUAL
208 static void send_word(struct sync_port *port);
187 static irqreturn_t manual_interrupt(int irq, void *dev_id); 209 static irqreturn_t manual_interrupt(int irq, void *dev_id);
188 #endif 210 #endif
189 211
190 #ifdef CONFIG_ETRAXFS /* ETRAX FS */ 212 #define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed
191 #define OUT_DMA_NBR 4 213 #define artpec_request_dma crisv32_request_dma
192 #define IN_DMA_NBR 5 214 #define artpec_free_dma crisv32_free_dma
193 #define PINMUX_SSER pinmux_sser0 215
194 #define SYNCSER_INST regi_sser0 216 #ifdef CONFIG_ETRAXFS
195 #define SYNCSER_INTR_VECT SSER0_INTR_VECT 217 /* ETRAX FS */
196 #define OUT_DMA_INST regi_dma4 218 #define DMA_OUT_NBR0 SYNC_SER0_TX_DMA_NBR
197 #define IN_DMA_INST regi_dma5 219 #define DMA_IN_NBR0 SYNC_SER0_RX_DMA_NBR
198 #define DMA_OUT_INTR_VECT DMA4_INTR_VECT 220 #define DMA_OUT_NBR1 SYNC_SER1_TX_DMA_NBR
199 #define DMA_IN_INTR_VECT DMA5_INTR_VECT 221 #define DMA_IN_NBR1 SYNC_SER1_RX_DMA_NBR
200 #define REQ_DMA_SYNCSER dma_sser0 222 #define PINMUX_SSER0 pinmux_sser0
201 #else /* Artpec-3 */ 223 #define PINMUX_SSER1 pinmux_sser1
202 #define OUT_DMA_NBR 6 224 #define SYNCSER_INST0 regi_sser0
203 #define IN_DMA_NBR 7 225 #define SYNCSER_INST1 regi_sser1
204 #define PINMUX_SSER pinmux_sser 226 #define SYNCSER_INTR_VECT0 SSER0_INTR_VECT
205 #define SYNCSER_INST regi_sser 227 #define SYNCSER_INTR_VECT1 SSER1_INTR_VECT
206 #define SYNCSER_INTR_VECT SSER_INTR_VECT 228 #define OUT_DMA_INST0 regi_dma4
207 #define OUT_DMA_INST regi_dma6 229 #define IN_DMA_INST0 regi_dma5
208 #define IN_DMA_INST regi_dma7 230 #define DMA_OUT_INTR_VECT0 DMA4_INTR_VECT
209 #define DMA_OUT_INTR_VECT DMA6_INTR_VECT 231 #define DMA_OUT_INTR_VECT1 DMA7_INTR_VECT
210 #define DMA_IN_INTR_VECT DMA7_INTR_VECT 232 #define DMA_IN_INTR_VECT0 DMA5_INTR_VECT
211 #define REQ_DMA_SYNCSER dma_sser 233 #define DMA_IN_INTR_VECT1 DMA6_INTR_VECT
234 #define REQ_DMA_SYNCSER0 dma_sser0
235 #define REQ_DMA_SYNCSER1 dma_sser1
236 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
237 #define PORT1_DMA 1
238 #else
239 #define PORT1_DMA 0
212 #endif 240 #endif
241 #elif defined(CONFIG_CRIS_MACH_ARTPEC3)
242 /* ARTPEC-3 */
243 #define DMA_OUT_NBR0 SYNC_SER_TX_DMA_NBR
244 #define DMA_IN_NBR0 SYNC_SER_RX_DMA_NBR
245 #define PINMUX_SSER0 pinmux_sser
246 #define SYNCSER_INST0 regi_sser
247 #define SYNCSER_INTR_VECT0 SSER_INTR_VECT
248 #define OUT_DMA_INST0 regi_dma6
249 #define IN_DMA_INST0 regi_dma7
250 #define DMA_OUT_INTR_VECT0 DMA6_INTR_VECT
251 #define DMA_IN_INTR_VECT0 DMA7_INTR_VECT
252 #define REQ_DMA_SYNCSER0 dma_sser
253 #define REQ_DMA_SYNCSER1 dma_sser
254 #endif
213 255
214 /* The ports */
215 static struct sync_port ports[]=
216 {
217 {
218 .regi_sser = SYNCSER_INST,
219 .regi_dmaout = OUT_DMA_INST,
220 .regi_dmain = IN_DMA_INST,
221 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) 256 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
222 .use_dma = 1, 257 #define PORT0_DMA 1
223 #else 258 #else
224 .use_dma = 0, 259 #define PORT0_DMA 0
225 #endif 260 #endif
226 }
227 #ifdef CONFIG_ETRAXFS
228 ,
229 261
262 /* The ports */
263 static struct sync_port ports[] = {
230 { 264 {
231 .regi_sser = regi_sser1, 265 .regi_sser = SYNCSER_INST0,
232 .regi_dmaout = regi_dma6, 266 .regi_dmaout = OUT_DMA_INST0,
233 .regi_dmain = regi_dma7, 267 .regi_dmain = IN_DMA_INST0,
234 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA) 268 .use_dma = PORT0_DMA,
235 .use_dma = 1, 269 .dma_in_intr_vect = DMA_IN_INTR_VECT0,
236 #else 270 .dma_out_intr_vect = DMA_OUT_INTR_VECT0,
237 .use_dma = 0, 271 .dma_in_nbr = DMA_IN_NBR0,
272 .dma_out_nbr = DMA_OUT_NBR0,
273 .req_dma = REQ_DMA_SYNCSER0,
274 .syncser_intr_vect = SYNCSER_INTR_VECT0,
275 },
276 #ifdef CONFIG_ETRAXFS
277 {
278 .regi_sser = SYNCSER_INST1,
279 .regi_dmaout = regi_dma6,
280 .regi_dmain = regi_dma7,
281 .use_dma = PORT1_DMA,
282 .dma_in_intr_vect = DMA_IN_INTR_VECT1,
283 .dma_out_intr_vect = DMA_OUT_INTR_VECT1,
284 .dma_in_nbr = DMA_IN_NBR1,
285 .dma_out_nbr = DMA_OUT_NBR1,
286 .req_dma = REQ_DMA_SYNCSER1,
287 .syncser_intr_vect = SYNCSER_INTR_VECT1,
288 },
238 #endif 289 #endif
239 }
240 #endif
241 }; 290 };
242 291
243 #define NBR_PORTS ARRAY_SIZE(ports) 292 #define NBR_PORTS ARRAY_SIZE(ports)
244 293
245 static const struct file_operations sync_serial_fops = { 294 static const struct file_operations syncser_fops = {
246 .owner = THIS_MODULE, 295 .owner = THIS_MODULE,
247 .write = sync_serial_write, 296 .write = sync_serial_write,
248 .read = sync_serial_read, 297 .read = sync_serial_read,
249 .poll = sync_serial_poll, 298 .poll = sync_serial_poll,
250 .unlocked_ioctl = sync_serial_ioctl, 299 .unlocked_ioctl = sync_serial_ioctl,
251 .open = sync_serial_open, 300 .open = sync_serial_open,
252 .release = sync_serial_release, 301 .release = sync_serial_release,
253 .llseek = noop_llseek, 302 .llseek = noop_llseek,
254 }; 303 };
255 304
256 static int __init etrax_sync_serial_init(void) 305 static dev_t syncser_first;
257 { 306 static int minor_count = NBR_PORTS;
258 ports[0].enabled = 0; 307 #define SYNCSER_NAME "syncser"
259 #ifdef CONFIG_ETRAXFS 308 static struct cdev *syncser_cdev;
260 ports[1].enabled = 0; 309 static struct class *syncser_class;
261 #endif
262 if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
263 &sync_serial_fops) < 0) {
264 printk(KERN_WARNING
265 "Unable to get major for synchronous serial port\n");
266 return -EBUSY;
267 }
268 310
269 /* Initialize Ports */ 311 static void sync_serial_start_port(struct sync_port *port)
270 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) 312 {
271 if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) { 313 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
272 printk(KERN_WARNING 314 reg_sser_rw_tr_cfg tr_cfg =
273 "Unable to alloc pins for synchronous serial port 0\n"); 315 REG_RD(sser, port->regi_sser, rw_tr_cfg);
274 return -EIO; 316 reg_sser_rw_rec_cfg rec_cfg =
275 } 317 REG_RD(sser, port->regi_sser, rw_rec_cfg);
276 ports[0].enabled = 1; 318 cfg.en = regk_sser_yes;
277 initialize_port(0); 319 tr_cfg.tr_en = regk_sser_yes;
278 #endif 320 rec_cfg.rec_en = regk_sser_yes;
279 321 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
280 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) 322 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
281 if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) { 323 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
282 printk(KERN_WARNING 324 port->started = 1;
283 "Unable to alloc pins for synchronous serial port 0\n");
284 return -EIO;
285 }
286 ports[1].enabled = 1;
287 initialize_port(1);
288 #endif
289
290 #ifdef CONFIG_ETRAXFS
291 printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
292 #else
293 printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
294 #endif
295 return 0;
296 } 325 }
297 326
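Editor's sketch (not part of the commit): the cdev/class handles declared a few hunks above (syncser_first, minor_count, syncser_cdev, syncser_class) are normally wired up as below; the commit's actual registration code lives later in the file, outside the hunks shown here, and the function name is hypothetical.

	static int __init syncser_sketch_init(void)
	{
		int ret;

		/* Fixed major for now ("to be removed when we move to pure
		 * udev"); a dynamic major would use alloc_chrdev_region(). */
		syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
		ret = register_chrdev_region(syncser_first, minor_count,
					     SYNCSER_NAME);
		if (ret < 0)
			return ret;

		syncser_cdev = cdev_alloc();
		if (!syncser_cdev) {
			unregister_chrdev_region(syncser_first, minor_count);
			return -ENOMEM;
		}
		syncser_cdev->ops = &syncser_fops;
		syncser_cdev->owner = THIS_MODULE;
		ret = cdev_add(syncser_cdev, syncser_first, minor_count);
		if (ret < 0) {
			kobject_put(&syncser_cdev->kobj);
			unregister_chrdev_region(syncser_first, minor_count);
			return ret;
		}

		syncser_class = class_create(THIS_MODULE, SYNCSER_NAME);
		if (IS_ERR(syncser_class)) {
			ret = PTR_ERR(syncser_class);
			cdev_del(syncser_cdev);
			unregister_chrdev_region(syncser_first, minor_count);
			return ret;
		}
		/* device_create() per minor would then give udev the
		 * /dev/syncser0.. nodes. */
		return 0;
	}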
298 static void __init initialize_port(int portnbr) 327 static void __init initialize_port(int portnbr)
299 { 328 {
300 int __attribute__((unused)) i;
301 struct sync_port *port = &ports[portnbr]; 329 struct sync_port *port = &ports[portnbr];
302 reg_sser_rw_cfg cfg = {0}; 330 reg_sser_rw_cfg cfg = { 0 };
303 reg_sser_rw_frm_cfg frm_cfg = {0}; 331 reg_sser_rw_frm_cfg frm_cfg = { 0 };
304 reg_sser_rw_tr_cfg tr_cfg = {0}; 332 reg_sser_rw_tr_cfg tr_cfg = { 0 };
305 reg_sser_rw_rec_cfg rec_cfg = {0}; 333 reg_sser_rw_rec_cfg rec_cfg = { 0 };
306 334
307 DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr)); 335 DEBUG(pr_info("Init sync serial port %d\n", portnbr));
308 336
309 port->port_nbr = portnbr; 337 port->port_nbr = portnbr;
310 port->init_irqs = 1; 338 port->init_irqs = no_irq_setup;
311 339
312 port->out_rd_ptr = port->out_buffer; 340 port->out_rd_ptr = port->out_buffer;
313 port->out_buf_count = 0; 341 port->out_buf_count = 0;
314 342
315 port->output = 1; 343 port->output = 1;
316 port->input = 0; 344 port->input = 0;
317 345
318 port->readp = port->flip; 346 port->readp = port->flip;
319 port->writep = port->flip; 347 port->writep = port->flip;
320 port->in_buffer_size = IN_BUFFER_SIZE; 348 port->in_buffer_size = IN_BUFFER_SIZE;
349 port->in_buffer_len = 0;
321 port->inbufchunk = IN_DESCR_SIZE; 350 port->inbufchunk = IN_DESCR_SIZE;
322 port->next_rx_desc = &port->in_descr[0];
323 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
324 port->prev_rx_desc->eol = 1;
325 351
352 port->read_ts_idx = 0;
353 port->write_ts_idx = 0;
354
326 init_waitqueue_head(&port->out_wait_q); 355 init_waitqueue_head(&port->out_wait_q);
327 init_waitqueue_head(&port->in_wait_q); 356 init_waitqueue_head(&port->in_wait_q);
328 357
329 spin_lock_init(&port->lock); 358 spin_lock_init(&port->lock);
330 359
331 cfg.out_clk_src = regk_sser_intern_clk; 360 cfg.out_clk_src = regk_sser_intern_clk;
332 cfg.out_clk_pol = regk_sser_pos; 361 cfg.out_clk_pol = regk_sser_pos;
333 cfg.clk_od_mode = regk_sser_no; 362 cfg.clk_od_mode = regk_sser_no;
334 cfg.clk_dir = regk_sser_out; 363 cfg.clk_dir = regk_sser_out;
335 cfg.gate_clk = regk_sser_no; 364 cfg.gate_clk = regk_sser_no;
336 cfg.base_freq = regk_sser_f29_493; 365 cfg.base_freq = regk_sser_f29_493;
337 cfg.clk_div = 256; 366 cfg.clk_div = 256;
338 REG_WR(sser, port->regi_sser, rw_cfg, cfg); 367 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
339 368
340 frm_cfg.wordrate = DEFAULT_WORD_RATE; 369 frm_cfg.wordrate = DEFAULT_WORD_RATE;
341 frm_cfg.type = regk_sser_edge; 370 frm_cfg.type = regk_sser_edge;
342 frm_cfg.frame_pin_dir = regk_sser_out; 371 frm_cfg.frame_pin_dir = regk_sser_out;
343 frm_cfg.frame_pin_use = regk_sser_frm; 372 frm_cfg.frame_pin_use = regk_sser_frm;
344 frm_cfg.status_pin_dir = regk_sser_in; 373 frm_cfg.status_pin_dir = regk_sser_in;
345 frm_cfg.status_pin_use = regk_sser_hold; 374 frm_cfg.status_pin_use = regk_sser_hold;
346 frm_cfg.out_on = regk_sser_tr; 375 frm_cfg.out_on = regk_sser_tr;
347 frm_cfg.tr_delay = 1; 376 frm_cfg.tr_delay = 1;
348 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg); 377 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
349 378
350 tr_cfg.urun_stop = regk_sser_no; 379 tr_cfg.urun_stop = regk_sser_no;
351 tr_cfg.sample_size = 7; 380 tr_cfg.sample_size = 7;
352 tr_cfg.sh_dir = regk_sser_msbfirst; 381 tr_cfg.sh_dir = regk_sser_msbfirst;
353 tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; 382 tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
354 #if 0 383 #if 0
355 tr_cfg.rate_ctrl = regk_sser_bulk; 384 tr_cfg.rate_ctrl = regk_sser_bulk;
356 tr_cfg.data_pin_use = regk_sser_dout; 385 tr_cfg.data_pin_use = regk_sser_dout;
357 #else 386 #else
358 tr_cfg.rate_ctrl = regk_sser_iso; 387 tr_cfg.rate_ctrl = regk_sser_iso;
359 tr_cfg.data_pin_use = regk_sser_dout; 388 tr_cfg.data_pin_use = regk_sser_dout;
360 #endif 389 #endif
361 tr_cfg.bulk_wspace = 1; 390 tr_cfg.bulk_wspace = 1;
362 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); 391 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
363 392
364 rec_cfg.sample_size = 7; 393 rec_cfg.sample_size = 7;
365 rec_cfg.sh_dir = regk_sser_msbfirst; 394 rec_cfg.sh_dir = regk_sser_msbfirst;
366 rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; 395 rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
367 rec_cfg.fifo_thr = regk_sser_inf; 396 rec_cfg.fifo_thr = regk_sser_inf;
368 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); 397 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
369 398
370 #ifdef SYNC_SER_DMA 399 #ifdef SYNC_SER_DMA
371 /* Setup the descriptor ring for dma out/transmit. */ 400 {
372 for (i = 0; i < NBR_OUT_DESCR; i++) { 401 int i;
373 port->out_descr[i].wait = 0; 402 /* Setup the descriptor ring for dma out/transmit. */
374 port->out_descr[i].intr = 1; 403 for (i = 0; i < NBR_OUT_DESCR; i++) {
375 port->out_descr[i].eol = 0; 404 dma_descr_data *descr = &port->out_descr[i];
376 port->out_descr[i].out_eop = 0; 405 descr->wait = 0;
377 port->out_descr[i].next = 406 descr->intr = 1;
378 (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]); 407 descr->eol = 0;
408 descr->out_eop = 0;
409 descr->next =
410 (dma_descr_data *)virt_to_phys(&descr[i+1]);
411 }
379 } 412 }
380 413
381 /* Create a ring from the list. */ 414 /* Create a ring from the list. */
382 port->out_descr[NBR_OUT_DESCR-1].next = 415 port->out_descr[NBR_OUT_DESCR-1].next =
383 (dma_descr_data *)virt_to_phys(&port->out_descr[0]); 416 (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
384 417
385 /* Setup context for traversing the ring. */ 418 /* Setup context for traversing the ring. */
386 port->active_tr_descr = &port->out_descr[0]; 419 port->active_tr_descr = &port->out_descr[0];
387 port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1]; 420 port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
388 port->catch_tr_descr = &port->out_descr[0]; 421 port->catch_tr_descr = &port->out_descr[0];
389 #endif 422 #endif
390 } 423 }
391 424
392 static inline int sync_data_avail(struct sync_port *port) 425 static inline int sync_data_avail(struct sync_port *port)
393 { 426 {
394 int avail; 427 return port->in_buffer_len;
395 unsigned char *start;
396 unsigned char *end;
397
398 start = (unsigned char*)port->readp; /* cast away volatile */
399 end = (unsigned char*)port->writep; /* cast away volatile */
400 /* 0123456789 0123456789
401 * ----- - -----
402 * ^rp ^wp ^wp ^rp
403 */
404
405 if (end >= start)
406 avail = end - start;
407 else
408 avail = port->in_buffer_size - (start - end);
409 return avail;
410 } 428 }
411 429
412 static inline int sync_data_avail_to_end(struct sync_port *port)
413 {
414 int avail;
415 unsigned char *start;
416 unsigned char *end;
417
418 start = (unsigned char*)port->readp; /* cast away volatile */
419 end = (unsigned char*)port->writep; /* cast away volatile */
420 /* 0123456789 0123456789
421 * ----- -----
422 * ^rp ^wp ^wp ^rp
423 */
424
425 if (end >= start)
426 avail = end - start;
427 else
428 avail = port->flip + port->in_buffer_size - start;
429 return avail;
430 }
431
432 static int sync_serial_open(struct inode *inode, struct file *file) 430 static int sync_serial_open(struct inode *inode, struct file *file)
433 { 431 {
432 int ret = 0;
434 int dev = iminor(inode); 433 int dev = iminor(inode);
435 int ret = -EBUSY; 434 struct sync_port *port;
436 sync_port *port; 435 #ifdef SYNC_SER_DMA
437 reg_dma_rw_cfg cfg = {.en = regk_dma_yes}; 436 reg_dma_rw_cfg cfg = { .en = regk_dma_yes };
438 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes}; 437 reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes };
438 #endif
439 439
440 mutex_lock(&sync_serial_mutex); 440 DEBUG(pr_debug("Open sync serial port %d\n", dev));
441 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
442 441
443 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) 442 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
444 { 443 DEBUG(pr_info("Invalid minor %d\n", dev));
445 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev)); 444 return -ENODEV;
446 ret = -ENODEV;
447 goto out;
448 } 445 }
449 port = &ports[dev]; 446 port = &ports[dev];
450 /* Allow open this device twice (assuming one reader and one writer) */ 447 /* Allow open this device twice (assuming one reader and one writer) */
451 if (port->busy == 2) 448 if (port->busy == 2) {
452 { 449 DEBUG(pr_info("syncser%d is busy\n", dev));
453 DEBUG(printk(KERN_DEBUG "Device is busy.. \n")); 450 return -EBUSY;
454 goto out;
455 } 451 }
456 452
453 mutex_lock(&sync_serial_mutex);
457 454
458 if (port->init_irqs) { 455 /* Clear any stale date left in the flip buffer */
459 if (port->use_dma) { 456 port->readp = port->writep = port->flip;
460 if (port == &ports[0]) { 457 port->in_buffer_len = 0;
458 port->read_ts_idx = 0;
459 port->write_ts_idx = 0;
460
461 if (port->init_irqs != no_irq_setup) {
462 /* Init only on first call. */
463 port->busy++;
464 mutex_unlock(&sync_serial_mutex);
465 return 0;
466 }
467 if (port->use_dma) {
461 #ifdef SYNC_SER_DMA 468 #ifdef SYNC_SER_DMA
462 if (request_irq(DMA_OUT_INTR_VECT, 469 const char *tmp;
463 tr_interrupt, 470 DEBUG(pr_info("Using DMA for syncser%d\n", dev));
464 0,
465 "synchronous serial 0 dma tr",
466 &ports[0])) {
467 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
468 goto out;
469 } else if (request_irq(DMA_IN_INTR_VECT,
470 rx_interrupt,
471 0,
472 "synchronous serial 1 dma rx",
473 &ports[0])) {
474 free_irq(DMA_OUT_INTR_VECT, &port[0]);
475 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
476 goto out;
477 } else if (crisv32_request_dma(OUT_DMA_NBR,
478 "synchronous serial 0 dma tr",
479 DMA_VERBOSE_ON_ERROR,
480 0,
481 REQ_DMA_SYNCSER)) {
482 free_irq(DMA_OUT_INTR_VECT, &port[0]);
483 free_irq(DMA_IN_INTR_VECT, &port[0]);
484 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
485 goto out;
486 } else if (crisv32_request_dma(IN_DMA_NBR,
487 "synchronous serial 0 dma rec",
488 DMA_VERBOSE_ON_ERROR,
489 0,
490 REQ_DMA_SYNCSER)) {
491 crisv32_free_dma(OUT_DMA_NBR);
492 free_irq(DMA_OUT_INTR_VECT, &port[0]);
493 free_irq(DMA_IN_INTR_VECT, &port[0]);
494 printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
495 goto out;
496 }
497 #endif
498 }
499 #ifdef CONFIG_ETRAXFS
500 else if (port == &ports[1]) {
501 #ifdef SYNC_SER_DMA
502 if (request_irq(DMA6_INTR_VECT,
503 tr_interrupt,
504 0,
505 "synchronous serial 1 dma tr",
506 &ports[1])) {
507 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
508 goto out;
509 } else if (request_irq(DMA7_INTR_VECT,
510 rx_interrupt,
511 0,
512 "synchronous serial 1 dma rx",
513 &ports[1])) {
514 free_irq(DMA6_INTR_VECT, &ports[1]);
515 printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ");
516 goto out;
517 } else if (crisv32_request_dma(
518 SYNC_SER1_TX_DMA_NBR,
519 "synchronous serial 1 dma tr",
520 DMA_VERBOSE_ON_ERROR,
521 0,
522 dma_sser1)) {
523 free_irq(DMA6_INTR_VECT, &ports[1]);
524 free_irq(DMA7_INTR_VECT, &ports[1]);
525 printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel");
526 goto out;
527 } else if (crisv32_request_dma(
528 SYNC_SER1_RX_DMA_NBR,
529 "synchronous serial 3 dma rec",
530 DMA_VERBOSE_ON_ERROR,
531 0,
532 dma_sser1)) {
533 crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
534 free_irq(DMA6_INTR_VECT, &ports[1]);
535 free_irq(DMA7_INTR_VECT, &ports[1]);
536 printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel");
537 goto out;
538 }
539 #endif
540 }
541 #endif
542 /* Enable DMAs */
543 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
544 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
545 /* Enable DMA IRQs */
546 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
547 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
548 /* Set up wordsize = 1 for DMAs. */
549 DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
550 DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
551 471
552 start_dma_in(port); 472 tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx";
553 port->init_irqs = 0; 473 if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0,
554 } else { /* !port->use_dma */ 474 tmp, port)) {
555 #ifdef SYNC_SER_MANUAL 475 pr_err("Can't alloc syncser%d TX IRQ", dev);
556 if (port == &ports[0]) { 476 ret = -EBUSY;
557 if (request_irq(SYNCSER_INTR_VECT, 477 goto unlock_and_exit;
558 manual_interrupt, 478 }
559 0, 479 if (artpec_request_dma(port->dma_out_nbr, tmp,
560 "synchronous serial manual irq", 480 DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
561 &ports[0])) { 481 free_irq(port->dma_out_intr_vect, port);
562 printk("Can't allocate sync serial manual irq"); 482 pr_err("Can't alloc syncser%d TX DMA", dev);
563 goto out; 483 ret = -EBUSY;
564 } 484 goto unlock_and_exit;
565 } 485 }
566 #ifdef CONFIG_ETRAXFS 486 tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx";
567 else if (port == &ports[1]) { 487 if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0,
568 if (request_irq(SSER1_INTR_VECT, 488 tmp, port)) {
569 manual_interrupt, 489 artpec_free_dma(port->dma_out_nbr);
570 0, 490 free_irq(port->dma_out_intr_vect, port);
571 "synchronous serial manual irq", 491 pr_err("Can't alloc syncser%d RX IRQ", dev);
572 &ports[1])) { 492 ret = -EBUSY;
573 printk(KERN_CRIT "Can't allocate sync serial manual irq"); 493 goto unlock_and_exit;
574 goto out; 494 }
575 } 495 if (artpec_request_dma(port->dma_in_nbr, tmp,
576 } 496 DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
497 artpec_free_dma(port->dma_out_nbr);
498 free_irq(port->dma_out_intr_vect, port);
499 free_irq(port->dma_in_intr_vect, port);
500 pr_err("Can't alloc syncser%d RX DMA", dev);
501 ret = -EBUSY;
502 goto unlock_and_exit;
503 }
504 /* Enable DMAs */
505 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
506 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
507 /* Enable DMA IRQs */
508 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
509 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
510 /* Set up wordsize = 1 for DMAs. */
511 DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
512 DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);
513
514 start_dma_in(port);
515 port->init_irqs = dma_irq_setup;
577 #endif 516 #endif
578 port->init_irqs = 0; 517 } else { /* !port->use_dma */
518 #ifdef SYNC_SER_MANUAL
519 const char *tmp = dev == 0 ? "syncser0 manual irq" :
520 "syncser1 manual irq";
521 if (request_irq(port->syncser_intr_vect, manual_interrupt,
522 0, tmp, port)) {
523 pr_err("Can't alloc syncser%d manual irq",
524 dev);
525 ret = -EBUSY;
526 goto unlock_and_exit;
527 }
528 port->init_irqs = manual_irq_setup;
579 #else 529 #else
580 panic("sync_serial: Manual mode not supported.\n"); 530 panic("sync_serial: Manual mode not supported\n");
581 #endif /* SYNC_SER_MANUAL */ 531 #endif /* SYNC_SER_MANUAL */
582 } 532 }
583
584 } /* port->init_irqs */
585
586 port->busy++; 533 port->busy++;
587 ret = 0; 534 ret = 0;
588 out: 535
536 unlock_and_exit:
589 mutex_unlock(&sync_serial_mutex); 537 mutex_unlock(&sync_serial_mutex);
590 return ret; 538 return ret;
591 } 539 }
592 540
593 static int sync_serial_release(struct inode *inode, struct file *file) 541 static int sync_serial_release(struct inode *inode, struct file *file)
594 { 542 {
595 int dev = iminor(inode); 543 int dev = iminor(inode);
596 sync_port *port; 544 struct sync_port *port;
597 545
598 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) 546 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
599 { 547 DEBUG(pr_info("Invalid minor %d\n", dev));
600 DEBUG(printk("Invalid minor %d\n", dev));
601 return -ENODEV; 548 return -ENODEV;
602 } 549 }
603 port = &ports[dev]; 550 port = &ports[dev];
604 if (port->busy) 551 if (port->busy)
605 port->busy--; 552 port->busy--;
606 if (!port->busy) 553 if (!port->busy)
607 /* XXX */ ; 554 /* XXX */;
608 return 0; 555 return 0;
609 } 556 }
610 557
611 static unsigned int sync_serial_poll(struct file *file, poll_table *wait) 558 static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
612 { 559 {
613 int dev = iminor(file_inode(file)); 560 int dev = iminor(file_inode(file));
614 unsigned int mask = 0; 561 unsigned int mask = 0;
615 sync_port *port; 562 struct sync_port *port;
616 DEBUGPOLL( static unsigned int prev_mask = 0; ); 563 DEBUGPOLL(
564 static unsigned int prev_mask;
565 );
617 566
618 port = &ports[dev]; 567 port = &ports[dev];
619 568
620 if (!port->started) { 569 if (!port->started)
621 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); 570 sync_serial_start_port(port);
622 reg_sser_rw_rec_cfg rec_cfg =
623 REG_RD(sser, port->regi_sser, rw_rec_cfg);
624 cfg.en = regk_sser_yes;
625 rec_cfg.rec_en = port->input;
626 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
627 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
628 port->started = 1;
629 }
630 571
631 poll_wait(file, &port->out_wait_q, wait); 572 poll_wait(file, &port->out_wait_q, wait);
632 poll_wait(file, &port->in_wait_q, wait); 573 poll_wait(file, &port->in_wait_q, wait);
633 574
634 /* No active transfer, descriptors are available */ 575 /* No active transfer, descriptors are available */
635 if (port->output && !port->tr_running) 576 if (port->output && !port->tr_running)
636 mask |= POLLOUT | POLLWRNORM; 577 mask |= POLLOUT | POLLWRNORM;
637 578
638 /* Descriptor and buffer space available. */ 579 /* Descriptor and buffer space available. */
639 if (port->output && 580 if (port->output &&
640 port->active_tr_descr != port->catch_tr_descr && 581 port->active_tr_descr != port->catch_tr_descr &&
641 port->out_buf_count < OUT_BUFFER_SIZE) 582 port->out_buf_count < OUT_BUFFER_SIZE)
642 mask |= POLLOUT | POLLWRNORM; 583 mask |= POLLOUT | POLLWRNORM;
643 584
644 /* At least an inbufchunk of data */ 585 /* At least an inbufchunk of data */
645 if (port->input && sync_data_avail(port) >= port->inbufchunk) 586 if (port->input && sync_data_avail(port) >= port->inbufchunk)
646 mask |= POLLIN | POLLRDNORM; 587 mask |= POLLIN | POLLRDNORM;
647 588
648 DEBUGPOLL(if (mask != prev_mask) 589 DEBUGPOLL(
649 printk("sync_serial_poll: mask 0x%08X %s %s\n", mask, 590 if (mask != prev_mask)
650 mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":""); 591 pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
651 prev_mask = mask; 592 mask,
652 ); 593 mask & POLLOUT ? "POLLOUT" : "",
594 mask & POLLIN ? "POLLIN" : "");
595 prev_mask = mask;
596 );
653 return mask; 597 return mask;
654 } 598 }
655 599
656 static int sync_serial_ioctl(struct file *file, 600 static ssize_t __sync_serial_read(struct file *file,
657 unsigned int cmd, unsigned long arg) 601 char __user *buf,
602 size_t count,
603 loff_t *ppos,
604 struct timespec *ts)
658 { 605 {
606 unsigned long flags;
607 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
608 int avail;
609 struct sync_port *port;
610 unsigned char *start;
611 unsigned char *end;
612
613 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
614 DEBUG(pr_info("Invalid minor %d\n", dev));
615 return -ENODEV;
616 }
617 port = &ports[dev];
618
619 if (!port->started)
620 sync_serial_start_port(port);
621
622 /* Calculate number of available bytes */
623 /* Save pointers to avoid that they are modified by interrupt */
624 spin_lock_irqsave(&port->lock, flags);
625 start = port->readp;
626 end = port->writep;
627 spin_unlock_irqrestore(&port->lock, flags);
628
629 while ((start == end) && !port->in_buffer_len) {
630 if (file->f_flags & O_NONBLOCK)
631 return -EAGAIN;
632
633 wait_event_interruptible(port->in_wait_q,
634 !(start == end && !port->full));
635
636 if (signal_pending(current))
637 return -EINTR;
638
639 spin_lock_irqsave(&port->lock, flags);
640 start = port->readp;
641 end = port->writep;
642 spin_unlock_irqrestore(&port->lock, flags);
643 }
644
645 DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n",
646 dev, count,
647 start - port->flip, end - port->flip,
648 port->in_buffer_size));
649
650 /* Lazy read, never return wrapped data. */
651 if (end > start)
652 avail = end - start;
653 else
654 avail = port->flip + port->in_buffer_size - start;
655
656 count = count > avail ? avail : count;
657 if (copy_to_user(buf, start, count))
658 return -EFAULT;
659
660 /* If timestamp requested, find timestamp of first returned byte
661 * and copy it.
662 * N.B: Applications that request timstamps MUST read data in
663 * chunks that are multiples of IN_DESCR_SIZE.
664 * Otherwise the timestamps will not be aligned to the data read.
665 */
666 if (ts != NULL) {
667 int idx = port->read_ts_idx;
668 memcpy(ts, &port->timestamp[idx], sizeof(struct timespec));
669 port->read_ts_idx += count / IN_DESCR_SIZE;
670 if (port->read_ts_idx >= NBR_IN_DESCR)
671 port->read_ts_idx = 0;
672 }
673
674 spin_lock_irqsave(&port->lock, flags);
675 port->readp += count;
676 /* Check for wrap */
677 if (port->readp >= port->flip + port->in_buffer_size)
678 port->readp = port->flip;
679 port->in_buffer_len -= count;
680 port->full = 0;
681 spin_unlock_irqrestore(&port->lock, flags);
682
683 DEBUGREAD(pr_info("r %d\n", count));
684
685 return count;
686 }
687
688 static ssize_t sync_serial_input(struct file *file, unsigned long arg)
689 {
690 struct ssp_request req;
691 int count;
692 int ret;
693
694 /* Copy the request structure from user-mode. */
695 ret = copy_from_user(&req, (struct ssp_request __user *)arg,
696 sizeof(struct ssp_request));
697
698 if (ret) {
699 DEBUG(pr_info("sync_serial_input copy from user failed\n"));
700 return -EFAULT;
701 }
702
703 /* To get the timestamps aligned, make sure that 'len'
704 * is a multiple of IN_DESCR_SIZE.
705 */
706 if ((req.len % IN_DESCR_SIZE) != 0) {
707 DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n",
708 req.len, IN_DESCR_SIZE));
709 return -EFAULT;
710 }
711
712 /* Do the actual read. */
713 /* Note that req.buf is actually a pointer to user space. */
714 count = __sync_serial_read(file, req.buf, req.len,
715 NULL, &req.ts);
716
717 if (count < 0) {
718 DEBUG(pr_info("sync_serial_input read failed\n"));
719 return count;
720 }
721
722 /* Copy the request back to user-mode. */
723 ret = copy_to_user((struct ssp_request __user *)arg, &req,
724 sizeof(struct ssp_request));
725
726 if (ret) {
727 DEBUG(pr_info("syncser input copy2user failed\n"));
728 return -EFAULT;
729 }
730
731 /* Return the number of bytes read. */
732 return count;
733 }
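Editor's sketch (not part of the commit) of how user space might drive the new SSP_INPUT path. The ssp_request fields (buf, len, ts) come from the code above; the device node name, the header to include, and user-space visibility of SSP_INPUT_CHUNK_SIZE are assumptions. The length must be a multiple of IN_DESCR_SIZE, as the check above enforces.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/sync_serial.h>	/* SSP_INPUT, struct ssp_request (assumed) */

	int read_chunk_with_timestamp(void)
	{
		/* A multiple of the driver's chunk size keeps timestamps aligned. */
		char data[4 * SSP_INPUT_CHUNK_SIZE];
		struct ssp_request req;
		int fd, n;

		fd = open("/dev/syncser0", O_RDONLY);	/* assumed node name */
		if (fd < 0)
			return -1;

		req.buf = data;
		req.len = sizeof(data);
		n = ioctl(fd, SSP_INPUT, &req);		/* returns bytes read */
		if (n >= 0)
			/* req.ts is the capture time of the first byte returned. */
			printf("read %d bytes at %ld.%09ld\n", n,
			       (long)req.ts.tv_sec, (long)req.ts.tv_nsec);
		close(fd);
		return n < 0 ? -1 : 0;
	}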
734
735
736 static int sync_serial_ioctl_unlocked(struct file *file,
737 unsigned int cmd, unsigned long arg)
738 {
659 int return_val = 0; 739 int return_val = 0;
660 int dma_w_size = regk_dma_set_w_size1; 740 int dma_w_size = regk_dma_set_w_size1;
661 int dev = iminor(file_inode(file)); 741 int dev = iminor(file_inode(file));
662 sync_port *port; 742 struct sync_port *port;
663 reg_sser_rw_tr_cfg tr_cfg; 743 reg_sser_rw_tr_cfg tr_cfg;
664 reg_sser_rw_rec_cfg rec_cfg; 744 reg_sser_rw_rec_cfg rec_cfg;
665 reg_sser_rw_frm_cfg frm_cfg; 745 reg_sser_rw_frm_cfg frm_cfg;
666 reg_sser_rw_cfg gen_cfg; 746 reg_sser_rw_cfg gen_cfg;
667 reg_sser_rw_intr_mask intr_mask; 747 reg_sser_rw_intr_mask intr_mask;
668 748
669 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) 749 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
670 { 750 DEBUG(pr_info("Invalid minor %d\n", dev));
671 DEBUG(printk("Invalid minor %d\n", dev));
672 return -1; 751 return -1;
673 } 752 }
674 port = &ports[dev]; 753
754 if (cmd == SSP_INPUT)
755 return sync_serial_input(file, arg);
756
757 port = &ports[dev];
675 spin_lock_irq(&port->lock); 758 spin_lock_irq(&port->lock);
676 759
677 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); 760 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
678 rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); 761 rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
679 frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg); 762 frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
680 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg); 763 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
681 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); 764 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
682 765
683 switch(cmd) 766 switch (cmd) {
684 {
685 case SSP_SPEED: 767 case SSP_SPEED:
686 if (GET_SPEED(arg) == CODEC) 768 if (GET_SPEED(arg) == CODEC) {
687 {
688 unsigned int freq; 769 unsigned int freq;
689 770
690 gen_cfg.base_freq = regk_sser_f32; 771 gen_cfg.base_freq = regk_sser_f32;
691 772
692 /* Clock divider will internally be 773 /* Clock divider will internally be
693 * gen_cfg.clk_div + 1. 774 * gen_cfg.clk_div + 1.
694 */ 775 */
695 776
696 freq = GET_FREQ(arg); 777 freq = GET_FREQ(arg);
697 switch (freq) { 778 switch (freq) {
698 case FREQ_32kHz: 779 case FREQ_32kHz:
699 case FREQ_64kHz: 780 case FREQ_64kHz:
700 case FREQ_128kHz: 781 case FREQ_128kHz:
701 case FREQ_256kHz: 782 case FREQ_256kHz:
702 gen_cfg.clk_div = 125 * 783 gen_cfg.clk_div = 125 *
703 (1 << (freq - FREQ_256kHz)) - 1; 784 (1 << (freq - FREQ_256kHz)) - 1;
704 break; 785 break;
705 case FREQ_512kHz: 786 case FREQ_512kHz:
706 gen_cfg.clk_div = 62; 787 gen_cfg.clk_div = 62;
707 break; 788 break;
708 case FREQ_1MHz: 789 case FREQ_1MHz:
709 case FREQ_2MHz: 790 case FREQ_2MHz:
710 case FREQ_4MHz: 791 case FREQ_4MHz:
711 gen_cfg.clk_div = 8 * (1 << freq) - 1; 792 gen_cfg.clk_div = 8 * (1 << freq) - 1;
712 break; 793 break;
713 } 794 }
795 } else if (GET_SPEED(arg) == CODEC_f32768) {
796 gen_cfg.base_freq = regk_sser_f32_768;
797 switch (GET_FREQ(arg)) {
798 case FREQ_4096kHz:
799 gen_cfg.clk_div = 7;
800 break;
801 default:
802 spin_unlock_irq(&port->lock);
803 return -EINVAL;
804 }
714 } else { 805 } else {
715 gen_cfg.base_freq = regk_sser_f29_493; 806 gen_cfg.base_freq = regk_sser_f29_493;
716 switch (GET_SPEED(arg)) { 807 switch (GET_SPEED(arg)) {
717 case SSP150: 808 case SSP150:
718 gen_cfg.clk_div = 29493000 / (150 * 8) - 1; 809 gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
719 break; 810 break;
720 case SSP300: 811 case SSP300:
721 gen_cfg.clk_div = 29493000 / (300 * 8) - 1; 812 gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
722 break; 813 break;
723 case SSP600: 814 case SSP600:
724 gen_cfg.clk_div = 29493000 / (600 * 8) - 1; 815 gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
725 break; 816 break;
726 case SSP1200: 817 case SSP1200:
727 gen_cfg.clk_div = 29493000 / (1200 * 8) - 1; 818 gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
728 break; 819 break;
729 case SSP2400: 820 case SSP2400:
730 gen_cfg.clk_div = 29493000 / (2400 * 8) - 1; 821 gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
731 break; 822 break;
732 case SSP4800: 823 case SSP4800:
733 gen_cfg.clk_div = 29493000 / (4800 * 8) - 1; 824 gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
734 break; 825 break;
735 case SSP9600: 826 case SSP9600:
736 gen_cfg.clk_div = 29493000 / (9600 * 8) - 1; 827 gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
737 break; 828 break;
738 case SSP19200: 829 case SSP19200:
739 gen_cfg.clk_div = 29493000 / (19200 * 8) - 1; 830 gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
740 break; 831 break;
741 case SSP28800: 832 case SSP28800:
742 gen_cfg.clk_div = 29493000 / (28800 * 8) - 1; 833 gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
743 break; 834 break;
744 case SSP57600: 835 case SSP57600:
745 gen_cfg.clk_div = 29493000 / (57600 * 8) - 1; 836 gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
746 break; 837 break;
747 case SSP115200: 838 case SSP115200:
748 gen_cfg.clk_div = 29493000 / (115200 * 8) - 1; 839 gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
749 break; 840 break;
750 case SSP230400: 841 case SSP230400:
751 gen_cfg.clk_div = 29493000 / (230400 * 8) - 1; 842 gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
752 break; 843 break;
753 case SSP460800: 844 case SSP460800:
754 gen_cfg.clk_div = 29493000 / (460800 * 8) - 1; 845 gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
755 break; 846 break;
756 case SSP921600: 847 case SSP921600:
757 gen_cfg.clk_div = 29493000 / (921600 * 8) - 1; 848 gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
758 break; 849 break;
759 case SSP3125000: 850 case SSP3125000:
760 gen_cfg.base_freq = regk_sser_f100; 851 gen_cfg.base_freq = regk_sser_f100;
761 gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1; 852 gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
762 break; 853 break;
763 854
764 } 855 }
765 } 856 }
766 frm_cfg.wordrate = GET_WORD_RATE(arg); 857 frm_cfg.wordrate = GET_WORD_RATE(arg);
767 858
768 break; 859 break;
769 case SSP_MODE: 860 case SSP_MODE:
770 switch(arg) 861 switch (arg) {
771 { 862 case MASTER_OUTPUT:
772 case MASTER_OUTPUT: 863 port->output = 1;
773 port->output = 1; 864 port->input = 0;
774 port->input = 0; 865 frm_cfg.out_on = regk_sser_tr;
775 frm_cfg.out_on = regk_sser_tr; 866 frm_cfg.frame_pin_dir = regk_sser_out;
776 frm_cfg.frame_pin_dir = regk_sser_out; 867 gen_cfg.clk_dir = regk_sser_out;
777 gen_cfg.clk_dir = regk_sser_out; 868 break;
778 break; 869 case SLAVE_OUTPUT:
779 case SLAVE_OUTPUT: 870 port->output = 1;
780 port->output = 1; 871 port->input = 0;
781 port->input = 0; 872 frm_cfg.frame_pin_dir = regk_sser_in;
782 frm_cfg.frame_pin_dir = regk_sser_in; 873 gen_cfg.clk_dir = regk_sser_in;
783 gen_cfg.clk_dir = regk_sser_in; 874 break;
784 break; 875 case MASTER_INPUT:
785 case MASTER_INPUT: 876 port->output = 0;
786 port->output = 0; 877 port->input = 1;
787 port->input = 1; 878 frm_cfg.frame_pin_dir = regk_sser_out;
788 frm_cfg.frame_pin_dir = regk_sser_out; 879 frm_cfg.out_on = regk_sser_intern_tb;
789 frm_cfg.out_on = regk_sser_intern_tb; 880 gen_cfg.clk_dir = regk_sser_out;
790 gen_cfg.clk_dir = regk_sser_out; 881 break;
791 break; 882 case SLAVE_INPUT:
792 case SLAVE_INPUT: 883 port->output = 0;
793 port->output = 0; 884 port->input = 1;
794 port->input = 1; 885 frm_cfg.frame_pin_dir = regk_sser_in;
795 frm_cfg.frame_pin_dir = regk_sser_in; 886 gen_cfg.clk_dir = regk_sser_in;
796 gen_cfg.clk_dir = regk_sser_in; 887 break;
797 break; 888 case MASTER_BIDIR:
798 case MASTER_BIDIR: 889 port->output = 1;
799 port->output = 1; 890 port->input = 1;
800 port->input = 1; 891 frm_cfg.frame_pin_dir = regk_sser_out;
801 frm_cfg.frame_pin_dir = regk_sser_out; 892 frm_cfg.out_on = regk_sser_intern_tb;
802 frm_cfg.out_on = regk_sser_intern_tb; 893 gen_cfg.clk_dir = regk_sser_out;
803 gen_cfg.clk_dir = regk_sser_out; 894 break;
804 break; 895 case SLAVE_BIDIR:
805 case SLAVE_BIDIR: 896 port->output = 1;
806 port->output = 1; 897 port->input = 1;
807 port->input = 1; 898 frm_cfg.frame_pin_dir = regk_sser_in;
808 frm_cfg.frame_pin_dir = regk_sser_in; 899 gen_cfg.clk_dir = regk_sser_in;
809 gen_cfg.clk_dir = regk_sser_in; 900 break;
810 break; 901 default:
811 default: 902 spin_unlock_irq(&port->lock);
812 spin_unlock_irq(&port->lock); 903 return -EINVAL;
813 return -EINVAL;
814 } 904 }
815 if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT)) 905 if (!port->use_dma || arg == MASTER_OUTPUT ||
906 arg == SLAVE_OUTPUT)
816 intr_mask.rdav = regk_sser_yes; 907 intr_mask.rdav = regk_sser_yes;
817 break; 908 break;
818 case SSP_FRAME_SYNC: 909 case SSP_FRAME_SYNC:
819 if (arg & NORMAL_SYNC) { 910 if (arg & NORMAL_SYNC) {
820 frm_cfg.rec_delay = 1; 911 frm_cfg.rec_delay = 1;
821 frm_cfg.tr_delay = 1; 912 frm_cfg.tr_delay = 1;
822 } 913 } else if (arg & EARLY_SYNC)
823 else if (arg & EARLY_SYNC)
824 frm_cfg.rec_delay = frm_cfg.tr_delay = 0; 914 frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
825 else if (arg & SECOND_WORD_SYNC) { 915 else if (arg & LATE_SYNC) {
916 frm_cfg.tr_delay = 2;
917 frm_cfg.rec_delay = 2;
918 } else if (arg & SECOND_WORD_SYNC) {
826 frm_cfg.rec_delay = 7; 919 frm_cfg.rec_delay = 7;
827 frm_cfg.tr_delay = 1; 920 frm_cfg.tr_delay = 1;
828 } 921 }
829 922
830 tr_cfg.bulk_wspace = frm_cfg.tr_delay; 923 tr_cfg.bulk_wspace = frm_cfg.tr_delay;
831 frm_cfg.early_wend = regk_sser_yes; 924 frm_cfg.early_wend = regk_sser_yes;
832 if (arg & BIT_SYNC) 925 if (arg & BIT_SYNC)
833 frm_cfg.type = regk_sser_edge; 926 frm_cfg.type = regk_sser_edge;
834 else if (arg & WORD_SYNC) 927 else if (arg & WORD_SYNC)
835 frm_cfg.type = regk_sser_level; 928 frm_cfg.type = regk_sser_level;
836 else if (arg & EXTENDED_SYNC) 929 else if (arg & EXTENDED_SYNC)
837 frm_cfg.early_wend = regk_sser_no; 930 frm_cfg.early_wend = regk_sser_no;
838 931
839 if (arg & SYNC_ON) 932 if (arg & SYNC_ON)
840 frm_cfg.frame_pin_use = regk_sser_frm; 933 frm_cfg.frame_pin_use = regk_sser_frm;
841 else if (arg & SYNC_OFF) 934 else if (arg & SYNC_OFF)
842 frm_cfg.frame_pin_use = regk_sser_gio0; 935 frm_cfg.frame_pin_use = regk_sser_gio0;
843 936
844 dma_w_size = regk_dma_set_w_size2; 937 dma_w_size = regk_dma_set_w_size2;
845 if (arg & WORD_SIZE_8) { 938 if (arg & WORD_SIZE_8) {
846 rec_cfg.sample_size = tr_cfg.sample_size = 7; 939 rec_cfg.sample_size = tr_cfg.sample_size = 7;
847 dma_w_size = regk_dma_set_w_size1; 940 dma_w_size = regk_dma_set_w_size1;
848 } else if (arg & WORD_SIZE_12) 941 } else if (arg & WORD_SIZE_12)
849 rec_cfg.sample_size = tr_cfg.sample_size = 11; 942 rec_cfg.sample_size = tr_cfg.sample_size = 11;
850 else if (arg & WORD_SIZE_16) 943 else if (arg & WORD_SIZE_16)
851 rec_cfg.sample_size = tr_cfg.sample_size = 15; 944 rec_cfg.sample_size = tr_cfg.sample_size = 15;
852 else if (arg & WORD_SIZE_24) 945 else if (arg & WORD_SIZE_24)
853 rec_cfg.sample_size = tr_cfg.sample_size = 23; 946 rec_cfg.sample_size = tr_cfg.sample_size = 23;
854 else if (arg & WORD_SIZE_32) 947 else if (arg & WORD_SIZE_32)
855 rec_cfg.sample_size = tr_cfg.sample_size = 31; 948 rec_cfg.sample_size = tr_cfg.sample_size = 31;
856 949
857 if (arg & BIT_ORDER_MSB) 950 if (arg & BIT_ORDER_MSB)
858 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst; 951 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
859 else if (arg & BIT_ORDER_LSB) 952 else if (arg & BIT_ORDER_LSB)
860 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst; 953 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
861 954
862 if (arg & FLOW_CONTROL_ENABLE) { 955 if (arg & FLOW_CONTROL_ENABLE) {
863 frm_cfg.status_pin_use = regk_sser_frm; 956 frm_cfg.status_pin_use = regk_sser_frm;
864 rec_cfg.fifo_thr = regk_sser_thr16; 957 rec_cfg.fifo_thr = regk_sser_thr16;
865 } else if (arg & FLOW_CONTROL_DISABLE) { 958 } else if (arg & FLOW_CONTROL_DISABLE) {
866 frm_cfg.status_pin_use = regk_sser_gio0; 959 frm_cfg.status_pin_use = regk_sser_gio0;
867 rec_cfg.fifo_thr = regk_sser_inf; 960 rec_cfg.fifo_thr = regk_sser_inf;
868 } 961 }
869 962
870 if (arg & CLOCK_NOT_GATED) 963 if (arg & CLOCK_NOT_GATED)
871 gen_cfg.gate_clk = regk_sser_no; 964 gen_cfg.gate_clk = regk_sser_no;
872 else if (arg & CLOCK_GATED) 965 else if (arg & CLOCK_GATED)
873 gen_cfg.gate_clk = regk_sser_yes; 966 gen_cfg.gate_clk = regk_sser_yes;
874 967
875 break; 968 break;
876 case SSP_IPOLARITY: 969 case SSP_IPOLARITY:
877 /* NOTE!! negedge is considered NORMAL */ 970 /* NOTE!! negedge is considered NORMAL */
878 if (arg & CLOCK_NORMAL) 971 if (arg & CLOCK_NORMAL)
879 rec_cfg.clk_pol = regk_sser_neg; 972 rec_cfg.clk_pol = regk_sser_neg;
880 else if (arg & CLOCK_INVERT) 973 else if (arg & CLOCK_INVERT)
881 rec_cfg.clk_pol = regk_sser_pos; 974 rec_cfg.clk_pol = regk_sser_pos;
882 975
883 if (arg & FRAME_NORMAL) 976 if (arg & FRAME_NORMAL)
884 frm_cfg.level = regk_sser_pos_hi; 977 frm_cfg.level = regk_sser_pos_hi;
885 else if (arg & FRAME_INVERT) 978 else if (arg & FRAME_INVERT)
886 frm_cfg.level = regk_sser_neg_lo; 979 frm_cfg.level = regk_sser_neg_lo;
887 980
888 if (arg & STATUS_NORMAL) 981 if (arg & STATUS_NORMAL)
889 gen_cfg.hold_pol = regk_sser_pos; 982 gen_cfg.hold_pol = regk_sser_pos;
890 else if (arg & STATUS_INVERT) 983 else if (arg & STATUS_INVERT)
891 gen_cfg.hold_pol = regk_sser_neg; 984 gen_cfg.hold_pol = regk_sser_neg;
892 break; 985 break;
893 case SSP_OPOLARITY: 986 case SSP_OPOLARITY:
894 if (arg & CLOCK_NORMAL) 987 if (arg & CLOCK_NORMAL)
895 gen_cfg.out_clk_pol = regk_sser_pos; 988 gen_cfg.out_clk_pol = regk_sser_pos;
896 else if (arg & CLOCK_INVERT) 989 else if (arg & CLOCK_INVERT)
897 gen_cfg.out_clk_pol = regk_sser_neg; 990 gen_cfg.out_clk_pol = regk_sser_neg;
898 991
899 if (arg & FRAME_NORMAL) 992 if (arg & FRAME_NORMAL)
900 frm_cfg.level = regk_sser_pos_hi; 993 frm_cfg.level = regk_sser_pos_hi;
901 else if (arg & FRAME_INVERT) 994 else if (arg & FRAME_INVERT)
902 frm_cfg.level = regk_sser_neg_lo; 995 frm_cfg.level = regk_sser_neg_lo;
903 996
904 if (arg & STATUS_NORMAL) 997 if (arg & STATUS_NORMAL)
905 gen_cfg.hold_pol = regk_sser_pos; 998 gen_cfg.hold_pol = regk_sser_pos;
906 else if (arg & STATUS_INVERT) 999 else if (arg & STATUS_INVERT)
907 gen_cfg.hold_pol = regk_sser_neg; 1000 gen_cfg.hold_pol = regk_sser_neg;
908 break; 1001 break;
909 case SSP_SPI: 1002 case SSP_SPI:
910 rec_cfg.fifo_thr = regk_sser_inf; 1003 rec_cfg.fifo_thr = regk_sser_inf;
911 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst; 1004 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
912 rec_cfg.sample_size = tr_cfg.sample_size = 7; 1005 rec_cfg.sample_size = tr_cfg.sample_size = 7;
913 frm_cfg.frame_pin_use = regk_sser_frm; 1006 frm_cfg.frame_pin_use = regk_sser_frm;
914 frm_cfg.type = regk_sser_level; 1007 frm_cfg.type = regk_sser_level;
915 frm_cfg.tr_delay = 1; 1008 frm_cfg.tr_delay = 1;
916 frm_cfg.level = regk_sser_neg_lo; 1009 frm_cfg.level = regk_sser_neg_lo;
917 if (arg & SPI_SLAVE) 1010 if (arg & SPI_SLAVE) {
918 {
919 rec_cfg.clk_pol = regk_sser_neg; 1011 rec_cfg.clk_pol = regk_sser_neg;
920 gen_cfg.clk_dir = regk_sser_in; 1012 gen_cfg.clk_dir = regk_sser_in;
921 port->input = 1; 1013 port->input = 1;
922 port->output = 0; 1014 port->output = 0;
923 } 1015 } else {
924 else
925 {
926 gen_cfg.out_clk_pol = regk_sser_pos; 1016 gen_cfg.out_clk_pol = regk_sser_pos;
927 port->input = 0; 1017 port->input = 0;
928 port->output = 1; 1018 port->output = 1;
929 gen_cfg.clk_dir = regk_sser_out; 1019 gen_cfg.clk_dir = regk_sser_out;
930 } 1020 }
931 break; 1021 break;
932 case SSP_INBUFCHUNK: 1022 case SSP_INBUFCHUNK:
933 break; 1023 break;
934 default: 1024 default:
935 return_val = -1; 1025 return_val = -1;
936 } 1026 }
937 1027
938 1028
939 if (port->started) { 1029 if (port->started) {
940 rec_cfg.rec_en = port->input; 1030 rec_cfg.rec_en = port->input;
941 gen_cfg.en = (port->output | port->input); 1031 gen_cfg.en = (port->output | port->input);
942 } 1032 }
943 1033
944 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); 1034 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
945 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); 1035 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
946 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg); 1036 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
947 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); 1037 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
948 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); 1038 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
949 1039
950 1040
951 if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 | 1041 if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
952 WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) { 1042 WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
953 int en = gen_cfg.en; 1043 int en = gen_cfg.en;
954 gen_cfg.en = 0; 1044 gen_cfg.en = 0;
955 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); 1045 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
956 /* ##### Should DMA be stopped before we change DMA size? */ 1046 /* ##### Should DMA be stopped before we change DMA size? */
957 DMA_WR_CMD(port->regi_dmain, dma_w_size); 1047 DMA_WR_CMD(port->regi_dmain, dma_w_size);
958 DMA_WR_CMD(port->regi_dmaout, dma_w_size); 1048 DMA_WR_CMD(port->regi_dmaout, dma_w_size);
959 gen_cfg.en = en; 1049 gen_cfg.en = en;
960 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); 1050 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
961 } 1051 }
962 1052
963 spin_unlock_irq(&port->lock); 1053 spin_unlock_irq(&port->lock);
964 return return_val; 1054 return return_val;
965 } 1055 }
966 1056
967 static long sync_serial_ioctl(struct file *file, 1057 static long sync_serial_ioctl(struct file *file,
968 unsigned int cmd, unsigned long arg) 1058 unsigned int cmd, unsigned long arg)
969 { 1059 {
970 long ret; 1060 long ret;
971 1061
972 mutex_lock(&sync_serial_mutex); 1062 mutex_lock(&sync_serial_mutex);
973 ret = sync_serial_ioctl_unlocked(file, cmd, arg); 1063 ret = sync_serial_ioctl_unlocked(file, cmd, arg);
974 mutex_unlock(&sync_serial_mutex); 1064 mutex_unlock(&sync_serial_mutex);
975 1065
976 return ret; 1066 return ret;
977 } 1067 }
978 1068
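
Judging from the handler above, SSP_FRAME_SYNC carries the word-size, bit-order, flow-control and clock-gating bits, while SSP_IPOLARITY/SSP_OPOLARITY select clock, frame and status polarities. The sketch below shows how a user-space program might drive these ioctls; it is illustrative only and not part of this commit. The device node name /dev/syncser0 and the "sync_serial.h" include are assumptions; the SSP_* and flag names are the ones tested in sync_serial_ioctl_unlocked().

/* Sketch only: configure a port from user space (device node name and
 * header path are assumed, not taken from this commit). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "sync_serial.h"	/* assumed: exports the SSP_* ioctls and flag bits */

int configure_port(void)
{
	int fd = open("/dev/syncser0", O_RDWR);		/* assumed node name */

	if (fd < 0)
		return -1;

	/* 16-bit samples, MSB first, no flow control, free-running clock. */
	if (ioctl(fd, SSP_FRAME_SYNC, WORD_SIZE_16 | BIT_ORDER_MSB |
		  FLOW_CONTROL_DISABLE | CLOCK_NOT_GATED) < 0)
		perror("SSP_FRAME_SYNC");

	/* Sample on the "normal" (negative) clock edge, active-high frame. */
	if (ioctl(fd, SSP_IPOLARITY, CLOCK_NORMAL | FRAME_NORMAL | STATUS_NORMAL) < 0)
		perror("SSP_IPOLARITY");

	return fd;
}
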
979 /* NOTE: sync_serial_write does not support concurrency */ 1069 /* NOTE: sync_serial_write does not support concurrency */
980 static ssize_t sync_serial_write(struct file *file, const char *buf, 1070 static ssize_t sync_serial_write(struct file *file, const char __user *buf,
981 size_t count, loff_t *ppos) 1071 size_t count, loff_t *ppos)
982 { 1072 {
983 int dev = iminor(file_inode(file)); 1073 int dev = iminor(file_inode(file));
984 DECLARE_WAITQUEUE(wait, current); 1074 DECLARE_WAITQUEUE(wait, current);
985 struct sync_port *port; 1075 struct sync_port *port;
986 int trunc_count; 1076 int trunc_count;
987 unsigned long flags; 1077 unsigned long flags;
988 int bytes_free; 1078 int bytes_free;
989 int out_buf_count; 1079 int out_buf_count;
990 1080
991 unsigned char *rd_ptr; /* First allocated byte in the buffer */ 1081 unsigned char *rd_ptr; /* First allocated byte in the buffer */
992 unsigned char *wr_ptr; /* First free byte in the buffer */ 1082 unsigned char *wr_ptr; /* First free byte in the buffer */
993 unsigned char *buf_stop_ptr; /* Last byte + 1 */ 1083 unsigned char *buf_stop_ptr; /* Last byte + 1 */
994 1084
995 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { 1085 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
996 DEBUG(printk("Invalid minor %d\n", dev)); 1086 DEBUG(pr_info("Invalid minor %d\n", dev));
997 return -ENODEV; 1087 return -ENODEV;
998 } 1088 }
999 port = &ports[dev]; 1089 port = &ports[dev];
1000 1090
1001 /* |<- OUT_BUFFER_SIZE ->| 1091 /* |<- OUT_BUFFER_SIZE ->|
1002 * |<- out_buf_count ->| 1092 * |<- out_buf_count ->|
1003 * |<- trunc_count ->| ...->| 1093 * |<- trunc_count ->| ...->|
1004 * ______________________________________________________ 1094 * ______________________________________________________
1005 * | free | data | free | 1095 * | free | data | free |
1006 * |_________|___________________|________________________| 1096 * |_________|___________________|________________________|
1007 * ^ rd_ptr ^ wr_ptr 1097 * ^ rd_ptr ^ wr_ptr
1008 */ 1098 */
1009 DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n", 1099 DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n",
1010 port->port_nbr, count, port->active_tr_descr, 1100 port->port_nbr, count, port->active_tr_descr,
1011 port->catch_tr_descr)); 1101 port->catch_tr_descr));
1012 1102
1013 /* Read variables that may be updated by interrupts */ 1103 /* Read variables that may be updated by interrupts */
1014 spin_lock_irqsave(&port->lock, flags); 1104 spin_lock_irqsave(&port->lock, flags);
1015 rd_ptr = port->out_rd_ptr; 1105 rd_ptr = port->out_rd_ptr;
1016 out_buf_count = port->out_buf_count; 1106 out_buf_count = port->out_buf_count;
1017 spin_unlock_irqrestore(&port->lock, flags); 1107 spin_unlock_irqrestore(&port->lock, flags);
1018 1108
1019 /* Check if resources are available */ 1109 /* Check if resources are available */
1020 if (port->tr_running && 1110 if (port->tr_running &&
1021 ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) || 1111 ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
1022 out_buf_count >= OUT_BUFFER_SIZE)) { 1112 out_buf_count >= OUT_BUFFER_SIZE)) {
1023 DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev)); 1113 DEBUGWRITE(pr_info("sser%d full\n", dev));
1024 return -EAGAIN; 1114 return -EAGAIN;
1025 } 1115 }
1026 1116
1027 buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE; 1117 buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
1028 1118
1029 /* Determine pointer to the first free byte, before copying. */ 1119 /* Determine pointer to the first free byte, before copying. */
1030 wr_ptr = rd_ptr + out_buf_count; 1120 wr_ptr = rd_ptr + out_buf_count;
1031 if (wr_ptr >= buf_stop_ptr) 1121 if (wr_ptr >= buf_stop_ptr)
1032 wr_ptr -= OUT_BUFFER_SIZE; 1122 wr_ptr -= OUT_BUFFER_SIZE;
1033 1123
1034 /* If we wrap the ring buffer, let the user space program handle it by 1124 /* If we wrap the ring buffer, let the user space program handle it by
1035 * truncating the data. This could be more elegant; small buffer 1125 * truncating the data. This could be more elegant; small buffer
1036 * fragments may occur. 1126 * fragments may occur.
1037 */ 1127 */
1038 bytes_free = OUT_BUFFER_SIZE - out_buf_count; 1128 bytes_free = OUT_BUFFER_SIZE - out_buf_count;
1039 if (wr_ptr + bytes_free > buf_stop_ptr) 1129 if (wr_ptr + bytes_free > buf_stop_ptr)
1040 bytes_free = buf_stop_ptr - wr_ptr; 1130 bytes_free = buf_stop_ptr - wr_ptr;
1041 trunc_count = (count < bytes_free) ? count : bytes_free; 1131 trunc_count = (count < bytes_free) ? count : bytes_free;
1042 1132
1043 if (copy_from_user(wr_ptr, buf, trunc_count)) 1133 if (copy_from_user(wr_ptr, buf, trunc_count))
1044 return -EFAULT; 1134 return -EFAULT;
1045 1135
1046 DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n", 1136 DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d %p %p %p\n",
1047 out_buf_count, trunc_count, 1137 out_buf_count, trunc_count,
1048 port->out_buf_count, port->out_buffer, 1138 port->out_buf_count, port->out_buffer,
1049 wr_ptr, buf_stop_ptr)); 1139 wr_ptr, buf_stop_ptr));
1050 1140
1051 /* Make sure transmitter/receiver is running */ 1141 /* Make sure transmitter/receiver is running */
1052 if (!port->started) { 1142 if (!port->started) {
1053 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); 1143 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1054 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); 1144 reg_sser_rw_rec_cfg rec_cfg =
1145 REG_RD(sser, port->regi_sser, rw_rec_cfg);
1055 cfg.en = regk_sser_yes; 1146 cfg.en = regk_sser_yes;
1056 rec_cfg.rec_en = port->input; 1147 rec_cfg.rec_en = port->input;
1057 REG_WR(sser, port->regi_sser, rw_cfg, cfg); 1148 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1058 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); 1149 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1059 port->started = 1; 1150 port->started = 1;
1060 } 1151 }
1061 1152
1062 /* Setup wait if blocking */ 1153 /* Setup wait if blocking */
1063 if (!(file->f_flags & O_NONBLOCK)) { 1154 if (!(file->f_flags & O_NONBLOCK)) {
1064 add_wait_queue(&port->out_wait_q, &wait); 1155 add_wait_queue(&port->out_wait_q, &wait);
1065 set_current_state(TASK_INTERRUPTIBLE); 1156 set_current_state(TASK_INTERRUPTIBLE);
1066 } 1157 }
1067 1158
1068 spin_lock_irqsave(&port->lock, flags); 1159 spin_lock_irqsave(&port->lock, flags);
1069 port->out_buf_count += trunc_count; 1160 port->out_buf_count += trunc_count;
1070 if (port->use_dma) { 1161 if (port->use_dma) {
1162 #ifdef SYNC_SER_DMA
1071 start_dma_out(port, wr_ptr, trunc_count); 1163 start_dma_out(port, wr_ptr, trunc_count);
1164 #endif
1072 } else if (!port->tr_running) { 1165 } else if (!port->tr_running) {
1166 #ifdef SYNC_SER_MANUAL
1073 reg_sser_rw_intr_mask intr_mask; 1167 reg_sser_rw_intr_mask intr_mask;
1074 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); 1168 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1075 /* Start sender by writing data */ 1169 /* Start sender by writing data */
1076 send_word(port); 1170 send_word(port);
1077 /* and enable transmitter ready IRQ */ 1171 /* and enable transmitter ready IRQ */
1078 intr_mask.trdy = 1; 1172 intr_mask.trdy = 1;
1079 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); 1173 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1174 #endif
1080 } 1175 }
1081 spin_unlock_irqrestore(&port->lock, flags); 1176 spin_unlock_irqrestore(&port->lock, flags);
1082 1177
1083 /* Exit if non blocking */ 1178 /* Exit if non blocking */
1084 if (file->f_flags & O_NONBLOCK) { 1179 if (file->f_flags & O_NONBLOCK) {
1085 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n", 1180 DEBUGWRITE(pr_info("w d%d c %u %08x\n",
1086 port->port_nbr, trunc_count, 1181 port->port_nbr, trunc_count,
1087 REG_RD_INT(dma, port->regi_dmaout, r_intr))); 1182 REG_RD_INT(dma, port->regi_dmaout, r_intr)));
1088 return trunc_count; 1183 return trunc_count;
1089 } 1184 }
1090 1185
1091 schedule(); 1186 schedule();
1092 remove_wait_queue(&port->out_wait_q, &wait); 1187 remove_wait_queue(&port->out_wait_q, &wait);
1093 1188
1094 if (signal_pending(current)) 1189 if (signal_pending(current))
1095 return -EINTR; 1190 return -EINTR;
1096 1191
1097 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n", 1192 DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count));
1098 port->port_nbr, trunc_count));
1099 return trunc_count; 1193 return trunc_count;
1100 } 1194 }
1101 1195
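
As the NOTE above the function says, sync_serial_write() does not support concurrent writers, and two other properties matter to callers: the copy is truncated at the ring-buffer wrap (so a successful write may return less than count), and -EAGAIN is returned when the out buffer or the DMA descriptors are exhausted, even for a blocking writer. A minimal user-space retry loop coping with this could look like the sketch below; it is an illustration under those assumptions, not part of this commit.

/* Sketch only: push a whole buffer through the short-write/-EAGAIN
 * behaviour described above. fd is an already opened sync serial port. */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>

static int write_all(int fd, const unsigned char *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = write(fd, buf + done, len - done);

		if (n > 0)
			done += n;		/* short write: stopped at the buffer wrap */
		else if (n < 0 && errno == EAGAIN)
			usleep(1000);		/* out buffer/descriptors full, retry later */
		else if (n < 0 && errno == EINTR)
			continue;		/* signal while sleeping on out_wait_q */
		else
			return -1;		/* ENODEV, EFAULT, ... */
	}
	return 0;
}
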
1102 static ssize_t sync_serial_read(struct file * file, char * buf, 1196 static ssize_t sync_serial_read(struct file *file, char __user *buf,
1103 size_t count, loff_t *ppos) 1197 size_t count, loff_t *ppos)
1104 { 1198 {
1105 int dev = iminor(file_inode(file)); 1199 return __sync_serial_read(file, buf, count, ppos, NULL);
1106 int avail;
1107 sync_port *port;
1108 unsigned char* start;
1109 unsigned char* end;
1110 unsigned long flags;
1111
1112 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
1113 {
1114 DEBUG(printk("Invalid minor %d\n", dev));
1115 return -ENODEV;
1116 }
1117 port = &ports[dev];
1118
1119 DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
1120
1121 if (!port->started)
1122 {
1123 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1124 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1125 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1126 cfg.en = regk_sser_yes;
1127 tr_cfg.tr_en = regk_sser_yes;
1128 rec_cfg.rec_en = regk_sser_yes;
1129 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1130 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1131 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1132 port->started = 1;
1133 }
1134
1135 /* Calculate number of available bytes */
1136 /* Save pointers so they are not modified by an interrupt */
1137 spin_lock_irqsave(&port->lock, flags);
1138 start = (unsigned char*)port->readp; /* cast away volatile */
1139 end = (unsigned char*)port->writep; /* cast away volatile */
1140 spin_unlock_irqrestore(&port->lock, flags);
1141 while ((start == end) && !port->full) /* No data */
1142 {
1143 DEBUGREAD(printk(KERN_DEBUG "&"));
1144 if (file->f_flags & O_NONBLOCK)
1145 return -EAGAIN;
1146
1147 wait_event_interruptible(port->in_wait_q,
1148 !(start == end && !port->full));
1149 if (signal_pending(current))
1150 return -EINTR;
1151
1152 spin_lock_irqsave(&port->lock, flags);
1153 start = (unsigned char*)port->readp; /* cast away volatile */
1154 end = (unsigned char*)port->writep; /* cast away volatile */
1155 spin_unlock_irqrestore(&port->lock, flags);
1156 }
1157
1158 /* Lazy read, never return wrapped data. */
1159 if (port->full)
1160 avail = port->in_buffer_size;
1161 else if (end > start)
1162 avail = end - start;
1163 else
1164 avail = port->flip + port->in_buffer_size - start;
1165
1166 count = count > avail ? avail : count;
1167 if (copy_to_user(buf, start, count))
1168 return -EFAULT;
1169 /* Disable interrupts while updating readp */
1170 spin_lock_irqsave(&port->lock, flags);
1171 port->readp += count;
1172 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1173 port->readp = port->flip;
1174 port->full = 0;
1175 spin_unlock_irqrestore(&port->lock, flags);
1176 DEBUGREAD(printk("r %d\n", count));
1177 return count;
1178 } 1200 }
1179 1201
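
The rewritten read path simply delegates to __sync_serial_read() (defined earlier in the file, outside this hunk). Assuming it keeps the semantics of the code removed above (a lazy read that never returns wrapped data, and -EINTR on signals), callers should expect short reads; a hedged user-space sketch:

/* Sketch only: accumulate exactly 'want' bytes despite short reads.
 * Assumes the old read semantics are preserved by __sync_serial_read(). */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>

static ssize_t read_exact(int fd, unsigned char *buf, size_t want)
{
	size_t got = 0;

	while (got < want) {
		ssize_t n = read(fd, buf + got, want - got);

		if (n > 0)
			got += n;	/* short read: stopped at the wrap point */
		else if (n < 0 && errno == EINTR)
			continue;	/* signal while waiting in in_wait_q */
		else
			return n < 0 ? -1 : (ssize_t)got;
	}
	return got;
}
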
1180 static void send_word(sync_port* port) 1202 #ifdef SYNC_SER_MANUAL
1203 static void send_word(struct sync_port *port)
1181 { 1204 {
1182 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); 1205 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1183 reg_sser_rw_tr_data tr_data = {0}; 1206 reg_sser_rw_tr_data tr_data = {0};
1184 1207
1185 switch(tr_cfg.sample_size) 1208 switch (tr_cfg.sample_size) {
1209 case 8:
1210 port->out_buf_count--;
1211 tr_data.data = *port->out_rd_ptr++;
1212 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1213 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1214 port->out_rd_ptr = port->out_buffer;
1215 break;
1216 case 12:
1186 { 1217 {
1187 case 8:
1188 port->out_buf_count--;
1189 tr_data.data = *port->out_rd_ptr++;
1190 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1191 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1192 port->out_rd_ptr = port->out_buffer;
1193 break;
1194 case 12:
1195 {
1196 int data = (*port->out_rd_ptr++) << 8; 1218 int data = (*port->out_rd_ptr++) << 8;
1197 data |= *port->out_rd_ptr++; 1219 data |= *port->out_rd_ptr++;
1198 port->out_buf_count -= 2; 1220 port->out_buf_count -= 2;
1199 tr_data.data = data; 1221 tr_data.data = data;
1200 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); 1222 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1201 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) 1223 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1202 port->out_rd_ptr = port->out_buffer; 1224 port->out_rd_ptr = port->out_buffer;
1225 break;
1203 } 1226 }
1204 break;
1205 case 16: 1227 case 16:
1206 port->out_buf_count -= 2; 1228 port->out_buf_count -= 2;
1207 tr_data.data = *(unsigned short *)port->out_rd_ptr; 1229 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1208 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); 1230 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1209 port->out_rd_ptr += 2; 1231 port->out_rd_ptr += 2;
1210 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) 1232 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1211 port->out_rd_ptr = port->out_buffer; 1233 port->out_rd_ptr = port->out_buffer;
1212 break; 1234 break;
1213 case 24: 1235 case 24:
1214 port->out_buf_count -= 3; 1236 port->out_buf_count -= 3;
1215 tr_data.data = *(unsigned short *)port->out_rd_ptr; 1237 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1216 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); 1238 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1217 port->out_rd_ptr += 2; 1239 port->out_rd_ptr += 2;
1218 tr_data.data = *port->out_rd_ptr++; 1240 tr_data.data = *port->out_rd_ptr++;
1219 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); 1241 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1220 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) 1242 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1221 port->out_rd_ptr = port->out_buffer; 1243 port->out_rd_ptr = port->out_buffer;
1222 break; 1244 break;
1223 case 32: 1245 case 32:
1224 port->out_buf_count -= 4; 1246 port->out_buf_count -= 4;
1225 tr_data.data = *(unsigned short *)port->out_rd_ptr; 1247 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1226 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); 1248 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1227 port->out_rd_ptr += 2; 1249 port->out_rd_ptr += 2;
1228 tr_data.data = *(unsigned short *)port->out_rd_ptr; 1250 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1229 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); 1251 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1230 port->out_rd_ptr += 2; 1252 port->out_rd_ptr += 2;
1231 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) 1253 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1232 port->out_rd_ptr = port->out_buffer; 1254 port->out_rd_ptr = port->out_buffer;
1233 break; 1255 break;
1234 } 1256 }
1235 } 1257 }
1258 #endif
1236 1259
1237 static void start_dma_out(struct sync_port *port, 1260 #ifdef SYNC_SER_DMA
1238 const char *data, int count) 1261 static void start_dma_out(struct sync_port *port, const char *data, int count)
1239 { 1262 {
1240 port->active_tr_descr->buf = (char *) virt_to_phys((char *) data); 1263 port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
1241 port->active_tr_descr->after = port->active_tr_descr->buf + count; 1264 port->active_tr_descr->after = port->active_tr_descr->buf + count;
1242 port->active_tr_descr->intr = 1; 1265 port->active_tr_descr->intr = 1;
1243 1266
1244 port->active_tr_descr->eol = 1; 1267 port->active_tr_descr->eol = 1;
1245 port->prev_tr_descr->eol = 0; 1268 port->prev_tr_descr->eol = 0;
1246 1269
1247 DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n", 1270 DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
1248 port->prev_tr_descr, port->active_tr_descr)); 1271 port->prev_tr_descr, port->active_tr_descr));
1249 port->prev_tr_descr = port->active_tr_descr; 1272 port->prev_tr_descr = port->active_tr_descr;
1250 port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next); 1273 port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);
1251 1274
1252 if (!port->tr_running) { 1275 if (!port->tr_running) {
1253 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, 1276 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
1254 rw_tr_cfg); 1277 rw_tr_cfg);
1255 1278
1256 port->out_context.next = 0; 1279 port->out_context.next = NULL;
1257 port->out_context.saved_data = 1280 port->out_context.saved_data =
1258 (dma_descr_data *)virt_to_phys(port->prev_tr_descr); 1281 (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
1259 port->out_context.saved_data_buf = port->prev_tr_descr->buf; 1282 port->out_context.saved_data_buf = port->prev_tr_descr->buf;
1260 1283
1261 DMA_START_CONTEXT(port->regi_dmaout, 1284 DMA_START_CONTEXT(port->regi_dmaout,
1262 virt_to_phys((char *)&port->out_context)); 1285 virt_to_phys((char *)&port->out_context));
1263 1286
1264 tr_cfg.tr_en = regk_sser_yes; 1287 tr_cfg.tr_en = regk_sser_yes;
1265 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); 1288 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1266 DEBUGTRDMA(printk(KERN_DEBUG "dma s\n");); 1289 DEBUGTRDMA(pr_info("dma s\n"););
1267 } else { 1290 } else {
1268 DMA_CONTINUE_DATA(port->regi_dmaout); 1291 DMA_CONTINUE_DATA(port->regi_dmaout);
1269 DEBUGTRDMA(printk(KERN_DEBUG "dma c\n");); 1292 DEBUGTRDMA(pr_info("dma c\n"););
1270 } 1293 }
1271 1294
1272 port->tr_running = 1; 1295 port->tr_running = 1;
1273 } 1296 }
1274 1297
1275 static void start_dma_in(sync_port *port) 1298 static void start_dma_in(struct sync_port *port)
1276 { 1299 {
1277 int i; 1300 int i;
1278 char *buf; 1301 char *buf;
1302 unsigned long flags;
1303 spin_lock_irqsave(&port->lock, flags);
1279 port->writep = port->flip; 1304 port->writep = port->flip;
1305 spin_unlock_irqrestore(&port->lock, flags);
1280 1306
1281 if (port->writep > port->flip + port->in_buffer_size) { 1307 buf = (char *)virt_to_phys(port->in_buffer);
1282 panic("Offset too large in sync serial driver\n");
1283 return;
1284 }
1285 buf = (char*)virt_to_phys(port->in_buffer);
1286 for (i = 0; i < NBR_IN_DESCR; i++) { 1308 for (i = 0; i < NBR_IN_DESCR; i++) {
1287 port->in_descr[i].buf = buf; 1309 port->in_descr[i].buf = buf;
1288 port->in_descr[i].after = buf + port->inbufchunk; 1310 port->in_descr[i].after = buf + port->inbufchunk;
1289 port->in_descr[i].intr = 1; 1311 port->in_descr[i].intr = 1;
1290 port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]); 1312 port->in_descr[i].next =
1313 (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
1291 port->in_descr[i].buf = buf; 1314 port->in_descr[i].buf = buf;
1292 buf += port->inbufchunk; 1315 buf += port->inbufchunk;
1293 } 1316 }
1294 /* Link the last descriptor to the first */ 1317 /* Link the last descriptor to the first */
1295 port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); 1318 port->in_descr[i-1].next =
1319 (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
1296 port->in_descr[i-1].eol = regk_sser_yes; 1320 port->in_descr[i-1].eol = regk_sser_yes;
1297 port->next_rx_desc = &port->in_descr[0]; 1321 port->next_rx_desc = &port->in_descr[0];
1298 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1]; 1322 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
1299 port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); 1323 port->in_context.saved_data =
1324 (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
1300 port->in_context.saved_data_buf = port->in_descr[0].buf; 1325 port->in_context.saved_data_buf = port->in_descr[0].buf;
1301 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context)); 1326 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
1302 } 1327 }
1303 1328
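
start_dma_in() above establishes a simple invariant: NBR_IN_DESCR descriptors each cover one inbufchunk-sized slice of in_buffer, the last descriptor links back to the first and carries the end-of-list flag, and the context is then handed to DMA_START_CONTEXT(). The standalone sketch below restates that invariant with a hypothetical, simplified descriptor type (the real dma_descr_data has more fields and uses physical addresses); it is illustrative only.

/* Sketch only: build a circular receive-descriptor ring over one buffer.
 * 'struct fake_descr' is hypothetical and merely mirrors the fields used
 * by start_dma_in(): buf, after, next and eol. */
#include <stddef.h>

struct fake_descr {
	char *buf;			/* first byte covered by this descriptor */
	char *after;			/* one past the last covered byte */
	struct fake_descr *next;	/* next descriptor in the ring */
	int eol;			/* end-of-list marker for the DMA engine */
};

static void build_rx_ring(struct fake_descr *d, int n, char *buf, size_t chunk)
{
	int i;

	for (i = 0; i < n; i++) {
		d[i].buf = buf + i * chunk;
		d[i].after = d[i].buf + chunk;
		d[i].next = &d[(i + 1) % n];	/* the tail wraps to the head */
		d[i].eol = (i == n - 1);	/* only the tail carries EOL */
	}
}
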
1304 #ifdef SYNC_SER_DMA
1305 static irqreturn_t tr_interrupt(int irq, void *dev_id) 1329 static irqreturn_t tr_interrupt(int irq, void *dev_id)
1306 { 1330 {
1307 reg_dma_r_masked_intr masked; 1331 reg_dma_r_masked_intr masked;
1308 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; 1332 reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
1309 reg_dma_rw_stat stat; 1333 reg_dma_rw_stat stat;
1310 int i; 1334 int i;
1311 int found = 0; 1335 int found = 0;
1312 int stop_sser = 0; 1336 int stop_sser = 0;
1313 1337
1314 for (i = 0; i < NBR_PORTS; i++) { 1338 for (i = 0; i < NBR_PORTS; i++) {
1315 sync_port *port = &ports[i]; 1339 struct sync_port *port = &ports[i];
1316 if (!port->enabled || !port->use_dma) 1340 if (!port->enabled || !port->use_dma)
1317 continue; 1341 continue;
1318 1342
1319 /* IRQ active for the port? */ 1343 /* IRQ active for the port? */
1320 masked = REG_RD(dma, port->regi_dmaout, r_masked_intr); 1344 masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
1321 if (!masked.data) 1345 if (!masked.data)
1322 continue; 1346 continue;
1323 1347
1324 found = 1; 1348 found = 1;
1325 1349
1326 /* Check if we should stop the DMA transfer */ 1350 /* Check if we should stop the DMA transfer */
1327 stat = REG_RD(dma, port->regi_dmaout, rw_stat); 1351 stat = REG_RD(dma, port->regi_dmaout, rw_stat);
1328 if (stat.list_state == regk_dma_data_at_eol) 1352 if (stat.list_state == regk_dma_data_at_eol)
1329 stop_sser = 1; 1353 stop_sser = 1;
1330 1354
1331 /* Clear IRQ */ 1355 /* Clear IRQ */
1332 REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr); 1356 REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
1333 1357
1334 if (!stop_sser) { 1358 if (!stop_sser) {
1335 /* The DMA has completed a descriptor, EOL was not 1359 /* The DMA has completed a descriptor, EOL was not
1336 * encountered, so step relevant descriptor and 1360 * encountered, so step relevant descriptor and
1337 * data pointers forward. */ 1361 * data pointers forward. */
1338 int sent; 1362 int sent;
1339 sent = port->catch_tr_descr->after - 1363 sent = port->catch_tr_descr->after -
1340 port->catch_tr_descr->buf; 1364 port->catch_tr_descr->buf;
1341 DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t" 1365 DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
1342 "in descr %p (ac: %p)\n", 1366 "in descr %p (ac: %p)\n",
1343 port->out_buf_count, sent, 1367 port->out_buf_count, sent,
1344 port->out_buf_count - sent, 1368 port->out_buf_count - sent,
1345 port->catch_tr_descr, 1369 port->catch_tr_descr,
1346 port->active_tr_descr);); 1370 port->active_tr_descr););
1347 port->out_buf_count -= sent; 1371 port->out_buf_count -= sent;
1348 port->catch_tr_descr = 1372 port->catch_tr_descr =
1349 phys_to_virt((int) port->catch_tr_descr->next); 1373 phys_to_virt((int) port->catch_tr_descr->next);
1350 port->out_rd_ptr = 1374 port->out_rd_ptr =
1351 phys_to_virt((int) port->catch_tr_descr->buf); 1375 phys_to_virt((int) port->catch_tr_descr->buf);
1352 } else { 1376 } else {
1353 int i, sent; 1377 reg_sser_rw_tr_cfg tr_cfg;
1378 int j, sent;
1354 /* EOL handler. 1379 /* EOL handler.
1355 * Note that if an EOL was encountered during the irq 1380 * Note that if an EOL was encountered during the irq
1356 * locked section of sync_serial_write() the DMA will be 1381 * locked section of sync_serial_write() the DMA will be
1357 * restarted and the eol flag will be cleared. 1382 * restarted and the eol flag will be cleared.
1358 * The remaining descriptors will be traversed by 1383 * The remaining descriptors will be traversed by
1359 * the descriptor interrupts as usual. 1384 * the descriptor interrupts as usual.
1360 */ 1385 */
1361 i = 0; 1386 j = 0;
1362 while (!port->catch_tr_descr->eol) { 1387 while (!port->catch_tr_descr->eol) {
1363 sent = port->catch_tr_descr->after - 1388 sent = port->catch_tr_descr->after -
1364 port->catch_tr_descr->buf; 1389 port->catch_tr_descr->buf;
1365 DEBUGOUTBUF(printk(KERN_DEBUG 1390 DEBUGOUTBUF(pr_info(
1366 "traversing descr %p -%d (%d)\n", 1391 "traversing descr %p -%d (%d)\n",
1367 port->catch_tr_descr, 1392 port->catch_tr_descr,
1368 sent, 1393 sent,
1369 port->out_buf_count)); 1394 port->out_buf_count));
1370 port->out_buf_count -= sent; 1395 port->out_buf_count -= sent;
1371 port->catch_tr_descr = phys_to_virt( 1396 port->catch_tr_descr = phys_to_virt(
1372 (int)port->catch_tr_descr->next); 1397 (int)port->catch_tr_descr->next);
1373 i++; 1398 j++;
1374 if (i >= NBR_OUT_DESCR) { 1399 if (j >= NBR_OUT_DESCR) {
1375 /* TODO: Reset and recover */ 1400 /* TODO: Reset and recover */
1376 panic("sync_serial: missing eol"); 1401 panic("sync_serial: missing eol");
1377 } 1402 }
1378 } 1403 }
1379 sent = port->catch_tr_descr->after - 1404 sent = port->catch_tr_descr->after -
1380 port->catch_tr_descr->buf; 1405 port->catch_tr_descr->buf;
1381 DEBUGOUTBUF(printk(KERN_DEBUG 1406 DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
1382 "eol at descr %p -%d (%d)\n",
1383 port->catch_tr_descr, 1407 port->catch_tr_descr,
1384 sent, 1408 sent,
1385 port->out_buf_count)); 1409 port->out_buf_count));
1386 1410
1387 port->out_buf_count -= sent; 1411 port->out_buf_count -= sent;
1388 1412
1389 /* Update read pointer to first free byte, we 1413 /* Update read pointer to first free byte, we
1390 * may already be writing data there. */ 1414 * may already be writing data there. */
1391 port->out_rd_ptr = 1415 port->out_rd_ptr =
1392 phys_to_virt((int) port->catch_tr_descr->after); 1416 phys_to_virt((int) port->catch_tr_descr->after);
1393 if (port->out_rd_ptr > port->out_buffer + 1417 if (port->out_rd_ptr > port->out_buffer +
1394 OUT_BUFFER_SIZE) 1418 OUT_BUFFER_SIZE)
1395 port->out_rd_ptr = port->out_buffer; 1419 port->out_rd_ptr = port->out_buffer;
1396 1420
1397 reg_sser_rw_tr_cfg tr_cfg = 1421 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1398 REG_RD(sser, port->regi_sser, rw_tr_cfg); 1422 DEBUGTXINT(pr_info(
1399 DEBUGTXINT(printk(KERN_DEBUG
1400 "tr_int DMA stop %d, set catch @ %p\n", 1423 "tr_int DMA stop %d, set catch @ %p\n",
1401 port->out_buf_count, 1424 port->out_buf_count,