Commit ea9d1e960ca2158794425738c6258afe60879a64
Committed by: Greg Kroah-Hartman
1 parent: 1bcf4b2a22
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
staging: cxt1e1: musycc.c: uses tabs for indentation
This commit converts several instances of space usage for indentation to tabs.

Signed-off-by: Johan Meiring <johanmeiring@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
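The change is whitespace-only: each affected line trades leading spaces for leading tabs and is otherwise untouched. An illustrative hunk (made up for clarity, not one of the actual 921 changed lines; the tab is shown as \t):

    -        cfg = CFG_CH_FLAG_7E << IDLE_CODE;   /* indented with 8 spaces */
    +\tcfg = CFG_CH_FLAG_7E << IDLE_CODE;         /* indented with 1 tab */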
Showing 1 changed file with 921 additions and 921 deletions
drivers/staging/cxt1e1/musycc.c
unsigned int max_intcnt = 0;
unsigned int max_bh = 0;

/*-----------------------------------------------------------------------------
 * musycc.c -
 *
 * Copyright (C) 2007  One Stop Systems, Inc.
 * Copyright (C) 2003-2006  SBE, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * For further information, contact via email: support@onestopsystems.com
 * One Stop Systems, Inc.  Escondido, California  U.S.A.
 *-----------------------------------------------------------------------------
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include "pmcc4_sysdep.h"
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include "sbecom_inline_linux.h"
#include "libsbew.h"
#include "pmcc4_private.h"
#include "pmcc4.h"
#include "musycc.h"

#ifdef SBE_INCLUDE_SYMBOLS
#define STATIC
#else
#define STATIC static
#endif

#define sd_find_chan(ci,ch)   c4_find_chan(ch)


/*******************************************************************/
/* global driver variables */
extern ci_t *c4_list;
extern int drvr_state;
extern int cxt1e1_log_level;

extern int cxt1e1_max_mru;
extern int cxt1e1_max_mtu;
extern int max_rxdesc_used;
extern int max_txdesc_used;
extern ci_t *CI;                /* dummy pointer to board ZERO's data -
                                 * DEBUG USAGE */


/*******************************************************************/
/* forward references */
void c4_fifo_free (mpi_t *, int);
void c4_wk_chan_restart (mch_t *);
void musycc_bh_tx_eom (mpi_t *, int);
int musycc_chan_up (ci_t *, int);
status_t __init musycc_init (ci_t *);
STATIC void __init musycc_init_port (mpi_t *);
void musycc_intr_bh_tasklet (ci_t *);
void musycc_serv_req (mpi_t *, u_int32_t);
void musycc_update_timeslots (mpi_t *);

/*******************************************************************/

#if 1
STATIC int
musycc_dump_rxbuffer_ring (mch_t * ch, int lockit)
{
    struct mdesc *m;
    unsigned long flags = 0;

    u_int32_t status;
    int n;

    if (lockit)
    {
        spin_lock_irqsave (&ch->ch_rxlock, flags);
    }
    if (ch->rxd_num == 0)
    {
        pr_info(" ZERO receive buffers allocated for this channel.");
    } else
    {
        FLUSH_MEM_READ ();
        m = &ch->mdr[ch->rxix_irq_srv];
        for (n = ch->rxd_num; n; n--)
        {
            status = le32_to_cpu (m->status);
            {
                pr_info("%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
                        (m == &ch->mdr[ch->rxix_irq_srv]) ? 'F' : ' ',
                        (unsigned long) m, n,
                        status,
                        m->data ? (status & HOST_RX_OWNED ? 'H' : 'M') : '-',
                        status & POLL_DISABLED ? 'P' : '-',
                        status & EOBIRQ_ENABLE ? 'b' : '-',
                        status & EOMIRQ_ENABLE ? 'm' : '-',
                        status & LENGTH_MASK,
                        le32_to_cpu (m->data), le32_to_cpu (m->next));
#ifdef RLD_DUMP_BUFDATA
                {
                    u_int32_t *dp;
                    int len = status & LENGTH_MASK;

#if 1
                    if (m->data && (status & HOST_RX_OWNED))
#else
                    if (m->data)        /* always dump regardless of valid RX
                                         * data */
#endif
                    {
                        dp = (u_int32_t *) OS_phystov ((void *) (le32_to_cpu (m->data)));
                        if (len >= 0x10)
                            pr_info(" %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
                                    *dp, *(dp + 1), *(dp + 2), *(dp + 3));
                        else if (len >= 0x08)
                            pr_info(" %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
                                    *dp, *(dp + 1));
                        else
                            pr_info(" %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
                    }
                }
#endif
            }
            m = m->snext;
        }
    }                           /* -for- */
    pr_info("\n");

    if (lockit)
    {
        spin_unlock_irqrestore (&ch->ch_rxlock, flags);
    }
    return 0;
}
#endif

#if 1
STATIC int
musycc_dump_txbuffer_ring (mch_t * ch, int lockit)
{
    struct mdesc *m;
    unsigned long flags = 0;
    u_int32_t status;
    int n;

    if (lockit)
    {
        spin_lock_irqsave (&ch->ch_txlock, flags);
    }
    if (ch->txd_num == 0)
    {
        pr_info(" ZERO transmit buffers allocated for this channel.");
    } else
    {
        FLUSH_MEM_READ ();
        m = ch->txd_irq_srv;
        for (n = ch->txd_num; n; n--)
        {
            status = le32_to_cpu (m->status);
            {
                pr_info("%c%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
                        (m == ch->txd_usr_add) ? 'F' : ' ',
                        (m == ch->txd_irq_srv) ? 'L' : ' ',
                        (unsigned long) m, n,
                        status,
                        m->data ? (status & MUSYCC_TX_OWNED ? 'M' : 'H') : '-',
                        status & POLL_DISABLED ? 'P' : '-',
                        status & EOBIRQ_ENABLE ? 'b' : '-',
                        status & EOMIRQ_ENABLE ? 'm' : '-',
                        status & LENGTH_MASK,
                        le32_to_cpu (m->data), le32_to_cpu (m->next));
#ifdef RLD_DUMP_BUFDATA
                {
                    u_int32_t *dp;
                    int len = status & LENGTH_MASK;

                    if (m->data)
                    {
                        dp = (u_int32_t *) OS_phystov ((void *) (le32_to_cpu (m->data)));
                        if (len >= 0x10)
                            pr_info(" %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
                                    *dp, *(dp + 1), *(dp + 2), *(dp + 3));
                        else if (len >= 0x08)
                            pr_info(" %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
                                    *dp, *(dp + 1));
                        else
                            pr_info(" %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
                    }
                }
#endif
            }
            m = m->snext;
        }
    }                           /* -for- */
    pr_info("\n");

    if (lockit)
    {
        spin_unlock_irqrestore (&ch->ch_txlock, flags);
    }
    return 0;
}
#endif


/*
 * The following supports a backdoor debug facility which can be used to
 * display the state of a board's channel.
 */

status_t
musycc_dump_ring (ci_t * ci, unsigned int chan)
{
    mch_t *ch;

    if (chan >= MAX_CHANS_USED)
    {
        return SBE_DRVR_FAIL;   /* E2BIG */
    }
    {
        int bh;

        bh = atomic_read (&ci->bh_pending);
        pr_info(">> bh_pend %d [%d] ihead %d itail %d [%d] th_cnt %d bh_cnt %d wdcnt %d note %d\n",
                bh, max_bh, ci->iqp_headx, ci->iqp_tailx, max_intcnt,
                ci->intlog.drvr_intr_thcount,
                ci->intlog.drvr_intr_bhcount,
                ci->wdcount, ci->wd_notify);
        max_bh = 0;             /* reset counter */
        max_intcnt = 0;         /* reset counter */
    }

    if (!(ch = sd_find_chan (dummy, chan)))
    {
        pr_info(">> musycc_dump_ring: channel %d not up.\n", chan);
        return ENOENT;
    }
    pr_info(">> CI %p CHANNEL %3d @ %p: state %x status/p %x/%x\n", ci, chan, ch, ch->state,
            ch->status, ch->p.status);
    pr_info("--------------------------------\nTX Buffer Ring - Channel %d, txd_num %d. (bd/ch pend %d %d), TXD required %d, txpkt %lu\n",
            chan, ch->txd_num,
            (u_int32_t) atomic_read (&ci->tx_pending), (u_int32_t) atomic_read (&ch->tx_pending), ch->txd_required, ch->s.tx_packets);
    pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
            ch->user, ch->txd_irq_srv, ch->txd_usr_add,
            sd_queue_stopped (ch->user),
            ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
    musycc_dump_txbuffer_ring (ch, 1);
    pr_info("RX Buffer Ring - Channel %d, rxd_num %d. IRQ_SRV[%d] 0x%p, start_rx %x rxpkt %lu\n",
            chan, ch->rxd_num, ch->rxix_irq_srv,
            &ch->mdr[ch->rxix_irq_srv], ch->ch_start_rx, ch->s.rx_packets);
    musycc_dump_rxbuffer_ring (ch, 1);

    return SBE_DRVR_SUCCESS;
}


status_t
musycc_dump_rings (ci_t * ci, unsigned int start_chan)
{
    unsigned int chan;

    for (chan = start_chan; chan < (start_chan + 5); chan++)
        musycc_dump_ring (ci, chan);
    return SBE_DRVR_SUCCESS;
}


/*
 * NOTE on musycc_init_mdt():  These MUSYCC writes are only operational after
 * a MUSYCC GROUP_INIT command has been issued.
 */

void
musycc_init_mdt (mpi_t * pi)
{
    u_int32_t *addr, cfg;
    int i;

    /*
     * This Idle Code insertion takes effect prior to channel's first
     * transmitted message.  After that, each message contains its own Idle
     * Code information which is to be issued after the message is
     * transmitted (Ref.MUSYCC 5.2.2.3: MCENBL bit in Group Configuration
     * Descriptor).
     */

    addr = (u_int32_t *) ((u_long) pi->reg + MUSYCC_MDT_BASE03_ADDR);
    cfg = CFG_CH_FLAG_7E << IDLE_CODE;

    for (i = 0; i < 32; addr++, i++)
    {
        pci_write_32 (addr, cfg);
    }
}


/* Set TX thp to the next unprocessed md */

void
musycc_update_tx_thp (mch_t * ch)
{
    struct mdesc *md;
    unsigned long flags;

    spin_lock_irqsave (&ch->ch_txlock, flags);
    while (1)
    {
        md = ch->txd_irq_srv;
        FLUSH_MEM_READ ();
        if (!md->data)
        {
            /* No MDs with buffers to process */
            spin_unlock_irqrestore (&ch->ch_txlock, flags);
            return;
        }
        if ((le32_to_cpu (md->status)) & MUSYCC_TX_OWNED)
        {
            /* this is the MD to restart TX with */
            break;
        }
        /*
         * Otherwise, we have a valid, host-owned message descriptor which
         * has been successfully transmitted and whose buffer can be freed,
         * so... process this MD, it's owned by the host.  (This might give
         * us a new, updated txd_irq_srv.)
         */
        musycc_bh_tx_eom (ch->up, ch->gchan);
    }
    md = ch->txd_irq_srv;
    ch->up->regram->thp[ch->gchan] = cpu_to_le32 (OS_vtophys (md));
    FLUSH_MEM_WRITE ();

    if (ch->tx_full)
    {
        ch->tx_full = 0;
        ch->txd_required = 0;
        sd_enable_xmit (ch->user);      /* re-enable to catch flow controlled
                                         * channel */
    }
    spin_unlock_irqrestore (&ch->ch_txlock, flags);

#ifdef RLD_TRANS_DEBUG
    pr_info("++ musycc_update_tx_thp[%d]: setting thp = %p, sts %x\n", ch->channum, md, md->status);
#endif
}


/*
 * This is the workq task executed by the OS when our queue_work() is
 * scheduled and run.  It can fire off either RX or TX ACTIVATION depending
 * upon the channel's ch_start_tx and ch_start_rx variables.  This routine
 * is implemented as a work queue so that the call to the service request is
 * able to sleep, awaiting an interrupt acknowledgment response (SACK) from
 * the hardware.
 */

void
musycc_wq_chan_restart (void *arg)      /* channel private structure */
{
    mch_t *ch;
    mpi_t *pi;
    struct mdesc *md;
#if 0
    unsigned long flags;
#endif

    ch = container_of(arg, struct c4_chan_info, ch_work);
    pi = ch->up;

#ifdef RLD_TRANS_DEBUG
    pr_info("wq_chan_restart[%d]: start_RT[%d/%d] status %x\n",
            ch->channum, ch->ch_start_rx, ch->ch_start_tx, ch->status);

#endif

    /**********************************/
    /** check for RX restart request **/
    /**********************************/

    if ((ch->ch_start_rx) && (ch->status & RX_ENABLED))
    {

        ch->ch_start_rx = 0;
#if defined(RLD_TRANS_DEBUG) || defined(RLD_RXACT_DEBUG)
        {
            static int hereb4 = 7;

            if (hereb4)         /* RLD DEBUG */
            {
                hereb4--;
#ifdef RLD_TRANS_DEBUG
                md = &ch->mdr[ch->rxix_irq_srv];
                pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
                        ch->channum, ch->rxix_irq_srv, md, le32_to_cpu (md->status),
                        ch->s.rx_packets);
#elif defined(RLD_RXACT_DEBUG)
                md = &ch->mdr[ch->rxix_irq_srv];
                pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
                        ch->channum, ch->rxix_irq_srv, md, le32_to_cpu (md->status),
                        ch->s.rx_packets);
                musycc_dump_rxbuffer_ring (ch, 1);      /* RLD DEBUG */
#endif
            }
        }
#endif
        musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | ch->gchan);
    }
    /**********************************/
    /** check for TX restart request **/
    /**********************************/

    if ((ch->ch_start_tx) && (ch->status & TX_ENABLED))
    {
        /* find next unprocessed message, then set TX thp to it */
        musycc_update_tx_thp (ch);

#if 0
        spin_lock_irqsave (&ch->ch_txlock, flags);
#endif
        md = ch->txd_irq_srv;
        if (!md)
        {
#ifdef RLD_TRANS_DEBUG
            pr_info("-- musycc_wq_chan_restart[%d]: WARNING, starting NULL md\n", ch->channum);
#endif
#if 0
            spin_unlock_irqrestore (&ch->ch_txlock, flags);
#endif
        } else if (md->data && ((le32_to_cpu (md->status)) & MUSYCC_TX_OWNED))
        {
            ch->ch_start_tx = 0;
#if 0
            spin_unlock_irqrestore (&ch->ch_txlock, flags);     /* allow interrupts for service request */
#endif
#ifdef RLD_TRANS_DEBUG
            pr_info("++ musycc_wq_chan_restart() CHAN TX ACTIVATE: chan %d txd_irq_srv %p = sts %x, txpkt %lu\n",
                    ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status, ch->s.tx_packets);
#endif
            musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION | ch->gchan);
        }
#ifdef RLD_RESTART_DEBUG
        else
        {
            /* retain request to start until retried and we have data to xmit */
            pr_info("-- musycc_wq_chan_restart[%d]: DELAYED due to md %p sts %x data %x, start_tx %x\n",
                    ch->channum, md,
                    le32_to_cpu (md->status),
                    le32_to_cpu (md->data), ch->ch_start_tx);
            musycc_dump_txbuffer_ring (ch, 0);
#if 0
            spin_unlock_irqrestore (&ch->ch_txlock, flags);     /* allow interrupts for service request */
#endif
        }
#endif
    }
}


/*
 * Channel restart either fires off a workqueue request (2.6) or lodges a
 * watchdog activation sequence (2.4).
 */

void
musycc_chan_restart (mch_t * ch)
{
#ifdef RLD_RESTART_DEBUG
    pr_info("++ musycc_chan_restart[%d]: txd_irq_srv @ %p = sts %x\n",
            ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status);
#endif

    /* 2.6 - find next unprocessed message, then set TX thp to it */
#ifdef RLD_RESTART_DEBUG
    pr_info(">> musycc_chan_restart: scheduling Chan %x workQ @ %p\n", ch->channum, &ch->ch_work);
#endif
    c4_wk_chan_restart (ch);    /* work queue mechanism fires off: Ref:
                                 * musycc_wq_chan_restart () */

}
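For context, the deferral pattern used here — a work_struct embedded in the per-channel state, with the handler recovering the channel via container_of() — looks roughly like this under the current kernel workqueue API (a minimal sketch; the driver above still uses the legacy void * handler signature, and the INIT_WORK/queue_work call sites live outside this file):

    #include <linux/printk.h>
    #include <linux/workqueue.h>

    /* minimal stand-in for the driver's per-channel structure */
    struct c4_chan_info {
            int channum;
            struct work_struct ch_work;     /* deferred-restart work item */
    };

    static void my_chan_restart(struct work_struct *work)
    {
            /* recover the enclosing channel from the embedded work item */
            struct c4_chan_info *ch =
                    container_of(work, struct c4_chan_info, ch_work);

            pr_info("restart requested for channel %d\n", ch->channum);
            /* a work handler runs in process context, so it may sleep,
             * e.g. blocking on a SACK semaphore as musycc_serv_req() does */
    }

    /* at setup:   INIT_WORK(&ch->ch_work, my_chan_restart);
     * to defer:   queue_work(system_wq, &ch->ch_work);      */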


void
rld_put_led (mpi_t * pi, u_int32_t ledval)
{
    static u_int32_t led = 0;

    if (ledval == 0)
        led = 0;
    else
        led |= ledval;

    pci_write_32 ((u_int32_t *) &pi->up->cpldbase->leds, led);  /* RLD DEBUG TRANHANG */
}


#define MUSYCC_SR_RETRY_CNT  9

void
musycc_serv_req (mpi_t * pi, u_int32_t req)
{
    volatile u_int32_t r;
    int rcnt;

    /*
     * PORT NOTE: Semaphore protection of the service loop guarantees only a
     * single operation at a time.  Per MUSYCC Manual - "Issuing service
     * requests to the same channel group without first receiving ACK from
     * each request may cause the host to lose track of which service
     * request has been acknowledged."
     */

    SD_SEM_TAKE (&pi->sr_sem_busy, "serv");     /* only 1 thru here, per
                                                 * group */

    if (pi->sr_last == req)
    {
#ifdef RLD_TRANS_DEBUG
        pr_info(">> same SR, Port %d Req %x\n", pi->portnum, req);
#endif

        /*
         * The most likely repeated request is the channel activation command
         * which follows the occurrence of a Transparent mode TX ONR or a
         * BUFF error.  If the previous command was a CHANNEL ACTIVATE,
         * precede it with a NOOP command in order to maintain coherent
         * control of this current (re)ACTIVATE.
         */

        r = (pi->sr_last & ~SR_GCHANNEL_MASK);
        if ((r == (SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION)) ||
            (r == (SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION)))
        {
#ifdef RLD_TRANS_DEBUG
            pr_info(">> same CHAN ACT SR, Port %d Req %x => issue SR_NOOP CMD\n", pi->portnum, req);
#endif
            SD_SEM_GIVE (&pi->sr_sem_busy);     /* allow this next request */
            musycc_serv_req (pi, SR_NOOP);
            SD_SEM_TAKE (&pi->sr_sem_busy, "serv");     /* relock & continue w/
                                                         * original req */
        } else if (req == SR_NOOP)
        {
            /* no need to issue back-to-back SR_NOOP commands at this time */
#ifdef RLD_TRANS_DEBUG
            pr_info(">> same Port SR_NOOP skipped, Port %d\n", pi->portnum);
#endif
            SD_SEM_GIVE (&pi->sr_sem_busy);     /* allow this next request */
            return;
        }
    }
    rcnt = 0;
    pi->sr_last = req;
rewrite:
    pci_write_32 ((u_int32_t *) &pi->reg->srd, req);
    FLUSH_MEM_WRITE ();

    /*
     * Per MUSYCC Manual, Section 6.1.2 - "When writing an SCR service
     * request, the host must ensure at least one PCI bus clock cycle has
     * elapsed before writing another service request.  To meet this minimum
     * elapsed service request write timing interval, it is recommended that
     * the host follow any SCR write with another operation which reads from
     * the same address."
     */
    r = pci_read_32 ((u_int32_t *) &pi->reg->srd);      /* adhere to write
                                                         * timing imposition */


    if ((r != req) && (req != SR_CHIP_RESET) && (++rcnt <= MUSYCC_SR_RETRY_CNT))
    {
        if (cxt1e1_log_level >= LOG_MONITOR)
            pr_info("%s: %d - reissue srv req/last %x/%x (hdw reads %x), Chan %d.\n",
                    pi->up->devname, rcnt, req, pi->sr_last, r,
                    (pi->portnum * MUSYCC_NCHANS) + (req & 0x1f));
        OS_uwait_dummy ();      /* this delay helps reduce reissue counts
                                 * (reason not yet researched) */
        goto rewrite;
    }
    if (rcnt > MUSYCC_SR_RETRY_CNT)
    {
        pr_warning("%s: failed service request (#%d)= %x, group %d.\n",
                   pi->up->devname, MUSYCC_SR_RETRY_CNT, req, pi->portnum);
        SD_SEM_GIVE (&pi->sr_sem_busy); /* allow any next request */
        return;
    }
    if (req == SR_CHIP_RESET)
    {
        /*
         * PORT NOTE: the CHIP_RESET command is NOT ack'd by the MUSYCC, thus
         * the upcoming delay is used.  Though the MUSYCC documentation
         * suggests a read-after-write would supply the required delay, it's
         * unclear what CPU/BUS clock speeds might have been assumed when
         * suggesting this 'lack of ACK' workaround.  Thus the use of uwait.
         */
        OS_uwait (100000, "icard");     /* 100ms */
    } else
    {
        FLUSH_MEM_READ ();
        SD_SEM_TAKE (&pi->sr_sem_wait, "sakack");       /* sleep until SACK
                                                         * interrupt occurs */
    }
    SD_SEM_GIVE (&pi->sr_sem_busy);     /* allow any next request */
}
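The write-then-read-back above follows the manual's advice for posted PCI writes: reading back the register just written forces the write to reach the device before timing-sensitive follow-up work. Stripped of this driver's pci_write_32/pci_read_32 wrappers, the same idiom in generic kernel MMIO terms is (a sketch, assuming an ioremap'ed register):

    #include <linux/io.h>

    /* issue a register write, then read the same register back so the
     * posted write is flushed to the device before we proceed */
    static inline void write_then_flush(void __iomem *reg, u32 val)
    {
            writel(val, reg);
            (void) readl(reg);  /* read forces completion of the posted write */
    }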


#ifdef SBE_PMCC4_ENABLE
void
musycc_update_timeslots (mpi_t * pi)
{
    int i, ch;
    char e1mode = IS_FRAME_ANY_E1 (pi->p.port_mode);

    for (i = 0; i < 32; i++)
    {
        int usedby = 0, last = 0, ts, j, bits[8];

        u_int8_t lastval = 0;

        if (((i == 0) && e1mode) ||     /* disable if E1 mode */
            ((i == 16) && ((pi->p.port_mode == CFG_FRAME_E1CRC_CAS) || (pi->p.port_mode == CFG_FRAME_E1CRC_CAS_AMI)))
            || ((i > 23) && (!e1mode))) /* disable if T1 mode */
        {
            pi->tsm[i] = 0xff;  /* make tslot unavailable for this mode */
        } else
        {
            pi->tsm[i] = 0x00;  /* make tslot available for assignment */
        }
        for (j = 0; j < 8; j++)
            bits[j] = -1;
        for (ch = 0; ch < MUSYCC_NCHANS; ch++)
        {
            if ((pi->chan[ch]->state == UP) && (pi->chan[ch]->p.bitmask[i]))
            {
                usedby++;
                last = ch;
                lastval = pi->chan[ch]->p.bitmask[i];
                for (j = 0; j < 8; j++)
                    if (lastval & (1 << j))
                        bits[j] = ch;
                pi->tsm[i] |= lastval;
            }
        }
        if (!usedby)
            ts = 0;
        else if ((usedby == 1) && (lastval == 0xff))
            ts = (4 << 5) | last;
        else if ((usedby == 1) && (lastval == 0x7f))
            ts = (5 << 5) | last;
        else
        {
            int idx;

            if (bits[0] < 0)
                ts = (6 << 5) | (idx = last);
            else
                ts = (7 << 5) | (idx = bits[0]);
            for (j = 1; j < 8; j++)
            {
                pi->regram->rscm[idx * 8 + j] = (bits[j] < 0) ? 0 : (0x80 | bits[j]);
                pi->regram->tscm[idx * 8 + j] = (bits[j] < 0) ? 0 : (0x80 | bits[j]);
            }
        }
        pi->regram->rtsm[i] = ts;
        pi->regram->ttsm[i] = ts;
    }
    FLUSH_MEM_WRITE ();

    musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION);
    musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION);
    musycc_serv_req (pi, SR_SUBCHANNEL_MAP | SR_RX_DIRECTION);
    musycc_serv_req (pi, SR_SUBCHANNEL_MAP | SR_TX_DIRECTION);
}
#endif
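As the shift-and-or expressions above imply, each rtsm/ttsm entry packs a 3-bit timeslot mode into bits 7:5 and a 5-bit channel index into bits 4:0. A small standalone sketch of that packing (the helper and macro names are illustrative, not from this driver):

    #include <stdint.h>

    #define TS_MODE_SHIFT  5
    #define TS_CHAN_MASK   0x1f

    /* pack a timeslot-map byte: mode 0..7 in bits 7:5, channel 0..31 in 4:0 */
    static inline uint8_t ts_pack(unsigned mode, unsigned chan)
    {
            return (uint8_t) ((mode << TS_MODE_SHIFT) | (chan & TS_CHAN_MASK));
    }

    /* e.g. ts_pack(4, last) reproduces the "(4 << 5) | last" single-owner,
     * whole-timeslot case used in musycc_update_timeslots() above */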


#ifdef SBE_WAN256T3_ENABLE
void
musycc_update_timeslots (mpi_t * pi)
{
    mch_t *ch;

    u_int8_t ts, hmask, tsen;
    int gchan;
    int i;

#ifdef SBE_PMCC4_ENABLE
    hmask = (0x1f << pi->up->p.hypersize) & 0x1f;
#endif
#ifdef SBE_WAN256T3_ENABLE
    hmask = (0x1f << hyperdummy) & 0x1f;
#endif
    for (i = 0; i < 128; i++)
    {
        gchan = ((pi->portnum * MUSYCC_NCHANS) + (i & hmask)) % MUSYCC_NCHANS;
        ch = pi->chan[gchan];
        if (ch->p.mode_56k)
            tsen = MODE_56KBPS;
        else
            tsen = MODE_64KBPS; /* also the default */
        ts = ((pi->portnum % 4) == (i / 32)) ? (tsen << 5) | (i & hmask) : 0;
        pi->regram->rtsm[i] = ts;
        pi->regram->ttsm[i] = ts;
    }
    FLUSH_MEM_WRITE ();
    musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION);
    musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION);
}
#endif

/*
 * This routine converts a generic library channel configuration parameter
 * into a hardware-specific register value (i.e., the MUSYCC CCD register).
 */
725 | u_int32_t | 725 | u_int32_t |
726 | musycc_chan_proto (int proto) | 726 | musycc_chan_proto (int proto) |
727 | { | 727 | { |
728 | int reg; | 728 | int reg; |
729 | 729 | ||
730 | switch (proto) | 730 | switch (proto) |
731 | { | 731 | { |
732 | case CFG_CH_PROTO_TRANS: /* 0 */ | 732 | case CFG_CH_PROTO_TRANS: /* 0 */ |
733 | reg = MUSYCC_CCD_TRANS; | 733 | reg = MUSYCC_CCD_TRANS; |
734 | break; | 734 | break; |
735 | case CFG_CH_PROTO_SS7: /* 1 */ | 735 | case CFG_CH_PROTO_SS7: /* 1 */ |
736 | reg = MUSYCC_CCD_SS7; | 736 | reg = MUSYCC_CCD_SS7; |
737 | break; | 737 | break; |
738 | default: | 738 | default: |
739 | case CFG_CH_PROTO_ISLP_MODE: /* 4 */ | 739 | case CFG_CH_PROTO_ISLP_MODE: /* 4 */ |
740 | case CFG_CH_PROTO_HDLC_FCS16: /* 2 */ | 740 | case CFG_CH_PROTO_HDLC_FCS16: /* 2 */ |
741 | reg = MUSYCC_CCD_HDLC_FCS16; | 741 | reg = MUSYCC_CCD_HDLC_FCS16; |
742 | break; | 742 | break; |
743 | case CFG_CH_PROTO_HDLC_FCS32: /* 3 */ | 743 | case CFG_CH_PROTO_HDLC_FCS32: /* 3 */ |
744 | reg = MUSYCC_CCD_HDLC_FCS32; | 744 | reg = MUSYCC_CCD_HDLC_FCS32; |
745 | break; | 745 | break; |
746 | } | 746 | } |
747 | 747 | ||
748 | return reg; | 748 | return reg; |
749 | } | 749 | } |
750 | 750 | ||
751 | #ifdef SBE_WAN256T3_ENABLE | 751 | #ifdef SBE_WAN256T3_ENABLE |
752 | STATIC void __init | 752 | STATIC void __init |
753 | musycc_init_port (mpi_t * pi) | 753 | musycc_init_port (mpi_t * pi) |
754 | { | 754 | { |
755 | pci_write_32 ((u_int32_t *) &pi->reg->gbp, OS_vtophys (pi->regram)); | 755 | pci_write_32 ((u_int32_t *) &pi->reg->gbp, OS_vtophys (pi->regram)); |
756 | 756 | ||
757 | pi->regram->grcd = | 757 | pi->regram->grcd = |
758 | __constant_cpu_to_le32 (MUSYCC_GRCD_RX_ENABLE | | 758 | __constant_cpu_to_le32 (MUSYCC_GRCD_RX_ENABLE | |
759 | MUSYCC_GRCD_TX_ENABLE | | 759 | MUSYCC_GRCD_TX_ENABLE | |
760 | MUSYCC_GRCD_SF_ALIGN | | 760 | MUSYCC_GRCD_SF_ALIGN | |
761 | MUSYCC_GRCD_SUBCHAN_DISABLE | | 761 | MUSYCC_GRCD_SUBCHAN_DISABLE | |
762 | MUSYCC_GRCD_OOFMP_DISABLE | | 762 | MUSYCC_GRCD_OOFMP_DISABLE | |
763 | MUSYCC_GRCD_COFAIRQ_DISABLE | | 763 | MUSYCC_GRCD_COFAIRQ_DISABLE | |
764 | MUSYCC_GRCD_MC_ENABLE | | 764 | MUSYCC_GRCD_MC_ENABLE | |
765 | (MUSYCC_GRCD_POLLTH_32 << MUSYCC_GRCD_POLLTH_SHIFT)); | 765 | (MUSYCC_GRCD_POLLTH_32 << MUSYCC_GRCD_POLLTH_SHIFT)); |
766 | 766 | ||
767 | pi->regram->pcd = | 767 | pi->regram->pcd = |
768 | __constant_cpu_to_le32 (MUSYCC_PCD_E1X4_MODE | | 768 | __constant_cpu_to_le32 (MUSYCC_PCD_E1X4_MODE | |
769 | MUSYCC_PCD_TXDATA_RISING | | 769 | MUSYCC_PCD_TXDATA_RISING | |
770 | MUSYCC_PCD_TX_DRIVEN); | 770 | MUSYCC_PCD_TX_DRIVEN); |
771 | 771 | ||
772 | /* Message length descriptor */ | 772 | /* Message length descriptor */ |
773 | pi->regram->mld = __constant_cpu_to_le32 (cxt1e1_max_mru | (cxt1e1_max_mru << 16)); | 773 | pi->regram->mld = __constant_cpu_to_le32 (cxt1e1_max_mru | (cxt1e1_max_mru << 16)); |
774 | FLUSH_MEM_WRITE (); | 774 | FLUSH_MEM_WRITE (); |
775 | 775 | ||
776 | musycc_serv_req (pi, SR_GROUP_INIT | SR_RX_DIRECTION); | 776 | musycc_serv_req (pi, SR_GROUP_INIT | SR_RX_DIRECTION); |
777 | musycc_serv_req (pi, SR_GROUP_INIT | SR_TX_DIRECTION); | 777 | musycc_serv_req (pi, SR_GROUP_INIT | SR_TX_DIRECTION); |
778 | 778 | ||
779 | musycc_init_mdt (pi); | 779 | musycc_init_mdt (pi); |
780 | 780 | ||
781 | musycc_update_timeslots (pi); | 781 | musycc_update_timeslots (pi); |
782 | } | 782 | } |
783 | #endif | 783 | #endif |
784 | 784 | ||
785 | 785 | ||
786 | status_t __init | 786 | status_t __init |
787 | musycc_init (ci_t * ci) | 787 | musycc_init (ci_t * ci) |
788 | { | 788 | { |
789 | char *regaddr; /* temp for address boundary calculations */ | 789 | char *regaddr; /* temp for address boundary calculations */ |
790 | int i, gchan; | 790 | int i, gchan; |
791 | 791 | ||
792 | OS_sem_init (&ci->sem_wdbusy, SEM_AVAILABLE); /* watchdog exclusion */ | 792 | OS_sem_init (&ci->sem_wdbusy, SEM_AVAILABLE); /* watchdog exclusion */ |
793 | 793 | ||
794 | /* | 794 | /* |
795 | * Per MUSYCC manual, Section 6.3.4 - "The host must allocate a dword | 795 | * Per MUSYCC manual, Section 6.3.4 - "The host must allocate a dword |
796 | * aligned memory segment for interrupt queue pointers." | 796 | * aligned memory segment for interrupt queue pointers." |
797 | */ | 797 | */ |
798 | 798 | ||
799 | #define INT_QUEUE_BOUNDARY 4 | 799 | #define INT_QUEUE_BOUNDARY 4 |
800 | 800 | ||
801 | regaddr = OS_kmalloc ((INT_QUEUE_SIZE + 1) * sizeof (u_int32_t)); | 801 | regaddr = OS_kmalloc ((INT_QUEUE_SIZE + 1) * sizeof (u_int32_t)); |
802 | if (regaddr == 0) | 802 | if (regaddr == 0) |
803 | return ENOMEM; | 803 | return ENOMEM; |
804 | ci->iqd_p_saved = regaddr; /* save orig value for free's usage */ | 804 | ci->iqd_p_saved = regaddr; /* save orig value for free's usage */ |
805 | ci->iqd_p = (u_int32_t *) ((unsigned long) (regaddr + INT_QUEUE_BOUNDARY - 1) & | 805 | ci->iqd_p = (u_int32_t *) ((unsigned long) (regaddr + INT_QUEUE_BOUNDARY - 1) & |
806 | (~(INT_QUEUE_BOUNDARY - 1))); /* this calculates | 806 | (~(INT_QUEUE_BOUNDARY - 1))); /* this calculates |
807 | * closest boundary */ | 807 | * closest boundary */ |
808 | 808 | ||
809 | for (i = 0; i < INT_QUEUE_SIZE; i++) | 809 | for (i = 0; i < INT_QUEUE_SIZE; i++) |
810 | { | 810 | { |
811 | ci->iqd_p[i] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY); | 811 | ci->iqd_p[i] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY); |
812 | } | 812 | } |
813 | 813 | ||
814 | for (i = 0; i < ci->max_port; i++) | 814 | for (i = 0; i < ci->max_port; i++) |
815 | { | 815 | { |
816 | mpi_t *pi = &ci->port[i]; | 816 | mpi_t *pi = &ci->port[i]; |
817 | 817 | ||
818 | /* | 818 | /* |
819 | * Per MUSYCC manual, Section 6.3.2 - "The host must allocate a 2KB | 819 | * Per MUSYCC manual, Section 6.3.2 - "The host must allocate a 2KB |
820 | * bound memory segment for Channel Group 0." | 820 | * bound memory segment for Channel Group 0." |
821 | */ | 821 | */ |
822 | 822 | ||
823 | #define GROUP_BOUNDARY 0x800 | 823 | #define GROUP_BOUNDARY 0x800 |
824 | 824 | ||
825 | regaddr = OS_kmalloc (sizeof (struct musycc_groupr) + GROUP_BOUNDARY); | 825 | regaddr = OS_kmalloc (sizeof (struct musycc_groupr) + GROUP_BOUNDARY); |
826 | if (regaddr == 0) | 826 | if (regaddr == 0) |
827 | { | 827 | { |
828 | for (gchan = 0; gchan < i; gchan++) | 828 | for (gchan = 0; gchan < i; gchan++) |
829 | { | 829 | { |
830 | pi = &ci->port[gchan]; | 830 | pi = &ci->port[gchan]; |
831 | OS_kfree (pi->reg); | 831 | OS_kfree (pi->reg); |
832 | pi->reg = 0; | 832 | pi->reg = 0; |
833 | } | 833 | } |
834 | return ENOMEM; | 834 | return ENOMEM; |
835 | } | 835 | } |
836 | pi->regram_saved = regaddr; /* save orig value for free's usage */ | 836 | pi->regram_saved = regaddr; /* save orig value for free's usage */ |
837 | pi->regram = (struct musycc_groupr *) ((unsigned long) (regaddr + GROUP_BOUNDARY - 1) & | 837 | pi->regram = (struct musycc_groupr *) ((unsigned long) (regaddr + GROUP_BOUNDARY - 1) & |
838 | (~(GROUP_BOUNDARY - 1))); /* this calculates | 838 | (~(GROUP_BOUNDARY - 1))); /* this calculates |
839 | * closest boundary */ | 839 | * closest boundary */ |
840 | } | 840 | } |

    /* any board-centric MUSYCC commands will use group ZERO as its "home" */
    ci->regram = ci->port[0].regram;
    musycc_serv_req (&ci->port[0], SR_CHIP_RESET);

    pci_write_32 ((u_int32_t *) &ci->reg->gbp, OS_vtophys (ci->regram));
    pci_flush_write (ci);
#ifdef CONFIG_SBE_PMCC4_NCOMM
    ci->regram->__glcd = __constant_cpu_to_le32 (GCD_MAGIC);
#else
    /* standard driver POLLS for INTB via CPLD register */
    ci->regram->__glcd = __constant_cpu_to_le32 (GCD_MAGIC | MUSYCC_GCD_INTB_DISABLE);
#endif

    ci->regram->__iqp = cpu_to_le32 (OS_vtophys (&ci->iqd_p[0]));
    ci->regram->__iql = __constant_cpu_to_le32 (INT_QUEUE_SIZE - 1);
    pci_write_32 ((u_int32_t *) &ci->reg->dacbp, 0);
    FLUSH_MEM_WRITE ();

    ci->state = C_RUNNING;      /* mark as full interrupt processing
                                 * available */

    musycc_serv_req (&ci->port[0], SR_GLOBAL_INIT);     /* FIRST INTERRUPT ! */

    /* sanity check settable parameters */

    if (cxt1e1_max_mru > 0xffe)
    {
        pr_warning("Maximum allowed MRU exceeded, resetting %d to %d.\n",
                   cxt1e1_max_mru, 0xffe);
        cxt1e1_max_mru = 0xffe;
    }
    if (cxt1e1_max_mtu > 0xffe)
    {
        pr_warning("Maximum allowed MTU exceeded, resetting %d to %d.\n",
                   cxt1e1_max_mtu, 0xffe);
        cxt1e1_max_mtu = 0xffe;
    }
#ifdef SBE_WAN256T3_ENABLE
    for (i = 0; i < MUSYCC_NPORTS; i++)
        musycc_init_port (&ci->port[i]);
#endif

    return SBE_DRVR_SUCCESS;    /* no error */
}
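
The MRU/MTU checks warn and clamp rather than fail the whole initialization, which is the usual treatment for settable module parameters. A stand-alone sketch of the same pattern, with fprintf standing in for pr_warning and all names illustrative:

/* sketch: not part of musycc.c */
#include <stdio.h>

static int clamp_param(const char *name, int val, int max)
{
    /* Mirror of the pattern above: warn and clamp rather than fail. */
    if (val > max) {
        fprintf(stderr, "%s too large, resetting %d to %d\n", name, val, max);
        return max;
    }
    return val;
}

int main(void)
{
    int mru = 0x1200;           /* hypothetical out-of-range module parameter */
    mru = clamp_param("max_mru", mru, 0xffe);
    printf("mru=%#x\n", mru);
    return 0;
}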


void
musycc_bh_tx_eom (mpi_t * pi, int gchan)
{
    mch_t *ch;
    struct mdesc *md;

#if 0
#ifndef SBE_ISR_INLINE
    unsigned long flags;

#endif
#endif
    volatile u_int32_t status;

    ch = pi->chan[gchan];
    if (ch == 0 || ch->state != UP)
    {
        if (cxt1e1_log_level >= LOG_ERROR)
            pr_info("%s: intr: xmit EOM on uninitialized channel %d\n",
                    pi->up->devname, gchan);
    }
    if (ch == 0 || ch->mdt == 0)
        return;                 /* note: mdt==0 implies a malloc()
                                 * failure w/in chan_up() routine */

#if 0
#ifdef SBE_ISR_INLINE
    spin_lock_irq (&ch->ch_txlock);
#else
    spin_lock_irqsave (&ch->ch_txlock, flags);
#endif
#endif
    do
    {
        FLUSH_MEM_READ ();
        md = ch->txd_irq_srv;
        status = le32_to_cpu (md->status);

        /*
         * Note: Per MUSYCC Ref 6.4.9, the host does not poll a host-owned
         * Transmit Buffer Descriptor during Transparent Mode.
         */
        if (status & MUSYCC_TX_OWNED)
        {
            int readCount, loopCount;

            /***********************************************************/
            /* HW Bug Fix                                              */
            /* ----------                                              */
            /* Under certain PCI Bus loading conditions, the data      */
            /* associated with an update of Shared Memory is delayed   */
            /* relative to its PCI Interrupt. This is caught when      */
            /* the host determines it does not yet OWN the descriptor. */
            /***********************************************************/

            readCount = 0;
            while (status & MUSYCC_TX_OWNED)
            {
                for (loopCount = 0; loopCount < 0x30; loopCount++)
                    OS_uwait_dummy ();  /* use call to avoid optimization
                                         * removal of dummy delay */
                FLUSH_MEM_READ ();
                status = le32_to_cpu (md->status);
                if (readCount++ > 40)
                    break;      /* don't wait any longer */
            }
            if (status & MUSYCC_TX_OWNED)
            {
                if (cxt1e1_log_level >= LOG_MONITOR)
                {
                    pr_info("%s: Port %d Chan %2d - unexpected TX msg ownership intr (md %p sts %x)\n",
                            pi->up->devname, pi->portnum, ch->channum,
                            md, status);
                    pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
                            ch->user, ch->txd_irq_srv, ch->txd_usr_add,
                            sd_queue_stopped (ch->user),
                            ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
                    musycc_dump_txbuffer_ring (ch, 0);
                }
                break;          /* Not our mdesc, done */
            } else
            {
                if (cxt1e1_log_level >= LOG_MONITOR)
                    pr_info("%s: Port %d Chan %2d - recovered TX msg ownership [%d] (md %p sts %x)\n",
                            pi->up->devname, pi->portnum, ch->channum, readCount, md, status);
            }
        }
        ch->txd_irq_srv = md->snext;

        md->data = 0;
        if (md->mem_token != 0)
        {
            /* upcount channel */
            atomic_sub (OS_mem_token_tlen (md->mem_token), &ch->tx_pending);
            /* upcount card */
            atomic_sub (OS_mem_token_tlen (md->mem_token), &pi->up->tx_pending);
#ifdef SBE_WAN256T3_ENABLE
            if (!atomic_read (&pi->up->tx_pending))
                wan256t3_led (pi->up, LED_TX, 0);
#endif

#ifdef CONFIG_SBE_WAN256T3_NCOMM
            /* callback that our packet was sent */
            {
                int hdlcnum = (pi->portnum * 32 + gchan);

                if (hdlcnum >= 228)
                {
                    if (nciProcess_TX_complete)
                        (*nciProcess_TX_complete) (hdlcnum,
                                                   getuserbychan (gchan));
                }
            }
#endif                          /*** CONFIG_SBE_WAN256T3_NCOMM ***/

            OS_mem_token_free_irq (md->mem_token);
            md->mem_token = 0;
        }
        md->status = 0;
#ifdef RLD_TXFULL_DEBUG
        if (cxt1e1_log_level >= LOG_MONITOR2)
            pr_info("~~ tx_eom: tx_full %x txd_free %d -> %d\n",
                    ch->tx_full, ch->txd_free, ch->txd_free + 1);
#endif
        ++ch->txd_free;
        FLUSH_MEM_WRITE ();

        if ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && (status & EOBIRQ_ENABLE))
        {
            if (cxt1e1_log_level >= LOG_MONITOR)
                pr_info("%s: Mode (%x) incorrect EOB status (%x)\n",
                        pi->up->devname, ch->p.chan_mode, status);
            if ((status & EOMIRQ_ENABLE) == 0)
                break;
        }
    }
    while ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && ((status & EOMIRQ_ENABLE) == 0));
    /*
     * NOTE: (The above 'while' is coupled w/ the previous 'do', way above.)
     * Each Transparent-mode data buffer has the EOB bit, and NOT the EOM
     * bit, set, and will furthermore have a separate IQD associated with
     * each message buffer.
     */

    FLUSH_MEM_READ ();
    /*
     * Smooth flow-control hysteresis by maintaining task stoppage until
     * half the available write buffers are available.
     */
    if (ch->tx_full && (ch->txd_free >= (ch->txd_num / 2)))
    {
        /*
         * Then, only relieve task stoppage if we actually have enough
         * buffers to service the last requested packet. It may require
         * MORE than half the available!
         */
        if (ch->txd_free >= ch->txd_required)
        {

#ifdef RLD_TXFULL_DEBUG
            if (cxt1e1_log_level >= LOG_MONITOR2)
                pr_info("tx_eom[%d]: enable xmit tx_full no more, txd_free %d txd_num/2 %d\n",
                        ch->channum,
                        ch->txd_free, ch->txd_num / 2);
#endif
            ch->tx_full = 0;
            ch->txd_required = 0;
            sd_enable_xmit (ch->user);  /* re-enable to catch flow-controlled
                                         * channel */
        }
    }
#ifdef RLD_TXFULL_DEBUG
    else if (ch->tx_full)
    {
        if (cxt1e1_log_level >= LOG_MONITOR2)
            pr_info("tx_eom[%d]: bypass TX enable though room available? (txd_free %d txd_num/2 %d)\n",
                    ch->channum,
                    ch->txd_free, ch->txd_num / 2);
    }
#endif

    FLUSH_MEM_WRITE ();
#if 0
#ifdef SBE_ISR_INLINE
    spin_unlock_irq (&ch->ch_txlock);
#else
    spin_unlock_irqrestore (&ch->ch_txlock, flags);
#endif
#endif
}
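
The ownership-recovery loop above bounds both the per-read delay (0x30 dummy calls) and the number of re-reads (40) so that a genuinely stuck descriptor cannot hang the bottom half. A self-contained sketch of that shape, with a fake status word standing in for the descriptor's shared-memory status and a plain spin loop standing in for OS_uwait_dummy():

/* sketch: not part of musycc.c */
#include <stdint.h>
#include <stdio.h>

#define TX_OWNED 0x80000000u            /* hypothetical owner bit */

static volatile uint32_t fake_status = TX_OWNED;    /* pretend shared memory */

static void tiny_delay(void)
{
    /* In the driver this is OS_uwait_dummy() repeated 0x30 times;
     * any non-optimizable delay works for the sketch. */
    for (volatile int i = 0; i < 0x30; i++)
        ;
}

int main(void)
{
    int reads = 0;
    uint32_t status = fake_status;

    /* Re-read a bounded number of times before declaring the descriptor
     * genuinely not ours - the same shape as the HW-bug workaround above. */
    while (status & TX_OWNED) {
        tiny_delay();
        if (reads == 5)
            fake_status = 0;            /* simulate the late memory update */
        status = fake_status;
        if (reads++ > 40)
            break;                      /* don't wait forever */
    }
    printf("recovered after %d re-reads, status %#x\n", reads, (unsigned)status);
    return 0;
}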


STATIC void
musycc_bh_rx_eom (mpi_t * pi, int gchan)
{
    mch_t *ch;
    void *m, *m2;
    struct mdesc *md;
    volatile u_int32_t status;
    u_int32_t error;

    ch = pi->chan[gchan];
    if (ch == 0 || ch->state != UP)
    {
        if (cxt1e1_log_level > LOG_ERROR)
            pr_info("%s: intr: receive EOM on uninitialized channel %d\n",
                    pi->up->devname, gchan);
        return;
    }
    if (ch->mdr == 0)
        return;                 /* can this happen ? */

    for (;;)
    {
        FLUSH_MEM_READ ();
        md = &ch->mdr[ch->rxix_irq_srv];
        status = le32_to_cpu (md->status);
        if (!(status & HOST_RX_OWNED))
            break;              /* Not our mdesc, done */
        m = md->mem_token;
        error = (status >> 16) & 0xf;
        if (error == 0)
        {
#ifdef CONFIG_SBE_WAN256T3_NCOMM
            int hdlcnum = (pi->portnum * 32 + gchan);

            /*
             * if the packet number belongs to NCOMM, then send it to the
             * TMS driver
             */
            if (hdlcnum >= 228)
            {
                if (nciProcess_RX_packet)
                    (*nciProcess_RX_packet) (hdlcnum, status & 0x3fff, m, ch->user);
            } else
#endif                          /*** CONFIG_SBE_WAN256T3_NCOMM ***/

            {
                if ((m2 = OS_mem_token_alloc (cxt1e1_max_mru)))
                {
                    /* substitute the mbuf+cluster */
                    md->mem_token = m2;
                    md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m2)));

                    /* pass the received mbuf upward */
                    sd_recv_consume (m, status & LENGTH_MASK, ch->user);
                    ch->s.rx_packets++;
                    ch->s.rx_bytes += status & LENGTH_MASK;
                } else
                {
                    ch->s.rx_dropped++;
                }
            }
        } else if (error == ERR_FCS)
        {
            ch->s.rx_crc_errors++;
        } else if (error == ERR_ALIGN)
        {
            ch->s.rx_missed_errors++;
        } else if (error == ERR_ABT)
        {
            ch->s.rx_missed_errors++;
        } else if (error == ERR_LNG)
        {
            ch->s.rx_length_errors++;
        } else if (error == ERR_SHT)
        {
            ch->s.rx_length_errors++;
        }
        FLUSH_MEM_WRITE ();
        status = cxt1e1_max_mru;
        if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
            status |= EOBIRQ_ENABLE;
        md->status = cpu_to_le32 (status);

        /* Check next mdesc in the ring */
        if (++ch->rxix_irq_srv >= ch->rxd_num)
            ch->rxix_irq_srv = 0;
        FLUSH_MEM_WRITE ();
    }
}
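
The error-free path above never hands a ring slot's buffer up the stack unless a replacement buffer can be allocated first; if the allocation fails, the packet is dropped and the old buffer stays in the ring. A toy version of that deliver-and-replace policy, with malloc standing in for OS_mem_token_alloc and the DMA-address bookkeeping omitted:

/* sketch: not part of musycc.c */
#include <stdio.h>
#include <stdlib.h>

/* Toy receive descriptor: the real mdesc also carries a DMA address. */
struct rxd {
    void *buf;
};

/* Hand the filled buffer up only if a fresh one can take its place in
 * the ring; otherwise drop and keep the old buffer. */
static int rx_service(struct rxd *md, void (*deliver)(void *), unsigned *dropped)
{
    void *fresh = malloc(2048);     /* stand-in for OS_mem_token_alloc(max_mru) */
    if (!fresh) {
        (*dropped)++;               /* descriptor keeps its old buffer */
        return -1;
    }
    deliver(md->buf);               /* ownership of the old buffer moves up-stack */
    md->buf = fresh;                /* ring slot is immediately reusable */
    return 0;
}

static void eat(void *p) { free(p); }

int main(void)
{
    unsigned dropped = 0;
    struct rxd d = { .buf = malloc(2048) };
    rx_service(&d, eat, &dropped);
    printf("dropped=%u\n", dropped);
    free(d.buf);
    return 0;
}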


irqreturn_t
musycc_intr_th_handler (void *devp)
{
    ci_t *ci = (ci_t *) devp;
    volatile u_int32_t status, currInt = 0;
    u_int32_t nextInt, intCnt;

    /*
     * Hardware not available, potential interrupt hang. But since the
     * interrupt might be shared, just return.
     */
    if (ci->state == C_INIT)
    {
        return IRQ_NONE;
    }
    /*
     * Marked as hardware available. Don't service interrupts, just clear
     * the event.
     */

    if (ci->state == C_IDLE)
    {
        status = pci_read_32 ((u_int32_t *) &ci->reg->isd);

        /* clear the interrupt but process nothing else */
        pci_write_32 ((u_int32_t *) &ci->reg->isd, status);
        return IRQ_HANDLED;
    }
    FLUSH_PCI_READ ();
    FLUSH_MEM_READ ();

    status = pci_read_32 ((u_int32_t *) &ci->reg->isd);
    nextInt = INTRPTS_NEXTINT (status);
    intCnt = INTRPTS_INTCNT (status);
    ci->intlog.drvr_intr_thcount++;

    /*********************************************************/
    /* HW Bug Fix                                            */
    /* ----------                                            */
    /* Under certain PCI Bus loading conditions, the         */
    /* MUSYCC loses the data associated with an update       */
    /* of its ISD and erroneously returns the immediately    */
    /* preceding 'nextInt' value. However, the 'intCnt'      */
    /* value appears to be correct. Not starting service     */
    /* where the 'missing' 'nextInt' SHOULD point causes     */
    /* the IQD not to be serviced - the 'not serviced'       */
    /* entries then remain and continue to increase as more  */
    /* incorrect ISDs are encountered.                       */
    /*********************************************************/

    if (nextInt != INTRPTS_NEXTINT (ci->intlog.this_status_new))
    {
        if (cxt1e1_log_level >= LOG_MONITOR)
        {
            pr_info("%s: note - updated ISD from %08x to %08x\n",
                    ci->devname, status,
                    (status & (~INTRPTS_NEXTINT_M)) | ci->intlog.this_status_new);
        }
        /*
         * Replace bogus status with software-corrected value.
         *
         * It is not known whether, while this problem occurs, the INTFULL
         * bit is correctly reported or not.
         */
        status = (status & (~INTRPTS_NEXTINT_M)) | (ci->intlog.this_status_new);
        nextInt = INTRPTS_NEXTINT (status);
    }
    /**********************************************/
    /* Cn847x Bug Fix                             */
    /* --------------                             */
    /* Fix for inability to write back same index */
    /* as read for a full interrupt queue.        */
    /**********************************************/

    if (intCnt == INT_QUEUE_SIZE)
    {
        currInt = ((intCnt - 1) + nextInt) & (INT_QUEUE_SIZE - 1);
    } else
        /************************************************/
        /* Interrupt Write Location Issues              */
        /* -------------------------------              */
        /* When the interrupt status descriptor is      */
        /* written, the interrupt line is de-asserted   */
        /* by the Cn847x. In the case of MIPS           */
        /* microprocessors, this must occur at the      */
        /* beginning of the interrupt handler so that   */
        /* the interrupt handler is not re-entered due  */
        /* to interrupt de-assertion latency.           */
        /* In the case of all other processors, this    */
        /* action should occur at the end of the        */
        /* interrupt handler to avoid overwriting the   */
        /* interrupt queue.                             */
        /************************************************/

    if (intCnt)
    {
        currInt = (intCnt + nextInt) & (INT_QUEUE_SIZE - 1);
    } else
    {
        /*
         * NOTE: Servicing an interrupt whose ISD contains a count of ZERO
         * can be indicative of a Shared Interrupt chain. Our driver can be
         * called from the system's interrupt handler as a matter of the OS
         * walking the chain. As the chain is walked, the interrupt will
         * eventually be serviced by the correct driver/handler.
         */
#if 0
        /* chained interrupt = not ours */
        pr_info(">> %s: intCnt NULL, sts %x, possibly a chained interrupt!\n",
                ci->devname, status);
#endif
        return IRQ_NONE;
    }

    ci->iqp_tailx = currInt;

    currInt <<= INTRPTS_NEXTINT_S;
    ci->intlog.last_status_new = ci->intlog.this_status_new;
    ci->intlog.this_status_new = currInt;

    if ((cxt1e1_log_level >= LOG_WARN) && (status & INTRPTS_INTFULL_M))
    {
        pr_info("%s: Interrupt queue full condition occurred\n", ci->devname);
    }
    if (cxt1e1_log_level >= LOG_DEBUG)
        pr_info("%s: interrupts pending, isd @ 0x%p: %x curr %d cnt %d NEXT %d\n",
                ci->devname, &ci->reg->isd,
                status, nextInt, intCnt, (intCnt + nextInt) & (INT_QUEUE_SIZE - 1));

    FLUSH_MEM_WRITE ();
#if defined(SBE_ISR_TASKLET)
    pci_write_32 ((u_int32_t *) &ci->reg->isd, currInt);
    atomic_inc (&ci->bh_pending);
    tasklet_schedule (&ci->ci_musycc_isr_tasklet);
#elif defined(SBE_ISR_IMMEDIATE)
    pci_write_32 ((u_int32_t *) &ci->reg->isd, currInt);
    atomic_inc (&ci->bh_pending);
    queue_task (&ci->ci_musycc_isr_tq, &tq_immediate);
    mark_bh (IMMEDIATE_BH);
#elif defined(SBE_ISR_INLINE)
    (void) musycc_intr_bh_tasklet (ci);
    pci_write_32 ((u_int32_t *) &ci->reg->isd, currInt);
#endif
    return IRQ_HANDLED;
}
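
The tail-index arithmetic in the handler is ordinary power-of-two ring math, plus the Cn847x special case of stopping one entry short when the queue is completely full. A tiny sketch of both computations (INT_QUEUE_SIZE's real value comes from the driver headers; 256 is an assumed power-of-two stand-in):

/* sketch: not part of musycc.c */
#include <stdio.h>

#define INT_QUEUE_SIZE 256      /* assumed power-of-two stand-in */

int main(void)
{
    unsigned nextInt = 250, intCnt = 10, currInt;

    /* Normal case: service ends intCnt entries past nextInt, wrapped. */
    currInt = (intCnt + nextInt) & (INT_QUEUE_SIZE - 1);
    printf("tail -> %u\n", currInt);

    /* Full-queue case: the Cn847x cannot write back the same index it
     * read, so the driver stops one entry short. */
    intCnt = INT_QUEUE_SIZE;
    currInt = ((intCnt - 1) + nextInt) & (INT_QUEUE_SIZE - 1);
    printf("full-queue tail -> %u\n", currInt);
    return 0;
}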


#if defined(SBE_ISR_IMMEDIATE)
unsigned long
#else
void
#endif
musycc_intr_bh_tasklet (ci_t * ci)
{
    mpi_t *pi;
    mch_t *ch;
    unsigned int intCnt;
    volatile u_int32_t currInt = 0;
    volatile unsigned int headx, tailx;
    int readCount, loopCount;
    int group, gchan, event, err, tx;
    u_int32_t badInt = INT_EMPTY_ENTRY;
    u_int32_t badInt2 = INT_EMPTY_ENTRY2;

    /*
     * Hardware not available, potential interrupt hang. But since the
     * interrupt might be shared, just return.
     */
    if ((drvr_state != SBE_DRVR_AVAILABLE) || (ci->state == C_INIT))
    {
#if defined(SBE_ISR_IMMEDIATE)
        return 0L;
#else
        return;
#endif
    }
#if defined(SBE_ISR_TASKLET) || defined(SBE_ISR_IMMEDIATE)
    if (drvr_state != SBE_DRVR_AVAILABLE)
    {
#if defined(SBE_ISR_TASKLET)
        return;
#elif defined(SBE_ISR_IMMEDIATE)
        return 0L;
#endif
    }
#elif defined(SBE_ISR_INLINE)
    /* no semaphore taken, no double checks */
#endif

    ci->intlog.drvr_intr_bhcount++;
    FLUSH_MEM_READ ();
    {
        unsigned int bh = atomic_read (&ci->bh_pending);

        max_bh = max (bh, max_bh);
    }
    atomic_set (&ci->bh_pending, 0);    /* if here, no longer pending */
    while ((headx = ci->iqp_headx) != (tailx = ci->iqp_tailx))
    {
        intCnt = (tailx >= headx) ? (tailx - headx) : (tailx - headx + INT_QUEUE_SIZE);
        currInt = le32_to_cpu (ci->iqd_p[headx]);

        max_intcnt = max (intCnt, max_intcnt);      /* RLD DEBUG */

        /**************************************************/
        /* HW Bug Fix                                     */
        /* ----------                                     */
        /* The following code checks for the condition    */
        /* of interrupt assertion before interrupt        */
        /* queue update. This is a problem on several     */
        /* PCI-Local bridge chips found on some products. */
        /**************************************************/

        readCount = 0;
        if ((currInt == badInt) || (currInt == badInt2))
            ci->intlog.drvr_int_failure++;

        while ((currInt == badInt) || (currInt == badInt2))
        {
            for (loopCount = 0; loopCount < 0x30; loopCount++)
                OS_uwait_dummy ();  /* use call to avoid optimization
                                     * removal of dummy delay */
            FLUSH_MEM_READ ();
            currInt = le32_to_cpu (ci->iqd_p[headx]);
            if (readCount++ > 20)
                break;
        }

        if ((currInt == badInt) || (currInt == badInt2))    /* catch failure of Bug
                                                             * Fix checking */
        {
            if (cxt1e1_log_level >= LOG_WARN)
                pr_info("%s: Illegal Interrupt Detected @ 0x%p, mod %d.\n",
                        ci->devname, &ci->iqd_p[headx], headx);

            /*
             * If the descriptor has not recovered, then leaving the EMPTY
             * entry set will not signal to the MUSYCC that this descriptor
             * has been serviced. The Interrupt Queue can then start losing
             * available descriptors and MUSYCC eventually encounters and
             * reports the INTFULL condition. Per manual, changing any bit
             * marks the descriptor as available, thus the use of different
             * EMPTY_ENTRY values.
             */

            if (currInt == badInt)
            {
                ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY2);
            } else
            {
                ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
            }
            ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1); /* ensure wrap */
            FLUSH_MEM_WRITE ();
            FLUSH_MEM_READ ();
            continue;
        }
        group = INTRPT_GRP (currInt);
        gchan = INTRPT_CH (currInt);
        event = INTRPT_EVENT (currInt);
        err = INTRPT_ERROR (currInt);
        tx = currInt & INTRPT_DIR_M;

        ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
        FLUSH_MEM_WRITE ();

        if (cxt1e1_log_level >= LOG_DEBUG)
        {
            if (err != 0)
                pr_info(" %08x -> err: %2d,", currInt, err);

            pr_info("+ interrupt event: %d, grp: %d, chan: %2d, side: %cX\n",
                    event, group, gchan, tx ? 'T' : 'R');
        }
        pi = &ci->port[group];      /* notice that here we assume a 1-1
                                     * group-to-port mapping */
        ch = pi->chan[gchan];
        switch (event)
        {
        case EVE_SACK:              /* Service Request Acknowledge */
            if (cxt1e1_log_level >= LOG_DEBUG)
            {
                volatile u_int32_t r;

                r = pci_read_32 ((u_int32_t *) &pi->reg->srd);
                pr_info("- SACK cmd: %08x (hdw= %08x)\n", pi->sr_last, r);
            }
            SD_SEM_GIVE (&pi->sr_sem_wait);     /* wake up waiting process */
            break;
        case EVE_CHABT:             /* Change To Abort Code (0x7e -> 0xff) */
        case EVE_CHIC:              /* Change To Idle Code (0xff -> 0x7e) */
            break;
        case EVE_EOM:               /* End Of Message */
        case EVE_EOB:               /* End Of Buffer (Transparent mode) */
            if (tx)
            {
                musycc_bh_tx_eom (pi, gchan);
            } else
            {
                musycc_bh_rx_eom (pi, gchan);
            }
#if 0
            break;
#else
            /*
             * The MUSYCC Interrupt Descriptor section states that EOB and
             * EOM can be combined with the NONE error (as well as others).
             * So drop through to catch this...
             */
#endif
        case EVE_NONE:
            if (err == ERR_SHT)
            {
                ch->s.rx_length_errors++;
            }
            break;
        default:
            if (cxt1e1_log_level >= LOG_WARN)
                pr_info("%s: unexpected interrupt event: %d, iqd[%d]: %08x, port: %d\n", ci->devname,
                        event, headx, currInt, group);
            break;
        }                           /* switch on event */


        /*
         * Per MUSYCC Manual, Section 6.4.8.3 [Transmit Errors], TX errors
         * are service-affecting and require action to resume normal
         * bit-level processing.
         */

        switch (err)
        {
        case ERR_ONR:
            /*
             * Per MUSYCC manual, Section 6.4.8.3 [Transmit Errors], this
             * error requires Transmit channel reactivation.
             *
             * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors], this
             * error requires Receive channel reactivation.
             */
            if (tx)
            {

                /*
                 * A TX ONR error only occurs when the channel is configured
                 * for Transparent Mode. However, this code will catch and
                 * re-activate on ANY TX ONR error.
                 */

                /*
                 * Set flag to re-enable on any next transmit attempt.
                 */
                ch->ch_start_tx = CH_START_TX_ONR;

                {
#ifdef RLD_TRANS_DEBUG
                    if (1 || cxt1e1_log_level >= LOG_MONITOR)
#else
                    if (cxt1e1_log_level >= LOG_MONITOR)
#endif
                    {
                        pr_info("%s: TX buffer underflow [ONR] on channel %d, mode %x QStopped %x free %d\n",
                                ci->devname, ch->channum, ch->p.chan_mode, sd_queue_stopped (ch->user), ch->txd_free);
#ifdef RLD_DEBUG
                        if (ch->p.chan_mode == 2)   /* problem = ONR on HDLC
                                                     * mode */
                        {
                            pr_info("++ Failed Last %x Next %x QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
                                    (u_int32_t) ch->txd_irq_srv, (u_int32_t) ch->txd_usr_add,
                                    sd_queue_stopped (ch->user),
                                    ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
                            musycc_dump_txbuffer_ring (ch, 0);
                        }
#endif
                    }
                }
            } else                  /* RX buffer overrun */
            {
                /*
                 * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors],
                 * channel recovery for this RX ONR error IS required. It is
                 * also suggested to increase the number of receive buffers
                 * for this channel. Receive channel reactivation IS
                 * required, and data has been lost.
                 */
                ch->s.rx_over_errors++;
                ch->ch_start_rx = CH_START_RX_ONR;

                if (cxt1e1_log_level >= LOG_WARN)
                {
                    pr_info("%s: RX buffer overflow [ONR] on channel %d, mode %x\n",
                            ci->devname, ch->channum, ch->p.chan_mode);
                    /* musycc_dump_rxbuffer_ring (ch, 0); */    /* RLD DEBUG */
                }
            }
            musycc_chan_restart (ch);
            break;
        case ERR_BUF:
            if (tx)
            {
                ch->s.tx_fifo_errors++;
                ch->ch_start_tx = CH_START_TX_BUF;
                /*
                 * Per MUSYCC manual, Section 6.4.8.3 [Transmit Errors],
                 * this BUFF error requires Transmit channel reactivation.
                 */
                if (cxt1e1_log_level >= LOG_MONITOR)
                    pr_info("%s: TX buffer underrun [BUFF] on channel %d, mode %x\n",
                            ci->devname, ch->channum, ch->p.chan_mode);
            } else                  /* RX buffer overrun */
            {
                ch->s.rx_over_errors++;
                /*
                 * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors], HDLC
                 * mode requires NO recovery for this RX BUFF error. It is
                 * suggested to increase the FIFO buffer space for this
                 * channel. Receive channel reactivation is not required,
                 * but data has been lost.
                 */
                if (cxt1e1_log_level >= LOG_WARN)
                    pr_info("%s: RX buffer overrun [BUFF] on channel %d, mode %x\n",
                            ci->devname, ch->channum, ch->p.chan_mode);
                /*
                 * Per MUSYCC manual, Section 6.4.9.4 [Receive Errors],
                 * Transparent mode DOES require recovery for the RX BUFF
                 * error. It is suggested to increase the FIFO buffer space
                 * for this channel. Receive channel reactivation IS
                 * required and data has been lost.
                 */
                if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
                    ch->ch_start_rx = CH_START_RX_BUF;
            }

            if (tx || (ch->p.chan_mode == CFG_CH_PROTO_TRANS))
                musycc_chan_restart (ch);
            break;
        default:
            break;
        }                           /* switch on err */

        /* Check for interrupt lost condition */
        if ((currInt & INTRPT_ILOST_M) && (cxt1e1_log_level >= LOG_ERROR))
        {
            pr_info("%s: Interrupt queue overflow - ILOST asserted\n",
                    ci->devname);
        }
        ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1); /* ensure wrap */
        FLUSH_MEM_WRITE ();
        FLUSH_MEM_READ ();
    }                               /* while */
    if ((cxt1e1_log_level >= LOG_MONITOR2) && (ci->iqp_headx != ci->iqp_tailx))
    {
        int bh;

        bh = atomic_read (&CI->bh_pending);
        pr_info("_bh_: late arrivals, head %d != tail %d, pending %d\n",
                ci->iqp_headx, ci->iqp_tailx, bh);
    }
#if defined(SBE_ISR_IMMEDIATE)
    return 0L;
#endif
    /* else, nothing returned */
}
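
The bottom half measures outstanding work with the usual wrap-aware head/tail subtraction, intCnt = (tailx >= headx) ? (tailx - headx) : (tailx - headx + INT_QUEUE_SIZE). A two-line demonstration of that formula (again with 256 as an assumed stand-in for INT_QUEUE_SIZE):

/* sketch: not part of musycc.c */
#include <stdio.h>

#define INT_QUEUE_SIZE 256      /* assumed stand-in */

/* Entries outstanding between head and tail of a circular queue. */
static unsigned pending(unsigned head, unsigned tail)
{
    return (tail >= head) ? (tail - head)
                          : (tail - head + INT_QUEUE_SIZE);
}

int main(void)
{
    /* Both cases report 10 pending entries. */
    printf("%u %u\n", pending(10, 20), pending(250, 4));
    return 0;
}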

#if 0
int __init
musycc_new_chan (ci_t * ci, int channum, void *user)
{
    mch_t *ch;

    ch = ci->port[channum / MUSYCC_NCHANS].chan[channum % MUSYCC_NCHANS];

    if (ch->state != UNASSIGNED)
        return EEXIST;
    /* NOTE: mch_t already cleared during OS_kmalloc() */
    ch->state = DOWN;
    ch->user = user;
#if 0
    ch->status = 0;
    ch->p.status = 0;
    ch->p.intr_mask = 0;
#endif
    ch->p.chan_mode = CFG_CH_PROTO_HDLC_FCS16;
    ch->p.idlecode = CFG_CH_FLAG_7E;
    ch->p.pad_fill_count = 2;
    spin_lock_init (&ch->ch_rxlock);
    spin_lock_init (&ch->ch_txlock);

    return 0;
}
#endif
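
The flat channel number in musycc_new_chan() splits into a port (group) index and a channel within that group by integer division and modulus with MUSYCC_NCHANS. A quick sketch of that mapping (4 ports of 32 channels are assumed values for illustration; the real constants live in the driver headers):

/* sketch: not part of musycc.c */
#include <stdio.h>

#define MUSYCC_NPORTS 4         /* assumed values for the sketch */
#define MUSYCC_NCHANS 32

int main(void)
{
    /* Flat channel numbers split into a port (group) and a channel
     * within that group, as in musycc_new_chan() above. */
    for (int channum = 0; channum < MUSYCC_NPORTS * MUSYCC_NCHANS; channum += 37) {
        int port  = channum / MUSYCC_NCHANS;
        int gchan = channum % MUSYCC_NCHANS;
        printf("chan %3d -> port %d, gchan %2d\n", channum, port, gchan);
    }
    return 0;
}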


#ifdef SBE_PMCC4_ENABLE
status_t
musycc_chan_down (ci_t * dummy, int channum)
{
	mpi_t *pi;
	mch_t *ch;
	int i, gchan;

	if (!(ch = sd_find_chan (dummy, channum)))
		return EINVAL;
	pi = ch->up;
	gchan = ch->gchan;

	/* Deactivate the channel */
	musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_RX_DIRECTION | gchan);
	ch->ch_start_rx = 0;
	musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_TX_DIRECTION | gchan);
	ch->ch_start_tx = 0;

	if (ch->state == DOWN)
		return 0;
	ch->state = DOWN;

	pi->regram->thp[gchan] = 0;
	pi->regram->tmp[gchan] = 0;
	pi->regram->rhp[gchan] = 0;
	pi->regram->rmp[gchan] = 0;
	FLUSH_MEM_WRITE ();
	for (i = 0; i < ch->txd_num; i++)
	{
		if (ch->mdt[i].mem_token != 0)
			OS_mem_token_free (ch->mdt[i].mem_token);
	}

	for (i = 0; i < ch->rxd_num; i++)
	{
		if (ch->mdr[i].mem_token != 0)
			OS_mem_token_free (ch->mdr[i].mem_token);
	}

	OS_kfree (ch->mdr);
	ch->mdr = 0;
	ch->rxd_num = 0;
	OS_kfree (ch->mdt);
	ch->mdt = 0;
	ch->txd_num = 0;

	musycc_update_timeslots (pi);
	c4_fifo_free (pi, ch->gchan);

	pi->openchans--;
	return 0;
}
#endif
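
/*
 * Teardown ordering in musycc_chan_down() matters: both directions are
 * quiesced via SR_CHANNEL_DEACTIVATE service requests before the group's
 * head/message pointers are zeroed and the per-descriptor mem_tokens are
 * released, presumably so the MUSYCC cannot DMA into buffers that are in
 * the middle of being freed.
 */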


int
musycc_del_chan (ci_t * ci, int channum)
{
	mch_t *ch;

	if ((channum < 0) || (channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)))	/* sanity chk param */
		return ECHRNG;
	if (!(ch = sd_find_chan (ci, channum)))
		return ENOENT;
	if (ch->state == UP)
		musycc_chan_down (ci, channum);
	ch->state = UNASSIGNED;
	return 0;
}
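
/*
 * Note the mixed errno conventions: musycc_del_chan() and
 * musycc_del_chan_stats() return positive ECHRNG/ENOENT, while
 * musycc_start_xmit() below returns negative errnos.  A hypothetical
 * caller feeding generic kernel code would have to negate the former
 * (illustrative sketch only):
 *
 *	int err = musycc_del_chan (ci, channum);
 *	if (err)
 *		return -err;	// ECHRNG/ENOENT --> -ECHRNG/-ENOENT
 */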


int
musycc_del_chan_stats (ci_t * ci, int channum)
{
	mch_t *ch;

	if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS))	/* sanity chk param */
		return ECHRNG;
	if (!(ch = sd_find_chan (ci, channum)))
		return ENOENT;

	memset (&ch->s, 0, sizeof (struct sbecom_chan_stats));
	return 0;
}
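
/*
 * Clearing the whole sbecom_chan_stats block resets every per-channel
 * counter at once (tx_packets, tx_bytes, tx_dropped, ...) rather than
 * zeroing fields one by one.
 */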


int
musycc_start_xmit (ci_t * ci, int channum, void *mem_token)
{
	mch_t *ch;
	struct mdesc *md;
	void *m2;
#if 0
	unsigned long flags;
#endif
	int txd_need_cnt;
	u_int32_t len;

	if (!(ch = sd_find_chan (ci, channum)))
		return -ENOENT;

	if (ci->state != C_RUNNING)	/* full interrupt processing available */
		return -EINVAL;
	if (ch->state != UP)
		return -EINVAL;

	if (!(ch->status & TX_ENABLED))
		return -EROFS;	/* how else to flag unwritable state ? */

#ifdef RLD_TRANS_DEBUGx
	if (1 || cxt1e1_log_level >= LOG_MONITOR2)
#else
	if (cxt1e1_log_level >= LOG_MONITOR2)
#endif
	{
		pr_info("++ start_xmt[%d]: state %x start %x full %d free %d required %d stopped %x\n",
			channum, ch->state, ch->ch_start_tx, ch->tx_full,
			ch->txd_free, ch->txd_required, sd_queue_stopped (ch->user));
	}
	/***********************************************/
	/** Determine total amount of data to be sent **/
	/***********************************************/
	m2 = mem_token;
	txd_need_cnt = 0;
	for (len = OS_mem_token_tlen (m2); len > 0;
	     m2 = (void *) OS_mem_token_next (m2))
	{
		if (!OS_mem_token_len (m2))
			continue;
		txd_need_cnt++;
		len -= OS_mem_token_len (m2);
	}

	if (txd_need_cnt == 0)
	{
		if (cxt1e1_log_level >= LOG_MONITOR2)
			pr_info("%s channel %d: no TX data in User buffer\n", ci->devname, channum);
		OS_mem_token_free (mem_token);
		return 0;	/* no data to send */
	}
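
	/*
	 * An all-empty fragment list is consumed rather than rejected: the
	 * token is freed and 0 is returned so the caller does not retry a
	 * buffer that can never yield a descriptor.
	 */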
	/*************************************************/
	/** Are there sufficient descriptors available? **/
	/*************************************************/
	if (txd_need_cnt > ch->txd_num)	/* never enough descriptors for this
					 * large a buffer */
	{
		if (cxt1e1_log_level >= LOG_DEBUG)
		{
			pr_info("start_xmit: discarding buffer, insufficient descriptor cnt %d, need %d.\n",
				ch->txd_num, txd_need_cnt + 1);
		}
		ch->s.tx_dropped++;
		OS_mem_token_free (mem_token);
		return 0;
	}
#if 0
	spin_lock_irqsave (&ch->ch_txlock, flags);
#endif
	/************************************************************/
	/** flow control the line if not enough descriptors remain **/
	/************************************************************/
	if (txd_need_cnt > ch->txd_free)
	{
		if (cxt1e1_log_level >= LOG_MONITOR2)
		{
			pr_info("start_xmit[%d]: EBUSY - need more descriptors, have %d of %d need %d\n",
				channum, ch->txd_free, ch->txd_num, txd_need_cnt);
		}
		ch->tx_full = 1;
		ch->txd_required = txd_need_cnt;
		sd_disable_xmit (ch->user);
#if 0
		spin_unlock_irqrestore (&ch->ch_txlock, flags);
#endif
		return -EBUSY;	/* tell user to try again later */
	}
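
	/*
	 * Flow-control handshake: tx_full/txd_required record how many
	 * descriptors this frame needs, sd_disable_xmit() stops the upper
	 * layer's queue, and -EBUSY asks the caller to requeue.  Presumably
	 * the transmit-completion path re-enables the queue once txd_free
	 * again reaches txd_required.
	 */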
	/**************************************************/
	/** Put the user data into MUSYCC data buffer(s) **/
	/**************************************************/
	m2 = mem_token;
	md = ch->txd_usr_add;	/* get current available descriptor */

	for (len = OS_mem_token_tlen (m2); len > 0; m2 = OS_mem_token_next (m2))
	{
		int u = OS_mem_token_len (m2);

		if (!u)
			continue;
		len -= u;

		/*
		 * Enable following chunks, yet wait to enable the FIRST chunk until
		 * after ALL subsequent chunks are setup.
		 */
		if (md != ch->txd_usr_add)	/* not first chunk */
			u |= MUSYCC_TX_OWNED;	/* transfer ownership from HOST to MUSYCC */

		if (len)	/* not last chunk */
			u |= EOBIRQ_ENABLE;
		else if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
		{
			/*
			 * Per MUSYCC Ref 6.4.9 for Transparent Mode, the host must
			 * always clear EOMIRQ_ENABLE in every Transmit Buffer Descriptor
			 * (i.e. do not set it here).
			 */
			u |= EOBIRQ_ENABLE;
		} else
			u |= EOMIRQ_ENABLE;	/* EOM, last HDLC chunk */


		/* last chunk in hdlc mode */
		u |= (ch->p.idlecode << IDLE_CODE);
		if (ch->p.pad_fill_count)
		{
#if 0
			/* NOOP NOTE: u_int8_t cannot be > 0xFF */
			/* sanitize pad_fill_count for maximums allowed by hardware */
			if (ch->p.pad_fill_count > EXTRA_FLAGS_MASK)
				ch->p.pad_fill_count = EXTRA_FLAGS_MASK;
#endif
			u |= (PADFILL_ENABLE | (ch->p.pad_fill_count << EXTRA_FLAGS));
		}
		md->mem_token = len ? 0 : mem_token;	/* Fill in mds on last
							 * segment, others set ZERO
							 * so that entire token is
							 * removed ONLY when ALL
							 * segments have been
							 * transmitted. */

		md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m2)));
		FLUSH_MEM_WRITE ();
		md->status = cpu_to_le32 (u);
		--ch->txd_free;
		md = md->snext;
	}
	FLUSH_MEM_WRITE ();


	/*
	 * Now transfer ownership of first chunk from HOST to MUSYCC in order to
	 * fire-off this XMIT.
	 */
	ch->txd_usr_add->status |= __constant_cpu_to_le32 (MUSYCC_TX_OWNED);
	FLUSH_MEM_WRITE ();
	ch->txd_usr_add = md;

	len = OS_mem_token_tlen (mem_token);
	atomic_add (len, &ch->tx_pending);
	atomic_add (len, &ci->tx_pending);
	ch->s.tx_packets++;
	ch->s.tx_bytes += len;
	/*
	 * If an ONR was seen, then channel requires poking to restart
	 * transmission.
	 */
	if (ch->ch_start_tx)
	{
		musycc_chan_restart (ch);
	}
#ifdef SBE_WAN256T3_ENABLE
	wan256t3_led (ci, LED_TX, LEDV_G);
#endif
	return 0;
}
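
/*
 * Transmit-path usage (illustrative sketch only; the caller shown here is
 * hypothetical).  Note which paths transfer ownership of mem_token:
 *
 *	ret = musycc_start_xmit (ci, channum, mem_token);
 *	if (ret == -EBUSY)
 *		requeue_frame ();		// caller still owns mem_token
 *	else if (ret < 0)
 *		OS_mem_token_free (mem_token);	// error paths do not free it
 *	// ret == 0: token was sent, or freed internally (empty/oversized)
 */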


/*** End-of-File ***/