Blame view
drivers/s390/cio/qdio.h
12.2 KB
779e6e1c7
|
1 |
/* |
a53c8fab3
|
2 |
* Copyright IBM Corp. 2000, 2009 |
779e6e1c7
|
3 4 5 |
* Author(s): Utz Bacher <utz.bacher@de.ibm.com> * Jan Glauber <jang@linux.vnet.ibm.com> */ |
1da177e4c
|
6 7 |
#ifndef _CIO_QDIO_H #define _CIO_QDIO_H |
0b642ede4
|
8 |
#include <asm/page.h> |
9d92a7e1b
|
9 |
#include <asm/schid.h> |
22f993476
|
10 |
#include <asm/debug.h> |
779e6e1c7
|
11 |
#include "chsc.h" |
a8237fc41
|
12 |
|
3a601bfef
|
13 |
#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */ |
be8d97a54
|
14 15 |
#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */ #define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */ |
3a601bfef
|
16 |
#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */ |
1da177e4c
|
17 18 19 20 21 22 23 24 25 26 |
/* life-cycle states of a QDIO subchannel (struct qdio_irq.state) */
enum qdio_irq_states {
	QDIO_IRQ_STATE_INACTIVE,
	QDIO_IRQ_STATE_ESTABLISHED,
	QDIO_IRQ_STATE_ACTIVE,
	QDIO_IRQ_STATE_STOPPED,
	QDIO_IRQ_STATE_CLEANUP,
	QDIO_IRQ_STATE_ERR,
	NR_QDIO_IRQ_STATES,
};
779e6e1c7
|
27 28 29 30 31 32 33 34 |
/* used as intparm in do_IO */ #define QDIO_DOING_ESTABLISH 1 #define QDIO_DOING_ACTIVATE 2 #define QDIO_DOING_CLEANUP 3 #define SLSB_STATE_NOT_INIT 0x0 #define SLSB_STATE_EMPTY 0x1 #define SLSB_STATE_PRIMED 0x2 |
104ea556e
|
35 |
#define SLSB_STATE_PENDING 0x3 |
779e6e1c7
|
36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
#define SLSB_STATE_HALTED 0xe #define SLSB_STATE_ERROR 0xf #define SLSB_TYPE_INPUT 0x0 #define SLSB_TYPE_OUTPUT 0x20 #define SLSB_OWNER_PROG 0x80 #define SLSB_OWNER_CU 0x40 #define SLSB_P_INPUT_NOT_INIT \ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */ #define SLSB_P_INPUT_ACK \ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */ #define SLSB_CU_INPUT_EMPTY \ (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */ #define SLSB_P_INPUT_PRIMED \ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */ #define SLSB_P_INPUT_HALTED \ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */ #define SLSB_P_INPUT_ERROR \ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */ #define SLSB_P_OUTPUT_NOT_INIT \ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */ #define SLSB_P_OUTPUT_EMPTY \ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */ |
104ea556e
|
59 60 |
#define SLSB_P_OUTPUT_PENDING \ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */ |
779e6e1c7
|
61 62 63 64 65 66 67 68 69 70 71 72 |
#define SLSB_CU_OUTPUT_PRIMED \ (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */ #define SLSB_P_OUTPUT_HALTED \ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */ #define SLSB_P_OUTPUT_ERROR \ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */ #define SLSB_ERROR_DURING_LOOKUP 0xff /* additional CIWs returned by extended Sense-ID */ #define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ #define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ |
1da177e4c
|
73 |
|
779e6e1c7
|
74 75 76 |
/* flags for st qdio sch data */ #define CHSC_FLAG_QDIO_CAPABILITY 0x80 #define CHSC_FLAG_VALIDITY 0x40 |
958c0ba40
|
77 78 79 80 |
/* SIGA flags */ #define QDIO_SIGA_WRITE 0x00 #define QDIO_SIGA_READ 0x01 #define QDIO_SIGA_SYNC 0x02 |
104ea556e
|
81 |
#define QDIO_SIGA_WRITEQ 0x04 |
958c0ba40
|
82 |
#define QDIO_SIGA_QEBSM_FLAG 0x80 |
779e6e1c7
|
83 84 85 86 87 88 89 |
#ifdef CONFIG_64BIT
/*
 * do_sqbs - execute the SQBS instruction (QEBSM: set queue buffer states)
 * @token: sch_token identifying the subchannel (QEBSM facility)
 * @queue: queue number, placed in the upper word of the queue/start operand
 * @state: SLSB state to set for the buffers
 * @start: in: first buffer; out: low byte of the updated start position
 * @count: in: number of buffers; out: low byte of the returned CCQ
 *
 * The instruction requires the CCQ in GR0 and the token in GR1 (fixed by
 * the asm register constraints below). Returns (CCQ >> 32) & 0xff, the
 * condition code qualifier of the operation.
 */
static inline int do_sqbs(u64 token, unsigned char state, int queue,
			  int *start, int *count)
{
	register unsigned long _ccq asm ("0") = *count;
	register unsigned long _token asm ("1") = token;
	/* queue number in bits 0-31, start buffer in the low byte */
	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;

	asm volatile(
		"	.insn	rsy,0xeb000000008A,%1,0,0(%2)"
		: "+d" (_ccq), "+d" (_queuestart)
		: "d" ((unsigned long)state), "d" (_token)
		: "memory", "cc");
	*count = _ccq & 0xff;
	*start = _queuestart & 0xff;

	return (_ccq >> 32) & 0xff;
}
779e6e1c7
|
101 |
/*
 * do_eqbs - execute the EQBS instruction (QEBSM: extract queue buffer states)
 * @token: sch_token identifying the subchannel (QEBSM facility)
 * @state: out: SLSB state common to the extracted buffers (low byte)
 * @queue: queue number, placed in the upper word of the queue/start operand
 * @start: in: first buffer; out: low byte of the updated start position
 * @count: in: number of buffers to inspect; out: low byte of the CCQ
 * @ack: when non-zero, sets the MSB of the state operand — presumably the
 *       acknowledgement request bit of EQBS; verify against the z/Arch POP
 *
 * CCQ is expected in GR0 and the token in GR1 (fixed register constraints).
 * Returns (CCQ >> 32) & 0xff, the condition code qualifier.
 */
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
			  int *start, int *count, int ack)
{
	register unsigned long _ccq asm ("0") = *count;
	register unsigned long _token asm ("1") = token;
	/* queue number in bits 0-31, start buffer in the low byte */
	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
	unsigned long _state = (unsigned long)ack << 63;

	asm volatile(
		"	.insn	rrf,0xB99c0000,%1,%2,0,0"
		: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
		: "d" (_token)
		: "memory", "cc");
	*count = _ccq & 0xff;
	*start = _queuestart & 0xff;
	*state = _state & 0xff;

	return (_ccq >> 32) & 0xff;
}
779e6e1c7
|
120 121 122 123 |
#else
/* QEBSM is only available with CONFIG_64BIT; no-op stubs for 31-bit builds */
static inline int do_sqbs(u64 token, unsigned char state, int queue,
			  int *start, int *count) { return 0; }
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
			  int *start, int *count, int ack) { return 0; }
#endif /* CONFIG_64BIT */
1da177e4c
|
126 |
|
779e6e1c7
|
127 |
struct qdio_irq; |
1da177e4c
|
128 |
|
779e6e1c7
|
129 130 131 132 |
/*
 * Which SIGA variants the device requires; derived from the qdioac flags
 * (see the siga_flag member of struct qdio_irq and the need_siga_* macros).
 */
struct siga_flag {
	u8 input:1;		/* SIGA needed for input */
	u8 output:1;		/* SIGA needed for output */
	u8 sync:1;		/* SIGA-sync needed */
	u8 sync_after_ai:1;	/* sync needed after adapter interrupt */
	u8 sync_out_after_pci:1;/* sync outbound needed after PCI interrupt */
	u8:3;			/* unused filler bits */
} __attribute__ ((packed));
1da177e4c
|
137 |
|
6486cda6c
|
138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 |
/*
 * Per-device performance counters, updated via qperf_inc() only when
 * perf_stat_enabled is set on the qdio_irq (exported through debugfs).
 */
struct qdio_dev_perf_stat {
	unsigned int adapter_int;
	unsigned int qdio_int;
	unsigned int pci_request_int;

	unsigned int tasklet_inbound;
	unsigned int tasklet_inbound_resched;
	unsigned int tasklet_inbound_resched2;
	unsigned int tasklet_outbound;

	unsigned int siga_read;
	unsigned int siga_write;
	unsigned int siga_sync;

	unsigned int inbound_call;
	unsigned int inbound_handler;
	unsigned int stop_polling;
	unsigned int inbound_queue_full;
	unsigned int outbound_call;
	unsigned int outbound_handler;
	unsigned int outbound_queue_full;
	unsigned int fast_requeue;
	unsigned int target_full;
	unsigned int eqbs;
	unsigned int eqbs_partial;
	unsigned int sqbs;
	unsigned int sqbs_partial;
	unsigned int int_discarded;
} ____cacheline_aligned;
6486cda6c
|
167 |
|
d307297f7
|
168 169 170 171 172 173 174 175 176 177 178 |
/* per-queue histogram of SBALs processed per scan */
struct qdio_queue_perf_stat {
	/*
	 * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
	 * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
	 * aka 127 SBALs found.
	 */
	unsigned int nr_sbals[8];
	/* SBALs found in error state (see account_sbals_error()) */
	unsigned int nr_sbal_error;
	unsigned int nr_sbal_nop;
	unsigned int nr_sbal_total;
};
d36deae75
|
179 180 181 |
enum qdio_queue_irq_states { QDIO_QUEUE_IRQS_DISABLED, }; |
779e6e1c7
|
182 183 184 |
/* inbound-only queue state, embedded as u.in in struct qdio_q */
struct qdio_input_q {
	/* input buffer acknowledgement flag */
	int polling;
	/* first ACK'ed buffer */
	int ack_start;
	/* how much sbals are acknowledged with qebsm */
	int ack_count;
	/* last time of noticing incoming data */
	u64 timestamp;
	/* upper-layer polling flag */
	unsigned long queue_irq_state;
	/* callback to start upper-layer polling */
	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};
1da177e4c
|
196 |
|
779e6e1c7
|
197 |
/* outbound-only queue state, embedded as u.out in struct qdio_q */
struct qdio_output_q {
	/* PCIs are enabled for the queue */
	int pci_out_enabled;
	/* cq: use asynchronous output buffers */
	int use_cq;
	/* cq: aobs used for particual SBAL */
	struct qaob **aobs;
	/* cq: sbal state related to asynchronous operation */
	struct qdio_outbuf_state *sbal_state;
	/* timer to check for more outbound work */
	struct timer_list timer;
	/* used SBALs before tasklet schedule */
	int scan_threshold;
};
1da177e4c
|
211 |
|
d307297f7
|
212 213 214 215 |
/*
 * Note on cache alignment: grouped slsb and write mostly data at the beginning
 * sbal[] is read-only and starts on a new cacheline followed by read mostly.
 */
struct qdio_q {
	/* per-buffer state array (SLSB) for this queue */
	struct slsb slsb;

	/* direction-specific state; is_input_q selects the active member */
	union {
		struct qdio_input_q in;
		struct qdio_output_q out;
	} u;

	/*
	 * inbound: next buffer the program should check for
	 * outbound: next buffer to check if adapter processed it
	 */
	int first_to_check;

	/* first_to_check of the last time */
	int last_move;

	/* beginning position for calling the program */
	int first_to_kick;

	/* number of buffers in use by the adapter */
	atomic_t nr_buf_used;

	/* error condition during a data transfer */
	unsigned int qdio_error;

	/* last scan of the queue */
	u64 timestamp;

	/* tasklet running qdio_inbound_processing/qdio_outbound_processing */
	struct tasklet_struct tasklet;
	struct qdio_queue_perf_stat q_stats;

	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;

	/* queue number */
	int nr;

	/* bitmask of queue number */
	int mask;

	/* input or output queue */
	int is_input_q;

	/* list of thinint input queues */
	struct list_head entry;

	/* upper-layer program handler */
	qdio_handler_t (*handler);

	struct dentry *debugfs_q;
	/* back-pointer to the owning subchannel */
	struct qdio_irq *irq_ptr;
	struct sl *sl;
	/*
	 * A page is allocated under this pointer and used for slib and sl.
	 * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
	 */
	struct slib *slib;
} __attribute__ ((aligned(256)));

/* per-subchannel QDIO state: queues, CCW bits and debug/perf hooks */
struct qdio_irq {
	struct qib qib;
	u32 *dsci;		/* address of device state change indicator */
	struct ccw_device *cdev;
	struct dentry *debugfs_dev;
	struct dentry *debugfs_perf;

	/* intparm used in do_IO (QDIO_DOING_ESTABLISH etc.) */
	unsigned long int_parm;
	struct subchannel_id schid;
	unsigned long sch_token;	/* QEBSM facility */

	enum qdio_irq_states state;

	struct siga_flag siga_flag;	/* siga sync information from qdioac */

	int nr_input_qs;
	int nr_output_qs;

	struct ccw1 ccw;
	/* CIWs from extended Sense-ID: establish/activate QDIO queues */
	struct ciw equeue;
	struct ciw aqueue;

	struct qdio_ssqd_desc ssqd_desc;
	/* original interrupt handler, restored on shutdown */
	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);

	/* gate for qperf_inc(): 0 disables perf_stat accounting */
	int perf_stat_enabled;

	struct qdr *qdr;
	/* page used for CHSC requests */
	unsigned long chsc_page;

	struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
	struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];

	debug_info_t *debug_area;
	struct mutex setup_mutex;
	struct qdio_dev_perf_stat perf_stat;
};
779e6e1c7
|
308 309 310 |
/* helper functions */ #define queue_type(q) q->irq_ptr->qib.qfmt |
22f993476
|
311 |
#define SCH_NO(q) (q->irq_ptr->schid.sch_no) |
779e6e1c7
|
312 313 314 315 |
#define is_thinint_irq(irq) \ (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ css_general_characteristics.aif_osa) |
d307297f7
|
316 317 318 319 320 321 322 323 324 325 326 327 328 329 |
#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr)) #define qperf_inc(__q, __attr) \ ({ \ struct qdio_irq *qdev = (__q)->irq_ptr; \ if (qdev->perf_stat_enabled) \ (qdev->perf_stat.__attr)++; \ }) static inline void account_sbals_error(struct qdio_q *q, int count) { q->q_stats.nr_sbal_error += count; q->q_stats.nr_sbal_total += count; } |
6486cda6c
|
330 |
|
779e6e1c7
|
331 332 333 334 335 336 |
/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
{
	int last = q->irq_ptr->nr_output_qs - 1;

	/* only meaningful with more than one outbound queue */
	return last > 0 && q->nr == last;
}
779e6e1c7
|
337 338 339 |
#define pci_out_supported(q) \ (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) #define is_qebsm(q) (q->irq_ptr->sch_token != 0) |
779e6e1c7
|
340 341 |
#define need_siga_in(q) (q->irq_ptr->siga_flag.input) #define need_siga_out(q) (q->irq_ptr->siga_flag.output) |
90adac58d
|
342 343 344 345 346 |
#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync)) #define need_siga_sync_after_ai(q) \ (unlikely(q->irq_ptr->siga_flag.sync_after_ai)) #define need_siga_sync_out_after_pci(q) \ (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci)) |
779e6e1c7
|
347 |
|
dbb0dd021
|
348 349 350 351 352 353 |
#define for_each_input_queue(irq_ptr, q, i) \ for (i = 0; i < irq_ptr->nr_input_qs && \ ({ q = irq_ptr->input_qs[i]; 1; }); i++) #define for_each_output_queue(irq_ptr, q, i) \ for (i = 0; i < irq_ptr->nr_output_qs && \ ({ q = irq_ptr->output_qs[i]; 1; }); i++) |
779e6e1c7
|
354 355 356 357 358 359 360 |
#define prev_buf(bufnr) \ ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK) #define next_buf(bufnr) \ ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) #define add_buf(bufnr, inc) \ ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) |
50f769df1
|
361 362 |
#define sub_buf(bufnr, dec) \ ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) |
779e6e1c7
|
363 |
|
d36deae75
|
364 365 366 367 |
#define queue_irqs_enabled(q) \ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0) #define queue_irqs_disabled(q) \ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0) |
a2b860198
|
368 |
extern u64 last_ai_time; |
779e6e1c7
|
369 |
/* prototypes for thin interrupt */ |
779e6e1c7
|
370 371 372 373 374 375 376 377 378 379 |
void qdio_setup_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr); void tiqdio_inbound_processing(unsigned long q); int tiqdio_allocate_memory(void); void tiqdio_free_memory(void); int tiqdio_register_thinints(void); void tiqdio_unregister_thinints(void); |
5f4026f8b
|
380 381 |
void clear_nonshared_ind(struct qdio_irq *); int test_nonshared_ind(struct qdio_irq *); |
104ea556e
|
382 |
|
779e6e1c7
|
383 384 385 386 387 388 389 390 391 |
/* prototypes for setup */ void qdio_inbound_processing(unsigned long data); void qdio_outbound_processing(unsigned long data); void qdio_outbound_timer(unsigned long data); void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb); int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs); void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); |
bbd50e172
|
392 393 394 |
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, struct subchannel_id *schid, struct qdio_ssqd_desc *data); |
779e6e1c7
|
395 396 397 398 |
int qdio_setup_irq(struct qdio_initialize *init_data); void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, struct ccw_device *cdev); void qdio_release_memory(struct qdio_irq *irq_ptr); |
50f769df1
|
399 400 |
int qdio_setup_create_sysfs(struct ccw_device *cdev); void qdio_setup_destroy_sysfs(struct ccw_device *cdev); |
779e6e1c7
|
401 402 |
int qdio_setup_init(void); void qdio_setup_exit(void); |
104ea556e
|
403 404 405 |
int qdio_enable_async_operation(struct qdio_output_q *q); void qdio_disable_async_operation(struct qdio_output_q *q); struct qaob *qdio_allocate_aob(void); |
779e6e1c7
|
406 |
|
60b5df2f1
|
407 408 |
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); |
779e6e1c7
|
409 |
#endif /* _CIO_QDIO_H */ |