Blame view
drivers/s390/cio/qdio_thinint.c
6.71 KB
779e6e1c7
|
1 |
/* |
a53c8fab3
|
2 |
* Copyright IBM Corp. 2000, 2009 |
779e6e1c7
|
3 4 5 6 7 |
* Author(s): Utz Bacher <utz.bacher@de.ibm.com> * Cornelia Huck <cornelia.huck@de.ibm.com> * Jan Glauber <jang@linux.vnet.ibm.com> */ #include <linux/io.h> |
5a0e3ad6a
|
8 |
#include <linux/slab.h> |
30d77c3e1
|
9 |
#include <linux/kernel_stat.h> |
60063497a
|
10 |
#include <linux/atomic.h> |
779e6e1c7
|
11 12 13 14 15 16 17 18 19 |
#include <asm/debug.h> #include <asm/qdio.h> #include <asm/airq.h> #include <asm/isc.h> #include "cio.h" #include "ioasm.h" #include "qdio.h" #include "qdio_debug.h" |
779e6e1c7
|
/*
 * Restriction: only 63 iqdio subchannels would have its own indicator,
 * after that, subsequent subchannels share one indicator
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
/* index of the single shared slot at the end of the q_indicators array */
#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
/* serializes writers of tiq_list; readers traverse it under RCU */
static DEFINE_MUTEX(tiq_list_lock);

/* Adapter interrupt definitions */
static void tiqdio_thinint_handler(struct airq_struct *airq);

static struct airq_struct tiqdio_airq = {
	.handler = tiqdio_thinint_handler,
	.isc = QDIO_AIRQ_ISC,
};

/* array of TIQDIO_NR_INDICATORS entries; the last one is the shared slot */
static struct indicator_t *q_indicators;

/* S390_lowcore.int_clock value of the most recent adapter interrupt */
u64 last_ai_time;
779e6e1c7
|
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 |
/* returns addr for the device state change indicator */ static u32 *get_indicator(void) { int i; for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++) if (!atomic_read(&q_indicators[i].count)) { atomic_set(&q_indicators[i].count, 1); return &q_indicators[i].ind; } /* use the shared indicator */ atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count); return &q_indicators[TIQDIO_SHARED_IND].ind; } static void put_indicator(u32 *addr) { int i; if (!addr) return; i = ((unsigned long)addr - (unsigned long)q_indicators) / sizeof(struct indicator_t); atomic_dec(&q_indicators[i].count); } void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) { |
b45474024
|
80 |
mutex_lock(&tiq_list_lock); |
104ea556e
|
81 |
list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list); |
b45474024
|
82 |
mutex_unlock(&tiq_list_lock); |
d0c9d4a89
|
83 |
xchg(irq_ptr->dsci, 1 << 7); |
779e6e1c7
|
84 |
} |
779e6e1c7
|
85 86 87 |
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) { struct qdio_q *q; |
779e6e1c7
|
88 |
|
104ea556e
|
89 90 91 92 |
q = irq_ptr->input_qs[0]; /* if establish triggered an error */ if (!q || !q->entry.prev || !q->entry.next) return; |
b45474024
|
93 |
|
104ea556e
|
94 95 96 97 |
mutex_lock(&tiq_list_lock); list_del_rcu(&q->entry); mutex_unlock(&tiq_list_lock); synchronize_rcu(); |
779e6e1c7
|
98 |
} |
5f4026f8b
|
/* true if several input queues are served by one dsci */
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->nr_input_qs > 1;
}

/* true if the irq's dsci is the global shared indicator */
static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

/* true if the irq's dsci is shared in either sense */
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return references_shared_dsci(irq_ptr) ||
	       has_multiple_inq_on_dsci(irq_ptr);
}

/* reset a non-shared dsci; shared indicators are left alone */
void clear_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr) || shared_ind(irq_ptr))
		return;

	xchg(irq_ptr->dsci, 0);
}

/* report whether a non-shared dsci is currently set */
int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr) || shared_ind(irq_ptr))
		return 0;

	return *irq_ptr->dsci ? 1 : 0;
}
b02f0c2ea
|
135 |
static inline u32 clear_shared_ind(void) |
779e6e1c7
|
136 |
{ |
b02f0c2ea
|
137 138 139 |
if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) return 0; return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); |
779e6e1c7
|
140 |
} |
104ea556e
|
/*
 * Kick inbound processing for every input queue of @irq, either by
 * notifying a polling driver or by scheduling the queue tasklet.
 * The order of dsci clearing relative to handler invocation matters:
 * clearing too late can lose interrupts, clearing in the wrong branch
 * can cause spurious reprocessing.
 */
static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq, q, i) {
		/*
		 * A dedicated dsci serving several queues is cleared once,
		 * before processing, so new device signals re-raise it.
		 */
		if (!references_shared_dsci(irq) &&
		    has_multiple_inq_on_dsci(irq))
			xchg(q->irq_ptr->dsci, 0);

		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}

			/* avoid dsci clear here, done after processing */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			/* only clear an indicator this queue owns outright */
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);

			/*
			 * Call inbound processing but not directly
			 * since that could starve other thinint queues.
			 */
			tasklet_schedule(&q->tasklet);
		}
	}
}
cf9a031c2
|
174 175 |
/** * tiqdio_thinint_handler - thin interrupt handler for qdio |
d36deae75
|
176 177 |
* @alsi: pointer to adapter local summary indicator * @data: NULL |
cf9a031c2
|
178 |
*/ |
f4eae94f7
|
179 |
static void tiqdio_thinint_handler(struct airq_struct *airq) |
779e6e1c7
|
180 |
{ |
b02f0c2ea
|
181 |
u32 si_used = clear_shared_ind(); |
779e6e1c7
|
182 |
struct qdio_q *q; |
d36deae75
|
183 |
last_ai_time = S390_lowcore.int_clock; |
420f42ecf
|
184 |
inc_irq_stat(IRQIO_QAI); |
d36deae75
|
185 |
|
779e6e1c7
|
186 187 |
/* protect tiq_list entries, only changed in activate or shutdown */ rcu_read_lock(); |
cf9a031c2
|
188 |
/* check for work on all inbound thinint queues */ |
d36deae75
|
189 |
list_for_each_entry_rcu(q, &tiq_list, entry) { |
104ea556e
|
190 |
struct qdio_irq *irq; |
d36deae75
|
191 |
|
779e6e1c7
|
192 |
/* only process queues from changed sets */ |
104ea556e
|
193 194 |
irq = q->irq_ptr; if (unlikely(references_shared_dsci(irq))) { |
4f325184f
|
195 196 |
if (!si_used) continue; |
104ea556e
|
197 |
} else if (!*irq->dsci) |
d36deae75
|
198 |
continue; |
779e6e1c7
|
199 |
|
104ea556e
|
200 |
tiqdio_call_inq_handlers(irq); |
d36deae75
|
201 |
|
d36deae75
|
202 203 |
qperf_inc(q, adapter_int); } |
779e6e1c7
|
204 |
rcu_read_unlock(); |
779e6e1c7
|
205 206 207 208 |
} static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) { |
ca4ba153f
|
209 210 |
struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page; u64 summary_indicator_addr, subchannel_indicator_addr; |
779e6e1c7
|
211 |
int rc; |
779e6e1c7
|
212 |
if (reset) { |
ca4ba153f
|
213 214 |
summary_indicator_addr = 0; subchannel_indicator_addr = 0; |
779e6e1c7
|
215 |
} else { |
f4eae94f7
|
216 |
summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr); |
ca4ba153f
|
217 |
subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci); |
779e6e1c7
|
218 |
} |
ca4ba153f
|
219 220 |
rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr, subchannel_indicator_addr); |
779e6e1c7
|
221 |
if (rc) { |
22f993476
|
222 |
DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no, |
ca4ba153f
|
223 224 |
scssc->response.code); goto out; |
779e6e1c7
|
225 |
} |
22f993476
|
226 |
DBF_EVENT("setscind"); |
ca4ba153f
|
227 228 229 230 |
DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr)); DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr)); out: return rc; |
779e6e1c7
|
231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 |
} /* allocate non-shared indicators and shared indicator */ int __init tiqdio_allocate_memory(void) { q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS, GFP_KERNEL); if (!q_indicators) return -ENOMEM; return 0; } void tiqdio_free_memory(void) { kfree(q_indicators); } int __init tiqdio_register_thinints(void) { |
f4eae94f7
|
250 251 252 253 254 255 |
int rc; rc = register_adapter_interrupt(&tiqdio_airq); if (rc) { DBF_EVENT("RTI:%x", rc); return rc; |
779e6e1c7
|
256 257 258 259 260 261 262 263 |
} return 0; } int qdio_establish_thinint(struct qdio_irq *irq_ptr) { if (!is_thinint_irq(irq_ptr)) return 0; |
779e6e1c7
|
264 265 266 267 268 269 270 271 |
return set_subchannel_ind(irq_ptr, 0); } void qdio_setup_thinint(struct qdio_irq *irq_ptr) { if (!is_thinint_irq(irq_ptr)) return; irq_ptr->dsci = get_indicator(); |
22f993476
|
272 |
DBF_HEX(&irq_ptr->dsci, sizeof(void *)); |
779e6e1c7
|
273 274 275 276 277 278 279 280 |
} void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) { if (!is_thinint_irq(irq_ptr)) return; /* reset adapter interrupt indicators */ |
779e6e1c7
|
281 |
set_subchannel_ind(irq_ptr, 1); |
4814a2b3c
|
282 |
put_indicator(irq_ptr->dsci); |
779e6e1c7
|
283 284 285 286 |
} void __exit tiqdio_unregister_thinints(void) { |
9e890ad88
|
287 |
WARN_ON(!list_empty(&tiq_list)); |
f4eae94f7
|
288 |
unregister_adapter_interrupt(&tiqdio_airq); |
779e6e1c7
|
289 |
} |