Blame view
drivers/thunderbolt/nhi.c
32.3 KB
09c434b8a treewide: Add SPD... |
1 |
// SPDX-License-Identifier: GPL-2.0-only |
166031536 thunderbolt: Add ... |
2 |
/* |
15c6784c7 thunderbolt: Add ... |
3 |
* Thunderbolt driver - NHI driver |
166031536 thunderbolt: Add ... |
4 5 6 7 8 |
* * The NHI (native host interface) is the pci device that allows us to send and * receive frames from the thunderbolt bus. * * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> |
15c6784c7 thunderbolt: Add ... |
9 |
* Copyright (C) 2018, Intel Corporation |
166031536 thunderbolt: Add ... |
10 |
*/ |
23dd5bb49 thunderbolt: Add ... |
11 |
#include <linux/pm_runtime.h> |
166031536 thunderbolt: Add ... |
12 13 14 15 16 |
#include <linux/slab.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/module.h> |
cd446ee2e thunderbolt: Add ... |
17 |
#include <linux/delay.h> |
3cdb9446a thunderbolt: Add ... |
18 |
#include <linux/property.h> |
166031536 thunderbolt: Add ... |
19 20 21 |
#include "nhi.h" #include "nhi_regs.h" |
d6cc51cd1 thunderbolt: Setu... |
22 |
#include "tb.h" |
166031536 thunderbolt: Add ... |
23 24 |
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") |
046bee1f9 thunderbolt: Add ... |
25 |
/* |
9fb1e654d thunderbolt: Add ... |
26 27 28 29 |
* Used to enable end-to-end workaround for missing RX packets. Do not * use this ring for anything else. */ #define RING_E2E_UNUSED_HOPID 2 |
0b2863ac3 thunderbolt: Add ... |
30 |
#define RING_FIRST_USABLE_HOPID TB_PATH_MIN_HOPID |
9fb1e654d thunderbolt: Add ... |
31 32 |
/* |
046bee1f9 thunderbolt: Add ... |
33 34 35 36 37 |
* Minimal number of vectors when we use MSI-X. Two for control channel * Rx/Tx and the rest four are for cross domain DMA paths. */ #define MSIX_MIN_VECS 6 #define MSIX_MAX_VECS 16 |
166031536 thunderbolt: Add ... |
38 |
|
cd446ee2e thunderbolt: Add ... |
39 |
#define NHI_MAILBOX_TIMEOUT 500 /* ms */ |
166031536 thunderbolt: Add ... |
40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 |
static int ring_interrupt_index(struct tb_ring *ring) { int bit = ring->hop; if (!ring->is_tx) bit += ring->nhi->hop_count; return bit; } /** * ring_interrupt_active() - activate/deactivate interrupts for a single ring * * ring->nhi->lock must be held. */ static void ring_interrupt_active(struct tb_ring *ring, bool active) { |
19bf4d4f9 thunderbolt: Supp... |
55 56 |
int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32 * 4; |
166031536 thunderbolt: Add ... |
57 58 59 |
int bit = ring_interrupt_index(ring) & 31; int mask = 1 << bit; u32 old, new; |
046bee1f9 thunderbolt: Add ... |
60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 |
if (ring->irq > 0) { u32 step, shift, ivr, misc; void __iomem *ivr_base; int index; if (ring->is_tx) index = ring->hop; else index = ring->hop + ring->nhi->hop_count; /* * Ask the hardware to clear interrupt status bits automatically * since we already know which interrupt was triggered. */ misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) { misc |= REG_DMA_MISC_INT_AUTO_CLEAR; iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC); } ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE; step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS; shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS; ivr = ioread32(ivr_base + step); ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift); if (active) ivr |= ring->vector << shift; iowrite32(ivr, ivr_base + step); } |
166031536 thunderbolt: Add ... |
90 91 92 93 94 |
old = ioread32(ring->nhi->iobase + reg); if (active) new = old | mask; else new = old & ~mask; |
daa5140f7 thunderbolt: Make... |
95 96 97 98 |
dev_dbg(&ring->nhi->pdev->dev, "%s interrupt at register %#x bit %d (%#x -> %#x) ", active ? "enabling" : "disabling", reg, bit, old, new); |
166031536 thunderbolt: Add ... |
99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 |
if (new == old) dev_WARN(&ring->nhi->pdev->dev, "interrupt for %s %d is already %s ", RING_TYPE(ring), ring->hop, active ? "enabled" : "disabled"); iowrite32(new, ring->nhi->iobase + reg); } /** * nhi_disable_interrupts() - disable interrupts for all rings * * Use only during init and shutdown. */ static void nhi_disable_interrupts(struct tb_nhi *nhi) { int i = 0; /* disable interrupts */ for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i); /* clear interrupt status bits */ for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i); } /* ring helper methods */ static void __iomem *ring_desc_base(struct tb_ring *ring) { void __iomem *io = ring->nhi->iobase; io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE; io += ring->hop * 16; return io; } static void __iomem *ring_options_base(struct tb_ring *ring) { void __iomem *io = ring->nhi->iobase; io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE; io += ring->hop * 32; return io; } |
943795219 thunderbolt: Use ... |
143 |
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons) |
166031536 thunderbolt: Add ... |
144 |
{ |
943795219 thunderbolt: Use ... |
145 146 147 148 149 150 151 152 153 154 155 156 |
/* * The other 16-bits in the register is read-only and writes to it * are ignored by the hardware so we can save one ioread32() by * filling the read-only bits with zeroes. */ iowrite32(cons, ring_desc_base(ring) + 8); } static void ring_iowrite_prod(struct tb_ring *ring, u16 prod) { /* See ring_iowrite_cons() above for explanation */ iowrite32(prod << 16, ring_desc_base(ring) + 8); |
166031536 thunderbolt: Add ... |
157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 |
} static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) { iowrite32(value, ring_desc_base(ring) + offset); } static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset) { iowrite32(value, ring_desc_base(ring) + offset); iowrite32(value >> 32, ring_desc_base(ring) + offset + 4); } static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset) { iowrite32(value, ring_options_base(ring) + offset); } static bool ring_full(struct tb_ring *ring) { return ((ring->head + 1) % ring->size) == ring->tail; } static bool ring_empty(struct tb_ring *ring) { return ring->head == ring->tail; } /** * ring_write_descriptors() - post frames from ring->queue to the controller * * ring->lock is held. */ static void ring_write_descriptors(struct tb_ring *ring) { struct ring_frame *frame, *n; struct ring_desc *descriptor; list_for_each_entry_safe(frame, n, &ring->queue, list) { if (ring_full(ring)) break; list_move_tail(&frame->list, &ring->in_flight); descriptor = &ring->descriptors[ring->head]; descriptor->phys = frame->buffer_phy; descriptor->time = 0; descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT; if (ring->is_tx) { descriptor->length = frame->size; descriptor->eof = frame->eof; descriptor->sof = frame->sof; } ring->head = (ring->head + 1) % ring->size; |
943795219 thunderbolt: Use ... |
208 209 210 211 |
if (ring->is_tx) ring_iowrite_prod(ring, ring->head); else ring_iowrite_cons(ring, ring->head); |
166031536 thunderbolt: Add ... |
212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 |
} } /** * ring_work() - progress completed frames * * If the ring is shutting down then all frames are marked as canceled and * their callbacks are invoked. * * Otherwise we collect all completed frame from the ring buffer, write new * frame to the ring buffer and invoke the callbacks for the completed frames. */ static void ring_work(struct work_struct *work) { struct tb_ring *ring = container_of(work, typeof(*ring), work); struct ring_frame *frame; bool canceled = false; |
22b7de100 thunderbolt: Use ... |
229 |
unsigned long flags; |
166031536 thunderbolt: Add ... |
230 |
LIST_HEAD(done); |
22b7de100 thunderbolt: Use ... |
231 232 |
spin_lock_irqsave(&ring->lock, flags); |
166031536 thunderbolt: Add ... |
233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 |
if (!ring->running) { /* Move all frames to done and mark them as canceled. */ list_splice_tail_init(&ring->in_flight, &done); list_splice_tail_init(&ring->queue, &done); canceled = true; goto invoke_callback; } while (!ring_empty(ring)) { if (!(ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED)) break; frame = list_first_entry(&ring->in_flight, typeof(*frame), list); list_move_tail(&frame->list, &done); if (!ring->is_tx) { frame->size = ring->descriptors[ring->tail].length; frame->eof = ring->descriptors[ring->tail].eof; frame->sof = ring->descriptors[ring->tail].sof; frame->flags = ring->descriptors[ring->tail].flags; |
166031536 thunderbolt: Add ... |
254 255 256 257 258 259 |
} ring->tail = (ring->tail + 1) % ring->size; } ring_write_descriptors(ring); invoke_callback: |
22b7de100 thunderbolt: Use ... |
260 261 |
/* allow callbacks to schedule new work */ spin_unlock_irqrestore(&ring->lock, flags); |
166031536 thunderbolt: Add ... |
262 263 264 265 266 267 268 |
while (!list_empty(&done)) { frame = list_first_entry(&done, typeof(*frame), list); /* * The callback may reenqueue or delete frame. * Do not hold on to it. */ list_del_init(&frame->list); |
4ffe722ee thunderbolt: Add ... |
269 270 |
if (frame->callback) frame->callback(ring, frame, canceled); |
166031536 thunderbolt: Add ... |
271 272 |
} } |
3b3d9f4da thunderbolt: Expo... |
273 |
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame) |
166031536 thunderbolt: Add ... |
274 |
{ |
22b7de100 thunderbolt: Use ... |
275 |
unsigned long flags; |
166031536 thunderbolt: Add ... |
276 |
int ret = 0; |
22b7de100 thunderbolt: Use ... |
277 278 |
spin_lock_irqsave(&ring->lock, flags); |
166031536 thunderbolt: Add ... |
279 280 281 282 283 284 |
if (ring->running) { list_add_tail(&frame->list, &ring->queue); ring_write_descriptors(ring); } else { ret = -ESHUTDOWN; } |
22b7de100 thunderbolt: Use ... |
285 |
spin_unlock_irqrestore(&ring->lock, flags); |
166031536 thunderbolt: Add ... |
286 287 |
return ret; } |
3b3d9f4da thunderbolt: Expo... |
288 |
EXPORT_SYMBOL_GPL(__tb_ring_enqueue); |
166031536 thunderbolt: Add ... |
289 |
|
4ffe722ee thunderbolt: Add ... |
290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 |
/** * tb_ring_poll() - Poll one completed frame from the ring * @ring: Ring to poll * * This function can be called when @start_poll callback of the @ring * has been called. It will read one completed frame from the ring and * return it to the caller. Returns %NULL if there is no more completed * frames. */ struct ring_frame *tb_ring_poll(struct tb_ring *ring) { struct ring_frame *frame = NULL; unsigned long flags; spin_lock_irqsave(&ring->lock, flags); if (!ring->running) goto unlock; if (ring_empty(ring)) goto unlock; if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) { frame = list_first_entry(&ring->in_flight, typeof(*frame), list); list_del_init(&frame->list); if (!ring->is_tx) { frame->size = ring->descriptors[ring->tail].length; frame->eof = ring->descriptors[ring->tail].eof; frame->sof = ring->descriptors[ring->tail].sof; frame->flags = ring->descriptors[ring->tail].flags; } ring->tail = (ring->tail + 1) % ring->size; } unlock: spin_unlock_irqrestore(&ring->lock, flags); return frame; } EXPORT_SYMBOL_GPL(tb_ring_poll); static void __ring_interrupt_mask(struct tb_ring *ring, bool mask) { int idx = ring_interrupt_index(ring); int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4; int bit = idx % 32; u32 val; val = ioread32(ring->nhi->iobase + reg); if (mask) val &= ~BIT(bit); else val |= BIT(bit); iowrite32(val, ring->nhi->iobase + reg); } /* Both @nhi->lock and @ring->lock should be held */ static void __ring_interrupt(struct tb_ring *ring) { if (!ring->running) return; if (ring->start_poll) { |
74657181e thunderbolt: Mask... |
353 |
__ring_interrupt_mask(ring, true); |
4ffe722ee thunderbolt: Add ... |
354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 |
ring->start_poll(ring->poll_data); } else { schedule_work(&ring->work); } } /** * tb_ring_poll_complete() - Re-start interrupt for the ring * @ring: Ring to re-start the interrupt * * This will re-start (unmask) the ring interrupt once the user is done * with polling. */ void tb_ring_poll_complete(struct tb_ring *ring) { unsigned long flags; spin_lock_irqsave(&ring->nhi->lock, flags); spin_lock(&ring->lock); if (ring->start_poll) __ring_interrupt_mask(ring, false); spin_unlock(&ring->lock); spin_unlock_irqrestore(&ring->nhi->lock, flags); } EXPORT_SYMBOL_GPL(tb_ring_poll_complete); |
046bee1f9 thunderbolt: Add ... |
379 380 381 |
static irqreturn_t ring_msix(int irq, void *data) { struct tb_ring *ring = data; |
4ffe722ee thunderbolt: Add ... |
382 383 384 385 386 |
spin_lock(&ring->nhi->lock); spin_lock(&ring->lock); __ring_interrupt(ring); spin_unlock(&ring->lock); spin_unlock(&ring->nhi->lock); |
046bee1f9 thunderbolt: Add ... |
387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 |
return IRQ_HANDLED; } static int ring_request_msix(struct tb_ring *ring, bool no_suspend) { struct tb_nhi *nhi = ring->nhi; unsigned long irqflags; int ret; if (!nhi->pdev->msix_enabled) return 0; ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL); if (ret < 0) return ret; ring->vector = ret; ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector); if (ring->irq < 0) return ring->irq; irqflags = no_suspend ? IRQF_NO_SUSPEND : 0; return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); } static void ring_release_msix(struct tb_ring *ring) { if (ring->irq <= 0) return; free_irq(ring->irq, ring); ida_simple_remove(&ring->nhi->msix_ida, ring->vector); ring->vector = 0; ring->irq = 0; } |
9a01c7c26 thunderbolt: Allo... |
423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 |
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) { int ret = 0; spin_lock_irq(&nhi->lock); if (ring->hop < 0) { unsigned int i; /* * Automatically allocate HopID from the non-reserved * range 8 .. hop_count - 1. */ for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) { if (ring->is_tx) { if (!nhi->tx_rings[i]) { ring->hop = i; break; } } else { if (!nhi->rx_rings[i]) { ring->hop = i; break; } } } } if (ring->hop < 0 || ring->hop >= nhi->hop_count) { dev_warn(&nhi->pdev->dev, "invalid hop: %d ", ring->hop); ret = -EINVAL; goto err_unlock; } if (ring->is_tx && nhi->tx_rings[ring->hop]) { dev_warn(&nhi->pdev->dev, "TX hop %d already allocated ", ring->hop); ret = -EBUSY; goto err_unlock; } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) { dev_warn(&nhi->pdev->dev, "RX hop %d already allocated ", ring->hop); ret = -EBUSY; goto err_unlock; } if (ring->is_tx) nhi->tx_rings[ring->hop] = ring; else nhi->rx_rings[ring->hop] = ring; err_unlock: spin_unlock_irq(&nhi->lock); return ret; } |
3b3d9f4da thunderbolt: Expo... |
481 482 |
/*
 * Common allocator behind tb_ring_alloc_tx()/tb_ring_alloc_rx().
 * Allocates the ring structure, its DMA descriptor array, an MSI-X
 * vector (when available) and claims the HopID. Returns the ring or
 * %NULL on failure; on failure everything acquired so far is undone.
 */
static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;

	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		transmit ? "TX" : "RX", hop, size);

	/* Tx Ring 2 is reserved for E2E workaround */
	if (transmit && hop == RING_E2E_UNUSED_HOPID)
		return NULL;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;

	return ring;

err_release_msix:
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}
3b3d9f4da thunderbolt: Expo... |
541 542 543 544 545 546 547 548 549 |
/** * tb_ring_alloc_tx() - Allocate DMA ring for transmit * @nhi: Pointer to the NHI the ring is to be allocated * @hop: HopID (ring) to allocate * @size: Number of entries in the ring * @flags: Flags for the ring */ struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, unsigned int flags) |
166031536 thunderbolt: Add ... |
550 |
{ |
4ffe722ee thunderbolt: Add ... |
551 |
return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL); |
166031536 thunderbolt: Add ... |
552 |
} |
3b3d9f4da thunderbolt: Expo... |
553 |
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx); |
166031536 thunderbolt: Add ... |
554 |
|
3b3d9f4da thunderbolt: Expo... |
555 556 557 |
/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of callback
 *		in each Rx frame.
 * @poll_data: Optional data passed to @start_poll
 *
 * Returns the allocated ring or %NULL on failure.
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask,
				 u16 eof_mask, void (*start_poll)(void *),
				 void *poll_data)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
			     start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
166031536 thunderbolt: Add ... |
576 577 |
/** |
3b3d9f4da thunderbolt: Expo... |
578 |
* tb_ring_start() - enable a ring |
166031536 thunderbolt: Add ... |
579 |
* |
3b3d9f4da thunderbolt: Expo... |
580 |
* Must not be invoked in parallel with tb_ring_stop(). |
166031536 thunderbolt: Add ... |
581 |
*/ |
3b3d9f4da thunderbolt: Expo... |
582 |
void tb_ring_start(struct tb_ring *ring) |
166031536 thunderbolt: Add ... |
583 |
{ |
9fb1e654d thunderbolt: Add ... |
584 585 |
u16 frame_size; u32 flags; |
59120e061 thunderbolt: Use ... |
586 587 |
spin_lock_irq(&ring->nhi->lock); spin_lock(&ring->lock); |
bdccf295d thunderbolt: Do n... |
588 589 |
if (ring->nhi->going_away) goto err; |
166031536 thunderbolt: Add ... |
590 591 592 593 594 |
if (ring->running) { dev_WARN(&ring->nhi->pdev->dev, "ring already started "); goto err; } |
daa5140f7 thunderbolt: Make... |
595 596 597 |
dev_dbg(&ring->nhi->pdev->dev, "starting %s %d ", RING_TYPE(ring), ring->hop); |
166031536 thunderbolt: Add ... |
598 |
|
9fb1e654d thunderbolt: Add ... |
599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 |
if (ring->flags & RING_FLAG_FRAME) { /* Means 4096 */ frame_size = 0; flags = RING_FLAG_ENABLE; } else { frame_size = TB_FRAME_SIZE; flags = RING_FLAG_ENABLE | RING_FLAG_RAW; } if (ring->flags & RING_FLAG_E2E && !ring->is_tx) { u32 hop; /* * In order not to lose Rx packets we enable end-to-end * workaround which transfers Rx credits to an unused Tx * HopID. */ hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT; hop &= REG_RX_OPTIONS_E2E_HOP_MASK; flags |= hop | RING_FLAG_E2E_FLOW_CONTROL; } |
166031536 thunderbolt: Add ... |
620 621 622 623 |
ring_iowrite64desc(ring, ring->descriptors_dma, 0); if (ring->is_tx) { ring_iowrite32desc(ring, ring->size, 12); ring_iowrite32options(ring, 0, 4); /* time releated ? */ |
9fb1e654d thunderbolt: Add ... |
624 |
ring_iowrite32options(ring, flags, 0); |
166031536 thunderbolt: Add ... |
625 |
} else { |
9fb1e654d thunderbolt: Add ... |
626 627 628 629 630 |
u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask; ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12); ring_iowrite32options(ring, sof_eof_mask, 4); ring_iowrite32options(ring, flags, 0); |
166031536 thunderbolt: Add ... |
631 632 633 634 |
} ring_interrupt_active(ring, true); ring->running = true; err: |
59120e061 thunderbolt: Use ... |
635 636 |
spin_unlock(&ring->lock); spin_unlock_irq(&ring->nhi->lock); |
166031536 thunderbolt: Add ... |
637 |
} |
3b3d9f4da thunderbolt: Expo... |
638 |
EXPORT_SYMBOL_GPL(tb_ring_start); |
166031536 thunderbolt: Add ... |
639 640 |
/** |
3b3d9f4da thunderbolt: Expo... |
641 |
* tb_ring_stop() - shutdown a ring |
166031536 thunderbolt: Add ... |
642 643 644 |
* * Must not be invoked from a callback. * |
3b3d9f4da thunderbolt: Expo... |
645 646 647 |
* This method will disable the ring. Further calls to * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until ring_stop has been * called. |
166031536 thunderbolt: Add ... |
648 649 650 651 652 |
* * All enqueued frames will be canceled and their callbacks will be executed * with frame->canceled set to true (on the callback thread). This method * returns only after all callback invocations have finished. */ |
3b3d9f4da thunderbolt: Expo... |
653 |
void tb_ring_stop(struct tb_ring *ring) |
166031536 thunderbolt: Add ... |
654 |
{ |
59120e061 thunderbolt: Use ... |
655 656 |
spin_lock_irq(&ring->nhi->lock); spin_lock(&ring->lock); |
daa5140f7 thunderbolt: Make... |
657 658 659 |
dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d ", RING_TYPE(ring), ring->hop); |
bdccf295d thunderbolt: Do n... |
660 661 |
if (ring->nhi->going_away) goto err; |
166031536 thunderbolt: Add ... |
662 663 664 665 666 667 668 669 670 671 |
if (!ring->running) { dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped ", RING_TYPE(ring), ring->hop); goto err; } ring_interrupt_active(ring, false); ring_iowrite32options(ring, 0, 0); ring_iowrite64desc(ring, 0, 0); |
943795219 thunderbolt: Use ... |
672 |
ring_iowrite32desc(ring, 0, 8); |
166031536 thunderbolt: Add ... |
673 674 675 676 677 678 |
ring_iowrite32desc(ring, 0, 12); ring->head = 0; ring->tail = 0; ring->running = false; err: |
59120e061 thunderbolt: Use ... |
679 680 |
spin_unlock(&ring->lock); spin_unlock_irq(&ring->nhi->lock); |
166031536 thunderbolt: Add ... |
681 682 683 684 685 686 687 |
/* * schedule ring->work to invoke callbacks on all remaining frames. */ schedule_work(&ring->work); flush_work(&ring->work); } |
3b3d9f4da thunderbolt: Expo... |
688 |
EXPORT_SYMBOL_GPL(tb_ring_stop); |
166031536 thunderbolt: Add ... |
689 690 |
/* |
3b3d9f4da thunderbolt: Expo... |
691 |
* tb_ring_free() - free ring |
166031536 thunderbolt: Add ... |
692 693 694 695 696 697 698 699 |
* * When this method returns all invocations of ring->callback will have * finished. * * Ring must be stopped. * * Must NOT be called from ring_frame->callback! */ |
3b3d9f4da thunderbolt: Expo... |
700 |
void tb_ring_free(struct tb_ring *ring) |
166031536 thunderbolt: Add ... |
701 |
{ |
59120e061 thunderbolt: Use ... |
702 |
spin_lock_irq(&ring->nhi->lock); |
166031536 thunderbolt: Add ... |
703 704 705 706 707 708 709 710 711 712 713 714 715 716 |
/* * Dissociate the ring from the NHI. This also ensures that * nhi_interrupt_work cannot reschedule ring->work. */ if (ring->is_tx) ring->nhi->tx_rings[ring->hop] = NULL; else ring->nhi->rx_rings[ring->hop] = NULL; if (ring->running) { dev_WARN(&ring->nhi->pdev->dev, "%s %d still running ", RING_TYPE(ring), ring->hop); } |
4ffe722ee thunderbolt: Add ... |
717 |
spin_unlock_irq(&ring->nhi->lock); |
166031536 thunderbolt: Add ... |
718 |
|
046bee1f9 thunderbolt: Add ... |
719 |
ring_release_msix(ring); |
166031536 thunderbolt: Add ... |
720 721 722 |
dma_free_coherent(&ring->nhi->pdev->dev, ring->size * sizeof(*ring->descriptors), ring->descriptors, ring->descriptors_dma); |
f19b72c6e thunderbolt: Use ... |
723 |
ring->descriptors = NULL; |
166031536 thunderbolt: Add ... |
724 |
ring->descriptors_dma = 0; |
daa5140f7 thunderbolt: Make... |
725 726 727 |
dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d ", RING_TYPE(ring), ring->hop); |
166031536 thunderbolt: Add ... |
728 |
|
166031536 thunderbolt: Add ... |
729 |
/** |
046bee1f9 thunderbolt: Add ... |
730 731 732 |
* ring->work can no longer be scheduled (it is scheduled only * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it * to finish before freeing the ring. |
166031536 thunderbolt: Add ... |
733 734 |
*/ flush_work(&ring->work); |
166031536 thunderbolt: Add ... |
735 736 |
kfree(ring); } |
3b3d9f4da thunderbolt: Expo... |
737 |
EXPORT_SYMBOL_GPL(tb_ring_free); |
166031536 thunderbolt: Add ... |
738 |
|
cd446ee2e thunderbolt: Add ... |
739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 |
/** * nhi_mailbox_cmd() - Send a command through NHI mailbox * @nhi: Pointer to the NHI structure * @cmd: Command to send * @data: Data to be send with the command * * Sends mailbox command to the firmware running on NHI. Returns %0 in * case of success and negative errno in case of failure. */ int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data) { ktime_t timeout; u32 val; iowrite32(data, nhi->iobase + REG_INMAIL_DATA); val = ioread32(nhi->iobase + REG_INMAIL_CMD); val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR); val |= REG_INMAIL_OP_REQUEST | cmd; iowrite32(val, nhi->iobase + REG_INMAIL_CMD); timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT); do { val = ioread32(nhi->iobase + REG_INMAIL_CMD); if (!(val & REG_INMAIL_OP_REQUEST)) break; usleep_range(10, 20); } while (ktime_before(ktime_get(), timeout)); if (val & REG_INMAIL_OP_REQUEST) return -ETIMEDOUT; if (val & REG_INMAIL_ERROR) return -EIO; return 0; } /** * nhi_mailbox_mode() - Return current firmware operation mode * @nhi: Pointer to the NHI structure * * The function reads current firmware operation mode using NHI mailbox * registers and returns it to the caller. */ enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi) { u32 val; val = ioread32(nhi->iobase + REG_OUTMAIL_CMD); val &= REG_OUTMAIL_CMD_OPMODE_MASK; val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT; return (enum nhi_fw_mode)val; } |
166031536 thunderbolt: Add ... |
793 794 795 796 797 798 799 800 |
static void nhi_interrupt_work(struct work_struct *work) { struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work); int value = 0; /* Suppress uninitialized usage warning. */ int bit; int hop = -1; int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */ struct tb_ring *ring; |
59120e061 thunderbolt: Use ... |
801 |
spin_lock_irq(&nhi->lock); |
166031536 thunderbolt: Add ... |
802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 |
/* * Starting at REG_RING_NOTIFY_BASE there are three status bitfields * (TX, RX, RX overflow). We iterate over the bits and read a new * dwords as required. The registers are cleared on read. */ for (bit = 0; bit < 3 * nhi->hop_count; bit++) { if (bit % 32 == 0) value = ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * (bit / 32)); if (++hop == nhi->hop_count) { hop = 0; type++; } if ((value & (1 << (bit % 32))) == 0) continue; if (type == 2) { dev_warn(&nhi->pdev->dev, "RX overflow for ring %d ", hop); continue; } if (type == 0) ring = nhi->tx_rings[hop]; else ring = nhi->rx_rings[hop]; if (ring == NULL) { dev_warn(&nhi->pdev->dev, "got interrupt for inactive %s ring %d ", type ? "RX" : "TX", hop); continue; } |
4ffe722ee thunderbolt: Add ... |
838 839 840 841 |
spin_lock(&ring->lock); __ring_interrupt(ring); spin_unlock(&ring->lock); |
166031536 thunderbolt: Add ... |
842 |
} |
59120e061 thunderbolt: Use ... |
843 |
spin_unlock_irq(&nhi->lock); |
166031536 thunderbolt: Add ... |
844 845 846 847 848 849 850 851 |
} static irqreturn_t nhi_msi(int irq, void *data) { struct tb_nhi *nhi = data; schedule_work(&nhi->interrupt_work); return IRQ_HANDLED; } |
3cdb9446a thunderbolt: Add ... |
852 |
static int __nhi_suspend_noirq(struct device *dev, bool wakeup) |
23dd5bb49 thunderbolt: Add ... |
853 854 855 |
{ struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); |
3cdb9446a thunderbolt: Add ... |
856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 |
struct tb_nhi *nhi = tb->nhi; int ret; ret = tb_domain_suspend_noirq(tb); if (ret) return ret; if (nhi->ops && nhi->ops->suspend_noirq) { ret = nhi->ops->suspend_noirq(tb->nhi, wakeup); if (ret) return ret; } return 0; } static int nhi_suspend_noirq(struct device *dev) { return __nhi_suspend_noirq(dev, device_may_wakeup(dev)); } static bool nhi_wake_supported(struct pci_dev *pdev) { u8 val; /* * If power rails are sustainable for wakeup from S4 this * property is set by the BIOS. */ if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val)) return !!val; return true; } static int nhi_poweroff_noirq(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); bool wakeup; |
9d3cce0b6 thunderbolt: Intr... |
895 |
|
3cdb9446a thunderbolt: Add ... |
896 897 |
wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev); return __nhi_suspend_noirq(dev, wakeup); |
23dd5bb49 thunderbolt: Add ... |
898 |
} |
8c6bba10f thunderbolt: Conf... |
899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 |
/*
 * Program the interrupt throttling rate for every vector. The rate
 * register counts in 256 ns units; we program 128 us.
 */
static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	const u32 rate = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
	unsigned int vec;

	/*
	 * Write all possible vectors, not just the ones currently in
	 * use, so every vector shares the same throttling rate.
	 */
	for (vec = 0; vec < MSIX_MAX_VECS; vec++)
		iowrite32(rate,
			  nhi->iobase + REG_INT_THROTTLING_RATE + vec * 4);
}
23dd5bb49 thunderbolt: Add ... |
914 915 916 917 |
static int nhi_resume_noirq(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); |
3cdb9446a thunderbolt: Add ... |
918 919 |
struct tb_nhi *nhi = tb->nhi; int ret; |
9d3cce0b6 thunderbolt: Intr... |
920 |
|
bdccf295d thunderbolt: Do n... |
921 922 923 924 925 |
/* * Check that the device is still there. It may be that the user * unplugged last device which causes the host controller to go * away on PCs. */ |
3cdb9446a thunderbolt: Add ... |
926 927 928 929 930 931 932 933 |
if (!pci_device_is_present(pdev)) { nhi->going_away = true; } else { if (nhi->ops && nhi->ops->resume_noirq) { ret = nhi->ops->resume_noirq(nhi); if (ret) return ret; } |
8c6bba10f thunderbolt: Conf... |
934 |
nhi_enable_int_throttling(tb->nhi); |
3cdb9446a thunderbolt: Add ... |
935 |
} |
bdccf295d thunderbolt: Do n... |
936 |
|
9d3cce0b6 thunderbolt: Intr... |
937 |
return tb_domain_resume_noirq(tb); |
23dd5bb49 thunderbolt: Add ... |
938 |
} |
f67cf4911 thunderbolt: Add ... |
939 940 941 942 943 944 945 946 947 948 949 950 |
static int nhi_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); return tb_domain_suspend(tb); } static void nhi_complete(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); |
2d8ff0b58 thunderbolt: Add ... |
951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 |
/* * If we were runtime suspended when system suspend started, * schedule runtime resume now. It should bring the domain back * to functional state. */ if (pm_runtime_suspended(&pdev->dev)) pm_runtime_resume(&pdev->dev); else tb_domain_complete(tb); } static int nhi_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); |
3cdb9446a thunderbolt: Add ... |
966 967 968 969 970 971 |
struct tb_nhi *nhi = tb->nhi; int ret; ret = tb_domain_runtime_suspend(tb); if (ret) return ret; |
2d8ff0b58 thunderbolt: Add ... |
972 |
|
3cdb9446a thunderbolt: Add ... |
973 974 975 976 977 978 |
if (nhi->ops && nhi->ops->runtime_suspend) { ret = nhi->ops->runtime_suspend(tb->nhi); if (ret) return ret; } return 0; |
2d8ff0b58 thunderbolt: Add ... |
979 980 981 982 983 984 |
} static int nhi_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); |
3cdb9446a thunderbolt: Add ... |
985 986 987 988 989 990 991 992 |
struct tb_nhi *nhi = tb->nhi; int ret; if (nhi->ops && nhi->ops->runtime_resume) { ret = nhi->ops->runtime_resume(nhi); if (ret) return ret; } |
2d8ff0b58 thunderbolt: Add ... |
993 |
|
3cdb9446a thunderbolt: Add ... |
994 |
nhi_enable_int_throttling(nhi); |
2d8ff0b58 thunderbolt: Add ... |
995 |
return tb_domain_runtime_resume(tb); |
f67cf4911 thunderbolt: Add ... |
996 |
} |
166031536 thunderbolt: Add ... |
997 998 999 |
/*
 * nhi_shutdown() - Disable the NHI and release interrupt resources
 *
 * All rings are expected to be stopped by now; still-active rings are
 * flagged with dev_WARN. Interrupts are disabled before the irq is
 * released so no new work can be scheduled.
 */
static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;

	dev_dbg(&nhi->pdev->dev, "shutdown ");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active ", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active ", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	ida_destroy(&nhi->msix_ida);

	/* Give the hardware specific implementation a chance to clean up */
	if (nhi->ops && nhi->ops->shutdown)
		nhi->ops->shutdown(nhi);
}

/*
 * nhi_init_msi() - Set up MSI-X (preferred) or single-MSI interrupts
 *
 * Tries to allocate the full set of MSI-X vectors first so each ring
 * can have its own. On failure falls back to one shared MSI, in which
 * case the deferred interrupt work and the nhi_msi handler are used.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fallback to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting ");
			return res;
		}
	}

	return 0;
}
3cdb9446a thunderbolt: Add ... |
1069 1070 1071 1072 1073 1074 1075 1076 1077 |
/*
 * Report whether firmware marked the IMR as valid. When the BIOS does
 * not expose the "IMR_VALID" property, assume the image is fine.
 */
static bool nhi_imr_valid(struct pci_dev *pdev)
{
	u8 imr_valid;

	if (device_property_read_u8(&pdev->dev, "IMR_VALID", &imr_valid))
		return true;

	return imr_valid != 0;
}
166031536 thunderbolt: Add ... |
1078 1079 1080 |
/*
 * nhi_probe() - Bring up the NHI and start the Thunderbolt domain
 *
 * Maps BAR0, reads the hop count, allocates the ring bookkeeping,
 * sets up interrupts and DMA, determines the connection manager
 * (firmware ICM first, falling back to the software CM) and finally
 * adds the domain and enables runtime PM.
 *
 * Return: %0 on success, negative errno otherwise. Managed (devm/pcim)
 * resources are released automatically on the error paths.
 */
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	if (!nhi_imr_valid(pdev)) {
		dev_warn(&pdev->dev, "firmware image not valid, aborting ");
		return -ENODEV;
	}

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting ");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting ");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* Hardware specific ops, if any, come through the match table */
	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
	/* cannot fail - table is allocated in pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d ",
			 nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting ");
		return res;
	}

	spin_lock_init(&nhi->lock);

	/* Prefer 64-bit DMA, fall back to 32-bit if not supported */
	res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (res)
		res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (res) {
		dev_err(&pdev->dev, "failed to set DMA mask ");
		return res;
	}

	pci_set_master(pdev);

	if (nhi->ops && nhi->ops->init) {
		res = nhi->ops->init(nhi);
		if (res)
			return res;
	}

	/* Firmware connection manager first, then the software one */
	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);
	if (!tb) {
		dev_err(&nhi->pdev->dev,
			"failed to determine connection manager, aborting ");
		return -ENODEV;
	}

	dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt ");

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return res;
	}
	pci_set_drvdata(pdev, tb);

	pm_runtime_allow(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
}

/*
 * nhi_remove() - Tear down the domain and disable the NHI
 *
 * Runtime PM is brought back to active and forbidden first so the
 * device cannot runtime suspend while we remove the domain.
 */
static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}
23dd5bb49 thunderbolt: Add ... |
1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 |
/*
 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.thaw_noirq = nhi_resume_noirq,
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.freeze = nhi_suspend,
	.poweroff_noirq = nhi_poweroff_noirq,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
	/* Runtime PM goes through the domain helpers as well */
	.runtime_suspend = nhi_runtime_suspend,
	.runtime_resume = nhi_runtime_resume,
};
620863f71 thunderbolt: Stat... |
1207 |
static struct pci_device_id nhi_ids[] = { |
166031536 thunderbolt: Add ... |
1208 1209 |
/* * We have to specify class, the TB bridges use the same device and |
1d111406c PCI: Add Intel Th... |
1210 |
* vendor (sub)id on gen 1 and gen 2 controllers. |
166031536 thunderbolt: Add ... |
1211 1212 1213 |
*/ { .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
1d111406c PCI: Add Intel Th... |
1214 |
.vendor = PCI_VENDOR_ID_INTEL, |
19bf4d4f9 thunderbolt: Supp... |
1215 1216 1217 1218 1219 1220 |
.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, .subvendor = 0x2222, .subdevice = 0x1111, }, { .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, .vendor = PCI_VENDOR_ID_INTEL, |
1d111406c PCI: Add Intel Th... |
1221 |
.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, |
166031536 thunderbolt: Add ... |
1222 1223 1224 1225 |
.subvendor = 0x2222, .subdevice = 0x1111, }, { .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
1d111406c PCI: Add Intel Th... |
1226 |
.vendor = PCI_VENDOR_ID_INTEL, |
82a6a81c2 thunderbolt: Add ... |
1227 1228 1229 1230 1231 1232 |
.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, .vendor = PCI_VENDOR_ID_INTEL, |
1d111406c PCI: Add Intel Th... |
1233 |
.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI, |
a42fb351c thunderbolt: Allo... |
1234 |
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, |
166031536 thunderbolt: Add ... |
1235 |
}, |
5e2781bcb thunderbolt: Add ... |
1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 |
/* Thunderbolt 3 */ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) }, |
4bac471da thunderbolt: Add ... |
1246 1247 |
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) }, |
3cdb9446a thunderbolt: Add ... |
1248 1249 1250 1251 |
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0), .driver_data = (kernel_ulong_t)&icl_nhi_ops }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1), .driver_data = (kernel_ulong_t)&icl_nhi_ops }, |
5e2781bcb thunderbolt: Add ... |
1252 |
|
166031536 thunderbolt: Add ... |
1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 |
{ 0,} }; MODULE_DEVICE_TABLE(pci, nhi_ids); MODULE_LICENSE("GPL"); static struct pci_driver nhi_driver = { .name = "thunderbolt", .id_table = nhi_ids, .probe = nhi_probe, .remove = nhi_remove, |
23dd5bb49 thunderbolt: Add ... |
1264 |
.driver.pm = &nhi_pm_ops, |
166031536 thunderbolt: Add ... |
1265 1266 1267 1268 |
}; static int __init nhi_init(void) { |
9d3cce0b6 thunderbolt: Intr... |
1269 |
int ret; |
9d3cce0b6 thunderbolt: Intr... |
1270 1271 1272 1273 1274 1275 1276 |
ret = tb_domain_init(); if (ret) return ret; ret = pci_register_driver(&nhi_driver); if (ret) tb_domain_exit(); return ret; |
166031536 thunderbolt: Add ... |
1277 1278 1279 1280 1281 |
} static void __exit nhi_unload(void) { pci_unregister_driver(&nhi_driver); |
9d3cce0b6 thunderbolt: Intr... |
1282 |
tb_domain_exit(); |
166031536 thunderbolt: Add ... |
1283 |
} |
eafa717bc thunderbolt: Init... |
1284 |
rootfs_initcall(nhi_init); |
166031536 thunderbolt: Add ... |
1285 |
module_exit(nhi_unload); |