Blame view
block/blk-mq-rdma.c
1.3 KB
8c16567d8 block: switch all... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
24c5dc661 block: Add rdma a... |
2 3 |
/* * Copyright (c) 2017 Sagi Grimberg. |
24c5dc661 block: Add rdma a... |
4 5 6 7 8 9 10 |
*/ #include <linux/blk-mq.h> #include <linux/blk-mq-rdma.h> #include <rdma/ib_verbs.h> /** * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device |
0542cd57d block: Fix blk_mq... |
11 12 |
* @map: CPU to hardware queue map. * @dev: rdma device to provide a mapping for. |
24c5dc661 block: Add rdma a... |
13 14 15 16 17 18 19 20 21 22 23 |
* @first_vec: first interrupt vector to use for queues (usually 0) * * This function assumes the rdma device @dev has at least as many available * interrupt vectors as @set has queues. It will then query its affinity mask * and build a queue mapping that maps a queue to the CPUs that have irq affinity * for the corresponding vector. * * In case either the driver passed a @dev with fewer vectors than * @set->nr_hw_queues, or @dev does not provide an affinity mask for a * vector, we fall back to the naive mapping. */ |
e42b3867d blk-mq-rdma: pass... |
24 |
int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map, |
24c5dc661 block: Add rdma a... |
25 26 27 28 |
struct ib_device *dev, int first_vec) { const struct cpumask *mask; unsigned int queue, cpu; |
e42b3867d blk-mq-rdma: pass... |
29 |
for (queue = 0; queue < map->nr_queues; queue++) { |
24c5dc661 block: Add rdma a... |
30 31 32 33 34 |
mask = ib_get_vector_affinity(dev, first_vec + queue); if (!mask) goto fallback; for_each_cpu(cpu, mask) |
e42b3867d blk-mq-rdma: pass... |
35 |
map->mq_map[cpu] = map->queue_offset + queue; |
24c5dc661 block: Add rdma a... |
36 37 38 39 40 |
} return 0; fallback: |
e42b3867d blk-mq-rdma: pass... |
41 |
return blk_mq_map_queues(map); |
24c5dc661 block: Add rdma a... |
42 43 |
} EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues); |