block/blk-mq-cpumap.c
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
{
	return cpu % nr_queues;
}

static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}
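For readers without the kernel headers at hand: cpumask_first() returns a value >= nr_cpu_ids when the mask is empty, which is what the `ret < nr_cpu_ids` check catches, falling back to the CPU itself. A minimal user-space sketch of that fallback logic, with a plain bitmask standing in for struct cpumask and the topology API (names here are hypothetical, not kernel symbols):

#include <stdio.h>

#define NR_CPU_IDS 8

/* Model of cpumask_first(): index of the lowest set bit,
 * or NR_CPU_IDS if the mask is empty. */
static unsigned int mask_first(unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < NR_CPU_IDS; i++)
		if (mask & (1u << i))
			return i;
	return NR_CPU_IDS;
}

/* Same shape as get_first_sibling(): use the first sibling if the
 * mask yields a valid CPU, otherwise fall back to the CPU itself. */
static unsigned int model_first_sibling(unsigned int cpu, unsigned int sibling_mask)
{
	unsigned int ret = mask_first(sibling_mask);

	return ret < NR_CPU_IDS ? ret : cpu;
}

int main(void)
{
	printf("%u\n", model_first_sibling(5, 0x22)); /* siblings {1,5} -> 1 */
	printf("%u\n", model_first_sibling(5, 0x00)); /* empty mask -> 5 */
	return 0;
}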
int blk_mq_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int *map = set->mq_map;
	unsigned int nr_queues = set->nr_hw_queues;
	unsigned int cpu, first_sibling;

	for_each_possible_cpu(cpu) {
		/*
		 * First do sequential mapping between CPUs and queues.
		 * In case we still have CPUs to map, and we have some number
		 * of threads per core, then map sibling threads to the same
		 * queue for performance optimizations.
		 */
		if (cpu < nr_queues) {
			map[cpu] = cpu_to_queue_index(nr_queues, cpu);
		} else {
			first_sibling = get_first_sibling(cpu);
			if (first_sibling == cpu)
				map[cpu] = cpu_to_queue_index(nr_queues, cpu);
			else
				map[cpu] = map[first_sibling];
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues); |
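To see what this mapping produces, here is a hypothetical user-space model of the loop above (not kernel code): 8 "possible CPUs", 4 hardware queues, and hyperthread siblings paired as (0,4), (1,5), (2,6), (3,7). The first_sibling_of[] table stands in for topology_sibling_cpumask()/get_first_sibling():

#include <stdio.h>

#define NR_CPUS   8
#define NR_QUEUES 4

/* Assumed topology: each CPU's lowest-numbered hyperthread sibling. */
static const unsigned int first_sibling_of[NR_CPUS] = { 0, 1, 2, 3, 0, 1, 2, 3 };

int main(void)
{
	unsigned int map[NR_CPUS];
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu < NR_QUEUES)
			map[cpu] = cpu % NR_QUEUES;             /* sequential pass */
		else if (first_sibling_of[cpu] == cpu)
			map[cpu] = cpu % NR_QUEUES;             /* no earlier sibling */
		else
			map[cpu] = map[first_sibling_of[cpu]];  /* share sibling's queue */
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> hw queue %u\n", cpu, map[cpu]);
	return 0;
}

With this topology the sketch prints CPUs 0-3 on queues 0-3 and CPUs 4-7 folded onto the same queues as their siblings, so each core's two threads always share one hardware queue.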
|
/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == mq_map[i])
			return local_memory_node(cpu_to_node(i));
	}

	return NUMA_NO_NODE;
}
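A user-space sketch of the same linear scan, under assumed topology: the node_of_cpu[] table stands in for cpu_to_node(), and the local_memory_node() step (which redirects a memoryless node to a nearby node that has memory) is omitted. The function returns the node of the first CPU mapped to the given queue, or NUMA_NO_NODE if no CPU maps there:

#include <stdio.h>

#define NR_CPUS      8
#define NUMA_NO_NODE (-1)

/* Assumed topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static const int node_of_cpu[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static int hw_queue_to_node(const unsigned int *mq_map, unsigned int index)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (mq_map[i] == index)
			return node_of_cpu[i];
	}
	return NUMA_NO_NODE;
}

int main(void)
{
	/* Map produced by the earlier sketch: siblings fold onto queues 0-3. */
	const unsigned int mq_map[NR_CPUS] = { 0, 1, 2, 3, 0, 1, 2, 3 };

	printf("queue 2 -> node %d\n", hw_queue_to_node(mq_map, 2));
	printf("queue 5 -> %d (no CPU maps there)\n", hw_queue_to_node(mq_map, 5));
	return 0;
}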