block/blk-mq-cpumap.c

/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
			      const int cpu)
{
	return cpu * nr_queues / nr_cpus;
}

static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_thread_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}

int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
{
	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return 1;

	cpumask_clear(cpus);
	nr_cpus = nr_uniq_cpus = 0;
	for_each_online_cpu(i) {
		nr_cpus++;
		first_sibling = get_first_sibling(i);
		if (!cpumask_test_cpu(first_sibling, cpus))
			nr_uniq_cpus++;
		cpumask_set_cpu(i, cpus);
	}

	queue = 0;
	for_each_possible_cpu(i) {
		if (!cpu_online(i)) {
			map[i] = 0;
			continue;
		}

		/*
		 * Easy case - we have equal or more hardware queues. Or
		 * there are no thread siblings to take into account. Do
		 * 1:1 if enough, or sequential mapping if less.
		 */
		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
			queue++;
			continue;
		}

		/*
		 * Less than nr_cpus queues, and we have some number of
		 * threads per core. Map sibling threads to the same
		 * queue.
		 */
		first_sibling = get_first_sibling(i);
		if (first_sibling == i) {
			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
						    queue);
			queue++;
		} else
			map[i] = map[first_sibling];
	}

	free_cpumask_var(cpus);
	return 0;
}
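
/*
 * Worked example (illustrative, not from the upstream file): on a box with
 * 8 online CPUs, 2 threads per core (so 4 unique cores) and 2 hardware
 * queues, nr_queues < nr_cpus and nr_cpus != nr_uniq_cpus, so the sibling
 * path runs. Each core's first sibling gets cpu_to_queue_index(4, 2, queue),
 * i.e. queue * 2 / 4: the first two cores map to hw queue 0, the last two
 * to hw queue 1, and every second thread inherits its first sibling's queue.
 */
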
unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) |
{
	unsigned int *map;

	/* If cpus are offline, map them to first hctx */
	map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
				set->numa_node);
	if (!map)
		return NULL;

	if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
		return map;

	kfree(map);
	return NULL;
}

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}
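
For readers who want to see how the cpu * nr_queues / nr_cpus distribution
behaves, here is a minimal userspace sketch. It mirrors only the easy
(no-sibling) path of blk_mq_update_queue_map(); the sizes and the main()
driver are illustrative assumptions, not part of the kernel API.

#include <stdio.h>

/* Same integer arithmetic as the kernel's cpu_to_queue_index() above. */
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
			      const int cpu)
{
	return cpu * nr_queues / nr_cpus;
}

int main(void)
{
	/* Assumed sizes: 8 online CPUs spread over 3 hardware queues. */
	const unsigned int nr_cpus = 8, nr_queues = 3;
	unsigned int cpu;

	/* The "1:1 if enough, or sequential mapping if less" case. */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu %u -> hw queue %d\n", cpu,
		       cpu_to_queue_index(nr_cpus, nr_queues, cpu));

	return 0;
}

With these sizes the formula assigns CPUs 0-2 to queue 0, CPUs 3-5 to
queue 1, and CPUs 6-7 to queue 2: as even a split as integer division
allows, with lower-numbered queues taking the remainder.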