block/blk-mq-cpumap.c

  /*
   * CPU <-> hardware queue mapping helpers
   *
   * Copyright (C) 2013-2014 Jens Axboe
   */
  #include <linux/kernel.h>
  #include <linux/threads.h>
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/smp.h>
  #include <linux/cpu.h>
  
  #include <linux/blk-mq.h>
  #include "blk.h"
  #include "blk-mq.h"
  static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
  			      const int cpu)
  {
  	return cpu * nr_queues / nr_cpus;
  }
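
/*
 * Worked example (illustrative numbers, not from the source): with
 * nr_cpus = 8 and nr_queues = 3, cpu * 3 / 8 maps CPUs 0-2 to queue 0,
 * CPUs 3-5 to queue 1, and CPUs 6-7 to queue 2. The integer division
 * keeps group sizes within one of each other, and every queue receives
 * at least one CPU whenever nr_queues <= nr_cpus.
 */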
  
  static int get_first_sibling(unsigned int cpu)
  {
  	unsigned int ret;
  	ret = cpumask_first(topology_sibling_cpumask(cpu));
  	if (ret < nr_cpu_ids)
  		return ret;
  
  	return cpu;
  }
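
/*
 * Example (hypothetical topology): on a 2-way SMT machine where CPUs 0
 * and 4 are hyperthreads of the same core, get_first_sibling(4) returns
 * 0, so both threads share one entry point into the sibling-aware
 * mapping below. The nr_cpu_ids check handles an empty sibling mask
 * (cpumask_first() then returns >= nr_cpu_ids), e.g. before the
 * topology is fully set up, in which case the CPU maps to itself.
 */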
  int blk_mq_map_queues(struct blk_mq_tag_set *set)
  {
  	unsigned int *map = set->mq_map;
  	unsigned int nr_queues = set->nr_hw_queues;
  	const struct cpumask *online_mask = cpu_online_mask;
  	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
  	cpumask_var_t cpus;
  
  	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
  		return -ENOMEM;
  
  	cpumask_clear(cpus);
  	nr_cpus = nr_uniq_cpus = 0;
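	/*
	 * First pass: count the online CPUs, and the distinct cores
	 * (counting all SMT siblings of a core only once, via the cpus
	 * mask).
	 */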
  	for_each_cpu(i, online_mask) {
  		nr_cpus++;
  		first_sibling = get_first_sibling(i);
  		if (!cpumask_test_cpu(first_sibling, cpus))
  			nr_uniq_cpus++;
  		cpumask_set_cpu(i, cpus);
  	}
  
  	queue = 0;
  	for_each_possible_cpu(i) {
  		if (!cpumask_test_cpu(i, online_mask)) {
  			map[i] = 0;
  			continue;
  		}
  
		/*
		 * Easy case - we have at least as many hardware queues as
		 * CPUs, or there are no thread siblings to take into
		 * account: do a 1:1 mapping if there are enough queues, or
		 * a sequential spread if there are fewer.
		 */
  		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
  			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
  			queue++;
  			continue;
  		}
  
		/*
		 * Fewer than nr_cpus queues, and some number of threads per
		 * core. Map all sibling threads of a core to the same
		 * queue.
		 */
		first_sibling = get_first_sibling(i);
		if (first_sibling == i) {
			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
						    queue);
			queue++;
		} else {
			map[i] = map[first_sibling];
		}
  	}
  	free_cpumask_var(cpus);
  	return 0;
  }
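
/*
 * For context, a minimal sketch of where this default mapping is used
 * (simplified from the tag-set setup path introduced by "blk-mq: allow
 * the driver to pass in a queue mapping"; the exact call site may
 * differ between kernel versions): the core prefers a driver-supplied
 * ->map_queues callback and only falls back to blk_mq_map_queues().
 */
#if 0	/* illustrative sketch, not compiled */
	if (set->ops->map_queues)
		ret = set->ops->map_queues(set);
	else
		ret = blk_mq_map_queues(set);	/* default spread above */
#endif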
  /*
   * We have no quick way of doing reverse lookups. This is only used at
   * queue init time, so runtime isn't important.
   */
  int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
  {
  	int i;
  
  	for_each_possible_cpu(i) {
  		if (index == mq_map[i])
  			return local_memory_node(cpu_to_node(i));
  	}
  
  	return NUMA_NO_NODE;
  }
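
/*
 * A minimal usage sketch (hypothetical caller shape; the real call
 * sites vary across kernel versions): per-hw-queue structures are
 * allocated on the node the queue's CPUs map to. kzalloc_node()
 * treats NUMA_NO_NODE as "no preference", so the fallback return
 * value above can be passed straight through.
 */
#if 0	/* illustrative sketch, not compiled */
	node = blk_mq_hw_queue_to_node(q->mq_map, hctx_idx);
	hctx = kzalloc_node(sizeof(*hctx), GFP_KERNEL, node);
#endif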