kernel/dma/pool.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Copyright (C) 2012 ARM Ltd.
   * Copyright (C) 2020 Google LLC
   */
  #include <linux/cma.h>
  #include <linux/debugfs.h>
  #include <linux/dma-map-ops.h>
  #include <linux/dma-direct.h>
  #include <linux/init.h>
  #include <linux/genalloc.h>
  #include <linux/set_memory.h>
  #include <linux/slab.h>
  #include <linux/workqueue.h>

  static struct gen_pool *atomic_pool_dma __ro_after_init;
  static unsigned long pool_size_dma;
  static struct gen_pool *atomic_pool_dma32 __ro_after_init;
  static unsigned long pool_size_dma32;
  static struct gen_pool *atomic_pool_kernel __ro_after_init;
  static unsigned long pool_size_kernel;

  /* Size can be defined by the coherent_pool command line */
  static size_t atomic_pool_size;

  /* Dynamic background expansion when the atomic pool is near capacity */
  static struct work_struct atomic_pool_work;
  
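  /* Parse the "coherent_pool=" command line option, e.g. coherent_pool=256K */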
  static int __init early_coherent_pool(char *p)
  {
  	atomic_pool_size = memparse(p, &p);
  	return 0;
  }
  early_param("coherent_pool", early_coherent_pool);
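
  /* Expose the current size of each pool under /sys/kernel/debug/dma_pools */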
  static void __init dma_atomic_pool_debugfs_init(void)
  {
  	struct dentry *root;
  
  	root = debugfs_create_dir("dma_pools", NULL);
  	if (IS_ERR_OR_NULL(root))
  		return;
  
  	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
  	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
  	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
  }
  
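  /* Track each pool's total size, selected by the zone flags in @gfp */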
  static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
  {
  	if (gfp & __GFP_DMA)
  		pool_size_dma += size;
  	else if (gfp & __GFP_DMA32)
  		pool_size_dma32 += size;
  	else
  		pool_size_kernel += size;
  }
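
  /*
   * Check whether the default CMA area lies entirely within the zone
   * implied by @gfp, and may therefore be used to back that pool.
   */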
  static bool cma_in_zone(gfp_t gfp)
  {
  	unsigned long size;
  	phys_addr_t end;
  	struct cma *cma;
  
  	cma = dev_get_cma_area(NULL);
  	if (!cma)
  		return false;
  
  	size = cma_get_size(cma);
  	if (!size)
  		return false;
  
  	/* CMA can't cross zone boundaries, see cma_activate_area() */
  	end = cma_get_base(cma) + size - 1;
  	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
  		return end <= DMA_BIT_MASK(zone_dma_bits);
  	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
  		return end <= DMA_BIT_MASK(32);
  	return true;
  }
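
  /*
   * Add @pool_size bytes of backing memory to @pool, retrying with
   * progressively smaller orders until an allocation succeeds.
   */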
  static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
  			      gfp_t gfp)
  {
  	unsigned int order;
  	struct page *page = NULL;
  	void *addr;
  	int ret = -ENOMEM;
  
  	/* Cannot allocate larger than MAX_ORDER-1 */
  	order = min(get_order(pool_size), MAX_ORDER-1);
  
  	do {
  		pool_size = 1 << (PAGE_SHIFT + order);
  		if (cma_in_zone(gfp))
  			page = dma_alloc_from_contiguous(NULL, 1 << order,
  							 order, false);
  		if (!page)
  			page = alloc_pages(gfp, order);
  	} while (!page && order-- > 0);

  	if (!page)
  		goto out;
  	arch_dma_prep_coherent(page, pool_size);

  #ifdef CONFIG_DMA_DIRECT_REMAP
  	addr = dma_common_contiguous_remap(page, pool_size,
  					   pgprot_dmacoherent(PAGE_KERNEL),
  					   __builtin_return_address(0));
  	if (!addr)
  		goto free_page;
  #else
  	addr = page_to_virt(page);
  #endif
  	/*
  	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
  	 * shrink so no re-encryption occurs in dma_direct_free().
  	 */
  	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
  				   1 << order);
  	if (ret)
  		goto remove_mapping;
  	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
  				pool_size, NUMA_NO_NODE);
  	if (ret)
  		goto encrypt_mapping;
  	dma_atomic_pool_size_add(gfp, pool_size);
  	return 0;
  encrypt_mapping:
  	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
  				   1 << order);
  	if (WARN_ON_ONCE(ret)) {
  		/* Decrypt succeeded but encrypt failed, purposely leak */
  		goto out;
  	}
  remove_mapping:
  #ifdef CONFIG_DMA_DIRECT_REMAP
  	dma_common_free_remap(addr, pool_size);
  #endif
  free_page: __maybe_unused
  	__free_pages(page, order);
  out:
  	return ret;
  }
  
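  /* Refill a pool whose available space has dropped below atomic_pool_size */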
  static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
  {
  	if (pool && gen_pool_avail(pool) < atomic_pool_size)
  		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
  }
  
  static void atomic_pool_work_fn(struct work_struct *work)
  {
  	if (IS_ENABLED(CONFIG_ZONE_DMA))
  		atomic_pool_resize(atomic_pool_dma,
  				   GFP_KERNEL | GFP_DMA);
  	if (IS_ENABLED(CONFIG_ZONE_DMA32))
  		atomic_pool_resize(atomic_pool_dma32,
  				   GFP_KERNEL | GFP_DMA32);
  	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
  }
  
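  /* Create a gen_pool and seed it with an initial allocation from @gfp */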
  static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
  						      gfp_t gfp)
  {
  	struct gen_pool *pool;
  	int ret;
  
  	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
  	if (!pool)
  		return NULL;
  
  	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
  
  	ret = atomic_pool_expand(pool, pool_size, gfp);
  	if (ret) {
  		gen_pool_destroy(pool);
  		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation
  ",
  		       pool_size >> 10, &gfp);
  		return NULL;
  	}
  
  	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations
  ",
  		gen_pool_size(pool) >> 10, &gfp);
  	return pool;
  }
  
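  /*
   * Boot-time setup: size the pools from the command line or from the
   * amount of system memory, then create one pool per supported zone.
   */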
  static int __init dma_atomic_pool_init(void)
  {
  	int ret = 0;

  	/*
  	 * If coherent_pool was not used on the command line, default the pool
  	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
  	 */
  	if (!atomic_pool_size) {
  		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
  		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
  		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
  	}
  	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
  
  	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
  						    GFP_KERNEL);
  	if (!atomic_pool_kernel)
  		ret = -ENOMEM;
  	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
  		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
  						GFP_KERNEL | GFP_DMA);
  		if (!atomic_pool_dma)
  			ret = -ENOMEM;
  	}
  	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
  		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
  						GFP_KERNEL | GFP_DMA32);
  		if (!atomic_pool_dma32)
  			ret = -ENOMEM;
  	}
  
  	dma_atomic_pool_debugfs_init();
  	return ret;
  }
  postcore_initcall(dma_atomic_pool_init);
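
  /*
   * Pick the first pool to try for @gfp, or, given the previously tried
   * pool in @prev, the next lower-zone fallback; NULL when none remain.
   */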
  static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
  {
  	if (prev == NULL) {
  		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
  			return atomic_pool_dma32;
  		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
  			return atomic_pool_dma;
  		return atomic_pool_kernel;
  	}
  	if (prev == atomic_pool_kernel)
  		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
  	if (prev == atomic_pool_dma32)
  		return atomic_pool_dma;
  	return NULL;
  }

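  /*
   * Allocate from @pool, letting the optional @phys_addr_ok callback
   * reject memory the device cannot address, and kick the background
   * expansion worker when the pool runs low.
   */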
  static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
  		struct gen_pool *pool, void **cpu_addr,
  		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
  {
  	unsigned long addr;
  	phys_addr_t phys;

  	addr = gen_pool_alloc(pool, size);
  	if (!addr)
  		return NULL;

  	phys = gen_pool_virt_to_phys(pool, addr);
  	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
  		gen_pool_free(pool, addr, size);
  		return NULL;
  	}

  	if (gen_pool_avail(pool) < atomic_pool_size)
  		schedule_work(&atomic_pool_work);

  	*cpu_addr = (void *)addr;
  	memset(*cpu_addr, 0, size);
  	return pfn_to_page(__phys_to_pfn(phys));
  }
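
  /* Try each candidate pool in turn until one satisfies the allocation */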
  struct page *dma_alloc_from_pool(struct device *dev, size_t size,
  		void **cpu_addr, gfp_t gfp,
  		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
  {
  	struct gen_pool *pool = NULL;
  	struct page *page;

  	while ((pool = dma_guess_pool(pool, gfp))) {
  		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
  					     phys_addr_ok);
  		if (page)
  			return page;
  	}
  	WARN(1, "Failed to get suitable pool for %s
  ", dev_name(dev));
  	return NULL;
  }
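
  /*
   * Return @start to whichever atomic pool it was allocated from; false
   * if the address does not belong to any of the pools.
   */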
  bool dma_free_from_pool(struct device *dev, void *start, size_t size)
  {
  	struct gen_pool *pool = NULL;

  	while ((pool = dma_guess_pool(pool, 0))) {
  		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
  			continue;
  		gen_pool_free(pool, (unsigned long)start, size);
  		return true;
  	}
  
  	return false;
  }