drivers/gpu/drm/msm/msm_gem.c
  /*
   * Copyright (C) 2013 Red Hat
   * Author: Rob Clark <robdclark@gmail.com>
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 as published by
   * the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful, but WITHOUT
   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
   * this program.  If not, see <http://www.gnu.org/licenses/>.
   */
  
  #include <linux/spinlock.h>
  #include <linux/shmem_fs.h>
  #include <linux/dma-buf.h>
  
  #include "msm_drv.h"
  #include "msm_gem.h"
  #include "msm_gpu.h"
  #include "msm_mmu.h"

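/* physical address of a VRAM carveout buffer: the drm_mm node stores a
 * page offset into the carveout, relative to vram.paddr
 */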
  static dma_addr_t physaddr(struct drm_gem_object *obj)
  {
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	struct msm_drm_private *priv = obj->dev->dev_private;
  	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  			priv->vram.paddr;
  }
  
  /* allocate pages from VRAM carveout, used when no IOMMU: */
  static struct page **get_pages_vram(struct drm_gem_object *obj,
  		int npages)
  {
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	struct msm_drm_private *priv = obj->dev->dev_private;
  	dma_addr_t paddr;
  	struct page **p;
  	int ret, i;
  
  	p = drm_malloc_ab(npages, sizeof(struct page *));
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
  			npages, 0, DRM_MM_SEARCH_DEFAULT);
  	if (ret) {
  		drm_free_large(p);
  		return ERR_PTR(ret);
  	}
  
  	paddr = physaddr(obj);
  	for (i = 0; i < npages; i++) {
  		p[i] = phys_to_page(paddr);
  		paddr += PAGE_SIZE;
  	}
  
  	return p;
  }
  
  /* called with dev->struct_mutex held */
  static struct page **get_pages(struct drm_gem_object *obj)
  {
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  
  	if (!msm_obj->pages) {
  		struct drm_device *dev = obj->dev;
  		struct page **p;
  		int npages = obj->size >> PAGE_SHIFT;
  		if (iommu_present(&platform_bus_type))
  			p = drm_gem_get_pages(obj, 0);
  		else
  			p = get_pages_vram(obj, npages);
  		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
  			return p;
  		}
  
  		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
  		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
  			return ERR_CAST(msm_obj->sgt);
  		}
  
  		msm_obj->pages = p;
  
  		/* For non-cached buffers, ensure the new pages are clean
  		 * because display controller, GPU, etc. are not coherent:
  		 */
  		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
  			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
  					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  	}
  
  	return msm_obj->pages;
  }
  
  static void put_pages(struct drm_gem_object *obj)
  {
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  
  	if (msm_obj->pages) {
  		/* For non-cached buffers, ensure the new pages are clean
  		 * because display controller, GPU, etc. are not coherent:
  		 */
  		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
  			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
  					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  		sg_free_table(msm_obj->sgt);
  		kfree(msm_obj->sgt);
  		if (iommu_present(&platform_bus_type))
  			drm_gem_put_pages(obj, msm_obj->pages, true, false);
  		else {
  			drm_mm_remove_node(msm_obj->vram_node);
  			drm_free_large(msm_obj->pages);
  		}

  		msm_obj->pages = NULL;
  	}
  }
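
/* get the backing pages, taking struct_mutex around get_pages() */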
  struct page **msm_gem_get_pages(struct drm_gem_object *obj)
  {
  	struct drm_device *dev = obj->dev;
  	struct page **p;
  	mutex_lock(&dev->struct_mutex);
  	p = get_pages(obj);
  	mutex_unlock(&dev->struct_mutex);
  	return p;
  }
  
  void msm_gem_put_pages(struct drm_gem_object *obj)
  {
  	/* when we start tracking the pin count, then do something here */
  }
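
/* set up the vma according to the buffer's cache flags: WC, uncached,
 * or (for cached buffers) backed directly by the shmem file
 */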
  int msm_gem_mmap_obj(struct drm_gem_object *obj,
  		struct vm_area_struct *vma)
  {
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  
  	vma->vm_flags &= ~VM_PFNMAP;
  	vma->vm_flags |= VM_MIXEDMAP;
  
  	if (msm_obj->flags & MSM_BO_WC) {
  		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
  	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
  		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
  	} else {
  		/*
  		 * Shunt off cached objs to shmem file so they have their own
  		 * address_space (so unmap_mapping_range does what we want,
  		 * in particular in the case of mmap'd dmabufs)
  		 */
  		fput(vma->vm_file);
  		get_file(obj->filp);
  		vma->vm_pgoff = 0;
  		vma->vm_file  = obj->filp;
  
  		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
  	}
  
  	return 0;
  }
  
  int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
  {
  	int ret;
  
  	ret = drm_gem_mmap(filp, vma);
  	if (ret) {
  		DBG("mmap failed: %d", ret);
  		return ret;
  	}
  
  	return msm_gem_mmap_obj(vma->vm_private_data, vma);
  }
  
  int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
  	struct drm_gem_object *obj = vma->vm_private_data;
  	struct drm_device *dev = obj->dev;
  	struct page **pages;
  	unsigned long pfn;
  	pgoff_t pgoff;
  	int ret;
  
  	/* Make sure we don't parallel update on a fault, nor move or remove
  	 * something from beneath our feet
  	 */
  	ret = mutex_lock_interruptible(&dev->struct_mutex);
  	if (ret)
  		goto out;
  
  	/* make sure we have pages attached now */
  	pages = get_pages(obj);
  	if (IS_ERR(pages)) {
  		ret = PTR_ERR(pages);
  		goto out_unlock;
  	}
  
  	/* We don't use vmf->pgoff since that has the fake offset: */
  	pgoff = ((unsigned long)vmf->virtual_address -
  			vma->vm_start) >> PAGE_SHIFT;
  	pfn = page_to_pfn(pages[pgoff]);
  
  	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
  			pfn, pfn << PAGE_SHIFT);
  
  	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
  
  out_unlock:
  	mutex_unlock(&dev->struct_mutex);
  out:
  	switch (ret) {
  	case -EAGAIN:
  	case 0:
  	case -ERESTARTSYS:
  	case -EINTR:
  	case -EBUSY:
  		/*
  		 * EBUSY is ok: this just means that another thread
  		 * already did the job.
  		 */
  		return VM_FAULT_NOPAGE;
  	case -ENOMEM:
  		return VM_FAULT_OOM;
  	default:
  		return VM_FAULT_SIGBUS;
  	}
  }
  
  /** get mmap offset */
  static uint64_t mmap_offset(struct drm_gem_object *obj)
  {
  	struct drm_device *dev = obj->dev;
  	int ret;
  
  	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  
  	/* Make it mmapable */
  	ret = drm_gem_create_mmap_offset(obj);
  
  	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
  		return 0;
  	}
  
  	return drm_vma_node_offset_addr(&obj->vma_node);
  }
  
  uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
  {
  	uint64_t offset;
  	mutex_lock(&obj->dev->struct_mutex);
  	offset = mmap_offset(obj);
  	mutex_unlock(&obj->dev->struct_mutex);
  	return offset;
  }
  /* should be called under struct_mutex.. although it can be called
   * from atomic context without struct_mutex to acquire an extra
   * iova ref if you know one is already held.
   *
   * That means when I do eventually need to add support for unpinning
   * the refcnt counter needs to be atomic_t.
   */
  int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
  		uint32_t *iova)
  {
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	int ret = 0;
  
  	if (!msm_obj->domain[id].iova) {
  		struct msm_drm_private *priv = obj->dev->dev_private;
  		struct msm_mmu *mmu = priv->mmus[id];
  		struct page **pages = get_pages(obj);
  		if (IS_ERR(pages))
  			return PTR_ERR(pages);
  
  		if (iommu_present(&platform_bus_type)) {
  			uint32_t offset = (uint32_t)mmap_offset(obj);
  			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
  					obj->size, IOMMU_READ | IOMMU_WRITE);
  			msm_obj->domain[id].iova = offset;
  		} else {
  			msm_obj->domain[id].iova = physaddr(obj);
  		}
  	}
  
  	if (!ret)
  		*iova = msm_obj->domain[id].iova;
  
  	return ret;
  }
  
  int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
  {
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	int ret;
  
  	/* this is safe right now because we don't unmap until the
  	 * bo is deleted:
  	 */
  	if (msm_obj->domain[id].iova) {
  		*iova = msm_obj->domain[id].iova;
  		return 0;
  	}
  	mutex_lock(&obj->dev->struct_mutex);
  	ret = msm_gem_get_iova_locked(obj, id, iova);
  	mutex_unlock(&obj->dev->struct_mutex);
  	return ret;
  }
  
  void msm_gem_put_iova(struct drm_gem_object *obj, int id)
  {
  	// XXX TODO ..
  	// NOTE: probably don't need a _locked() version.. we wouldn't
  	// normally unmap here, but instead just mark that it could be
  	// unmapped (if the iova refcnt drops to zero), but then later
  	// if another _get_iova_locked() fails we can start unmapping
  	// things that are no longer needed..
  }
  
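/* "dumb" scanout buffers: compute pitch/size from width/bpp and create
 * a write-combined, scanout-capable buffer with a userspace handle
 */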
  int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
  		struct drm_mode_create_dumb *args)
  {
  	args->pitch = align_pitch(args->width, args->bpp);
  	args->size  = PAGE_ALIGN(args->pitch * args->height);
  	return msm_gem_new_handle(dev, file, args->size,
  			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
  }
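
/* look up the fake mmap offset for a dumb buffer handle */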
  int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
  		uint32_t handle, uint64_t *offset)
  {
  	struct drm_gem_object *obj;
  	int ret = 0;
  
  	/* GEM does all our handle to object mapping */
  	obj = drm_gem_object_lookup(dev, file, handle);
  	if (obj == NULL) {
  		ret = -ENOENT;
  		goto fail;
  	}
  
  	*offset = msm_gem_mmap_offset(obj);
  
  	drm_gem_object_unreference_unlocked(obj);
  
  fail:
  	return ret;
  }
  
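/* vmap the buffer (write-combined) into the kernel, caching the vaddr
 * for later calls; caller must hold struct_mutex
 */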
  void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
  {
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
  	if (!msm_obj->vaddr) {
  		struct page **pages = get_pages(obj);
  		if (IS_ERR(pages))
  			return ERR_CAST(pages);
  		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
  				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
  	}
  	return msm_obj->vaddr;
  }
  
  void *msm_gem_vaddr(struct drm_gem_object *obj)
  {
  	void *ret;
  	mutex_lock(&obj->dev->struct_mutex);
  	ret = msm_gem_vaddr_locked(obj);
  	mutex_unlock(&obj->dev->struct_mutex);
  	return ret;
  }
  /* setup callback for when bo is no longer busy..
   * TODO probably want to differentiate read vs write..
   */
  int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
  		struct msm_fence_cb *cb)
  {
  	struct drm_device *dev = obj->dev;
  	struct msm_drm_private *priv = dev->dev_private;
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	int ret = 0;
  
  	mutex_lock(&dev->struct_mutex);
  	if (!list_empty(&cb->work.entry)) {
  		ret = -EINVAL;
  	} else if (is_active(msm_obj)) {
  		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
  		list_add_tail(&cb->work.entry, &priv->fence_cbs);
  	} else {
  		queue_work(priv->wq, &cb->work);
  	}
  	mutex_unlock(&dev->struct_mutex);
  
  	return ret;
  }
  
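/* mark the bo busy on a gpu: record the fence it is busy until
 * (tracked separately for read vs write) and move it to the gpu's
 * active list
 */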
  void msm_gem_move_to_active(struct drm_gem_object *obj,
  		struct msm_gpu *gpu, bool write, uint32_t fence)
  {
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	msm_obj->gpu = gpu;
  	if (write)
  		msm_obj->write_fence = fence;
  	else
  		msm_obj->read_fence = fence;
  	list_del_init(&msm_obj->mm_list);
  	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
  }
  
  void msm_gem_move_to_inactive(struct drm_gem_object *obj)
  {
  	struct drm_device *dev = obj->dev;
  	struct msm_drm_private *priv = dev->dev_private;
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  
  	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  
  	msm_obj->gpu = NULL;
  	msm_obj->read_fence = 0;
  	msm_obj->write_fence = 0;
  	list_del_init(&msm_obj->mm_list);
  	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
  }
  
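/* prepare for CPU access: wait for the fence covering the requested
 * access (reads wait on writers, writes also wait on readers);
 * MSM_PREP_NOSYNC drops the timeout
 */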
  int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
  		struct timespec *timeout)
  {
  	struct drm_device *dev = obj->dev;
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	int ret = 0;
  	if (is_active(msm_obj)) {
  		uint32_t fence = 0;

  		if (op & MSM_PREP_READ)
  			fence = msm_obj->write_fence;
  		if (op & MSM_PREP_WRITE)
  			fence = max(fence, msm_obj->read_fence);
  		if (op & MSM_PREP_NOSYNC)
  			timeout = NULL;
  		ret = msm_wait_fence_interruptable(dev, fence, timeout);
  	}
  
  	/* TODO cache maintenance */

  	return ret;
  }

  int msm_gem_cpu_fini(struct drm_gem_object *obj)
  {
  	/* TODO cache maintenance */
  	return 0;
  }
  
  #ifdef CONFIG_DEBUG_FS
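/* debugfs: dump flags, active state, read/write fences, GEM name,
 * refcount, mmap offset, kernel vaddr and size for one object
 */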
  void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
  {
  	struct drm_device *dev = obj->dev;
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	uint64_t off = drm_vma_node_start(&obj->vma_node);
  
  	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
  			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
  			msm_obj->read_fence, msm_obj->write_fence,
  			obj->name, obj->refcount.refcount.counter,
  			off, msm_obj->vaddr, obj->size);
  }
  
  void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
  {
  	struct msm_gem_object *msm_obj;
  	int count = 0;
  	size_t size = 0;
  
  	list_for_each_entry(msm_obj, list, mm_list) {
  		struct drm_gem_object *obj = &msm_obj->base;
  		seq_printf(m, "   ");
  		msm_gem_describe(obj, m);
  		count++;
  		size += obj->size;
  	}
  
	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
  }
  #endif
  
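/* called when the last reference is dropped: unmap from any MMU that
 * holds an iova, then release the pages (or, for imported dmabufs,
 * just the page array) and free the object
 */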
  void msm_gem_free_object(struct drm_gem_object *obj)
  {
  	struct drm_device *dev = obj->dev;
  	struct msm_drm_private *priv = obj->dev->dev_private;
  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  	int id;
  
  	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  	/* object should not be on active list: */
  	WARN_ON(is_active(msm_obj));
  	list_del(&msm_obj->mm_list);
  
  	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
  		struct msm_mmu *mmu = priv->mmus[id];
  		if (mmu && msm_obj->domain[id].iova) {
  			uint32_t offset = (uint32_t)mmap_offset(obj);
  			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
  		}
  	}
  
  	drm_gem_free_mmap_offset(obj);
  	if (obj->import_attach) {
  		if (msm_obj->vaddr)
  			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
  
  		/* Don't drop the pages for imported dmabuf, as they are not
  		 * ours, just free the array we allocated:
  		 */
  		if (msm_obj->pages)
  			drm_free_large(msm_obj->pages);

  	} else {
  		if (msm_obj->vaddr)
  			vunmap(msm_obj->vaddr);
  		put_pages(obj);
  	}

  	if (msm_obj->resv == &msm_obj->_resv)
  		reservation_object_fini(msm_obj->resv);
  	drm_gem_object_release(obj);
  
  	kfree(msm_obj);
  }
  
  /* convenience method to construct a GEM buffer object, and userspace handle */
  int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
  		uint32_t size, uint32_t flags, uint32_t *handle)
  {
  	struct drm_gem_object *obj;
  	int ret;
  
  	ret = mutex_lock_interruptible(&dev->struct_mutex);
  	if (ret)
  		return ret;
  
  	obj = msm_gem_new(dev, size, flags);
  
  	mutex_unlock(&dev->struct_mutex);
  
  	if (IS_ERR(obj))
  		return PTR_ERR(obj);
  
  	ret = drm_gem_handle_create(file, obj, handle);
  
  	/* drop reference from allocate - handle holds it now */
  	drm_gem_object_unreference_unlocked(obj);
  
  	return ret;
  }
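
/* common allocation path for msm_gem_new() and msm_gem_import():
 * validate cache flags and set up msm_gem_object bookkeeping (including
 * the vram_node used when no IOMMU is present)
 */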
  static int msm_gem_new_impl(struct drm_device *dev,
  		uint32_t size, uint32_t flags,
  		struct drm_gem_object **obj)
  {
  	struct msm_drm_private *priv = dev->dev_private;
  	struct msm_gem_object *msm_obj;
  	unsigned sz;
  
  	switch (flags & MSM_BO_CACHE_MASK) {
  	case MSM_BO_UNCACHED:
  	case MSM_BO_CACHED:
  	case MSM_BO_WC:
  		break;
  	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
  				(flags & MSM_BO_CACHE_MASK));
  		return -EINVAL;
  	}
  	sz = sizeof(*msm_obj);
  	if (!iommu_present(&platform_bus_type))
  		sz += sizeof(struct drm_mm_node);
  
  	msm_obj = kzalloc(sz, GFP_KERNEL);
  	if (!msm_obj)
  		return -ENOMEM;

  	if (!iommu_present(&platform_bus_type))
  		msm_obj->vram_node = (void *)&msm_obj[1];
  	msm_obj->flags = flags;
  	msm_obj->resv = &msm_obj->_resv;
  	reservation_object_init(msm_obj->resv);

  	INIT_LIST_HEAD(&msm_obj->submit_entry);
  	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
  	*obj = &msm_obj->base;
  
  	return 0;
  }
  
  struct drm_gem_object *msm_gem_new(struct drm_device *dev,
  		uint32_t size, uint32_t flags)
  {
  	struct drm_gem_object *obj = NULL;
  	int ret;
  
  	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  
  	size = PAGE_ALIGN(size);
  
  	ret = msm_gem_new_impl(dev, size, flags, &obj);
  	if (ret)
  		goto fail;
  	if (iommu_present(&platform_bus_type)) {
  		ret = drm_gem_object_init(dev, obj, size);
  		if (ret)
  			goto fail;
  	} else {
  		drm_gem_private_object_init(dev, obj, size);
  	}
  
  	return obj;
  
  fail:
  	if (obj)
  		drm_gem_object_unreference(obj);
  
  	return ERR_PTR(ret);
  }
  
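/* wrap an imported (prime) sg_table in a GEM object; only supported
 * when an IOMMU is present
 */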
  struct drm_gem_object *msm_gem_import(struct drm_device *dev,
  		uint32_t size, struct sg_table *sgt)
  {
  	struct msm_gem_object *msm_obj;
  	struct drm_gem_object *obj;
  	int ret, npages;
  	/* if we don't have IOMMU, don't bother pretending we can import: */
  	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
  		return ERR_PTR(-EINVAL);
  	}
  	size = PAGE_ALIGN(size);
  
  	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
  	if (ret)
  		goto fail;
  
  	drm_gem_private_object_init(dev, obj, size);
  
  	npages = size / PAGE_SIZE;
  
  	msm_obj = to_msm_bo(obj);
  	msm_obj->sgt = sgt;
  	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
  	if (!msm_obj->pages) {
  		ret = -ENOMEM;
  		goto fail;
  	}
  
  	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
  	if (ret)
  		goto fail;
  	return obj;
  
  fail:
  	if (obj)
  		drm_gem_object_unreference_unlocked(obj);
  
  	return ERR_PTR(ret);
  }