Commit 05b849111c07454fd2f5b074ca7eb56ccdb8828c
1 parent
959f58544b
Exists in
master
and in
16 other branches
drm/msm: prime support
Signed-off-by: Rob Clark <robdclark@gmail.com>
Acked-by: David Brown <davidb@codeaurora.org>
Showing 5 changed files with 179 additions and 26 deletions Side-by-side Diff
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/msm_drv.c
... | ... | @@ -680,7 +680,10 @@ |
680 | 680 | }; |
681 | 681 | |
682 | 682 | static struct drm_driver msm_driver = { |
683 | - .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, | |
683 | + .driver_features = DRIVER_HAVE_IRQ | | |
684 | + DRIVER_GEM | | |
685 | + DRIVER_PRIME | | |
686 | + DRIVER_MODESET, | |
684 | 687 | .load = msm_load, |
685 | 688 | .unload = msm_unload, |
686 | 689 | .open = msm_open, |
... | ... | @@ -698,6 +701,16 @@ |
698 | 701 | .dumb_create = msm_gem_dumb_create, |
699 | 702 | .dumb_map_offset = msm_gem_dumb_map_offset, |
700 | 703 | .dumb_destroy = drm_gem_dumb_destroy, |
704 | + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | |
705 | + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | |
706 | + .gem_prime_export = drm_gem_prime_export, | |
707 | + .gem_prime_import = drm_gem_prime_import, | |
708 | + .gem_prime_pin = msm_gem_prime_pin, | |
709 | + .gem_prime_unpin = msm_gem_prime_unpin, | |
710 | + .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, | |
711 | + .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, | |
712 | + .gem_prime_vmap = msm_gem_prime_vmap, | |
713 | + .gem_prime_vunmap = msm_gem_prime_vunmap, | |
701 | 714 | #ifdef CONFIG_DEBUG_FS |
702 | 715 | .debugfs_init = msm_debugfs_init, |
703 | 716 | .debugfs_cleanup = msm_debugfs_cleanup, |
drivers/gpu/drm/msm/msm_drv.h
... | ... | @@ -141,13 +141,20 @@ |
141 | 141 | int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, |
142 | 142 | uint32_t *iova); |
143 | 143 | int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); |
144 | +struct page **msm_gem_get_pages(struct drm_gem_object *obj); | |
145 | +void msm_gem_put_pages(struct drm_gem_object *obj); | |
144 | 146 | void msm_gem_put_iova(struct drm_gem_object *obj, int id); |
145 | 147 | int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, |
146 | 148 | struct drm_mode_create_dumb *args); |
147 | -int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, | |
148 | - uint32_t handle); | |
149 | 149 | int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, |
150 | 150 | uint32_t handle, uint64_t *offset); |
151 | +struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); | |
152 | +void *msm_gem_prime_vmap(struct drm_gem_object *obj); | |
153 | +void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | |
154 | +struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, | |
155 | + size_t size, struct sg_table *sg); | |
156 | +int msm_gem_prime_pin(struct drm_gem_object *obj); | |
157 | +void msm_gem_prime_unpin(struct drm_gem_object *obj); | |
151 | 158 | void *msm_gem_vaddr_locked(struct drm_gem_object *obj); |
152 | 159 | void *msm_gem_vaddr(struct drm_gem_object *obj); |
153 | 160 | int msm_gem_queue_inactive_work(struct drm_gem_object *obj, |
... | ... | @@ -163,6 +170,8 @@ |
163 | 170 | uint32_t size, uint32_t flags, uint32_t *handle); |
164 | 171 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, |
165 | 172 | uint32_t size, uint32_t flags); |
173 | +struct drm_gem_object *msm_gem_import(struct drm_device *dev, | |
174 | + uint32_t size, struct sg_table *sgt); | |
166 | 175 | |
167 | 176 | struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); |
168 | 177 | const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); |
drivers/gpu/drm/msm/msm_gem.c
... | ... | @@ -17,6 +17,7 @@ |
17 | 17 | |
18 | 18 | #include <linux/spinlock.h> |
19 | 19 | #include <linux/shmem_fs.h> |
20 | +#include <linux/dma-buf.h> | |
20 | 21 | |
21 | 22 | #include "msm_drv.h" |
22 | 23 | #include "msm_gem.h" |
... | ... | @@ -77,6 +78,21 @@ |
77 | 78 | } |
78 | 79 | } |
79 | 80 | |
81 | +struct page **msm_gem_get_pages(struct drm_gem_object *obj) | |
82 | +{ | |
83 | + struct drm_device *dev = obj->dev; | |
84 | + struct page **p; | |
85 | + mutex_lock(&dev->struct_mutex); | |
86 | + p = get_pages(obj); | |
87 | + mutex_unlock(&dev->struct_mutex); | |
88 | + return p; | |
89 | +} | |
90 | + | |
/*
 * msm_gem_put_pages() - release pages obtained from msm_gem_get_pages().
 *
 * Deliberately a no-op for now: the pages stay attached to the object
 * until it is freed.  Once a pin count is tracked, this is where it
 * would be dropped.
 */
void msm_gem_put_pages(struct drm_gem_object *obj)
{
}
95 | + | |
80 | 96 | int msm_gem_mmap_obj(struct drm_gem_object *obj, |
81 | 97 | struct vm_area_struct *vma) |
82 | 98 | { |
83 | 99 | |
84 | 100 | |
... | ... | @@ -510,11 +526,22 @@ |
510 | 526 | |
511 | 527 | drm_gem_free_mmap_offset(obj); |
512 | 528 | |
513 | - if (msm_obj->vaddr) | |
514 | - vunmap(msm_obj->vaddr); | |
529 | + if (obj->import_attach) { | |
530 | + if (msm_obj->vaddr) | |
531 | + dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); | |
515 | 532 | |
516 | - put_pages(obj); | |
533 | + /* Don't drop the pages for imported dmabuf, as they are not | |
534 | + * ours, just free the array we allocated: | |
535 | + */ | |
536 | + if (msm_obj->pages) | |
537 | + drm_free_large(msm_obj->pages); | |
517 | 538 | |
539 | + } else { | |
540 | + if (msm_obj->vaddr) | |
541 | + vunmap(msm_obj->vaddr); | |
542 | + put_pages(obj); | |
543 | + } | |
544 | + | |
518 | 545 | if (msm_obj->resv == &msm_obj->_resv) |
519 | 546 | reservation_object_fini(msm_obj->resv); |
520 | 547 | |
521 | 548 | |
522 | 549 | |
... | ... | @@ -549,18 +576,13 @@ |
549 | 576 | return ret; |
550 | 577 | } |
551 | 578 | |
552 | -struct drm_gem_object *msm_gem_new(struct drm_device *dev, | |
553 | - uint32_t size, uint32_t flags) | |
579 | +static int msm_gem_new_impl(struct drm_device *dev, | |
580 | + uint32_t size, uint32_t flags, | |
581 | + struct drm_gem_object **obj) | |
554 | 582 | { |
555 | 583 | struct msm_drm_private *priv = dev->dev_private; |
556 | 584 | struct msm_gem_object *msm_obj; |
557 | - struct drm_gem_object *obj = NULL; | |
558 | - int ret; | |
559 | 585 | |
560 | - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | |
561 | - | |
562 | - size = PAGE_ALIGN(size); | |
563 | - | |
564 | 586 | switch (flags & MSM_BO_CACHE_MASK) { |
565 | 587 | case MSM_BO_UNCACHED: |
566 | 588 | case MSM_BO_CACHED: |
567 | 589 | |
568 | 590 | |
... | ... | @@ -569,22 +591,13 @@ |
569 | 591 | default: |
570 | 592 | dev_err(dev->dev, "invalid cache flag: %x\n", |
571 | 593 | (flags & MSM_BO_CACHE_MASK)); |
572 | - ret = -EINVAL; | |
573 | - goto fail; | |
594 | + return -EINVAL; | |
574 | 595 | } |
575 | 596 | |
576 | 597 | msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); |
577 | - if (!msm_obj) { | |
578 | - ret = -ENOMEM; | |
579 | - goto fail; | |
580 | - } | |
598 | + if (!msm_obj) | |
599 | + return -ENOMEM; | |
581 | 600 | |
582 | - obj = &msm_obj->base; | |
583 | - | |
584 | - ret = drm_gem_object_init(dev, obj, size); | |
585 | - if (ret) | |
586 | - goto fail; | |
587 | - | |
588 | 601 | msm_obj->flags = flags; |
589 | 602 | |
590 | 603 | msm_obj->resv = &msm_obj->_resv; |
... | ... | @@ -593,6 +606,67 @@ |
593 | 606 | INIT_LIST_HEAD(&msm_obj->submit_entry); |
594 | 607 | INIT_LIST_HEAD(&msm_obj->inactive_work); |
595 | 608 | list_add_tail(&msm_obj->mm_list, &priv->inactive_list); |
609 | + | |
610 | + *obj = &msm_obj->base; | |
611 | + | |
612 | + return 0; | |
613 | +} | |
614 | + | |
615 | +struct drm_gem_object *msm_gem_new(struct drm_device *dev, | |
616 | + uint32_t size, uint32_t flags) | |
617 | +{ | |
618 | + struct drm_gem_object *obj; | |
619 | + int ret; | |
620 | + | |
621 | + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | |
622 | + | |
623 | + size = PAGE_ALIGN(size); | |
624 | + | |
625 | + ret = msm_gem_new_impl(dev, size, flags, &obj); | |
626 | + if (ret) | |
627 | + goto fail; | |
628 | + | |
629 | + ret = drm_gem_object_init(dev, obj, size); | |
630 | + if (ret) | |
631 | + goto fail; | |
632 | + | |
633 | + return obj; | |
634 | + | |
635 | +fail: | |
636 | + if (obj) | |
637 | + drm_gem_object_unreference_unlocked(obj); | |
638 | + | |
639 | + return ERR_PTR(ret); | |
640 | +} | |
641 | + | |
642 | +struct drm_gem_object *msm_gem_import(struct drm_device *dev, | |
643 | + uint32_t size, struct sg_table *sgt) | |
644 | +{ | |
645 | + struct msm_gem_object *msm_obj; | |
646 | + struct drm_gem_object *obj; | |
647 | + int ret, npages; | |
648 | + | |
649 | + size = PAGE_ALIGN(size); | |
650 | + | |
651 | + ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); | |
652 | + if (ret) | |
653 | + goto fail; | |
654 | + | |
655 | + drm_gem_private_object_init(dev, obj, size); | |
656 | + | |
657 | + npages = size / PAGE_SIZE; | |
658 | + | |
659 | + msm_obj = to_msm_bo(obj); | |
660 | + msm_obj->sgt = sgt; | |
661 | + msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); | |
662 | + if (!msm_obj->pages) { | |
663 | + ret = -ENOMEM; | |
664 | + goto fail; | |
665 | + } | |
666 | + | |
667 | + ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); | |
668 | + if (ret) | |
669 | + goto fail; | |
596 | 670 | |
597 | 671 | return obj; |
598 | 672 |
drivers/gpu/drm/msm/msm_gem_prime.c
1 | +/* | |
2 | + * Copyright (C) 2013 Red Hat | |
3 | + * Author: Rob Clark <robdclark@gmail.com> | |
4 | + * | |
5 | + * This program is free software; you can redistribute it and/or modify it | |
6 | + * under the terms of the GNU General Public License version 2 as published by | |
7 | + * the Free Software Foundation. | |
8 | + * | |
9 | + * This program is distributed in the hope that it will be useful, but WITHOUT | |
10 | + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | + * more details. | |
13 | + * | |
14 | + * You should have received a copy of the GNU General Public License along with | |
15 | + * this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | + */ | |
17 | + | |
18 | +#include "msm_drv.h" | |
19 | +#include "msm_gem.h" | |
20 | + | |
21 | + | |
22 | +struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) | |
23 | +{ | |
24 | + struct msm_gem_object *msm_obj = to_msm_bo(obj); | |
25 | + BUG_ON(!msm_obj->sgt); /* should have already pinned! */ | |
26 | + return msm_obj->sgt; | |
27 | +} | |
28 | + | |
/*
 * msm_gem_prime_vmap() - kernel-virtual mapping for dma-buf vmap.
 *
 * Simply delegates to the driver's normal (locking) vaddr path.
 */
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return msm_gem_vaddr(obj);
}
33 | + | |
/*
 * msm_gem_prime_vunmap() - counterpart to msm_gem_prime_vmap().
 *
 * Intentionally empty: the driver keeps the vmap'ing around until the
 * object is destroyed (no msm_gem_vunmap() exists yet — TODO).
 */
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
}
38 | + | |
39 | +struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, | |
40 | + size_t size, struct sg_table *sg) | |
41 | +{ | |
42 | + return msm_gem_import(dev, size, sg); | |
43 | +} | |
44 | + | |
45 | +int msm_gem_prime_pin(struct drm_gem_object *obj) | |
46 | +{ | |
47 | + if (!obj->import_attach) | |
48 | + msm_gem_get_pages(obj); | |
49 | + return 0; | |
50 | +} | |
51 | + | |
52 | +void msm_gem_prime_unpin(struct drm_gem_object *obj) | |
53 | +{ | |
54 | + if (!obj->import_attach) | |
55 | + msm_gem_put_pages(obj); | |
56 | +} |