Commit b9d474500546160dd6af35f60cd8bc20edd13807

Authored by Sascha Hauer
Committed by Laurent Pinchart
1 parent 0b2443ed4e

DRM: Add DRM GEM CMA helper

Many embedded drm devices have neither an IOMMU nor dedicated
memory for graphics. These devices use CMA (Contiguous Memory
Allocator) backed graphics memory. This patch provides helper
functions so that such drivers can share common code. The code
technically does not depend on CMA as the backend allocator; the name
has been chosen because CMA makes for a nice, short but still
descriptive function prefix.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Tested-by: Lars-Peter Clausen <lars@metafoo.de>
[Make DRM_GEM_CMA_HELPER a boolean Kconfig option]
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>

Showing 4 changed files with 302 additions and 0 deletions Side-by-side Diff

drivers/gpu/drm/Kconfig
... ... @@ -54,6 +54,12 @@
54 54 GPU memory types. Will be enabled automatically if a device driver
55 55 uses it.
56 56  
  57 +config DRM_GEM_CMA_HELPER
  58 + bool
  59 + depends on DRM
  60 + help
  61 + Choose this if you need the GEM CMA helper functions
  62 +
57 63 config DRM_TDFX
58 64 tristate "3dfx Banshee/Voodoo3+"
59 65 depends on DRM && PCI
drivers/gpu/drm/Makefile
... ... @@ -15,6 +15,7 @@
15 15 drm_trace_points.o drm_global.o drm_prime.o
16 16  
17 17 drm-$(CONFIG_COMPAT) += drm_ioc32.o
  18 +drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
18 19  
19 20 drm-usb-y := drm_usb.o
20 21  
drivers/gpu/drm/drm_gem_cma_helper.c
  1 +/*
  2 + * drm gem CMA (contiguous memory allocator) helper functions
  3 + *
  4 + * Copyright (C) 2012 Sascha Hauer, Pengutronix
  5 + *
  6 + * Based on Samsung Exynos code
  7 + *
  8 + * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  9 + *
  10 + * This program is free software; you can redistribute it and/or
  11 + * modify it under the terms of the GNU General Public License
  12 + * as published by the Free Software Foundation; either version 2
  13 + * of the License, or (at your option) any later version.
  14 + * This program is distributed in the hope that it will be useful,
  15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17 + * GNU General Public License for more details.
  18 + */
  19 +
  20 +#include <linux/mm.h>
  21 +#include <linux/slab.h>
  22 +#include <linux/mutex.h>
  23 +#include <linux/export.h>
  24 +#include <linux/dma-mapping.h>
  25 +
  26 +#include <drm/drmP.h>
  27 +#include <drm/drm.h>
  28 +#include <drm/drm_gem_cma_helper.h>
  29 +
  30 +static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
  31 +{
  32 + return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
  33 +}
  34 +
/*
 * drm_gem_cma_buf_destroy - release the contiguous backing store of a
 * CMA gem object
 *
 * Frees the writecombine DMA memory allocated in drm_gem_cma_create();
 * it does not touch the gem object bookkeeping or the wrapper struct.
 */
static void drm_gem_cma_buf_destroy(struct drm_device *drm,
		struct drm_gem_cma_object *cma_obj)
{
	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
			cma_obj->paddr);
}
  41 +
  42 +/*
  43 + * drm_gem_cma_create - allocate an object with the given size
  44 + *
  45 + * returns a struct drm_gem_cma_object* on success or ERR_PTR values
  46 + * on failure.
  47 + */
  48 +struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
  49 + unsigned int size)
  50 +{
  51 + struct drm_gem_cma_object *cma_obj;
  52 + struct drm_gem_object *gem_obj;
  53 + int ret;
  54 +
  55 + size = round_up(size, PAGE_SIZE);
  56 +
  57 + cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
  58 + if (!cma_obj)
  59 + return ERR_PTR(-ENOMEM);
  60 +
  61 + cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
  62 + &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
  63 + if (!cma_obj->vaddr) {
  64 + dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
  65 + ret = -ENOMEM;
  66 + goto err_dma_alloc;
  67 + }
  68 +
  69 + gem_obj = &cma_obj->base;
  70 +
  71 + ret = drm_gem_object_init(drm, gem_obj, size);
  72 + if (ret)
  73 + goto err_obj_init;
  74 +
  75 + ret = drm_gem_create_mmap_offset(gem_obj);
  76 + if (ret)
  77 + goto err_create_mmap_offset;
  78 +
  79 + return cma_obj;
  80 +
  81 +err_create_mmap_offset:
  82 + drm_gem_object_release(gem_obj);
  83 +
  84 +err_obj_init:
  85 + drm_gem_cma_buf_destroy(drm, cma_obj);
  86 +
  87 +err_dma_alloc:
  88 + kfree(cma_obj);
  89 +
  90 + return ERR_PTR(ret);
  91 +}
  92 +EXPORT_SYMBOL_GPL(drm_gem_cma_create);
  93 +
/*
 * drm_gem_cma_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * @file_priv: drm file the handle is created for
 * @drm: drm device the object belongs to
 * @size: requested buffer size in bytes
 * @handle: out parameter, receives the userspace-visible handle
 *
 * returns a struct drm_gem_cma_object* on success or ERR_PTR values
 * on failure.
 */
static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
		struct drm_file *file_priv,
		struct drm_device *drm, unsigned int size,
		unsigned int *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * allocate a id of idr table where the obj is registered
	 * and handle has the id what user can see.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(gem_obj);

	return cma_obj;

err_handle_create:
	/* no handle exists yet, so tear down buffer and gem object fully */
	drm_gem_cma_free_object(gem_obj);

	return ERR_PTR(ret);
}
  134 +
/*
 * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
 * function
 *
 * Undoes everything drm_gem_cma_create() set up: the mmap offset (if
 * one was created), the gem object bookkeeping, the contiguous buffer,
 * and finally the wrapper structure itself.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	/* the offset only exists once drm_gem_create_mmap_offset()
	 * succeeded, so check before freeing */
	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);

	drm_gem_object_release(gem_obj);

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
  155 +
  156 +/*
  157 + * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
  158 + * function
  159 + *
  160 + * This aligns the pitch and size arguments to the minimum required. wrap
  161 + * this into your own function if you need bigger alignment.
  162 + */
  163 +int drm_gem_cma_dumb_create(struct drm_file *file_priv,
  164 + struct drm_device *dev, struct drm_mode_create_dumb *args)
  165 +{
  166 + struct drm_gem_cma_object *cma_obj;
  167 + int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  168 +
  169 + if (args->pitch < min_pitch)
  170 + args->pitch = min_pitch;
  171 +
  172 + if (args->size < args->pitch * args->height)
  173 + args->size = args->pitch * args->height;
  174 +
  175 + cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
  176 + args->size, &args->handle);
  177 + if (IS_ERR(cma_obj))
  178 + return PTR_ERR(cma_obj);
  179 +
  180 + return 0;
  181 +}
  182 +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
  183 +
  184 +/*
  185 + * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
  186 + * function
  187 + */
  188 +int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
  189 + struct drm_device *drm, uint32_t handle, uint64_t *offset)
  190 +{
  191 + struct drm_gem_object *gem_obj;
  192 +
  193 + mutex_lock(&drm->struct_mutex);
  194 +
  195 + gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
  196 + if (!gem_obj) {
  197 + dev_err(drm->dev, "failed to lookup gem object\n");
  198 + mutex_unlock(&drm->struct_mutex);
  199 + return -EINVAL;
  200 + }
  201 +
  202 + *offset = get_gem_mmap_offset(gem_obj);
  203 +
  204 + drm_gem_object_unreference(gem_obj);
  205 +
  206 + mutex_unlock(&drm->struct_mutex);
  207 +
  208 + return 0;
  209 +}
  210 +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
  211 +
/*
 * VM operations for CMA gem mappings: only the generic gem open/close
 * reference counting is needed, since drm_gem_cma_mmap() maps the whole
 * buffer up front rather than relying on a fault handler.
 */
const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
  217 +
/*
 * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
 *
 * Lets the generic gem mmap code set up the vma (and take an object
 * reference), then maps the whole contiguous buffer into the vma in one
 * go instead of faulting pages in on demand.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	struct drm_gem_cma_object *cma_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* drm_gem_mmap() stashed the gem object in vm_private_data */
	gem_obj = vma->vm_private_data;
	cma_obj = to_drm_gem_cma_obj(gem_obj);

	ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (ret)
		/* drop the reference drm_gem_mmap() took on the object */
		drm_gem_vm_close(vma);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
  242 +
/*
 * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
 *
 * Deleting the handle drops its reference; the buffer itself is freed
 * through the gem_free_object callback once the last reference is gone.
 */
int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
		struct drm_device *drm, unsigned int handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
include/drm/drm_gem_cma_helper.h
#ifndef __DRM_GEM_CMA_HELPER_H__
#define __DRM_GEM_CMA_HELPER_H__

/*
 * GEM object backed by a physically contiguous (CMA-style) buffer.
 */
struct drm_gem_cma_object {
	struct drm_gem_object base;
	/* DMA (bus) address of the buffer */
	dma_addr_t paddr;
	/* kernel virtual address of the buffer (writecombine mapping) */
	void *vaddr;
};

/* downcast from the embedded gem object to the CMA wrapper */
static inline struct drm_gem_cma_object *
to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct drm_gem_cma_object, base);
}

/* free gem object. */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);

/* create memory region for drm framebuffer. */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
		struct drm_device *drm, struct drm_mode_create_dumb *args);

/* map memory region for drm framebuffer to user space. */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
		struct drm_device *drm, uint32_t handle, uint64_t *offset);

/* set vm_flags and we can change the vm attribute to other one at here. */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);

/*
 * destroy memory region allocated.
 * - a gem handle and physical memory region pointed by a gem object
 * would be released by drm_gem_handle_delete().
 */
int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
		struct drm_device *drm, unsigned int handle);

/* allocate physical memory. */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
		unsigned int size);

/* open/close refcounting ops for mmap'ed CMA gem objects */
extern const struct vm_operations_struct drm_gem_cma_vm_ops;

#endif /* __DRM_GEM_CMA_HELPER_H__ */