Commit b8eade24c9891b8f153c40cf310ef4696c873af9
Committed by
Dave Airlie
1 parent
293d3f6a70
Exists in
master
and in
13 other branches
drm/exynos: use %pad for dma_addr_t
Use %pad for dma_addr_t, because a dma_addr_t type can vary based on build options. So, it prevents possible build warnings in printks. Signed-off-by: Jingoo Han <jg1.han@samsung.com> Reviewed-by: Daniel Kurtz <djkurtz@chromium.org> Signed-off-by: Inki Dae <inki.dae@samsung.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Showing 2 changed files with 2 additions and 2 deletions Inline Diff
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
1 | /* exynos_drm_dmabuf.c | 1 | /* exynos_drm_dmabuf.c |
2 | * | 2 | * |
3 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | 3 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. |
4 | * Author: Inki Dae <inki.dae@samsung.com> | 4 | * Author: Inki Dae <inki.dae@samsung.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
8 | * Free Software Foundation; either version 2 of the License, or (at your | 8 | * Free Software Foundation; either version 2 of the License, or (at your |
9 | * option) any later version. | 9 | * option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <drm/drmP.h> | 12 | #include <drm/drmP.h> |
13 | #include <drm/exynos_drm.h> | 13 | #include <drm/exynos_drm.h> |
14 | #include "exynos_drm_dmabuf.h" | 14 | #include "exynos_drm_dmabuf.h" |
15 | #include "exynos_drm_drv.h" | 15 | #include "exynos_drm_drv.h" |
16 | #include "exynos_drm_gem.h" | 16 | #include "exynos_drm_gem.h" |
17 | 17 | ||
18 | #include <linux/dma-buf.h> | 18 | #include <linux/dma-buf.h> |
19 | 19 | ||
20 | struct exynos_drm_dmabuf_attachment { | 20 | struct exynos_drm_dmabuf_attachment { |
21 | struct sg_table sgt; | 21 | struct sg_table sgt; |
22 | enum dma_data_direction dir; | 22 | enum dma_data_direction dir; |
23 | bool is_mapped; | 23 | bool is_mapped; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf) | 26 | static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf) |
27 | { | 27 | { |
28 | return to_exynos_gem_obj(buf->priv); | 28 | return to_exynos_gem_obj(buf->priv); |
29 | } | 29 | } |
30 | 30 | ||
31 | static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, | 31 | static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, |
32 | struct device *dev, | 32 | struct device *dev, |
33 | struct dma_buf_attachment *attach) | 33 | struct dma_buf_attachment *attach) |
34 | { | 34 | { |
35 | struct exynos_drm_dmabuf_attachment *exynos_attach; | 35 | struct exynos_drm_dmabuf_attachment *exynos_attach; |
36 | 36 | ||
37 | exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL); | 37 | exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL); |
38 | if (!exynos_attach) | 38 | if (!exynos_attach) |
39 | return -ENOMEM; | 39 | return -ENOMEM; |
40 | 40 | ||
41 | exynos_attach->dir = DMA_NONE; | 41 | exynos_attach->dir = DMA_NONE; |
42 | attach->priv = exynos_attach; | 42 | attach->priv = exynos_attach; |
43 | 43 | ||
44 | return 0; | 44 | return 0; |
45 | } | 45 | } |
46 | 46 | ||
47 | static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf, | 47 | static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf, |
48 | struct dma_buf_attachment *attach) | 48 | struct dma_buf_attachment *attach) |
49 | { | 49 | { |
50 | struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; | 50 | struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; |
51 | struct sg_table *sgt; | 51 | struct sg_table *sgt; |
52 | 52 | ||
53 | if (!exynos_attach) | 53 | if (!exynos_attach) |
54 | return; | 54 | return; |
55 | 55 | ||
56 | sgt = &exynos_attach->sgt; | 56 | sgt = &exynos_attach->sgt; |
57 | 57 | ||
58 | if (exynos_attach->dir != DMA_NONE) | 58 | if (exynos_attach->dir != DMA_NONE) |
59 | dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, | 59 | dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, |
60 | exynos_attach->dir); | 60 | exynos_attach->dir); |
61 | 61 | ||
62 | sg_free_table(sgt); | 62 | sg_free_table(sgt); |
63 | kfree(exynos_attach); | 63 | kfree(exynos_attach); |
64 | attach->priv = NULL; | 64 | attach->priv = NULL; |
65 | } | 65 | } |
66 | 66 | ||
67 | static struct sg_table * | 67 | static struct sg_table * |
68 | exynos_gem_map_dma_buf(struct dma_buf_attachment *attach, | 68 | exynos_gem_map_dma_buf(struct dma_buf_attachment *attach, |
69 | enum dma_data_direction dir) | 69 | enum dma_data_direction dir) |
70 | { | 70 | { |
71 | struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; | 71 | struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; |
72 | struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf); | 72 | struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf); |
73 | struct drm_device *dev = gem_obj->base.dev; | 73 | struct drm_device *dev = gem_obj->base.dev; |
74 | struct exynos_drm_gem_buf *buf; | 74 | struct exynos_drm_gem_buf *buf; |
75 | struct scatterlist *rd, *wr; | 75 | struct scatterlist *rd, *wr; |
76 | struct sg_table *sgt = NULL; | 76 | struct sg_table *sgt = NULL; |
77 | unsigned int i; | 77 | unsigned int i; |
78 | int nents, ret; | 78 | int nents, ret; |
79 | 79 | ||
80 | /* just return current sgt if already requested. */ | 80 | /* just return current sgt if already requested. */ |
81 | if (exynos_attach->dir == dir && exynos_attach->is_mapped) | 81 | if (exynos_attach->dir == dir && exynos_attach->is_mapped) |
82 | return &exynos_attach->sgt; | 82 | return &exynos_attach->sgt; |
83 | 83 | ||
84 | buf = gem_obj->buffer; | 84 | buf = gem_obj->buffer; |
85 | if (!buf) { | 85 | if (!buf) { |
86 | DRM_ERROR("buffer is null.\n"); | 86 | DRM_ERROR("buffer is null.\n"); |
87 | return ERR_PTR(-ENOMEM); | 87 | return ERR_PTR(-ENOMEM); |
88 | } | 88 | } |
89 | 89 | ||
90 | sgt = &exynos_attach->sgt; | 90 | sgt = &exynos_attach->sgt; |
91 | 91 | ||
92 | ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL); | 92 | ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL); |
93 | if (ret) { | 93 | if (ret) { |
94 | DRM_ERROR("failed to alloc sgt.\n"); | 94 | DRM_ERROR("failed to alloc sgt.\n"); |
95 | return ERR_PTR(-ENOMEM); | 95 | return ERR_PTR(-ENOMEM); |
96 | } | 96 | } |
97 | 97 | ||
98 | mutex_lock(&dev->struct_mutex); | 98 | mutex_lock(&dev->struct_mutex); |
99 | 99 | ||
100 | rd = buf->sgt->sgl; | 100 | rd = buf->sgt->sgl; |
101 | wr = sgt->sgl; | 101 | wr = sgt->sgl; |
102 | for (i = 0; i < sgt->orig_nents; ++i) { | 102 | for (i = 0; i < sgt->orig_nents; ++i) { |
103 | sg_set_page(wr, sg_page(rd), rd->length, rd->offset); | 103 | sg_set_page(wr, sg_page(rd), rd->length, rd->offset); |
104 | rd = sg_next(rd); | 104 | rd = sg_next(rd); |
105 | wr = sg_next(wr); | 105 | wr = sg_next(wr); |
106 | } | 106 | } |
107 | 107 | ||
108 | if (dir != DMA_NONE) { | 108 | if (dir != DMA_NONE) { |
109 | nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); | 109 | nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); |
110 | if (!nents) { | 110 | if (!nents) { |
111 | DRM_ERROR("failed to map sgl with iommu.\n"); | 111 | DRM_ERROR("failed to map sgl with iommu.\n"); |
112 | sg_free_table(sgt); | 112 | sg_free_table(sgt); |
113 | sgt = ERR_PTR(-EIO); | 113 | sgt = ERR_PTR(-EIO); |
114 | goto err_unlock; | 114 | goto err_unlock; |
115 | } | 115 | } |
116 | } | 116 | } |
117 | 117 | ||
118 | exynos_attach->is_mapped = true; | 118 | exynos_attach->is_mapped = true; |
119 | exynos_attach->dir = dir; | 119 | exynos_attach->dir = dir; |
120 | attach->priv = exynos_attach; | 120 | attach->priv = exynos_attach; |
121 | 121 | ||
122 | DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size); | 122 | DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size); |
123 | 123 | ||
124 | err_unlock: | 124 | err_unlock: |
125 | mutex_unlock(&dev->struct_mutex); | 125 | mutex_unlock(&dev->struct_mutex); |
126 | return sgt; | 126 | return sgt; |
127 | } | 127 | } |
128 | 128 | ||
129 | static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach, | 129 | static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach, |
130 | struct sg_table *sgt, | 130 | struct sg_table *sgt, |
131 | enum dma_data_direction dir) | 131 | enum dma_data_direction dir) |
132 | { | 132 | { |
133 | /* Nothing to do. */ | 133 | /* Nothing to do. */ |
134 | } | 134 | } |
135 | 135 | ||
136 | static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, | 136 | static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, |
137 | unsigned long page_num) | 137 | unsigned long page_num) |
138 | { | 138 | { |
139 | /* TODO */ | 139 | /* TODO */ |
140 | 140 | ||
141 | return NULL; | 141 | return NULL; |
142 | } | 142 | } |
143 | 143 | ||
144 | static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, | 144 | static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, |
145 | unsigned long page_num, | 145 | unsigned long page_num, |
146 | void *addr) | 146 | void *addr) |
147 | { | 147 | { |
148 | /* TODO */ | 148 | /* TODO */ |
149 | } | 149 | } |
150 | 150 | ||
151 | static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf, | 151 | static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf, |
152 | unsigned long page_num) | 152 | unsigned long page_num) |
153 | { | 153 | { |
154 | /* TODO */ | 154 | /* TODO */ |
155 | 155 | ||
156 | return NULL; | 156 | return NULL; |
157 | } | 157 | } |
158 | 158 | ||
159 | static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf, | 159 | static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf, |
160 | unsigned long page_num, void *addr) | 160 | unsigned long page_num, void *addr) |
161 | { | 161 | { |
162 | /* TODO */ | 162 | /* TODO */ |
163 | } | 163 | } |
164 | 164 | ||
165 | static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf, | 165 | static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf, |
166 | struct vm_area_struct *vma) | 166 | struct vm_area_struct *vma) |
167 | { | 167 | { |
168 | return -ENOTTY; | 168 | return -ENOTTY; |
169 | } | 169 | } |
170 | 170 | ||
171 | static struct dma_buf_ops exynos_dmabuf_ops = { | 171 | static struct dma_buf_ops exynos_dmabuf_ops = { |
172 | .attach = exynos_gem_attach_dma_buf, | 172 | .attach = exynos_gem_attach_dma_buf, |
173 | .detach = exynos_gem_detach_dma_buf, | 173 | .detach = exynos_gem_detach_dma_buf, |
174 | .map_dma_buf = exynos_gem_map_dma_buf, | 174 | .map_dma_buf = exynos_gem_map_dma_buf, |
175 | .unmap_dma_buf = exynos_gem_unmap_dma_buf, | 175 | .unmap_dma_buf = exynos_gem_unmap_dma_buf, |
176 | .kmap = exynos_gem_dmabuf_kmap, | 176 | .kmap = exynos_gem_dmabuf_kmap, |
177 | .kmap_atomic = exynos_gem_dmabuf_kmap_atomic, | 177 | .kmap_atomic = exynos_gem_dmabuf_kmap_atomic, |
178 | .kunmap = exynos_gem_dmabuf_kunmap, | 178 | .kunmap = exynos_gem_dmabuf_kunmap, |
179 | .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic, | 179 | .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic, |
180 | .mmap = exynos_gem_dmabuf_mmap, | 180 | .mmap = exynos_gem_dmabuf_mmap, |
181 | .release = drm_gem_dmabuf_release, | 181 | .release = drm_gem_dmabuf_release, |
182 | }; | 182 | }; |
183 | 183 | ||
184 | struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, | 184 | struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, |
185 | struct drm_gem_object *obj, int flags) | 185 | struct drm_gem_object *obj, int flags) |
186 | { | 186 | { |
187 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | 187 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); |
188 | 188 | ||
189 | return dma_buf_export(obj, &exynos_dmabuf_ops, | 189 | return dma_buf_export(obj, &exynos_dmabuf_ops, |
190 | exynos_gem_obj->base.size, flags); | 190 | exynos_gem_obj->base.size, flags); |
191 | } | 191 | } |
192 | 192 | ||
193 | struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, | 193 | struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, |
194 | struct dma_buf *dma_buf) | 194 | struct dma_buf *dma_buf) |
195 | { | 195 | { |
196 | struct dma_buf_attachment *attach; | 196 | struct dma_buf_attachment *attach; |
197 | struct sg_table *sgt; | 197 | struct sg_table *sgt; |
198 | struct scatterlist *sgl; | 198 | struct scatterlist *sgl; |
199 | struct exynos_drm_gem_obj *exynos_gem_obj; | 199 | struct exynos_drm_gem_obj *exynos_gem_obj; |
200 | struct exynos_drm_gem_buf *buffer; | 200 | struct exynos_drm_gem_buf *buffer; |
201 | int ret; | 201 | int ret; |
202 | 202 | ||
203 | /* is this one of own objects? */ | 203 | /* is this one of own objects? */ |
204 | if (dma_buf->ops == &exynos_dmabuf_ops) { | 204 | if (dma_buf->ops == &exynos_dmabuf_ops) { |
205 | struct drm_gem_object *obj; | 205 | struct drm_gem_object *obj; |
206 | 206 | ||
207 | obj = dma_buf->priv; | 207 | obj = dma_buf->priv; |
208 | 208 | ||
209 | /* is it from our device? */ | 209 | /* is it from our device? */ |
210 | if (obj->dev == drm_dev) { | 210 | if (obj->dev == drm_dev) { |
211 | /* | 211 | /* |
212 | * Importing dmabuf exported from out own gem increases | 212 | * Importing dmabuf exported from out own gem increases |
213 | * refcount on gem itself instead of f_count of dmabuf. | 213 | * refcount on gem itself instead of f_count of dmabuf. |
214 | */ | 214 | */ |
215 | drm_gem_object_reference(obj); | 215 | drm_gem_object_reference(obj); |
216 | return obj; | 216 | return obj; |
217 | } | 217 | } |
218 | } | 218 | } |
219 | 219 | ||
220 | attach = dma_buf_attach(dma_buf, drm_dev->dev); | 220 | attach = dma_buf_attach(dma_buf, drm_dev->dev); |
221 | if (IS_ERR(attach)) | 221 | if (IS_ERR(attach)) |
222 | return ERR_PTR(-EINVAL); | 222 | return ERR_PTR(-EINVAL); |
223 | 223 | ||
224 | get_dma_buf(dma_buf); | 224 | get_dma_buf(dma_buf); |
225 | 225 | ||
226 | sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); | 226 | sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); |
227 | if (IS_ERR(sgt)) { | 227 | if (IS_ERR(sgt)) { |
228 | ret = PTR_ERR(sgt); | 228 | ret = PTR_ERR(sgt); |
229 | goto err_buf_detach; | 229 | goto err_buf_detach; |
230 | } | 230 | } |
231 | 231 | ||
232 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); | 232 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); |
233 | if (!buffer) { | 233 | if (!buffer) { |
234 | ret = -ENOMEM; | 234 | ret = -ENOMEM; |
235 | goto err_unmap_attach; | 235 | goto err_unmap_attach; |
236 | } | 236 | } |
237 | 237 | ||
238 | exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size); | 238 | exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size); |
239 | if (!exynos_gem_obj) { | 239 | if (!exynos_gem_obj) { |
240 | ret = -ENOMEM; | 240 | ret = -ENOMEM; |
241 | goto err_free_buffer; | 241 | goto err_free_buffer; |
242 | } | 242 | } |
243 | 243 | ||
244 | sgl = sgt->sgl; | 244 | sgl = sgt->sgl; |
245 | 245 | ||
246 | buffer->size = dma_buf->size; | 246 | buffer->size = dma_buf->size; |
247 | buffer->dma_addr = sg_dma_address(sgl); | 247 | buffer->dma_addr = sg_dma_address(sgl); |
248 | 248 | ||
249 | if (sgt->nents == 1) { | 249 | if (sgt->nents == 1) { |
250 | /* always physically continuous memory if sgt->nents is 1. */ | 250 | /* always physically continuous memory if sgt->nents is 1. */ |
251 | exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; | 251 | exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; |
252 | } else { | 252 | } else { |
253 | /* | 253 | /* |
254 | * this case could be CONTIG or NONCONTIG type but for now | 254 | * this case could be CONTIG or NONCONTIG type but for now |
255 | * sets NONCONTIG. | 255 | * sets NONCONTIG. |
256 | * TODO. we have to find a way that exporter can notify | 256 | * TODO. we have to find a way that exporter can notify |
257 | * the type of its own buffer to importer. | 257 | * the type of its own buffer to importer. |
258 | */ | 258 | */ |
259 | exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; | 259 | exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; |
260 | } | 260 | } |
261 | 261 | ||
262 | exynos_gem_obj->buffer = buffer; | 262 | exynos_gem_obj->buffer = buffer; |
263 | buffer->sgt = sgt; | 263 | buffer->sgt = sgt; |
264 | exynos_gem_obj->base.import_attach = attach; | 264 | exynos_gem_obj->base.import_attach = attach; |
265 | 265 | ||
266 | DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr, | 266 | DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr, |
267 | buffer->size); | 267 | buffer->size); |
268 | 268 | ||
269 | return &exynos_gem_obj->base; | 269 | return &exynos_gem_obj->base; |
270 | 270 | ||
271 | err_free_buffer: | 271 | err_free_buffer: |
272 | kfree(buffer); | 272 | kfree(buffer); |
273 | buffer = NULL; | 273 | buffer = NULL; |
274 | err_unmap_attach: | 274 | err_unmap_attach: |
275 | dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); | 275 | dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); |
276 | err_buf_detach: | 276 | err_buf_detach: |
277 | dma_buf_detach(dma_buf, attach); | 277 | dma_buf_detach(dma_buf, attach); |
278 | dma_buf_put(dma_buf); | 278 | dma_buf_put(dma_buf); |
279 | 279 | ||
280 | return ERR_PTR(ret); | 280 | return ERR_PTR(ret); |
281 | } | 281 | } |
282 | 282 | ||
283 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | 283 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); |
284 | MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module"); | 284 | MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module"); |
285 | MODULE_LICENSE("GPL"); | 285 | MODULE_LICENSE("GPL"); |
286 | 286 |
drivers/gpu/drm/exynos/exynos_drm_vidi.c
1 | /* exynos_drm_vidi.c | 1 | /* exynos_drm_vidi.c |
2 | * | 2 | * |
3 | * Copyright (C) 2012 Samsung Electronics Co.Ltd | 3 | * Copyright (C) 2012 Samsung Electronics Co.Ltd |
4 | * Authors: | 4 | * Authors: |
5 | * Inki Dae <inki.dae@samsung.com> | 5 | * Inki Dae <inki.dae@samsung.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the | 8 | * under the terms of the GNU General Public License as published by the |
9 | * Free Software Foundation; either version 2 of the License, or (at your | 9 | * Free Software Foundation; either version 2 of the License, or (at your |
10 | * option) any later version. | 10 | * option) any later version. |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | #include <drm/drmP.h> | 13 | #include <drm/drmP.h> |
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | 17 | ||
18 | #include <drm/exynos_drm.h> | 18 | #include <drm/exynos_drm.h> |
19 | 19 | ||
20 | #include <drm/drm_edid.h> | 20 | #include <drm/drm_edid.h> |
21 | #include <drm/drm_crtc_helper.h> | 21 | #include <drm/drm_crtc_helper.h> |
22 | 22 | ||
23 | #include "exynos_drm_drv.h" | 23 | #include "exynos_drm_drv.h" |
24 | #include "exynos_drm_crtc.h" | 24 | #include "exynos_drm_crtc.h" |
25 | #include "exynos_drm_encoder.h" | 25 | #include "exynos_drm_encoder.h" |
26 | #include "exynos_drm_vidi.h" | 26 | #include "exynos_drm_vidi.h" |
27 | 27 | ||
28 | /* vidi has totally three virtual windows. */ | 28 | /* vidi has totally three virtual windows. */ |
29 | #define WINDOWS_NR 3 | 29 | #define WINDOWS_NR 3 |
30 | 30 | ||
31 | #define get_vidi_mgr(dev) platform_get_drvdata(to_platform_device(dev)) | 31 | #define get_vidi_mgr(dev) platform_get_drvdata(to_platform_device(dev)) |
32 | #define ctx_from_connector(c) container_of(c, struct vidi_context, \ | 32 | #define ctx_from_connector(c) container_of(c, struct vidi_context, \ |
33 | connector) | 33 | connector) |
34 | 34 | ||
35 | struct vidi_win_data { | 35 | struct vidi_win_data { |
36 | unsigned int offset_x; | 36 | unsigned int offset_x; |
37 | unsigned int offset_y; | 37 | unsigned int offset_y; |
38 | unsigned int ovl_width; | 38 | unsigned int ovl_width; |
39 | unsigned int ovl_height; | 39 | unsigned int ovl_height; |
40 | unsigned int fb_width; | 40 | unsigned int fb_width; |
41 | unsigned int fb_height; | 41 | unsigned int fb_height; |
42 | unsigned int bpp; | 42 | unsigned int bpp; |
43 | dma_addr_t dma_addr; | 43 | dma_addr_t dma_addr; |
44 | unsigned int buf_offsize; | 44 | unsigned int buf_offsize; |
45 | unsigned int line_size; /* bytes */ | 45 | unsigned int line_size; /* bytes */ |
46 | bool enabled; | 46 | bool enabled; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | struct vidi_context { | 49 | struct vidi_context { |
50 | struct drm_device *drm_dev; | 50 | struct drm_device *drm_dev; |
51 | struct drm_crtc *crtc; | 51 | struct drm_crtc *crtc; |
52 | struct drm_encoder *encoder; | 52 | struct drm_encoder *encoder; |
53 | struct drm_connector connector; | 53 | struct drm_connector connector; |
54 | struct vidi_win_data win_data[WINDOWS_NR]; | 54 | struct vidi_win_data win_data[WINDOWS_NR]; |
55 | struct edid *raw_edid; | 55 | struct edid *raw_edid; |
56 | unsigned int clkdiv; | 56 | unsigned int clkdiv; |
57 | unsigned int default_win; | 57 | unsigned int default_win; |
58 | unsigned long irq_flags; | 58 | unsigned long irq_flags; |
59 | unsigned int connected; | 59 | unsigned int connected; |
60 | bool vblank_on; | 60 | bool vblank_on; |
61 | bool suspended; | 61 | bool suspended; |
62 | bool direct_vblank; | 62 | bool direct_vblank; |
63 | struct work_struct work; | 63 | struct work_struct work; |
64 | struct mutex lock; | 64 | struct mutex lock; |
65 | int pipe; | 65 | int pipe; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static const char fake_edid_info[] = { | 68 | static const char fake_edid_info[] = { |
69 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05, | 69 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05, |
70 | 0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78, | 70 | 0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78, |
71 | 0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd, | 71 | 0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd, |
72 | 0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, | 72 | 0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, |
73 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00, | 73 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00, |
74 | 0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, | 74 | 0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, |
75 | 0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00, | 75 | 0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00, |
76 | 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, | 76 | 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, |
77 | 0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, | 77 | 0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
78 | 0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47, | 78 | 0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47, |
79 | 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1, | 79 | 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1, |
80 | 0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83, | 80 | 0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83, |
81 | 0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 0x0c, 0x00, 0x10, 0x00, | 81 | 0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 0x0c, 0x00, 0x10, 0x00, |
82 | 0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c, | 82 | 0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c, |
83 | 0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a, | 83 | 0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a, |
84 | 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00, | 84 | 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00, |
85 | 0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c, | 85 | 0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c, |
86 | 0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, | 86 | 0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, |
87 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | 87 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
88 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | 88 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
89 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | 89 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
90 | 0x00, 0x00, 0x00, 0x06 | 90 | 0x00, 0x00, 0x00, 0x06 |
91 | }; | 91 | }; |
92 | 92 | ||
93 | static void vidi_apply(struct exynos_drm_manager *mgr) | 93 | static void vidi_apply(struct exynos_drm_manager *mgr) |
94 | { | 94 | { |
95 | struct vidi_context *ctx = mgr->ctx; | 95 | struct vidi_context *ctx = mgr->ctx; |
96 | struct exynos_drm_manager_ops *mgr_ops = mgr->ops; | 96 | struct exynos_drm_manager_ops *mgr_ops = mgr->ops; |
97 | struct vidi_win_data *win_data; | 97 | struct vidi_win_data *win_data; |
98 | int i; | 98 | int i; |
99 | 99 | ||
100 | for (i = 0; i < WINDOWS_NR; i++) { | 100 | for (i = 0; i < WINDOWS_NR; i++) { |
101 | win_data = &ctx->win_data[i]; | 101 | win_data = &ctx->win_data[i]; |
102 | if (win_data->enabled && (mgr_ops && mgr_ops->win_commit)) | 102 | if (win_data->enabled && (mgr_ops && mgr_ops->win_commit)) |
103 | mgr_ops->win_commit(mgr, i); | 103 | mgr_ops->win_commit(mgr, i); |
104 | } | 104 | } |
105 | 105 | ||
106 | if (mgr_ops && mgr_ops->commit) | 106 | if (mgr_ops && mgr_ops->commit) |
107 | mgr_ops->commit(mgr); | 107 | mgr_ops->commit(mgr); |
108 | } | 108 | } |
109 | 109 | ||
110 | static void vidi_commit(struct exynos_drm_manager *mgr) | 110 | static void vidi_commit(struct exynos_drm_manager *mgr) |
111 | { | 111 | { |
112 | struct vidi_context *ctx = mgr->ctx; | 112 | struct vidi_context *ctx = mgr->ctx; |
113 | 113 | ||
114 | if (ctx->suspended) | 114 | if (ctx->suspended) |
115 | return; | 115 | return; |
116 | } | 116 | } |
117 | 117 | ||
118 | static int vidi_enable_vblank(struct exynos_drm_manager *mgr) | 118 | static int vidi_enable_vblank(struct exynos_drm_manager *mgr) |
119 | { | 119 | { |
120 | struct vidi_context *ctx = mgr->ctx; | 120 | struct vidi_context *ctx = mgr->ctx; |
121 | 121 | ||
122 | if (ctx->suspended) | 122 | if (ctx->suspended) |
123 | return -EPERM; | 123 | return -EPERM; |
124 | 124 | ||
125 | if (!test_and_set_bit(0, &ctx->irq_flags)) | 125 | if (!test_and_set_bit(0, &ctx->irq_flags)) |
126 | ctx->vblank_on = true; | 126 | ctx->vblank_on = true; |
127 | 127 | ||
128 | ctx->direct_vblank = true; | 128 | ctx->direct_vblank = true; |
129 | 129 | ||
130 | /* | 130 | /* |
131 | * in case of page flip request, vidi_finish_pageflip function | 131 | * in case of page flip request, vidi_finish_pageflip function |
132 | * will not be called because direct_vblank is true and then | 132 | * will not be called because direct_vblank is true and then |
133 | * that function will be called by manager_ops->win_commit callback | 133 | * that function will be called by manager_ops->win_commit callback |
134 | */ | 134 | */ |
135 | schedule_work(&ctx->work); | 135 | schedule_work(&ctx->work); |
136 | 136 | ||
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
140 | static void vidi_disable_vblank(struct exynos_drm_manager *mgr) | 140 | static void vidi_disable_vblank(struct exynos_drm_manager *mgr) |
141 | { | 141 | { |
142 | struct vidi_context *ctx = mgr->ctx; | 142 | struct vidi_context *ctx = mgr->ctx; |
143 | 143 | ||
144 | if (ctx->suspended) | 144 | if (ctx->suspended) |
145 | return; | 145 | return; |
146 | 146 | ||
147 | if (test_and_clear_bit(0, &ctx->irq_flags)) | 147 | if (test_and_clear_bit(0, &ctx->irq_flags)) |
148 | ctx->vblank_on = false; | 148 | ctx->vblank_on = false; |
149 | } | 149 | } |
150 | 150 | ||
151 | static void vidi_win_mode_set(struct exynos_drm_manager *mgr, | 151 | static void vidi_win_mode_set(struct exynos_drm_manager *mgr, |
152 | struct exynos_drm_overlay *overlay) | 152 | struct exynos_drm_overlay *overlay) |
153 | { | 153 | { |
154 | struct vidi_context *ctx = mgr->ctx; | 154 | struct vidi_context *ctx = mgr->ctx; |
155 | struct vidi_win_data *win_data; | 155 | struct vidi_win_data *win_data; |
156 | int win; | 156 | int win; |
157 | unsigned long offset; | 157 | unsigned long offset; |
158 | 158 | ||
159 | if (!overlay) { | 159 | if (!overlay) { |
160 | DRM_ERROR("overlay is NULL\n"); | 160 | DRM_ERROR("overlay is NULL\n"); |
161 | return; | 161 | return; |
162 | } | 162 | } |
163 | 163 | ||
164 | win = overlay->zpos; | 164 | win = overlay->zpos; |
165 | if (win == DEFAULT_ZPOS) | 165 | if (win == DEFAULT_ZPOS) |
166 | win = ctx->default_win; | 166 | win = ctx->default_win; |
167 | 167 | ||
168 | if (win < 0 || win >= WINDOWS_NR) | 168 | if (win < 0 || win >= WINDOWS_NR) |
169 | return; | 169 | return; |
170 | 170 | ||
171 | offset = overlay->fb_x * (overlay->bpp >> 3); | 171 | offset = overlay->fb_x * (overlay->bpp >> 3); |
172 | offset += overlay->fb_y * overlay->pitch; | 172 | offset += overlay->fb_y * overlay->pitch; |
173 | 173 | ||
174 | DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch); | 174 | DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch); |
175 | 175 | ||
176 | win_data = &ctx->win_data[win]; | 176 | win_data = &ctx->win_data[win]; |
177 | 177 | ||
178 | win_data->offset_x = overlay->crtc_x; | 178 | win_data->offset_x = overlay->crtc_x; |
179 | win_data->offset_y = overlay->crtc_y; | 179 | win_data->offset_y = overlay->crtc_y; |
180 | win_data->ovl_width = overlay->crtc_width; | 180 | win_data->ovl_width = overlay->crtc_width; |
181 | win_data->ovl_height = overlay->crtc_height; | 181 | win_data->ovl_height = overlay->crtc_height; |
182 | win_data->fb_width = overlay->fb_width; | 182 | win_data->fb_width = overlay->fb_width; |
183 | win_data->fb_height = overlay->fb_height; | 183 | win_data->fb_height = overlay->fb_height; |
184 | win_data->dma_addr = overlay->dma_addr[0] + offset; | 184 | win_data->dma_addr = overlay->dma_addr[0] + offset; |
185 | win_data->bpp = overlay->bpp; | 185 | win_data->bpp = overlay->bpp; |
186 | win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * | 186 | win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * |
187 | (overlay->bpp >> 3); | 187 | (overlay->bpp >> 3); |
188 | win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); | 188 | win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); |
189 | 189 | ||
190 | /* | 190 | /* |
191 | * some parts of win_data should be transferred to user side | 191 | * some parts of win_data should be transferred to user side |
192 | * through specific ioctl. | 192 | * through specific ioctl. |
193 | */ | 193 | */ |
194 | 194 | ||
195 | DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", | 195 | DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", |
196 | win_data->offset_x, win_data->offset_y); | 196 | win_data->offset_x, win_data->offset_y); |
197 | DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", | 197 | DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", |
198 | win_data->ovl_width, win_data->ovl_height); | 198 | win_data->ovl_width, win_data->ovl_height); |
199 | DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr); | 199 | DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr); |
200 | DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", | 200 | DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", |
201 | overlay->fb_width, overlay->crtc_width); | 201 | overlay->fb_width, overlay->crtc_width); |
202 | } | 202 | } |
203 | 203 | ||
204 | static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos) | 204 | static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos) |
205 | { | 205 | { |
206 | struct vidi_context *ctx = mgr->ctx; | 206 | struct vidi_context *ctx = mgr->ctx; |
207 | struct vidi_win_data *win_data; | 207 | struct vidi_win_data *win_data; |
208 | int win = zpos; | 208 | int win = zpos; |
209 | 209 | ||
210 | if (ctx->suspended) | 210 | if (ctx->suspended) |
211 | return; | 211 | return; |
212 | 212 | ||
213 | if (win == DEFAULT_ZPOS) | 213 | if (win == DEFAULT_ZPOS) |
214 | win = ctx->default_win; | 214 | win = ctx->default_win; |
215 | 215 | ||
216 | if (win < 0 || win >= WINDOWS_NR) | 216 | if (win < 0 || win >= WINDOWS_NR) |
217 | return; | 217 | return; |
218 | 218 | ||
219 | win_data = &ctx->win_data[win]; | 219 | win_data = &ctx->win_data[win]; |
220 | 220 | ||
221 | win_data->enabled = true; | 221 | win_data->enabled = true; |
222 | 222 | ||
223 | DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr); | 223 | DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr); |
224 | 224 | ||
225 | if (ctx->vblank_on) | 225 | if (ctx->vblank_on) |
226 | schedule_work(&ctx->work); | 226 | schedule_work(&ctx->work); |
227 | } | 227 | } |
228 | 228 | ||
229 | static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos) | 229 | static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos) |
230 | { | 230 | { |
231 | struct vidi_context *ctx = mgr->ctx; | 231 | struct vidi_context *ctx = mgr->ctx; |
232 | struct vidi_win_data *win_data; | 232 | struct vidi_win_data *win_data; |
233 | int win = zpos; | 233 | int win = zpos; |
234 | 234 | ||
235 | if (win == DEFAULT_ZPOS) | 235 | if (win == DEFAULT_ZPOS) |
236 | win = ctx->default_win; | 236 | win = ctx->default_win; |
237 | 237 | ||
238 | if (win < 0 || win >= WINDOWS_NR) | 238 | if (win < 0 || win >= WINDOWS_NR) |
239 | return; | 239 | return; |
240 | 240 | ||
241 | win_data = &ctx->win_data[win]; | 241 | win_data = &ctx->win_data[win]; |
242 | win_data->enabled = false; | 242 | win_data->enabled = false; |
243 | 243 | ||
244 | /* TODO. */ | 244 | /* TODO. */ |
245 | } | 245 | } |
246 | 246 | ||
247 | static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable) | 247 | static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable) |
248 | { | 248 | { |
249 | struct vidi_context *ctx = mgr->ctx; | 249 | struct vidi_context *ctx = mgr->ctx; |
250 | 250 | ||
251 | DRM_DEBUG_KMS("%s\n", __FILE__); | 251 | DRM_DEBUG_KMS("%s\n", __FILE__); |
252 | 252 | ||
253 | if (enable != false && enable != true) | 253 | if (enable != false && enable != true) |
254 | return -EINVAL; | 254 | return -EINVAL; |
255 | 255 | ||
256 | if (enable) { | 256 | if (enable) { |
257 | ctx->suspended = false; | 257 | ctx->suspended = false; |
258 | 258 | ||
259 | /* if vblank was enabled status, enable it again. */ | 259 | /* if vblank was enabled status, enable it again. */ |
260 | if (test_and_clear_bit(0, &ctx->irq_flags)) | 260 | if (test_and_clear_bit(0, &ctx->irq_flags)) |
261 | vidi_enable_vblank(mgr); | 261 | vidi_enable_vblank(mgr); |
262 | 262 | ||
263 | vidi_apply(mgr); | 263 | vidi_apply(mgr); |
264 | } else { | 264 | } else { |
265 | ctx->suspended = true; | 265 | ctx->suspended = true; |
266 | } | 266 | } |
267 | 267 | ||
268 | return 0; | 268 | return 0; |
269 | } | 269 | } |
270 | 270 | ||
/*
 * DPMS entry point: map the DRM power states onto vidi_power_on() under
 * the context lock.  STANDBY/SUSPEND/OFF all power the fake display down.
 */
static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
{
	struct vidi_context *ctx = mgr->ctx;

	DRM_DEBUG_KMS("%d\n", mode);

	mutex_lock(&ctx->lock);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		vidi_power_on(mgr, true);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		vidi_power_on(mgr, false);
		break;
	default:
		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
		break;
	}

	mutex_unlock(&ctx->lock);
}
295 | 295 | ||
296 | static int vidi_mgr_initialize(struct exynos_drm_manager *mgr, | 296 | static int vidi_mgr_initialize(struct exynos_drm_manager *mgr, |
297 | struct drm_device *drm_dev, int pipe) | 297 | struct drm_device *drm_dev, int pipe) |
298 | { | 298 | { |
299 | struct vidi_context *ctx = mgr->ctx; | 299 | struct vidi_context *ctx = mgr->ctx; |
300 | 300 | ||
301 | DRM_ERROR("vidi initialize ct=%p dev=%p pipe=%d\n", ctx, drm_dev, pipe); | 301 | DRM_ERROR("vidi initialize ct=%p dev=%p pipe=%d\n", ctx, drm_dev, pipe); |
302 | 302 | ||
303 | ctx->drm_dev = drm_dev; | 303 | ctx->drm_dev = drm_dev; |
304 | ctx->pipe = pipe; | 304 | ctx->pipe = pipe; |
305 | 305 | ||
306 | /* | 306 | /* |
307 | * enable drm irq mode. | 307 | * enable drm irq mode. |
308 | * - with irq_enabled = 1, we can use the vblank feature. | 308 | * - with irq_enabled = 1, we can use the vblank feature. |
309 | * | 309 | * |
310 | * P.S. note that we wouldn't use drm irq handler but | 310 | * P.S. note that we wouldn't use drm irq handler but |
311 | * just specific driver own one instead because | 311 | * just specific driver own one instead because |
312 | * drm framework supports only one irq handler. | 312 | * drm framework supports only one irq handler. |
313 | */ | 313 | */ |
314 | drm_dev->irq_enabled = 1; | 314 | drm_dev->irq_enabled = 1; |
315 | 315 | ||
316 | /* | 316 | /* |
317 | * with vblank_disable_allowed = 1, vblank interrupt will be disabled | 317 | * with vblank_disable_allowed = 1, vblank interrupt will be disabled |
318 | * by drm timer once a current process gives up ownership of | 318 | * by drm timer once a current process gives up ownership of |
319 | * vblank event.(after drm_vblank_put function is called) | 319 | * vblank event.(after drm_vblank_put function is called) |
320 | */ | 320 | */ |
321 | drm_dev->vblank_disable_allowed = 1; | 321 | drm_dev->vblank_disable_allowed = 1; |
322 | 322 | ||
323 | return 0; | 323 | return 0; |
324 | } | 324 | } |
325 | 325 | ||
/* Manager (crtc-level) callbacks for the virtual display. */
static struct exynos_drm_manager_ops vidi_manager_ops = {
	.initialize = vidi_mgr_initialize,
	.dpms = vidi_dpms,
	.commit = vidi_commit,
	.enable_vblank = vidi_enable_vblank,
	.disable_vblank = vidi_disable_vblank,
	.win_mode_set = vidi_win_mode_set,
	.win_commit = vidi_win_commit,
	.win_disable = vidi_win_disable,
};
336 | 336 | ||
/* The single manager instance; .ctx is filled in by vidi_probe(). */
static struct exynos_drm_manager vidi_manager = {
	.type = EXYNOS_DISPLAY_TYPE_VIDI,
	.ops = &vidi_manager_ops,
};
341 | 341 | ||
342 | static void vidi_fake_vblank_handler(struct work_struct *work) | 342 | static void vidi_fake_vblank_handler(struct work_struct *work) |
343 | { | 343 | { |
344 | struct vidi_context *ctx = container_of(work, struct vidi_context, | 344 | struct vidi_context *ctx = container_of(work, struct vidi_context, |
345 | work); | 345 | work); |
346 | 346 | ||
347 | if (ctx->pipe < 0) | 347 | if (ctx->pipe < 0) |
348 | return; | 348 | return; |
349 | 349 | ||
350 | /* refresh rate is about 50Hz. */ | 350 | /* refresh rate is about 50Hz. */ |
351 | usleep_range(16000, 20000); | 351 | usleep_range(16000, 20000); |
352 | 352 | ||
353 | mutex_lock(&ctx->lock); | 353 | mutex_lock(&ctx->lock); |
354 | 354 | ||
355 | if (ctx->direct_vblank) { | 355 | if (ctx->direct_vblank) { |
356 | drm_handle_vblank(ctx->drm_dev, ctx->pipe); | 356 | drm_handle_vblank(ctx->drm_dev, ctx->pipe); |
357 | ctx->direct_vblank = false; | 357 | ctx->direct_vblank = false; |
358 | mutex_unlock(&ctx->lock); | 358 | mutex_unlock(&ctx->lock); |
359 | return; | 359 | return; |
360 | } | 360 | } |
361 | 361 | ||
362 | mutex_unlock(&ctx->lock); | 362 | mutex_unlock(&ctx->lock); |
363 | 363 | ||
364 | exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); | 364 | exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); |
365 | } | 365 | } |
366 | 366 | ||
367 | static int vidi_show_connection(struct device *dev, | 367 | static int vidi_show_connection(struct device *dev, |
368 | struct device_attribute *attr, char *buf) | 368 | struct device_attribute *attr, char *buf) |
369 | { | 369 | { |
370 | int rc; | 370 | int rc; |
371 | struct exynos_drm_manager *mgr = get_vidi_mgr(dev); | 371 | struct exynos_drm_manager *mgr = get_vidi_mgr(dev); |
372 | struct vidi_context *ctx = mgr->ctx; | 372 | struct vidi_context *ctx = mgr->ctx; |
373 | 373 | ||
374 | mutex_lock(&ctx->lock); | 374 | mutex_lock(&ctx->lock); |
375 | 375 | ||
376 | rc = sprintf(buf, "%d\n", ctx->connected); | 376 | rc = sprintf(buf, "%d\n", ctx->connected); |
377 | 377 | ||
378 | mutex_unlock(&ctx->lock); | 378 | mutex_unlock(&ctx->lock); |
379 | 379 | ||
380 | return rc; | 380 | return rc; |
381 | } | 381 | } |
382 | 382 | ||
383 | static int vidi_store_connection(struct device *dev, | 383 | static int vidi_store_connection(struct device *dev, |
384 | struct device_attribute *attr, | 384 | struct device_attribute *attr, |
385 | const char *buf, size_t len) | 385 | const char *buf, size_t len) |
386 | { | 386 | { |
387 | struct exynos_drm_manager *mgr = get_vidi_mgr(dev); | 387 | struct exynos_drm_manager *mgr = get_vidi_mgr(dev); |
388 | struct vidi_context *ctx = mgr->ctx; | 388 | struct vidi_context *ctx = mgr->ctx; |
389 | int ret; | 389 | int ret; |
390 | 390 | ||
391 | ret = kstrtoint(buf, 0, &ctx->connected); | 391 | ret = kstrtoint(buf, 0, &ctx->connected); |
392 | if (ret) | 392 | if (ret) |
393 | return ret; | 393 | return ret; |
394 | 394 | ||
395 | if (ctx->connected > 1) | 395 | if (ctx->connected > 1) |
396 | return -EINVAL; | 396 | return -EINVAL; |
397 | 397 | ||
398 | /* use fake edid data for test. */ | 398 | /* use fake edid data for test. */ |
399 | if (!ctx->raw_edid) | 399 | if (!ctx->raw_edid) |
400 | ctx->raw_edid = (struct edid *)fake_edid_info; | 400 | ctx->raw_edid = (struct edid *)fake_edid_info; |
401 | 401 | ||
402 | /* if raw_edid isn't same as fake data then it can't be tested. */ | 402 | /* if raw_edid isn't same as fake data then it can't be tested. */ |
403 | if (ctx->raw_edid != (struct edid *)fake_edid_info) { | 403 | if (ctx->raw_edid != (struct edid *)fake_edid_info) { |
404 | DRM_DEBUG_KMS("edid data is not fake data.\n"); | 404 | DRM_DEBUG_KMS("edid data is not fake data.\n"); |
405 | return -EINVAL; | 405 | return -EINVAL; |
406 | } | 406 | } |
407 | 407 | ||
408 | DRM_DEBUG_KMS("requested connection.\n"); | 408 | DRM_DEBUG_KMS("requested connection.\n"); |
409 | 409 | ||
410 | drm_helper_hpd_irq_event(ctx->drm_dev); | 410 | drm_helper_hpd_irq_event(ctx->drm_dev); |
411 | 411 | ||
412 | return len; | 412 | return len; |
413 | } | 413 | } |
414 | 414 | ||
/* /sys/.../connection: read or fake the virtual connector hotplug state. */
static DEVICE_ATTR(connection, 0644, vidi_show_connection,
			vidi_store_connection);
417 | 417 | ||
418 | int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, | 418 | int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, |
419 | struct drm_file *file_priv) | 419 | struct drm_file *file_priv) |
420 | { | 420 | { |
421 | struct vidi_context *ctx = NULL; | 421 | struct vidi_context *ctx = NULL; |
422 | struct drm_encoder *encoder; | 422 | struct drm_encoder *encoder; |
423 | struct exynos_drm_display *display; | 423 | struct exynos_drm_display *display; |
424 | struct drm_exynos_vidi_connection *vidi = data; | 424 | struct drm_exynos_vidi_connection *vidi = data; |
425 | 425 | ||
426 | if (!vidi) { | 426 | if (!vidi) { |
427 | DRM_DEBUG_KMS("user data for vidi is null.\n"); | 427 | DRM_DEBUG_KMS("user data for vidi is null.\n"); |
428 | return -EINVAL; | 428 | return -EINVAL; |
429 | } | 429 | } |
430 | 430 | ||
431 | if (vidi->connection > 1) { | 431 | if (vidi->connection > 1) { |
432 | DRM_DEBUG_KMS("connection should be 0 or 1.\n"); | 432 | DRM_DEBUG_KMS("connection should be 0 or 1.\n"); |
433 | return -EINVAL; | 433 | return -EINVAL; |
434 | } | 434 | } |
435 | 435 | ||
436 | list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list, | 436 | list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list, |
437 | head) { | 437 | head) { |
438 | display = exynos_drm_get_display(encoder); | 438 | display = exynos_drm_get_display(encoder); |
439 | 439 | ||
440 | if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) { | 440 | if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) { |
441 | ctx = display->ctx; | 441 | ctx = display->ctx; |
442 | break; | 442 | break; |
443 | } | 443 | } |
444 | } | 444 | } |
445 | 445 | ||
446 | if (!ctx) { | 446 | if (!ctx) { |
447 | DRM_DEBUG_KMS("not found virtual device type encoder.\n"); | 447 | DRM_DEBUG_KMS("not found virtual device type encoder.\n"); |
448 | return -EINVAL; | 448 | return -EINVAL; |
449 | } | 449 | } |
450 | 450 | ||
451 | if (ctx->connected == vidi->connection) { | 451 | if (ctx->connected == vidi->connection) { |
452 | DRM_DEBUG_KMS("same connection request.\n"); | 452 | DRM_DEBUG_KMS("same connection request.\n"); |
453 | return -EINVAL; | 453 | return -EINVAL; |
454 | } | 454 | } |
455 | 455 | ||
456 | if (vidi->connection) { | 456 | if (vidi->connection) { |
457 | struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid; | 457 | struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid; |
458 | if (!drm_edid_is_valid(raw_edid)) { | 458 | if (!drm_edid_is_valid(raw_edid)) { |
459 | DRM_DEBUG_KMS("edid data is invalid.\n"); | 459 | DRM_DEBUG_KMS("edid data is invalid.\n"); |
460 | return -EINVAL; | 460 | return -EINVAL; |
461 | } | 461 | } |
462 | ctx->raw_edid = drm_edid_duplicate(raw_edid); | 462 | ctx->raw_edid = drm_edid_duplicate(raw_edid); |
463 | if (!ctx->raw_edid) { | 463 | if (!ctx->raw_edid) { |
464 | DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); | 464 | DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); |
465 | return -ENOMEM; | 465 | return -ENOMEM; |
466 | } | 466 | } |
467 | } else { | 467 | } else { |
468 | /* | 468 | /* |
469 | * with connection = 0, free raw_edid | 469 | * with connection = 0, free raw_edid |
470 | * only if raw edid data isn't same as fake data. | 470 | * only if raw edid data isn't same as fake data. |
471 | */ | 471 | */ |
472 | if (ctx->raw_edid && ctx->raw_edid != | 472 | if (ctx->raw_edid && ctx->raw_edid != |
473 | (struct edid *)fake_edid_info) { | 473 | (struct edid *)fake_edid_info) { |
474 | kfree(ctx->raw_edid); | 474 | kfree(ctx->raw_edid); |
475 | ctx->raw_edid = NULL; | 475 | ctx->raw_edid = NULL; |
476 | } | 476 | } |
477 | } | 477 | } |
478 | 478 | ||
479 | ctx->connected = vidi->connection; | 479 | ctx->connected = vidi->connection; |
480 | drm_helper_hpd_irq_event(ctx->drm_dev); | 480 | drm_helper_hpd_irq_event(ctx->drm_dev); |
481 | 481 | ||
482 | return 0; | 482 | return 0; |
483 | } | 483 | } |
484 | 484 | ||
485 | static enum drm_connector_status vidi_detect(struct drm_connector *connector, | 485 | static enum drm_connector_status vidi_detect(struct drm_connector *connector, |
486 | bool force) | 486 | bool force) |
487 | { | 487 | { |
488 | struct vidi_context *ctx = ctx_from_connector(connector); | 488 | struct vidi_context *ctx = ctx_from_connector(connector); |
489 | 489 | ||
490 | /* | 490 | /* |
491 | * connection request would come from user side | 491 | * connection request would come from user side |
492 | * to do hotplug through specific ioctl. | 492 | * to do hotplug through specific ioctl. |
493 | */ | 493 | */ |
494 | return ctx->connected ? connector_status_connected : | 494 | return ctx->connected ? connector_status_connected : |
495 | connector_status_disconnected; | 495 | connector_status_disconnected; |
496 | } | 496 | } |
497 | 497 | ||
/* Nothing to free: the connector is embedded in the vidi_context. */
static void vidi_connector_destroy(struct drm_connector *connector)
{
}
501 | 501 | ||
/* Core connector callbacks; dpms/fill_modes use the generic DRM helpers. */
static struct drm_connector_funcs vidi_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.detect = vidi_detect,
	.destroy = vidi_connector_destroy,
};
508 | 508 | ||
509 | static int vidi_get_modes(struct drm_connector *connector) | 509 | static int vidi_get_modes(struct drm_connector *connector) |
510 | { | 510 | { |
511 | struct vidi_context *ctx = ctx_from_connector(connector); | 511 | struct vidi_context *ctx = ctx_from_connector(connector); |
512 | struct edid *edid; | 512 | struct edid *edid; |
513 | int edid_len; | 513 | int edid_len; |
514 | 514 | ||
515 | /* | 515 | /* |
516 | * the edid data comes from user side and it would be set | 516 | * the edid data comes from user side and it would be set |
517 | * to ctx->raw_edid through specific ioctl. | 517 | * to ctx->raw_edid through specific ioctl. |
518 | */ | 518 | */ |
519 | if (!ctx->raw_edid) { | 519 | if (!ctx->raw_edid) { |
520 | DRM_DEBUG_KMS("raw_edid is null.\n"); | 520 | DRM_DEBUG_KMS("raw_edid is null.\n"); |
521 | return -EFAULT; | 521 | return -EFAULT; |
522 | } | 522 | } |
523 | 523 | ||
524 | edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; | 524 | edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; |
525 | edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); | 525 | edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); |
526 | if (!edid) { | 526 | if (!edid) { |
527 | DRM_DEBUG_KMS("failed to allocate edid\n"); | 527 | DRM_DEBUG_KMS("failed to allocate edid\n"); |
528 | return -ENOMEM; | 528 | return -ENOMEM; |
529 | } | 529 | } |
530 | 530 | ||
531 | drm_mode_connector_update_edid_property(connector, edid); | 531 | drm_mode_connector_update_edid_property(connector, edid); |
532 | 532 | ||
533 | return drm_add_edid_modes(connector, edid); | 533 | return drm_add_edid_modes(connector, edid); |
534 | } | 534 | } |
535 | 535 | ||
/* The virtual display has no hardware limits; accept every mode. */
static int vidi_mode_valid(struct drm_connector *connector,
			struct drm_display_mode *mode)
{
	return MODE_OK;
}
541 | 541 | ||
/* The virtual connector is wired to exactly one encoder; return it. */
static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
{
	struct vidi_context *ctx = ctx_from_connector(connector);

	return ctx->encoder;
}
548 | 548 | ||
/* Helper callbacks used by the probe helpers to enumerate modes. */
static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
	.get_modes = vidi_get_modes,
	.mode_valid = vidi_mode_valid,
	.best_encoder = vidi_best_encoder,
};
554 | 554 | ||
/*
 * Initialize and register the virtual connector and attach it to
 * @encoder.  Hotplug events are driven from software (HPD polling mode).
 *
 * Returns 0 on success or the drm_connector_init() error code.
 */
static int vidi_create_connector(struct exynos_drm_display *display,
				struct drm_encoder *encoder)
{
	struct vidi_context *ctx = display->ctx;
	struct drm_connector *connector = &ctx->connector;
	int ret;

	ctx->encoder = encoder;
	connector->polled = DRM_CONNECTOR_POLL_HPD;

	ret = drm_connector_init(ctx->drm_dev, connector,
			&vidi_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to initialize connector with drm\n");
		return ret;
	}

	drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
	/* NOTE(review): drm_sysfs_connector_add() result is ignored here. */
	drm_sysfs_connector_add(connector);
	drm_mode_connector_attach_encoder(connector, encoder);

	return 0;
}
578 | 578 | ||
579 | 579 | ||
/* Display-level callbacks; only connector creation is needed here. */
static struct exynos_drm_display_ops vidi_display_ops = {
	.create_connector = vidi_create_connector,
};
583 | 583 | ||
/* The single display instance; .ctx is filled in by vidi_probe(). */
static struct exynos_drm_display vidi_display = {
	.type = EXYNOS_DISPLAY_TYPE_VIDI,
	.ops = &vidi_display_ops,
};
588 | 588 | ||
/*
 * Platform probe: allocate the device context, wire it into the static
 * manager/display instances, expose the "connection" sysfs attribute and
 * register with the exynos drm core.
 *
 * Returns 0 on success or -ENOMEM when the context allocation fails; a
 * sysfs attribute failure is only logged, not fatal.
 */
static int vidi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vidi_context *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->default_win = 0;

	INIT_WORK(&ctx->work, vidi_fake_vblank_handler);

	/* the static manager/display instances share this context. */
	vidi_manager.ctx = ctx;
	vidi_display.ctx = ctx;

	mutex_init(&ctx->lock);

	/* drvdata is the manager, not the context (see vidi_remove()). */
	platform_set_drvdata(pdev, &vidi_manager);

	ret = device_create_file(dev, &dev_attr_connection);
	if (ret < 0)
		DRM_INFO("failed to create connection sysfs.\n");

	exynos_drm_manager_register(&vidi_manager);
	exynos_drm_display_register(&vidi_display);

	return 0;
}
619 | 619 | ||
620 | static int vidi_remove(struct platform_device *pdev) | 620 | static int vidi_remove(struct platform_device *pdev) |
621 | { | 621 | { |
622 | struct vidi_context *ctx = platform_get_drvdata(pdev); | 622 | struct vidi_context *ctx = platform_get_drvdata(pdev); |
623 | 623 | ||
624 | exynos_drm_display_unregister(&vidi_display); | 624 | exynos_drm_display_unregister(&vidi_display); |
625 | exynos_drm_manager_unregister(&vidi_manager); | 625 | exynos_drm_manager_unregister(&vidi_manager); |
626 | 626 | ||
627 | if (ctx->raw_edid != (struct edid *)fake_edid_info) { | 627 | if (ctx->raw_edid != (struct edid *)fake_edid_info) { |
628 | kfree(ctx->raw_edid); | 628 | kfree(ctx->raw_edid); |
629 | ctx->raw_edid = NULL; | 629 | ctx->raw_edid = NULL; |
630 | } | 630 | } |
631 | 631 | ||
632 | return 0; | 632 | return 0; |
633 | } | 633 | } |
634 | 634 | ||
/* Platform driver glue; registered by the exynos drm core. */
struct platform_driver vidi_driver = {
	.probe		= vidi_probe,
	.remove		= vidi_remove,
	.driver		= {
		.name	= "exynos-drm-vidi",
		.owner	= THIS_MODULE,
	},
};
643 | 643 |