Commit f54dc1486d13d44766cdb6551d094313f077b535
Committed by
Jason Liu
1 parent
b4520dcfc6
Exists in
smarc_8mm_imx_4.14.98_2.0.0_ga
and in
4 other branches
MGS-4656 [#imx-1381] fix the wrong device check for imx8 gpu subsystem
The driver should check for the "imx8-gpu" string instead of "imx8x-gpu" when identifying the i.MX8 GPU sub-system. Signed-off-by: Xianzhong <xianzhong.li@nxp.com> (cherry picked from commit ce6483c43465651469c75b0e5093bdc749866f84)
Showing 1 changed file with 6 additions and 4 deletions Inline Diff
drivers/mxc/gpu-viv/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * | 2 | * |
3 | * The MIT License (MIT) | 3 | * The MIT License (MIT) |
4 | * | 4 | * |
5 | * Copyright (c) 2014 - 2018 Vivante Corporation | 5 | * Copyright (c) 2014 - 2018 Vivante Corporation |
6 | * Copyright 2019 NXP | ||
6 | * | 7 | * |
7 | * Permission is hereby granted, free of charge, to any person obtaining a | 8 | * Permission is hereby granted, free of charge, to any person obtaining a |
8 | * copy of this software and associated documentation files (the "Software"), | 9 | * copy of this software and associated documentation files (the "Software"), |
9 | * to deal in the Software without restriction, including without limitation | 10 | * to deal in the Software without restriction, including without limitation |
10 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
11 | * and/or sell copies of the Software, and to permit persons to whom the | 12 | * and/or sell copies of the Software, and to permit persons to whom the |
12 | * Software is furnished to do so, subject to the following conditions: | 13 | * Software is furnished to do so, subject to the following conditions: |
13 | * | 14 | * |
14 | * The above copyright notice and this permission notice shall be included in | 15 | * The above copyright notice and this permission notice shall be included in |
15 | * all copies or substantial portions of the Software. | 16 | * all copies or substantial portions of the Software. |
16 | * | 17 | * |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
20 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 21 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 23 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
23 | * DEALINGS IN THE SOFTWARE. | 24 | * DEALINGS IN THE SOFTWARE. |
24 | * | 25 | * |
25 | ***************************************************************************** | 26 | ***************************************************************************** |
26 | * | 27 | * |
27 | * The GPL License (GPL) | 28 | * The GPL License (GPL) |
28 | * | 29 | * |
29 | * Copyright (C) 2014 - 2018 Vivante Corporation | 30 | * Copyright (C) 2014 - 2018 Vivante Corporation |
31 | * Copyright 2019 NXP | ||
30 | * | 32 | * |
31 | * This program is free software; you can redistribute it and/or | 33 | * This program is free software; you can redistribute it and/or |
32 | * modify it under the terms of the GNU General Public License | 34 | * modify it under the terms of the GNU General Public License |
33 | * as published by the Free Software Foundation; either version 2 | 35 | * as published by the Free Software Foundation; either version 2 |
34 | * of the License, or (at your option) any later version. | 36 | * of the License, or (at your option) any later version. |
35 | * | 37 | * |
36 | * This program is distributed in the hope that it will be useful, | 38 | * This program is distributed in the hope that it will be useful, |
37 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 39 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
38 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 40 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
39 | * GNU General Public License for more details. | 41 | * GNU General Public License for more details. |
40 | * | 42 | * |
41 | * You should have received a copy of the GNU General Public License | 43 | * You should have received a copy of the GNU General Public License |
42 | * along with this program; if not, write to the Free Software Foundation, | 44 | * along with this program; if not, write to the Free Software Foundation, |
43 | * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 45 | * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
44 | * | 46 | * |
45 | ***************************************************************************** | 47 | ***************************************************************************** |
46 | * | 48 | * |
47 | * Note: This software is released under dual MIT and GPL licenses. A | 49 | * Note: This software is released under dual MIT and GPL licenses. A |
48 | * recipient may use this file under the terms of either the MIT license or | 50 | * recipient may use this file under the terms of either the MIT license or |
49 | * GPL License. If you wish to use only one license not the other, you can | 51 | * GPL License. If you wish to use only one license not the other, you can |
50 | * indicate your decision by deleting one of the above license notices in your | 52 | * indicate your decision by deleting one of the above license notices in your |
51 | * version of this file. | 53 | * version of this file. |
52 | * | 54 | * |
53 | *****************************************************************************/ | 55 | *****************************************************************************/ |
54 | 56 | ||
55 | 57 | ||
56 | #include "gc_hal_kernel_linux.h" | 58 | #include "gc_hal_kernel_linux.h" |
57 | #include "gc_hal_kernel_platform.h" | 59 | #include "gc_hal_kernel_platform.h" |
58 | #include "gc_hal_kernel_device.h" | 60 | #include "gc_hal_kernel_device.h" |
59 | #include "gc_hal_driver.h" | 61 | #include "gc_hal_driver.h" |
60 | #include <linux/slab.h> | 62 | #include <linux/slab.h> |
61 | 63 | ||
62 | #if defined(CONFIG_PM_OPP) | 64 | #if defined(CONFIG_PM_OPP) |
63 | #include <linux/pm_opp.h> | 65 | #include <linux/pm_opp.h> |
64 | #endif | 66 | #endif |
65 | 67 | ||
66 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) | 68 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) |
67 | # include <linux/of_platform.h> | 69 | # include <linux/of_platform.h> |
68 | # include <linux/of_gpio.h> | 70 | # include <linux/of_gpio.h> |
69 | # include <linux/of_address.h> | 71 | # include <linux/of_address.h> |
70 | #endif | 72 | #endif |
71 | 73 | ||
72 | #if USE_PLATFORM_DRIVER | 74 | #if USE_PLATFORM_DRIVER |
73 | # include <linux/platform_device.h> | 75 | # include <linux/platform_device.h> |
74 | #endif | 76 | #endif |
75 | 77 | ||
76 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0)) || defined(IMX8_SCU_CONTROL) | 78 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0)) || defined(IMX8_SCU_CONTROL) |
77 | # define IMX_GPU_SUBSYSTEM 1 | 79 | # define IMX_GPU_SUBSYSTEM 1 |
78 | # include <linux/component.h> | 80 | # include <linux/component.h> |
79 | #endif | 81 | #endif |
80 | 82 | ||
81 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) | 83 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) |
82 | # include <mach/viv_gpu.h> | 84 | # include <mach/viv_gpu.h> |
83 | #elif defined (CONFIG_PM) | 85 | #elif defined (CONFIG_PM) |
84 | # include <linux/pm_runtime.h> | 86 | # include <linux/pm_runtime.h> |
85 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) | 87 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) |
86 | # include <mach/busfreq.h> | 88 | # include <mach/busfreq.h> |
87 | # elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29) | 89 | # elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29) |
88 | # include <linux/busfreq-imx6.h> | 90 | # include <linux/busfreq-imx6.h> |
89 | # include <linux/reset.h> | 91 | # include <linux/reset.h> |
90 | # else | 92 | # else |
91 | # include <linux/busfreq-imx.h> | 93 | # include <linux/busfreq-imx.h> |
92 | # include <linux/reset.h> | 94 | # include <linux/reset.h> |
93 | # endif | 95 | # endif |
94 | #endif | 96 | #endif |
95 | 97 | ||
96 | #include <linux/clk.h> | 98 | #include <linux/clk.h> |
97 | 99 | ||
98 | #if defined(IMX8_SCU_CONTROL) | 100 | #if defined(IMX8_SCU_CONTROL) |
99 | # include <soc/imx8/sc/sci.h> | 101 | # include <soc/imx8/sc/sci.h> |
100 | static sc_ipc_t gpu_ipcHandle; | 102 | static sc_ipc_t gpu_ipcHandle; |
101 | #endif | 103 | #endif |
102 | 104 | ||
103 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) | 105 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) |
104 | # include <mach/hardware.h> | 106 | # include <mach/hardware.h> |
105 | #endif | 107 | #endif |
106 | 108 | ||
107 | #include <linux/pm_runtime.h> | 109 | #include <linux/pm_runtime.h> |
108 | #include <linux/regulator/consumer.h> | 110 | #include <linux/regulator/consumer.h> |
109 | 111 | ||
110 | #ifdef CONFIG_DEVICE_THERMAL | 112 | #ifdef CONFIG_DEVICE_THERMAL |
111 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) | 113 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) |
112 | # include <linux/device_cooling.h> | 114 | # include <linux/device_cooling.h> |
113 | # define REG_THERMAL_NOTIFIER(a) register_devfreq_cooling_notifier(a); | 115 | # define REG_THERMAL_NOTIFIER(a) register_devfreq_cooling_notifier(a); |
114 | # define UNREG_THERMAL_NOTIFIER(a) unregister_devfreq_cooling_notifier(a); | 116 | # define UNREG_THERMAL_NOTIFIER(a) unregister_devfreq_cooling_notifier(a); |
115 | # else | 117 | # else |
116 | extern int register_thermal_notifier(struct notifier_block *nb); | 118 | extern int register_thermal_notifier(struct notifier_block *nb); |
117 | extern int unregister_thermal_notifier(struct notifier_block *nb); | 119 | extern int unregister_thermal_notifier(struct notifier_block *nb); |
118 | # define REG_THERMAL_NOTIFIER(a) register_thermal_notifier(a); | 120 | # define REG_THERMAL_NOTIFIER(a) register_thermal_notifier(a); |
119 | # define UNREG_THERMAL_NOTIFIER(a) unregister_thermal_notifier(a); | 121 | # define UNREG_THERMAL_NOTIFIER(a) unregister_thermal_notifier(a); |
120 | # endif | 122 | # endif |
121 | #endif | 123 | #endif |
122 | 124 | ||
123 | #ifndef gcdFSL_CONTIGUOUS_SIZE | 125 | #ifndef gcdFSL_CONTIGUOUS_SIZE |
124 | # define gcdFSL_CONTIGUOUS_SIZE (4 << 20) | 126 | # define gcdFSL_CONTIGUOUS_SIZE (4 << 20) |
125 | #endif | 127 | #endif |
126 | 128 | ||
/* Minimum 3D clock divider applied at driver init (module parameter,
 * writable at runtime via /sys/module). */
static int initgpu3DMinClock = 1;
module_param(initgpu3DMinClock, int, 0644);

/* Platform device saved for later use by sysfs and thermal callbacks. */
struct platform_device *pdevice;
131 | 133 | ||
132 | #ifdef CONFIG_GPU_LOW_MEMORY_KILLER | 134 | #ifdef CONFIG_GPU_LOW_MEMORY_KILLER |
133 | # include <linux/kernel.h> | 135 | # include <linux/kernel.h> |
134 | # include <linux/mm.h> | 136 | # include <linux/mm.h> |
135 | # include <linux/oom.h> | 137 | # include <linux/oom.h> |
136 | # include <linux/sched.h> | 138 | # include <linux/sched.h> |
137 | # include <linux/profile.h> | 139 | # include <linux/profile.h> |
138 | 140 | ||
/* Deadline (in jiffies) until which the previously signalled victim task is
 * given a chance to exit before another victim may be selected. */
static unsigned long timeout;
/* Task most recently sent SIGKILL by the GPU low-memory killer; cleared by
 * the task-exit notifier once the task actually exits. */
struct task_struct *almostfail;
141 | 143 | ||
142 | static int | 144 | static int |
143 | notif_func(struct notifier_block *self, unsigned long val, void *data) | 145 | notif_func(struct notifier_block *self, unsigned long val, void *data) |
144 | { | 146 | { |
145 | struct task_struct *task = data; | 147 | struct task_struct *task = data; |
146 | 148 | ||
147 | if (task == almostfail) | 149 | if (task == almostfail) |
148 | almostfail = NULL; | 150 | almostfail = NULL; |
149 | 151 | ||
150 | return NOTIFY_DONE; | 152 | return NOTIFY_DONE; |
151 | } | 153 | } |
152 | 154 | ||
/* Process-exit notifier used to clear 'almostfail' once the victim exits. */
static struct notifier_block task_nb = {
    .notifier_call = notif_func,
};
156 | 158 | ||
/*
 * GPU low-memory killer: select the "worst" user-space process that holds GPU
 * memory and send it SIGKILL.  Victim selection: highest oom_score_adj wins;
 * ties are broken by the larger GPU footprint (video + contiguous pages).
 * Returns 0 when a victim was signalled (or one is still pending exit),
 * -1 when no suitable process was found.
 */
static int force_shrink_mem(IN gckKERNEL Kernel)
{
    struct task_struct *p = NULL;
    struct task_struct *selected = NULL;
    int cur_size = 0;
    int set_size = 0;
    int oom_val = 0;
    int mem_adj = 0;
    int retVal = -1;

    /* A previous victim is still within its grace period: do nothing yet. */
    if (almostfail && time_before_eq(jiffies, timeout))
        return 0;

    rcu_read_lock();

    for_each_process(p) {
        gcuDATABASE_INFO info;
        struct mm_struct *mm;
        struct signal_struct *sig;

        cur_size = 0;

        task_lock(p);
        sig = p->signal;
        mm = p->mm;
        if (!sig || !mm) {
            /* Kernel thread or task without an address space: skip. */
            task_unlock(p);
            continue;
        }
        oom_val = sig->oom_score_adj;
        if (oom_val < 0) {
            /* Negative adj marks the task as protected from OOM killing. */
            task_unlock(p);
            continue;
        }
        task_unlock(p);

        /* The database queries may block, so drop the RCU read lock around
         * them.  NOTE(review): 'p' is not explicitly pinned across this
         * window - assumes the process-list walk tolerates it; confirm. */
        rcu_read_unlock();
        if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_VIDEO_MEMORY, &info) == gcvSTATUS_OK){
            cur_size += info.counters.bytes / PAGE_SIZE;
        }
        if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_CONTIGUOUS, &info) == gcvSTATUS_OK){
            cur_size += info.counters.bytes / PAGE_SIZE;
        }
        rcu_read_lock();

        if (cur_size <= 0) continue;

        printk("<gpu> pid %d (%s), adj %d, size %d\n", p->pid, p->comm, oom_val, cur_size);

        /* Keep the candidate with the highest adj; on equal adj keep the
         * one holding more GPU memory. */
        if (selected) {
            if ((oom_val < mem_adj) || (oom_val == mem_adj && cur_size <= set_size)) continue;
        }
        set_size = cur_size;
        mem_adj = oom_val;
        selected = p;
    }

    if (selected && mem_adj > 0) {
        printk("<gpu> send sigkill to %d (%s), adj %d, size %d\n",
            selected->pid, selected->comm, mem_adj, set_size);
        almostfail = selected;
        /* Grace period of one second before another victim is considered. */
        timeout = jiffies + HZ;
        force_sig(SIGKILL, selected);
        retVal = 0;
    }

    rcu_read_unlock();

    return retVal;
}
227 | 229 | ||
/* Defined elsewhere in the driver: returns a usable kernel object of the GAL
 * device, or gcvNULL if none is ready. */
extern gckKERNEL
_GetValidKernel(
    gckGALDEVICE Device
    );
232 | 234 | ||
233 | static gceSTATUS | 235 | static gceSTATUS |
234 | _ShrinkMemory( | 236 | _ShrinkMemory( |
235 | IN gcsPLATFORM * Platform | 237 | IN gcsPLATFORM * Platform |
236 | ) | 238 | ) |
237 | { | 239 | { |
238 | struct platform_device *pdev; | 240 | struct platform_device *pdev; |
239 | gckGALDEVICE galDevice; | 241 | gckGALDEVICE galDevice; |
240 | gckKERNEL kernel; | 242 | gckKERNEL kernel; |
241 | gceSTATUS status = gcvSTATUS_OK; | 243 | gceSTATUS status = gcvSTATUS_OK; |
242 | 244 | ||
243 | pdev = Platform->device; | 245 | pdev = Platform->device; |
244 | 246 | ||
245 | galDevice = platform_get_drvdata(pdev); | 247 | galDevice = platform_get_drvdata(pdev); |
246 | 248 | ||
247 | kernel = _GetValidKernel(galDevice); | 249 | kernel = _GetValidKernel(galDevice); |
248 | 250 | ||
249 | if (kernel != gcvNULL) | 251 | if (kernel != gcvNULL) |
250 | { | 252 | { |
251 | if (force_shrink_mem(kernel) != 0) | 253 | if (force_shrink_mem(kernel) != 0) |
252 | status = gcvSTATUS_OUT_OF_MEMORY; | 254 | status = gcvSTATUS_OUT_OF_MEMORY; |
253 | } | 255 | } |
254 | else | 256 | else |
255 | { | 257 | { |
256 | printk("%s: can't find kernel!\n", __FUNCTION__); | 258 | printk("%s: can't find kernel!\n", __FUNCTION__); |
257 | } | 259 | } |
258 | 260 | ||
259 | return status; | 261 | return status; |
260 | } | 262 | } |
261 | #endif | 263 | #endif |
262 | 264 | ||
263 | #if gcdENABLE_FSCALE_VAL_ADJUST && (defined(CONFIG_DEVICE_THERMAL) || defined(CONFIG_DEVICE_THERMAL_MODULE)) | 265 | #if gcdENABLE_FSCALE_VAL_ADJUST && (defined(CONFIG_DEVICE_THERMAL) || defined(CONFIG_DEVICE_THERMAL_MODULE)) |
264 | static int thermal_hot_pm_notify(struct notifier_block *nb, unsigned long event, | 266 | static int thermal_hot_pm_notify(struct notifier_block *nb, unsigned long event, |
265 | void *dummy) | 267 | void *dummy) |
266 | { | 268 | { |
267 | static gctUINT orgFscale, minFscale, maxFscale; | 269 | static gctUINT orgFscale, minFscale, maxFscale; |
268 | static gctBOOL bAlreadyTooHot = gcvFALSE; | 270 | static gctBOOL bAlreadyTooHot = gcvFALSE; |
269 | gckHARDWARE hardware; | 271 | gckHARDWARE hardware; |
270 | gckGALDEVICE galDevice; | 272 | gckGALDEVICE galDevice; |
271 | 273 | ||
272 | galDevice = platform_get_drvdata(pdevice); | 274 | galDevice = platform_get_drvdata(pdevice); |
273 | if (!galDevice) | 275 | if (!galDevice) |
274 | { | 276 | { |
275 | /* GPU is not ready, so it is meaningless to change GPU freq. */ | 277 | /* GPU is not ready, so it is meaningless to change GPU freq. */ |
276 | return NOTIFY_OK; | 278 | return NOTIFY_OK; |
277 | } | 279 | } |
278 | 280 | ||
279 | if (!galDevice->kernels[gcvCORE_MAJOR]) | 281 | if (!galDevice->kernels[gcvCORE_MAJOR]) |
280 | { | 282 | { |
281 | return NOTIFY_OK; | 283 | return NOTIFY_OK; |
282 | } | 284 | } |
283 | 285 | ||
284 | hardware = galDevice->kernels[gcvCORE_MAJOR]->hardware; | 286 | hardware = galDevice->kernels[gcvCORE_MAJOR]->hardware; |
285 | 287 | ||
286 | if (!hardware) | 288 | if (!hardware) |
287 | { | 289 | { |
288 | return NOTIFY_OK; | 290 | return NOTIFY_OK; |
289 | } | 291 | } |
290 | 292 | ||
291 | if (event && !bAlreadyTooHot) { | 293 | if (event && !bAlreadyTooHot) { |
292 | gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale); | 294 | gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale); |
293 | gckHARDWARE_SetFscaleValue(hardware, minFscale); | 295 | gckHARDWARE_SetFscaleValue(hardware, minFscale); |
294 | bAlreadyTooHot = gcvTRUE; | 296 | bAlreadyTooHot = gcvTRUE; |
295 | printk("System is too hot. GPU3D will work at %d/64 clock.\n", minFscale); | 297 | printk("System is too hot. GPU3D will work at %d/64 clock.\n", minFscale); |
296 | } else if (!event && bAlreadyTooHot) { | 298 | } else if (!event && bAlreadyTooHot) { |
297 | gckHARDWARE_SetFscaleValue(hardware, orgFscale); | 299 | gckHARDWARE_SetFscaleValue(hardware, orgFscale); |
298 | printk("Hot alarm is canceled. GPU3D clock will return to %d/64\n", orgFscale); | 300 | printk("Hot alarm is canceled. GPU3D clock will return to %d/64\n", orgFscale); |
299 | bAlreadyTooHot = gcvFALSE; | 301 | bAlreadyTooHot = gcvFALSE; |
300 | } | 302 | } |
301 | return NOTIFY_OK; | 303 | return NOTIFY_OK; |
302 | } | 304 | } |
303 | 305 | ||
/* Registered with the device-cooling framework to throttle the GPU on heat. */
static struct notifier_block thermal_hot_pm_notifier =
{
    .notifier_call = thermal_hot_pm_notify,
};
308 | 310 | ||
309 | static ssize_t gpu3DMinClock_show(struct device_driver *dev, char *buf) | 311 | static ssize_t gpu3DMinClock_show(struct device_driver *dev, char *buf) |
310 | { | 312 | { |
311 | gctUINT currentf = 0, minf = 0, maxf = 0; | 313 | gctUINT currentf = 0, minf = 0, maxf = 0; |
312 | gckGALDEVICE galDevice; | 314 | gckGALDEVICE galDevice; |
313 | 315 | ||
314 | galDevice = platform_get_drvdata(pdevice); | 316 | galDevice = platform_get_drvdata(pdevice); |
315 | 317 | ||
316 | minf = 0; | 318 | minf = 0; |
317 | if (galDevice->kernels[gcvCORE_MAJOR]) | 319 | if (galDevice->kernels[gcvCORE_MAJOR]) |
318 | { | 320 | { |
319 | gckHARDWARE_GetFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware, | 321 | gckHARDWARE_GetFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware, |
320 | ¤tf, &minf, &maxf); | 322 | ¤tf, &minf, &maxf); |
321 | } | 323 | } |
322 | 324 | ||
323 | snprintf(buf, PAGE_SIZE, "%d\n", minf); | 325 | snprintf(buf, PAGE_SIZE, "%d\n", minf); |
324 | return strlen(buf); | 326 | return strlen(buf); |
325 | } | 327 | } |
326 | 328 | ||
327 | static ssize_t gpu3DMinClock_store(struct device_driver *dev, const char *buf, size_t count) | 329 | static ssize_t gpu3DMinClock_store(struct device_driver *dev, const char *buf, size_t count) |
328 | { | 330 | { |
329 | 331 | ||
330 | gctINT fields; | 332 | gctINT fields; |
331 | gctUINT MinFscaleValue; | 333 | gctUINT MinFscaleValue; |
332 | gckGALDEVICE galDevice; | 334 | gckGALDEVICE galDevice; |
333 | 335 | ||
334 | galDevice = platform_get_drvdata(pdevice); | 336 | galDevice = platform_get_drvdata(pdevice); |
335 | 337 | ||
336 | if (galDevice->kernels[gcvCORE_MAJOR]) | 338 | if (galDevice->kernels[gcvCORE_MAJOR]) |
337 | { | 339 | { |
338 | fields = sscanf(buf, "%d", &MinFscaleValue); | 340 | fields = sscanf(buf, "%d", &MinFscaleValue); |
339 | 341 | ||
340 | if (fields < 1) | 342 | if (fields < 1) |
341 | return -EINVAL; | 343 | return -EINVAL; |
342 | 344 | ||
343 | gckHARDWARE_SetMinFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware,MinFscaleValue); | 345 | gckHARDWARE_SetMinFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware,MinFscaleValue); |
344 | } | 346 | } |
345 | 347 | ||
346 | return count; | 348 | return count; |
347 | } | 349 | } |
348 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) | 350 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) |
349 | static DRIVER_ATTR_RW(gpu3DMinClock); | 351 | static DRIVER_ATTR_RW(gpu3DMinClock); |
350 | #else | 352 | #else |
351 | static DRIVER_ATTR(gpu3DMinClock, S_IRUGO | S_IWUSR, gpu3DMinClock_show, gpu3DMinClock_store); | 353 | static DRIVER_ATTR(gpu3DMinClock, S_IRUGO | S_IWUSR, gpu3DMinClock_show, gpu3DMinClock_store); |
352 | #endif | 354 | #endif |
353 | #endif | 355 | #endif |
354 | 356 | ||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
/* Device-tree match table: i.MX8 GPU subsystem node plus the legacy
 * i.MX6 binding. */
static const struct of_device_id mxs_gpu_dt_ids[] = {
#ifdef IMX_GPU_SUBSYSTEM
    { .compatible = "fsl,imx8-gpu-ss", },
#endif
    { .compatible = "fsl,imx6q-gpu", }, /* Backward compatibility */
    {/* sentinel */}
};
MODULE_DEVICE_TABLE(of, mxs_gpu_dt_ids);
#endif
365 | 367 | ||
/* Clock handles for one GPU core, obtained from the clock framework. */
struct gpu_clk
{
    struct clk *clk_core;    /* core clock */
    struct clk *clk_shader;  /* shader clock */
    struct clk *clk_axi;     /* AXI bus clock */
    struct clk *clk_ahb;     /* AHB (register access) clock */
};
373 | 375 | ||
#if defined(CONFIG_PM_OPP)
/* Governor operating modes, ordered from highest to lowest frequency. */
typedef enum _GOVERN_MODE
{
    OVERDRIVE,
    NOMINAL,
    UNDERDRIVE,
    GOVERN_COUNT
}
GOVERN_MODE;

/* sysfs names for the modes above; indexed by GOVERN_MODE. */
static const char *govern_modes[] =
{
    "overdrive",
    "nominal",
    "underdrive"
};

/* Per-mode clock configuration (presumably filled from the OPP table by
 * init_gpu_opp_table - confirm against the full file). */
struct gpu_govern
{
    unsigned long core_clk_freq[GOVERN_COUNT];   /* core clock rate per mode */
    unsigned long shader_clk_freq[GOVERN_COUNT]; /* shader clock rate per mode */
    struct device* dev;
    int num_modes;     /* number of populated modes */
    int current_mode;  /* currently applied GOVERN_MODE index */
};
#endif
400 | 402 | ||
/* Driver-private platform state shared by all GPU cores. */
struct imx_priv
{
    struct gpu_clk imx_gpu_clks[gcdMAX_GPU_COUNT];

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    /*Power management.*/
    struct regulator *gpu_regulator;
# endif
#endif
    /*Run time pm*/
    struct device *pmdev[gcdMAX_GPU_COUNT];

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    /* Per-core reset controllers. */
    struct reset_control *rstc[gcdMAX_GPU_COUNT];
#endif

#if defined(IMX8_SCU_CONTROL)
    /* SCU resource IDs for the cores on i.MX8 parts. */
    sc_rsrc_t sc_gpu_pid[gcdMAX_GPU_COUNT];
#endif

    /* Number of 3D GPU cores present. */
    int gpu3dCount;

#if defined(CONFIG_PM_OPP)
    struct gpu_govern imx_gpu_govern;
#endif
};

/* Single global instance; the driver handles one platform device. */
static struct imx_priv imxPriv;
430 | 432 | ||
431 | #if defined(CONFIG_PM_OPP) | 433 | #if defined(CONFIG_PM_OPP) |
432 | static ssize_t gpu_govern_show(struct device_driver *dev, char *buf) | 434 | static ssize_t gpu_govern_show(struct device_driver *dev, char *buf) |
433 | { | 435 | { |
434 | struct imx_priv *priv = &imxPriv; | 436 | struct imx_priv *priv = &imxPriv; |
435 | int i; | 437 | int i; |
436 | ssize_t len; | 438 | ssize_t len; |
437 | int max_modes; | 439 | int max_modes; |
438 | 440 | ||
439 | unsigned long core_freq; | 441 | unsigned long core_freq; |
440 | unsigned long shader_freq; | 442 | unsigned long shader_freq; |
441 | 443 | ||
442 | if (priv->imx_gpu_govern.num_modes == GOVERN_COUNT) | 444 | if (priv->imx_gpu_govern.num_modes == GOVERN_COUNT) |
443 | max_modes = priv->imx_gpu_govern.num_modes - 1; | 445 | max_modes = priv->imx_gpu_govern.num_modes - 1; |
444 | else | 446 | else |
445 | max_modes = priv->imx_gpu_govern.num_modes; | 447 | max_modes = priv->imx_gpu_govern.num_modes; |
446 | 448 | ||
447 | len = sprintf(buf, "GPU support %d modes\n", priv->imx_gpu_govern.num_modes); | 449 | len = sprintf(buf, "GPU support %d modes\n", priv->imx_gpu_govern.num_modes); |
448 | 450 | ||
449 | 451 | ||
450 | for (i = priv->imx_gpu_govern.current_mode; i <= max_modes; i++) | 452 | for (i = priv->imx_gpu_govern.current_mode; i <= max_modes; i++) |
451 | { | 453 | { |
452 | core_freq = priv->imx_gpu_govern.core_clk_freq[i]; | 454 | core_freq = priv->imx_gpu_govern.core_clk_freq[i]; |
453 | shader_freq = priv->imx_gpu_govern.shader_clk_freq[i]; | 455 | shader_freq = priv->imx_gpu_govern.shader_clk_freq[i]; |
454 | 456 | ||
455 | len += sprintf(buf + len, | 457 | len += sprintf(buf + len, |
456 | "%s:\tcore_clk frequency: %lu\tshader_clk frequency: %lu\n", | 458 | "%s:\tcore_clk frequency: %lu\tshader_clk frequency: %lu\n", |
457 | govern_modes[i], core_freq, shader_freq); | 459 | govern_modes[i], core_freq, shader_freq); |
458 | } | 460 | } |
459 | 461 | ||
460 | len += sprintf(buf + len, "Currently GPU runs on mode %s\n", | 462 | len += sprintf(buf + len, "Currently GPU runs on mode %s\n", |
461 | govern_modes[priv->imx_gpu_govern.current_mode]); | 463 | govern_modes[priv->imx_gpu_govern.current_mode]); |
462 | 464 | ||
463 | return len; | 465 | return len; |
464 | } | 466 | } |
465 | 467 | ||
466 | static ssize_t gpu_govern_store(struct device_driver *dev, const char *buf, size_t count) | 468 | static ssize_t gpu_govern_store(struct device_driver *dev, const char *buf, size_t count) |
467 | { | 469 | { |
468 | unsigned long core_freq = 0; | 470 | unsigned long core_freq = 0; |
469 | unsigned long shader_freq = 0; | 471 | unsigned long shader_freq = 0; |
470 | struct imx_priv *priv = &imxPriv; | 472 | struct imx_priv *priv = &imxPriv; |
471 | int core = gcvCORE_MAJOR; | 473 | int core = gcvCORE_MAJOR; |
472 | int i; | 474 | int i; |
473 | 475 | ||
474 | for (i = 0; i < GOVERN_COUNT; i++) | 476 | for (i = 0; i < GOVERN_COUNT; i++) |
475 | { | 477 | { |
476 | if (strstr(buf, govern_modes[i])) | 478 | if (strstr(buf, govern_modes[i])) |
477 | { | 479 | { |
478 | break; | 480 | break; |
479 | } | 481 | } |
480 | } | 482 | } |
481 | 483 | ||
482 | if (i == GOVERN_COUNT) | 484 | if (i == GOVERN_COUNT) |
483 | { | 485 | { |
484 | return count; | 486 | return count; |
485 | } | 487 | } |
486 | 488 | ||
487 | core_freq = priv->imx_gpu_govern.core_clk_freq[i]; | 489 | core_freq = priv->imx_gpu_govern.core_clk_freq[i]; |
488 | shader_freq = priv->imx_gpu_govern.shader_clk_freq[i]; | 490 | shader_freq = priv->imx_gpu_govern.shader_clk_freq[i]; |
489 | priv->imx_gpu_govern.current_mode = i; | 491 | priv->imx_gpu_govern.current_mode = i; |
490 | 492 | ||
491 | for (core = gcvCORE_MAJOR; core <= gcvCORE_3D_MAX; core++) | 493 | for (core = gcvCORE_MAJOR; core <= gcvCORE_3D_MAX; core++) |
492 | { | 494 | { |
493 | struct clk* clk_core = priv->imx_gpu_clks[core].clk_core; | 495 | struct clk* clk_core = priv->imx_gpu_clks[core].clk_core; |
494 | struct clk* clk_shader = priv->imx_gpu_clks[core].clk_shader; | 496 | struct clk* clk_shader = priv->imx_gpu_clks[core].clk_shader; |
495 | 497 | ||
496 | if (clk_core != NULL && clk_shader != NULL && | 498 | if (clk_core != NULL && clk_shader != NULL && |
497 | core_freq != 0 && shader_freq != 0) | 499 | core_freq != 0 && shader_freq != 0) |
498 | { | 500 | { |
499 | clk_set_rate(clk_core, core_freq); | 501 | clk_set_rate(clk_core, core_freq); |
500 | clk_set_rate(clk_shader, shader_freq); | 502 | clk_set_rate(clk_shader, shader_freq); |
501 | } | 503 | } |
502 | } | 504 | } |
503 | 505 | ||
504 | return count; | 506 | return count; |
505 | } | 507 | } |
/* Expose the governor as a driver attribute:
 * /sys/bus/platform/drivers/<driver>/gpu_govern (read/write). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)
static DRIVER_ATTR_RW(gpu_govern);
#else
static DRIVER_ATTR(gpu_govern, S_IRUGO | S_IWUSR, gpu_govern_show, gpu_govern_store);
#endif
511 | 513 | ||
/*
 * Build the GPU frequency-governor table from the "operating-points"
 * device-tree property, register the core-clock OPPs, create the
 * gpu_govern driver attribute, and apply the initial mode's clock
 * rates to every 3D core.
 *
 * Returns 0 on success (including when no "operating-points" property
 * exists at all), -ENODATA/-EINVAL on a malformed property, or the
 * error from driver_create_file().
 */
int init_gpu_opp_table(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;
	int ret = 0;
	int i, p;
	int core = gcvCORE_MAJOR;
	struct imx_priv *priv = &imxPriv;

	struct clk *clk_core;
	struct clk *clk_shader;

	/* NOTE: shadowed by identically-named locals inside the parse loop
	 * below; these outer ones are only used after the table is built. */
	unsigned long core_freq, shader_freq;

	priv->imx_gpu_govern.num_modes = 0;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop) {
		/* No OPPs in the DT: frequency scaling simply stays off. */
		return 0;
	}

	if (!prop->value) {
		dev_err(dev, "operating-points invalid. Frequency scaling will not work\n");
		return -ENODATA;
	}

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	/*
	 * We handle both cases where UNDERDRIVE is represented by a single tuple
	 * or when it is represented by two. More than 4 tuples means that we have
	 * the current mode defaulting to OVERDRIVE, while less than 3 means only
	 * nominal. Lastly just two tuples means UNDERDRIVE. Note that the tuples
	 * are divisible by 2 (X Y) hence there's no need to test for odd values.
	 */
	if (nr < 6)
		priv->imx_gpu_govern.current_mode = UNDERDRIVE;
	else if (nr == 6 || nr == 8)
		priv->imx_gpu_govern.current_mode = NOMINAL;
	else
		priv->imx_gpu_govern.current_mode = OVERDRIVE;

	val = prop->value;

	/* Consume one (core, shader) tuple pair per governor mode, filling
	 * the table starting at the detected initial mode.  nr -= 4 also
	 * terminates the loop after a trailing single (core-only) tuple. */
	for (p = 0, i = priv->imx_gpu_govern.current_mode; nr > 0 && i < GOVERN_COUNT; nr -= 4)
	{
		unsigned long core_freq, core_volt, shader_freq, shader_volt;

		core_freq = be32_to_cpup(val++) * 1000;
		core_volt = be32_to_cpup(val++);

		if (nr == 2)
		{
			/* Only one tuple left: shader mirrors the core. */
			shader_freq = core_freq;
			shader_volt = core_volt;
		}
		else
		{
			shader_freq = be32_to_cpup(val++) * 1000;
			shader_volt = be32_to_cpup(val++);
		}

		/* We only register core_clk frequency */
		if (dev_pm_opp_add(dev, core_freq, core_volt))
		{
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, core_freq);
			continue;
		}

		priv->imx_gpu_govern.core_clk_freq[i] = core_freq;
		priv->imx_gpu_govern.shader_clk_freq[i] = shader_freq;

		p++;
		i++;
	}

	priv->imx_gpu_govern.num_modes = p;
	priv->imx_gpu_govern.dev = dev;

	if (priv->imx_gpu_govern.num_modes > 0)
	{
		ret = driver_create_file(dev->driver, &driver_attr_gpu_govern);
		if (ret) {
			dev_err(dev, "create gpu_govern attr failed (%d)\n", ret);
			return ret;
		}

		/*
		 * This could be redundant, but it is useful for testing DTS with
		 * different OPPs that have assigned-clock rates different than the
		 * ones specified in OPP tuple array. Otherwise we will display
		 * different clock values when the driver is loaded. Further
		 * modifications of the governor will display correctly but not when
		 * the driver has been loaded.
		 */
		core_freq = priv->imx_gpu_govern.core_clk_freq[priv->imx_gpu_govern.current_mode];
		shader_freq = priv->imx_gpu_govern.shader_clk_freq[priv->imx_gpu_govern.current_mode];

		if (core_freq && shader_freq) {
			for (; core <= gcvCORE_3D_MAX; core++) {
				clk_core = priv->imx_gpu_clks[core].clk_core;
				clk_shader = priv->imx_gpu_clks[core].clk_shader;

				if (clk_core != NULL && clk_shader != NULL) {
					clk_set_rate(clk_core, core_freq);
					clk_set_rate(clk_shader, shader_freq);
				}
			}
		}

	}

	return ret;
}
636 | 638 | ||
/*
 * Undo init_gpu_opp_table(): remove the OPP entries registered for the
 * populated governor modes and delete the gpu_govern sysfs attribute.
 * Always returns 0.
 */
int remove_gpu_opp_table(void)
{
	struct imx_priv *priv = &imxPriv;
	struct device* dev = priv->imx_gpu_govern.dev;
	int i = 0;
	int max_modes;

	/* Upper bound of the populated index range.  NOTE(review): this
	 * assumes the table was filled starting at current_mode (see
	 * init_gpu_opp_table()); confirm the bound is right for every
	 * current_mode/num_modes combination. */
	if (priv->imx_gpu_govern.num_modes == GOVERN_COUNT)
		max_modes = priv->imx_gpu_govern.num_modes - 1;
	else
		max_modes = priv->imx_gpu_govern.num_modes;

	/* if we don't have any modes available we don't have OPP */
	if (max_modes == 0)
		return 0;

	/* Only core_clk frequencies were registered as OPPs, so only those
	 * need removing. */
	for (i = priv->imx_gpu_govern.current_mode; i <= max_modes; i++)
	{
		unsigned long core_freq;

		core_freq = priv->imx_gpu_govern.core_clk_freq[i];
		dev_pm_opp_remove(dev, core_freq);
	}

	/* The loop above executed at least once when we get here, so the
	 * sysfs attribute created at init time is removed. */
	if (i > 0)
	{
		driver_remove_file(dev->driver, &driver_attr_gpu_govern);
	}

	return 0;
}
668 | #endif | 670 | #endif |
669 | 671 | ||
#ifdef IMX_GPU_SUBSYSTEM

/* Non-zero when the i.MX8 GPU-subsystem code paths should be used
 * (gates sub-driver registration and subsystem parameter patching). */
static int use_imx_gpu_subsystem;
673 | 675 | ||
/* sub device component ops. */

/* Component bind callback for a GPU-core sub-device: nothing to do
 * here, the sub-device exists only to be aggregated by the master. */
static int mxc_gpu_sub_bind(struct device *dev,
		struct device *master, void *data)
{
	return 0;
}
680 | 682 | ||
/* Component unbind callback: intentionally empty (bind did nothing). */
static void mxc_gpu_sub_unbind(struct device *dev, struct device *master,
		void *data)
{
}
685 | 687 | ||
/* Component framework callbacks registered for each GPU sub-device. */
static const struct component_ops mxc_gpu_sub_ops =
{
	.bind = mxc_gpu_sub_bind,
	.unbind = mxc_gpu_sub_unbind,
};
691 | 693 | ||
/* sub device driver. */

/* DT match table for the sub-device driver.  "fsl,imx8-gpu" covers the
 * whole i.MX8 family GPU sub-system nodes (per MGS-4656, the narrower
 * "fsl,imx8x-gpu" string was the wrong check). */
static const struct of_device_id mxc_gpu_sub_match[] =
{
	{ .compatible = "fsl,imx8-gpu"},
	{ /* sentinel */ }
};
698 | 700 | ||
/* Probe: register the sub-device with the component framework so the
 * master device can aggregate all GPU cores. */
static int mxc_gpu_sub_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &mxc_gpu_sub_ops);
}
703 | 705 | ||
/* Remove: detach the sub-device from the component framework. */
static int mxc_gpu_sub_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &mxc_gpu_sub_ops);
	return 0;
}
709 | 711 | ||
/* Platform driver bound to each "fsl,imx8-gpu" sub-device node. */
struct platform_driver mxc_gpu_sub_driver =
{
	.driver = {
		.name = "mxc-gpu",
		.owner = THIS_MODULE,
		.of_match_table = mxc_gpu_sub_match,
	},

	.probe = mxc_gpu_sub_probe,
	.remove = mxc_gpu_sub_remove,
};
721 | 723 | ||
722 | static int register_mxc_gpu_sub_driver(void) | 724 | static int register_mxc_gpu_sub_driver(void) |
723 | { | 725 | { |
724 | return use_imx_gpu_subsystem ? platform_driver_register(&mxc_gpu_sub_driver) : 0; | 726 | return use_imx_gpu_subsystem ? platform_driver_register(&mxc_gpu_sub_driver) : 0; |
725 | } | 727 | } |
726 | 728 | ||
727 | static void unregister_mxc_gpu_sub_driver(void) | 729 | static void unregister_mxc_gpu_sub_driver(void) |
728 | { | 730 | { |
729 | if (use_imx_gpu_subsystem) { | 731 | if (use_imx_gpu_subsystem) { |
730 | platform_driver_unregister(&mxc_gpu_sub_driver); | 732 | platform_driver_unregister(&mxc_gpu_sub_driver); |
731 | } | 733 | } |
732 | } | 734 | } |
733 | 735 | ||
/*
 * Fill the per-core IRQ number and register window of the module
 * parameters by walking the "cores" phandle list of the master GPU
 * device node.  Cores are assigned sequentially starting at
 * gcvCORE_MAJOR; unavailable (status != "okay") nodes are skipped,
 * and the walk stops at the first core whose platform device, IRQ or
 * MEM resource cannot be resolved.  Always returns 0.
 */
static int patch_param_imx8_subsystem(struct platform_device *pdev,
		gcsMODULE_PARAMETERS *args)
{
	int i = 0;
	struct resource* res;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *core_node;
	int core = gcvCORE_MAJOR;

	while ((core_node = of_parse_phandle(node, "cores", i++)) != NULL) {
		struct platform_device *pdev_gpu;
		int irqLine = -1;

		if (!of_device_is_available(core_node)) {
			of_node_put(core_node);
			continue;
		}

		/* NOTE(review): of_find_device_by_node() takes a device
		 * reference that is never dropped with put_device() here -
		 * confirm whether that is intentional (device outlives the
		 * driver) or a leak. */
		pdev_gpu = of_find_device_by_node(core_node);

		if (!pdev_gpu)
			break;

		irqLine = platform_get_irq(pdev_gpu, 0);

		if (irqLine < 0)
			break;

		res = platform_get_resource(pdev_gpu, IORESOURCE_MEM, 0);

		if (!res)
			break;

		args->irqs[core] = irqLine;
		args->registerBases[core] = res->start;
		args->registerSizes[core] = res->end - res->start + 1;

		of_node_put(core_node);
		++core;
	}

	/* A break above leaves core_node held; release it. */
	if (core_node)
		of_node_put(core_node);

	return 0;
}
780 | 782 | ||
781 | static inline int get_power_imx8_subsystem(struct device *pdev) | 783 | static inline int get_power_imx8_subsystem(struct device *pdev) |
782 | { | 784 | { |
783 | struct imx_priv *priv = &imxPriv; | 785 | struct imx_priv *priv = &imxPriv; |
784 | struct clk *clk_core = NULL; | 786 | struct clk *clk_core = NULL; |
785 | struct clk *clk_shader = NULL; | 787 | struct clk *clk_shader = NULL; |
786 | struct clk *clk_axi = NULL; | 788 | struct clk *clk_axi = NULL; |
787 | 789 | ||
788 | /* Initialize the clock structure */ | 790 | /* Initialize the clock structure */ |
789 | int i = 0; | 791 | int i = 0; |
790 | struct device_node *node = pdev->of_node; | 792 | struct device_node *node = pdev->of_node; |
791 | struct device_node *core_node; | 793 | struct device_node *core_node; |
792 | int core = gcvCORE_MAJOR; | 794 | int core = gcvCORE_MAJOR; |
793 | 795 | ||
794 | #if defined(IMX8_SCU_CONTROL) | 796 | #if defined(IMX8_SCU_CONTROL) |
795 | sc_err_t sciErr; | 797 | sc_err_t sciErr; |
796 | uint32_t mu_id; | 798 | uint32_t mu_id; |
797 | 799 | ||
798 | sciErr = sc_ipc_getMuID(&mu_id); | 800 | sciErr = sc_ipc_getMuID(&mu_id); |
799 | 801 | ||
800 | if (sciErr != SC_ERR_NONE) { | 802 | if (sciErr != SC_ERR_NONE) { |
801 | printk("galcore; cannot obtain mu id\n"); | 803 | printk("galcore; cannot obtain mu id\n"); |
802 | return -EINVAL; | 804 | return -EINVAL; |
803 | } | 805 | } |
804 | 806 | ||
805 | sciErr = sc_ipc_open(&gpu_ipcHandle, mu_id); | 807 | sciErr = sc_ipc_open(&gpu_ipcHandle, mu_id); |
806 | 808 | ||
807 | if (sciErr != SC_ERR_NONE) { | 809 | if (sciErr != SC_ERR_NONE) { |
808 | printk("galcore: cannot open MU channel to SCU\n"); | 810 | printk("galcore: cannot open MU channel to SCU\n"); |
809 | return -EINVAL; | 811 | return -EINVAL; |
810 | } | 812 | } |
811 | #endif | 813 | #endif |
812 | 814 | ||
813 | while ((core_node = of_parse_phandle(node, "cores", i++)) != NULL) { | 815 | while ((core_node = of_parse_phandle(node, "cores", i++)) != NULL) { |
814 | struct platform_device *pdev_gpu = NULL; | 816 | struct platform_device *pdev_gpu = NULL; |
815 | clk_shader = NULL; | 817 | clk_shader = NULL; |
816 | clk_core = NULL; | 818 | clk_core = NULL; |
817 | clk_axi = NULL; | 819 | clk_axi = NULL; |
818 | 820 | ||
819 | if (!of_device_is_available(core_node)) { | 821 | if (!of_device_is_available(core_node)) { |
820 | of_node_put(core_node); | 822 | of_node_put(core_node); |
821 | continue; | 823 | continue; |
822 | } | 824 | } |
823 | 825 | ||
824 | pdev_gpu = of_find_device_by_node(core_node); | 826 | pdev_gpu = of_find_device_by_node(core_node); |
825 | 827 | ||
826 | if (!pdev_gpu) | 828 | if (!pdev_gpu) |
827 | break; | 829 | break; |
828 | 830 | ||
829 | clk_core = clk_get(&pdev_gpu->dev, "core"); | 831 | clk_core = clk_get(&pdev_gpu->dev, "core"); |
830 | 832 | ||
831 | if (IS_ERR(clk_core)) { | 833 | if (IS_ERR(clk_core)) { |
832 | printk("galcore: clk_get clk_core failed\n"); | 834 | printk("galcore: clk_get clk_core failed\n"); |
833 | break; | 835 | break; |
834 | } | 836 | } |
835 | 837 | ||
836 | clk_axi = clk_get(&pdev_gpu->dev, "bus"); | 838 | clk_axi = clk_get(&pdev_gpu->dev, "bus"); |
837 | 839 | ||
838 | if (IS_ERR(clk_axi)) | 840 | if (IS_ERR(clk_axi)) |
839 | clk_axi = NULL; | 841 | clk_axi = NULL; |
840 | 842 | ||
841 | clk_shader = clk_get(&pdev_gpu->dev, "shader"); | 843 | clk_shader = clk_get(&pdev_gpu->dev, "shader"); |
842 | 844 | ||
843 | if (IS_ERR(clk_shader)) { | 845 | if (IS_ERR(clk_shader)) { |
844 | printk("galcore: clk_get clk_3d_shader failed\n"); | 846 | printk("galcore: clk_get clk_3d_shader failed\n"); |
845 | continue; | 847 | continue; |
846 | } | 848 | } |
847 | 849 | ||
848 | #if defined(CONFIG_ANDROID) && LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) | 850 | #if defined(CONFIG_ANDROID) && LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) |
849 | /* TODO: freescale BSP issue in some platform like imx8dv. */ | 851 | /* TODO: freescale BSP issue in some platform like imx8dv. */ |
850 | clk_prepare(clk_core); | 852 | clk_prepare(clk_core); |
851 | clk_set_rate(clk_core, 800000000); | 853 | clk_set_rate(clk_core, 800000000); |
852 | clk_unprepare(clk_core); | 854 | clk_unprepare(clk_core); |
853 | 855 | ||
854 | clk_prepare(clk_shader); | 856 | clk_prepare(clk_shader); |
855 | clk_set_rate(clk_shader, 800000000); | 857 | clk_set_rate(clk_shader, 800000000); |
856 | clk_unprepare(clk_shader); | 858 | clk_unprepare(clk_shader); |
857 | #endif | 859 | #endif |
858 | 860 | ||
859 | priv->imx_gpu_clks[core].clk_shader = clk_shader; | 861 | priv->imx_gpu_clks[core].clk_shader = clk_shader; |
860 | priv->imx_gpu_clks[core].clk_core = clk_core; | 862 | priv->imx_gpu_clks[core].clk_core = clk_core; |
861 | priv->imx_gpu_clks[core].clk_axi = clk_axi; | 863 | priv->imx_gpu_clks[core].clk_axi = clk_axi; |
862 | 864 | ||
863 | #if defined(IMX8_SCU_CONTROL) | 865 | #if defined(IMX8_SCU_CONTROL) |
864 | if (of_property_read_u32(core_node, "fsl,sc_gpu_pid", &priv->sc_gpu_pid[core])) { | 866 | if (of_property_read_u32(core_node, "fsl,sc_gpu_pid", &priv->sc_gpu_pid[core])) { |
865 | priv->sc_gpu_pid[core] = 0; | 867 | priv->sc_gpu_pid[core] = 0; |
866 | } | 868 | } |
867 | #endif | 869 | #endif |
868 | 870 | ||
869 | #ifdef CONFIG_PM | 871 | #ifdef CONFIG_PM |
870 | pm_runtime_get_noresume(&pdev_gpu->dev); | 872 | pm_runtime_get_noresume(&pdev_gpu->dev); |
871 | pm_runtime_set_active(&pdev_gpu->dev); | 873 | pm_runtime_set_active(&pdev_gpu->dev); |
872 | pm_runtime_enable(&pdev_gpu->dev); | 874 | pm_runtime_enable(&pdev_gpu->dev); |
873 | pm_runtime_put_sync(&pdev_gpu->dev); | 875 | pm_runtime_put_sync(&pdev_gpu->dev); |
874 | priv->pmdev[core] = &pdev_gpu->dev; | 876 | priv->pmdev[core] = &pdev_gpu->dev; |
875 | #endif | 877 | #endif |
876 | of_node_put(core_node); | 878 | of_node_put(core_node); |
877 | ++core; | 879 | ++core; |
878 | } | 880 | } |
879 | 881 | ||
880 | priv->gpu3dCount = core; | 882 | priv->gpu3dCount = core; |
881 | 883 | ||
882 | if (core_node) | 884 | if (core_node) |
883 | of_node_put(core_node); | 885 | of_node_put(core_node); |
884 | 886 | ||
885 | return 0; | 887 | return 0; |
886 | } | 888 | } |
887 | 889 | ||
888 | #endif | 890 | #endif |
889 | 891 | ||
890 | static int patch_param_imx6(struct platform_device *pdev, | 892 | static int patch_param_imx6(struct platform_device *pdev, |
891 | gcsMODULE_PARAMETERS *args) | 893 | gcsMODULE_PARAMETERS *args) |
892 | { | 894 | { |
893 | struct resource* res; | 895 | struct resource* res; |
894 | 896 | ||
895 | res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d"); | 897 | res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d"); |
896 | 898 | ||
897 | if (res) | 899 | if (res) |
898 | args->irqLine = res->start; | 900 | args->irqLine = res->start; |
899 | 901 | ||
900 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d"); | 902 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d"); |
901 | 903 | ||
902 | if (res) { | 904 | if (res) { |
903 | args->registerMemBase = res->start; | 905 | args->registerMemBase = res->start; |
904 | args->registerMemSize = res->end - res->start + 1; | 906 | args->registerMemSize = res->end - res->start + 1; |
905 | } | 907 | } |
906 | 908 | ||
907 | res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d"); | 909 | res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d"); |
908 | 910 | ||
909 | if (res) | 911 | if (res) |
910 | args->irqLine2D = res->start; | 912 | args->irqLine2D = res->start; |
911 | 913 | ||
912 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d"); | 914 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d"); |
913 | 915 | ||
914 | if (res) { | 916 | if (res) { |
915 | args->registerMemBase2D = res->start; | 917 | args->registerMemBase2D = res->start; |
916 | args->registerMemSize2D = res->end - res->start + 1; | 918 | args->registerMemSize2D = res->end - res->start + 1; |
917 | } | 919 | } |
918 | 920 | ||
919 | res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg"); | 921 | res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg"); |
920 | 922 | ||
921 | if (res) | 923 | if (res) |
922 | args->irqLineVG = res->start; | 924 | args->irqLineVG = res->start; |
923 | 925 | ||
924 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg"); | 926 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg"); |
925 | 927 | ||
926 | if (res) { | 928 | if (res) { |
927 | args->registerMemBaseVG = res->start; | 929 | args->registerMemBaseVG = res->start; |
928 | args->registerMemSizeVG = res->end - res->start + 1; | 930 | args->registerMemSizeVG = res->end - res->start + 1; |
929 | } | 931 | } |
930 | 932 | ||
931 | return 0; | 933 | return 0; |
932 | } | 934 | } |
933 | 935 | ||
/*
 * Top-level module-parameter patching: dispatch to the i.MX8-subsystem
 * or legacy i.MX6 resource discovery, then fill in compression options,
 * physical/contiguous memory ranges and clock defaults that were not
 * already set on the command line.  Always returns 0.
 */
static int patch_param(struct platform_device *pdev,
		gcsMODULE_PARAMETERS *args)
{
	struct resource* res;

	/* Kernel-version-dependent locals for the contiguous-memory
	 * lookup further below. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	struct device_node *dn = pdev->dev.of_node;
	const u32 *prop;
#else
	struct viv_gpu_platform_data *pdata;
#endif

	/* Remember the platform device for the rest of this file. */
	pdevice = pdev;

#ifdef IMX_GPU_SUBSYSTEM
	if (pdev->dev.of_node && use_imx_gpu_subsystem)
		patch_param_imx8_subsystem(pdev, args);
	else
#endif
		patch_param_imx6(pdev, args);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	/* compression == -1 means "not set by module parameter": derive
	 * the default from the DT "depth-compression" property. */
	if (args->compression == -1)
	{
		const u32 *property;
		args->compression = gcvCOMPRESSION_OPTION_DEFAULT;
		property = of_get_property(pdev->dev.of_node, "depth-compression", NULL);
		if (property && *property == 0)
		{
			args->compression &= ~gcvCOMPRESSION_OPTION_DEPTH;
		}
	}
#endif
	/* Physical GPU-visible memory window, if the board declares one. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");

	if (res && !args->baseAddress && !args->physSize) {
		args->baseAddress = res->start;
		args->physSize = res->end - res->start + 1;
	}

	/* Reserved contiguous memory: source depends on kernel version. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "contiguous_mem");

	if (res) {
		if (args->contiguousBase == 0)
			args->contiguousBase = res->start;

		/* ~0U means "not set by module parameter". */
		if (args->contiguousSize == ~0U)
			args->contiguousSize = res->end - res->start + 1;
	}

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	args->contiguousBase = 0;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	prop = of_get_property(dn, "contiguousbase", NULL);

	if (prop)
		args->contiguousBase = *prop;

	of_property_read_u32(dn, "contiguoussize", (u32 *)&contiguousSize);
#else

	pdata = pdev->dev.platform_data;

	if (pdata) {
		args->contiguousBase = pdata->reserved_mem_base;
		args->contiguousSize = pdata->reserved_mem_size;
	}

#endif

	/* Still unset: fall back to the build-time default size. */
	if (args->contiguousSize == ~0U) {
		printk("Warning: No contiguous memory is reserverd for gpu.!\n");
		printk("Warning: Will use default value(%d) for the reserved memory!\n", gcdFSL_CONTIGUOUS_SIZE);

		args->contiguousSize = gcdFSL_CONTIGUOUS_SIZE;
	}

	args->gpu3DMinClock = initgpu3DMinClock;

	/* No physical window found anywhere: use the platform defaults. */
	if (args->physSize == 0) {
#if defined(IMX8_PHYS_BASE)
		args->baseAddress = IMX8_PHYS_BASE;
#endif

#if defined(IMX8_PHYS_SIZE)
		args->physSize = IMX8_PHYS_SIZE;
#else
		args->physSize = 0x80000000;
#endif
	}

	return 0;
}
1029 | 1031 | ||
/*
 * Initialize the driver-private platform state.
 *
 * Zeroes the global imxPriv bookkeeping structure and, when the GPU
 * low-memory killer is enabled, registers the task notifier so the
 * driver can observe task teardown/handoff events.
 *
 * Returns 0 (cannot fail).
 */
int init_priv(void)
{
    memset(&imxPriv, 0, sizeof(imxPriv));

#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
    /* Kernel < 4.1 exposes task_free_register(); newer kernels renamed
     * the hook to task_handoff_register(). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
    task_free_register(&task_nb);
# else
    task_handoff_register(&task_nb);
# endif
#endif

    return 0;
}
1044 | 1046 | ||
/*
 * Tear down what init_priv() set up: unregister the low-memory-killer
 * task notifier (mirroring the kernel-version split used at register
 * time). imxPriv itself is a static global and needs no freeing.
 */
void
free_priv(void)
{
#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
    task_free_unregister(&task_nb);
# else
    task_handoff_unregister(&task_nb);
# endif
#endif
}
1056 | 1058 | ||
1057 | static int set_clock(int gpu, int enable); | 1059 | static int set_clock(int gpu, int enable); |
1058 | 1060 | ||
1059 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) | 1061 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) |
1060 | static void imx6sx_optimize_qosc_for_GPU(void) | 1062 | static void imx6sx_optimize_qosc_for_GPU(void) |
1061 | { | 1063 | { |
1062 | struct device_node *np; | 1064 | struct device_node *np; |
1063 | void __iomem *src_base; | 1065 | void __iomem *src_base; |
1064 | 1066 | ||
1065 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-qosc"); | 1067 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-qosc"); |
1066 | if (!np) | 1068 | if (!np) |
1067 | return; | 1069 | return; |
1068 | 1070 | ||
1069 | src_base = of_iomap(np, 0); | 1071 | src_base = of_iomap(np, 0); |
1070 | WARN_ON(!src_base); | 1072 | WARN_ON(!src_base); |
1071 | 1073 | ||
1072 | set_clock(gcvCORE_MAJOR, 1); | 1074 | set_clock(gcvCORE_MAJOR, 1); |
1073 | 1075 | ||
1074 | writel_relaxed(0, src_base); /* Disable clkgate & soft_rst */ | 1076 | writel_relaxed(0, src_base); /* Disable clkgate & soft_rst */ |
1075 | writel_relaxed(0, src_base+0x60); /* Enable all masters */ | 1077 | writel_relaxed(0, src_base+0x60); /* Enable all masters */ |
1076 | writel_relaxed(0, src_base+0x1400); /* Disable clkgate & soft_rst for gpu */ | 1078 | writel_relaxed(0, src_base+0x1400); /* Disable clkgate & soft_rst for gpu */ |
1077 | writel_relaxed(0x0f000222, src_base+0x1400+0xd0); /* Set Write QoS 2 for gpu */ | 1079 | writel_relaxed(0x0f000222, src_base+0x1400+0xd0); /* Set Write QoS 2 for gpu */ |
1078 | writel_relaxed(0x0f000822, src_base+0x1400+0xe0); /* Set Read QoS 8 for gpu */ | 1080 | writel_relaxed(0x0f000822, src_base+0x1400+0xe0); /* Set Read QoS 8 for gpu */ |
1079 | 1081 | ||
1080 | set_clock(gcvCORE_MAJOR, 0); | 1082 | set_clock(gcvCORE_MAJOR, 0); |
1081 | return; | 1083 | return; |
1082 | } | 1084 | } |
1083 | #endif | 1085 | #endif |
1084 | 1086 | ||
/*
 * Legacy (non-subsystem, i.MX6-style) power/clock acquisition.
 *
 * Looks up reset controls, the PU regulator (kernel-version dependent),
 * and the 3D/2D/VG clock trees for the GPU device, stashing everything
 * in the global imxPriv. A missing clock disables the corresponding
 * core (logged) rather than failing the whole probe; only a missing
 * regulator is fatal (-ENXIO). Finally enables runtime PM and records
 * the device as the PM handle for all three cores.
 *
 * @pdev: the GPU struct device (despite the name, not a platform_device).
 * Returns 0 on success, -ENXIO if the gpu regulator lookup fails.
 */
static inline int get_power_imx6(struct device *pdev)
{
    struct imx_priv *priv = &imxPriv;
    struct clk *clk_core = NULL;
    struct clk *clk_shader = NULL;
    struct clk *clk_axi = NULL;
    struct clk *clk_ahb = NULL;

#ifdef CONFIG_RESET_CONTROLLER
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    struct reset_control *rstc;

    /* Failed lookups are tolerated: a NULL rstc simply means reset_gpu()
     * becomes a no-op for that core. */
    rstc = devm_reset_control_get(pdev, "gpu3d");
    priv->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;
    /* >= 4.9: 2D and VG share a reset line, so the _shared getter is
     * required; older kernels only have the exclusive getter. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0)
    rstc = devm_reset_control_get_shared(pdev, "gpu2d");
    priv->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;
    rstc = devm_reset_control_get_shared(pdev, "gpuvg");
# else
    rstc = devm_reset_control_get(pdev, "gpu2d");
    priv->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;
    rstc = devm_reset_control_get(pdev, "gpuvg");
# endif
    priv->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
# endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    /* Get gpu regulator (pre-3.5 name) */
    priv->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    priv->gpu_regulator = devm_regulator_get(pdev, "pu");
# endif

    /* On 3.5..3.10 no regulator is looked up at all, hence the matching
     * version guard around the error check. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    if (IS_ERR(priv->gpu_regulator)) {
        printk("%s: Failed to get gpu regulator\n", __FUNCTION__);
        return -ENXIO;
    }
# endif
#endif

    /* 3D core: need core + shader; axi/ahb are optional extras. */
    clk_core = clk_get(pdev, "gpu3d_clk");

    if (!IS_ERR(clk_core)) {
        clk_axi = clk_get(pdev, "gpu3d_axi_clk");
        clk_shader = clk_get(pdev, "gpu3d_shader_clk");

        if (IS_ERR(clk_shader)) {
            /* No shader clock => 3D unusable; release the core clock. */
            clk_put(clk_core);
            clk_core = NULL;
            clk_shader = NULL;
            printk("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
        }

        clk_ahb = clk_get(pdev, "gpu3d_ahb_clk");
        if (IS_ERR(clk_ahb)) {
            clk_ahb = NULL;
        }
    } else {
        clk_core = NULL;
        printk("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
    }

    priv->imx_gpu_clks[gcvCORE_MAJOR].clk_core = clk_core;
    priv->imx_gpu_clks[gcvCORE_MAJOR].clk_shader = clk_shader;
    priv->imx_gpu_clks[gcvCORE_MAJOR].clk_axi = clk_axi;
    priv->imx_gpu_clks[gcvCORE_MAJOR].clk_ahb = clk_ahb;

    /* 2D core; VG reuses the same core clock with its own axi clock
     * (put_power() relies on this aliasing via clk_core_last). */
    clk_core = clk_get(pdev, "gpu2d_clk");

    if (IS_ERR(clk_core)) {
        clk_core = NULL;
        printk("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
    } else {
        clk_axi = clk_get(pdev, "gpu2d_axi_clk");
        if (IS_ERR(clk_axi)) {
            clk_axi = NULL;
            printk("galcore: clk_get 2d axi clock failed, disable 2d\n");
        }
        clk_ahb = clk_get(pdev, "gpu2d_ahb_clk");
        if (IS_ERR(clk_ahb)) {
            clk_ahb = NULL;
        }

        priv->imx_gpu_clks[gcvCORE_2D].clk_ahb = clk_ahb;
        priv->imx_gpu_clks[gcvCORE_2D].clk_core = clk_core;
        priv->imx_gpu_clks[gcvCORE_2D].clk_axi = clk_axi;

        clk_axi = clk_get(pdev, "openvg_axi_clk");

        if (IS_ERR(clk_axi)) {
            clk_axi = NULL;
            printk("galcore: clk_get vg clock failed, disable vg!\n");
        }

        /* VG shares the 2D core clock. */
        priv->imx_gpu_clks[gcvCORE_VG].clk_core = clk_core;
        priv->imx_gpu_clks[gcvCORE_VG].clk_axi = clk_axi;
    }

#ifdef CONFIG_PM
    pm_runtime_enable(pdev);

    /* Single legacy device drives runtime PM for all three cores;
     * put_power() de-dupes via pmdev_last before disabling. */
    priv->pmdev[gcvCORE_MAJOR] = pdev;
    priv->pmdev[gcvCORE_2D] = pdev;
    priv->pmdev[gcvCORE_VG] = pdev;
#endif

    return 0;
}
1196 | 1198 | ||
/*
 * Acquire all power-related resources for the GPU device.
 *
 * Dispatches to the i.MX8 subsystem path when the device came from DT
 * and the subsystem layout was detected, otherwise to the legacy i.MX6
 * path. On success also hooks up the thermal throttling notifier +
 * sysfs attribute, initializes the OPP table, and applies the i.MX6SX
 * QoS tweak (a no-op on other SoCs).
 *
 * Returns 0 on success or the negative errno from the platform helper;
 * failures of the optional attr/OPP setup are only logged.
 */
static inline int get_power(struct device *pdev)
{
    int ret;

    /*Initialize the clock structure*/
#ifdef IMX_GPU_SUBSYSTEM
    if (pdev->of_node && use_imx_gpu_subsystem)
        ret = get_power_imx8_subsystem(pdev);
    else
#endif
        ret = get_power_imx6(pdev);

    if (ret)
        return ret;

#if gcdENABLE_FSCALE_VAL_ADJUST && (defined(CONFIG_DEVICE_THERMAL) || defined(CONFIG_DEVICE_THERMAL_MODULE))
    REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);

    /* Driver-level attribute: minimum 3D clock divider knob. */
    ret = driver_create_file(pdev->driver, &driver_attr_gpu3DMinClock);

    if (ret)
        dev_err(pdev, "create gpu3DMinClock attr failed (%d)\n", ret);
#endif

#if defined(CONFIG_PM_OPP)
    ret = init_gpu_opp_table(pdev);
    if (ret)
        dev_err(pdev, "OPP init failed!\n");
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    imx6sx_optimize_qosc_for_GPU();
#endif

    return 0;
}
1233 | 1235 | ||
/*
 * Release everything get_power() acquired.
 *
 * Walks every core's clock set, releasing each handle exactly once:
 * clk_core_last de-dupes the 2D/VG shared core clock, and pmdev_last
 * de-dupes runtime-PM disable when several cores share one device
 * (the legacy i.MX6 layout). Then drops the regulator (old kernels),
 * the thermal notifier + sysfs attribute, the OPP table, and the SCU
 * IPC handle where those were set up.
 */
static inline void put_power(void)
{
    int core = 0;
    struct gpu_clk *imx_clk = NULL;
    struct imx_priv *priv = &imxPriv;
    struct device *pmdev_last = NULL;/*legacy gpu device entry for imx6*/
    struct clk *clk_core_last = NULL;/*vg has same core clk as 2d */

    for (core = 0; core < gcdMAX_GPU_COUNT; core++) {
        imx_clk = &priv->imx_gpu_clks[core];

        /* Skip a core clock already released for a previous core
         * (2D and VG alias the same struct clk). */
        if (imx_clk->clk_core && imx_clk->clk_core != clk_core_last) {
            clk_put(imx_clk->clk_core);
            clk_core_last = imx_clk->clk_core;
            imx_clk->clk_core = NULL;
        }

        if (imx_clk->clk_shader) {
            clk_put(imx_clk->clk_shader);
            imx_clk->clk_shader = NULL;
        }

        if (imx_clk->clk_axi) {
            clk_put(imx_clk->clk_axi);
            imx_clk->clk_axi = NULL;
        }

        if (imx_clk->clk_ahb) {
            clk_put(imx_clk->clk_ahb);
            imx_clk->clk_ahb = NULL;
        }

#ifdef CONFIG_PM
        /* Disable runtime PM once per distinct device. */
        if (priv->pmdev[core] && priv->pmdev[core] != pmdev_last){
            pm_runtime_disable(priv->pmdev[core]);
            pmdev_last = priv->pmdev[core];
        }
#endif
    }

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    /* Pre-3.5 used plain regulator_get(); newer paths are devm-managed. */
    if (priv->gpu_regulator) {
        regulator_put(priv->gpu_regulator);
        priv->gpu_regulator = NULL;
    }
#endif

#if gcdENABLE_FSCALE_VAL_ADJUST && (defined(CONFIG_DEVICE_THERMAL) || defined(CONFIG_DEVICE_THERMAL_MODULE))
    UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);

    /* pdevice is the file-scope platform device recorded at probe. */
    driver_remove_file(pdevice->dev.driver, &driver_attr_gpu3DMinClock);
#endif

#if defined(CONFIG_PM_OPP)
    remove_gpu_opp_table();
#endif

#if defined(IMX8_SCU_CONTROL)
    if (gpu_ipcHandle)
        sc_ipc_close(gpu_ipcHandle);
#endif
}
1296 | 1298 | ||
/*
 * Power a single GPU core up (enable != 0) or down.
 *
 * Power-up order: PU regulator / GPC domain (old kernels only), then
 * runtime-PM get, then SCU configuration (core id + single/dual mode)
 * on i.MX8 parts. Power-down reverses: runtime-PM put, then regulator
 * / GPC release.
 *
 * @gpu: gcvCORE_* index into imxPriv arrays.
 * Always returns 0; regulator/SCU failures are only logged.
 *
 * NOTE(review): priv is declared only under CONFIG_PM but is also used
 * by the regulator branch below — compiles only when CONFIG_PM is set
 * (always true for these platforms); confirm if that ever changes.
 */
static inline int set_power(int gpu, int enable)
{
#ifdef CONFIG_PM
    struct imx_priv* priv = &imxPriv;
#endif

    if (enable) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
        if (!IS_ERR(priv->gpu_regulator)) {
            int ret = regulator_enable(priv->gpu_regulator);

            if (ret)
                printk("%s: fail to enable pu regulator %d!\n", __FUNCTION__, ret);
        }
# else
        /* 3.5..3.10: PU domain is driven through the GPC directly. */
        imx_gpc_power_up_pu(true);
# endif
#endif

#ifdef CONFIG_PM
        pm_runtime_get_sync(priv->pmdev[gpu]);
#endif

#if defined(IMX8_SCU_CONTROL)
        /* i.MX8: tell the SCU which core this is and whether the two
         * 3D cores run as a pair (dual) or alone (single). */
        if (priv->sc_gpu_pid[gpu]) {
            sc_err_t sciErr = sc_misc_set_control(gpu_ipcHandle, priv->sc_gpu_pid[gpu], SC_C_ID, gpu);
            if (sciErr != SC_ERR_NONE)
                printk("galcore: failed to set gpu id for 3d_%d\n", gpu);

            if (priv->gpu3dCount > 1) {
                sciErr = sc_misc_set_control(gpu_ipcHandle, priv->sc_gpu_pid[gpu], SC_C_SINGLE_MODE, 0);
                if (sciErr != SC_ERR_NONE)
                    printk("galcore: failed to set gpu dual mode for 3d_%d\n", gpu);
            } else {
                sciErr = sc_misc_set_control(gpu_ipcHandle, priv->sc_gpu_pid[gpu], SC_C_SINGLE_MODE, 1);
                if (sciErr != SC_ERR_NONE)
                    printk("galcore: failed to set gpu single mode for 3d_%d\n", gpu);
            }
        }
#endif
    } else {
#ifdef CONFIG_PM
        pm_runtime_put_sync(priv->pmdev[gpu]);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
        if (!IS_ERR(priv->gpu_regulator))
            regulator_disable(priv->gpu_regulator);
# else
        imx_gpc_power_up_pu(false);
# endif
#endif
    }

    return 0;
}
1355 | 1357 | ||
1356 | int set_clock(int gpu, int enable) | 1358 | int set_clock(int gpu, int enable) |
1357 | { | 1359 | { |
1358 | struct imx_priv* priv = &imxPriv; | 1360 | struct imx_priv* priv = &imxPriv; |
1359 | struct clk *clk_core = priv->imx_gpu_clks[gpu].clk_core; | 1361 | struct clk *clk_core = priv->imx_gpu_clks[gpu].clk_core; |
1360 | struct clk *clk_shader = priv->imx_gpu_clks[gpu].clk_shader; | 1362 | struct clk *clk_shader = priv->imx_gpu_clks[gpu].clk_shader; |
1361 | struct clk *clk_axi = priv->imx_gpu_clks[gpu].clk_axi; | 1363 | struct clk *clk_axi = priv->imx_gpu_clks[gpu].clk_axi; |
1362 | struct clk *clk_ahb = priv->imx_gpu_clks[gpu].clk_ahb; | 1364 | struct clk *clk_ahb = priv->imx_gpu_clks[gpu].clk_ahb; |
1363 | 1365 | ||
1364 | if (enable) { | 1366 | if (enable) { |
1365 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) | 1367 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) |
1366 | if (clk_core) | 1368 | if (clk_core) |
1367 | clk_prepare(clk_core); | 1369 | clk_prepare(clk_core); |
1368 | 1370 | ||
1369 | if (clk_shader) | 1371 | if (clk_shader) |
1370 | clk_prepare(clk_shader); | 1372 | clk_prepare(clk_shader); |
1371 | 1373 | ||
1372 | if (clk_axi) | 1374 | if (clk_axi) |
1373 | clk_prepare(clk_axi); | 1375 | clk_prepare(clk_axi); |
1374 | 1376 | ||
1375 | if (clk_ahb) | 1377 | if (clk_ahb) |
1376 | clk_prepare(clk_ahb); | 1378 | clk_prepare(clk_ahb); |
1377 | #endif | 1379 | #endif |
1378 | if (clk_core) | 1380 | if (clk_core) |
1379 | clk_enable(clk_core); | 1381 | clk_enable(clk_core); |
1380 | 1382 | ||
1381 | if (clk_shader) | 1383 | if (clk_shader) |
1382 | clk_enable(clk_shader); | 1384 | clk_enable(clk_shader); |
1383 | 1385 | ||
1384 | if (clk_axi) | 1386 | if (clk_axi) |
1385 | clk_enable(clk_axi); | 1387 | clk_enable(clk_axi); |
1386 | 1388 | ||
1387 | if (clk_ahb) | 1389 | if (clk_ahb) |
1388 | clk_enable(clk_ahb); | 1390 | clk_enable(clk_ahb); |
1389 | } else { | 1391 | } else { |
1390 | if (clk_core) | 1392 | if (clk_core) |
1391 | clk_disable(clk_core); | 1393 | clk_disable(clk_core); |
1392 | 1394 | ||
1393 | if (clk_shader) | 1395 | if (clk_shader) |
1394 | clk_disable(clk_shader); | 1396 | clk_disable(clk_shader); |
1395 | 1397 | ||
1396 | if (clk_axi) | 1398 | if (clk_axi) |
1397 | clk_disable(clk_axi); | 1399 | clk_disable(clk_axi); |
1398 | 1400 | ||
1399 | if (clk_ahb) | 1401 | if (clk_ahb) |
1400 | clk_disable(clk_ahb); | 1402 | clk_disable(clk_ahb); |
1401 | 1403 | ||
1402 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) | 1404 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) |
1403 | if (clk_core) | 1405 | if (clk_core) |
1404 | clk_unprepare(clk_core); | 1406 | clk_unprepare(clk_core); |
1405 | 1407 | ||
1406 | if (clk_shader) | 1408 | if (clk_shader) |
1407 | clk_unprepare(clk_shader); | 1409 | clk_unprepare(clk_shader); |
1408 | 1410 | ||
1409 | if (clk_axi) | 1411 | if (clk_axi) |
1410 | clk_unprepare(clk_axi); | 1412 | clk_unprepare(clk_axi); |
1411 | 1413 | ||
1412 | if (clk_ahb) | 1414 | if (clk_ahb) |
1413 | clk_unprepare(clk_ahb); | 1415 | clk_unprepare(clk_ahb); |
1414 | #endif | 1416 | #endif |
1415 | } | 1417 | } |
1416 | 1418 | ||
1417 | return gcvSTATUS_OK; | 1419 | return gcvSTATUS_OK; |
1418 | } | 1420 | } |
1419 | 1421 | ||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
#ifdef CONFIG_PM
/* Runtime-PM hooks: drop the high-bus-frequency request while the GPU
 * is idle and reclaim it on resume (i.MX busfreq driver). */
static int gpu_runtime_suspend(struct device *dev)
{
    release_bus_freq(BUS_FREQ_HIGH);
    return 0;
}

static int gpu_runtime_resume(struct device *dev)
{
    request_bus_freq(BUS_FREQ_HIGH);
    return 0;
}

/* Writable copy of the driver's dev_pm_ops, populated in
 * adjust_platform_driver() with the callbacks above spliced in. */
static struct dev_pm_ops gpu_pm_ops;
#endif
#endif
1437 | 1439 | ||
1438 | 1440 | ||
1439 | static int adjust_platform_driver(struct platform_driver *driver) | 1441 | static int adjust_platform_driver(struct platform_driver *driver) |
1440 | { | 1442 | { |
1441 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) | 1443 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) |
1442 | driver->driver.of_match_table = mxs_gpu_dt_ids; | 1444 | driver->driver.of_match_table = mxs_gpu_dt_ids; |
1443 | #endif | 1445 | #endif |
1444 | 1446 | ||
1445 | #ifdef CONFIG_PM | 1447 | #ifdef CONFIG_PM |
1446 | /* Override PM callbacks to add runtime PM callbacks. */ | 1448 | /* Override PM callbacks to add runtime PM callbacks. */ |
1447 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) | 1449 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) |
1448 | /* Fill local structure with original value. */ | 1450 | /* Fill local structure with original value. */ |
1449 | memcpy(&gpu_pm_ops, driver->driver.pm, sizeof(struct dev_pm_ops)); | 1451 | memcpy(&gpu_pm_ops, driver->driver.pm, sizeof(struct dev_pm_ops)); |
1450 | 1452 | ||
1451 | /* Add runtime PM callback. */ | 1453 | /* Add runtime PM callback. */ |
1452 | #ifdef CONFIG_PM | 1454 | #ifdef CONFIG_PM |
1453 | gpu_pm_ops.runtime_suspend = gpu_runtime_suspend; | 1455 | gpu_pm_ops.runtime_suspend = gpu_runtime_suspend; |
1454 | gpu_pm_ops.runtime_resume = gpu_runtime_resume; | 1456 | gpu_pm_ops.runtime_resume = gpu_runtime_resume; |
1455 | gpu_pm_ops.runtime_idle = NULL; | 1457 | gpu_pm_ops.runtime_idle = NULL; |
1456 | #endif | 1458 | #endif |
1457 | 1459 | ||
1458 | /* Replace callbacks. */ | 1460 | /* Replace callbacks. */ |
1459 | driver->driver.pm = &gpu_pm_ops; | 1461 | driver->driver.pm = &gpu_pm_ops; |
1460 | #endif | 1462 | #endif |
1461 | #endif | 1463 | #endif |
1462 | 1464 | ||
1463 | return 0; | 1465 | return 0; |
1464 | } | 1466 | } |
1465 | 1467 | ||
/* SRC (System Reset Controller) register layout used by the pre-3.5
 * direct-poke reset path below. */
#define SRC_SCR_OFFSET 0
#define BP_SRC_SCR_GPU3D_RST 1
#define BP_SRC_SCR_GPU2D_RST 4

/*
 * Hardware-reset one GPU core, by whichever mechanism the kernel
 * version offers:
 *  - < 3.5:  write the reset bit in the SRC SCR register and spin
 *            until the hardware clears it;
 *  - >= 3.10: use the reset_control handle cached in imxPriv (no-op
 *            if the lookup failed at probe);
 *  - 3.5..3.10: platform helper imx_src_reset_gpu().
 *
 * @gpu: gcvCORE_* index; 2D and VG share one reset line.
 */
static inline int reset_gpu(int gpu)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    void __iomem *src_base = IO_ADDRESS(SRC_BASE_ADDR);
    uint32_t bit_offset, val;

    if (gpu == gcvCORE_MAJOR)
        bit_offset = BP_SRC_SCR_GPU3D_RST;
    else if ((gpu == gcvCORE_VG) ||(gpu == gcvCORE_2D))
        bit_offset = BP_SRC_SCR_GPU2D_RST;
    else
        return -ENXIO;

    /* NOTE(review): the &= ~bit immediately followed by |= bit is a
     * plain set of the reset bit; the clear is redundant. Kept as-is. */
    val = __raw_readl(src_base + SRC_SCR_OFFSET);
    val &= ~(1 << (bit_offset));
    val |= (1 << (bit_offset));
    __raw_writel(val, src_base + SRC_SCR_OFFSET);

    /* Busy-wait until the SRC self-clears the bit (reset finished). */
    while ((__raw_readl(src_base + SRC_SCR_OFFSET) & (1 << (bit_offset))) != 0);

    /* NOTE(review): returns -ENODEV even after the reset completes —
     * looks suspicious; confirm legacy callers ignore this value. */
    return -ENODEV;

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    struct imx_priv* priv = &imxPriv;
    struct reset_control *rstc = priv->rstc[gpu];

    if (rstc)
        reset_control_reset(rstc);
#else
    imx_src_reset_gpu((int)gpu);
#endif

    return 0;
}
1504 | 1506 | ||
1505 | static gceSTATUS | 1507 | static gceSTATUS |
1506 | _AdjustParam( | 1508 | _AdjustParam( |
1507 | gcsPLATFORM * Platform, | 1509 | gcsPLATFORM * Platform, |
1508 | gcsMODULE_PARAMETERS *Args | 1510 | gcsMODULE_PARAMETERS *Args |
1509 | ) | 1511 | ) |
1510 | { | 1512 | { |
1511 | patch_param(Platform->device, Args); | 1513 | patch_param(Platform->device, Args); |
1512 | 1514 | ||
1513 | if (of_find_compatible_node(NULL, NULL, "fsl,imx8mq-gpu") && | 1515 | if (of_find_compatible_node(NULL, NULL, "fsl,imx8mq-gpu") && |
1514 | ((Args->baseAddress + totalram_pages * PAGE_SIZE) > 0x100000000)) | 1516 | ((Args->baseAddress + totalram_pages * PAGE_SIZE) > 0x100000000)) |
1515 | { | 1517 | { |
1516 | Platform->flagBits |= gcvPLATFORM_FLAG_LIMIT_4G_ADDRESS; | 1518 | Platform->flagBits |= gcvPLATFORM_FLAG_LIMIT_4G_ADDRESS; |
1517 | } | 1519 | } |
1518 | 1520 | ||
1519 | if (of_find_compatible_node(NULL, NULL, "fsl,imx8mm-gpu")) | 1521 | if (of_find_compatible_node(NULL, NULL, "fsl,imx8mm-gpu")) |
1520 | { | 1522 | { |
1521 | Platform->flagBits |= gcvPLATFORM_FLAG_IMX_MM; | 1523 | Platform->flagBits |= gcvPLATFORM_FLAG_IMX_MM; |
1522 | } | 1524 | } |
1523 | return gcvSTATUS_OK; | 1525 | return gcvSTATUS_OK; |
1524 | } | 1526 | } |
1525 | 1527 | ||
1526 | static gceSTATUS | 1528 | static gceSTATUS |
1527 | _GetPower( | 1529 | _GetPower( |
1528 | gcsPLATFORM * Platform | 1530 | gcsPLATFORM * Platform |
1529 | ) | 1531 | ) |
1530 | { | 1532 | { |
1531 | int ret = get_power(&Platform->device->dev); | 1533 | int ret = get_power(&Platform->device->dev); |
1532 | 1534 | ||
1533 | if (ret) | 1535 | if (ret) |
1534 | return gcvSTATUS_GENERIC_IO; | 1536 | return gcvSTATUS_GENERIC_IO; |
1535 | 1537 | ||
1536 | return gcvSTATUS_OK; | 1538 | return gcvSTATUS_OK; |
1537 | } | 1539 | } |
1538 | 1540 | ||
1539 | static gceSTATUS | 1541 | static gceSTATUS |
1540 | _PutPower( | 1542 | _PutPower( |
1541 | gcsPLATFORM * Platform | 1543 | gcsPLATFORM * Platform |
1542 | ) | 1544 | ) |
1543 | { | 1545 | { |
1544 | put_power(); | 1546 | put_power(); |
1545 | return gcvSTATUS_OK; | 1547 | return gcvSTATUS_OK; |
1546 | } | 1548 | } |
1547 | 1549 | ||
1548 | 1550 | ||
1549 | static gceSTATUS | 1551 | static gceSTATUS |
1550 | _SetPower( | 1552 | _SetPower( |
1551 | gcsPLATFORM * Platform, | 1553 | gcsPLATFORM * Platform, |
1552 | gceCORE GPU, | 1554 | gceCORE GPU, |
1553 | gctBOOL Enable | 1555 | gctBOOL Enable |
1554 | ) | 1556 | ) |
1555 | { | 1557 | { |
1556 | return set_power((int)GPU, Enable) ? gcvSTATUS_GENERIC_IO | 1558 | return set_power((int)GPU, Enable) ? gcvSTATUS_GENERIC_IO |
1557 | : gcvSTATUS_OK; | 1559 | : gcvSTATUS_OK; |
1558 | } | 1560 | } |
1559 | 1561 | ||
1560 | static gceSTATUS | 1562 | static gceSTATUS |
1561 | _SetClock( | 1563 | _SetClock( |
1562 | gcsPLATFORM * Platform, | 1564 | gcsPLATFORM * Platform, |
1563 | gceCORE GPU, | 1565 | gceCORE GPU, |
1564 | gctBOOL Enable | 1566 | gctBOOL Enable |
1565 | ) | 1567 | ) |
1566 | { | 1568 | { |
1567 | set_clock((int)GPU, Enable); | 1569 | set_clock((int)GPU, Enable); |
1568 | return gcvSTATUS_OK; | 1570 | return gcvSTATUS_OK; |
1569 | } | 1571 | } |
1570 | 1572 | ||
1571 | static gceSTATUS | 1573 | static gceSTATUS |
1572 | _Reset( | 1574 | _Reset( |
1573 | gcsPLATFORM * Platform, | 1575 | gcsPLATFORM * Platform, |
1574 | gceCORE GPU | 1576 | gceCORE GPU |
1575 | ) | 1577 | ) |
1576 | { | 1578 | { |
1577 | int ret; | 1579 | int ret; |
1578 | 1580 | ||
1579 | ret = reset_gpu((int)GPU); | 1581 | ret = reset_gpu((int)GPU); |
1580 | 1582 | ||
1581 | if (!ret) | 1583 | if (!ret) |
1582 | return gcvSTATUS_OK; | 1584 | return gcvSTATUS_OK; |
1583 | else if (ret == -ENODEV) | 1585 | else if (ret == -ENODEV) |
1584 | return gcvSTATUS_NOT_SUPPORTED; | 1586 | return gcvSTATUS_NOT_SUPPORTED; |
1585 | else | 1587 | else |
1586 | return gcvSTATUS_INVALID_CONFIG; | 1588 | return gcvSTATUS_INVALID_CONFIG; |
1587 | } | 1589 | } |
1588 | 1590 | ||
1589 | struct soc_platform_ops imx_platform_ops = | 1591 | struct soc_platform_ops imx_platform_ops = |
1590 | { | 1592 | { |
1591 | .adjustParam = _AdjustParam, | 1593 | .adjustParam = _AdjustParam, |
1592 | .getPower = _GetPower, | 1594 | .getPower = _GetPower, |
1593 | .putPower = _PutPower, | 1595 | .putPower = _PutPower, |
1594 | .setPower = _SetPower, | 1596 | .setPower = _SetPower, |
1595 | .setClock = _SetClock, | 1597 | .setClock = _SetClock, |
1596 | .reset = _Reset, | 1598 | .reset = _Reset, |
1597 | #ifdef CONFIG_GPU_LOW_MEMORY_KILLER | 1599 | #ifdef CONFIG_GPU_LOW_MEMORY_KILLER |
1598 | .shrinkMemory = _ShrinkMemory, | 1600 | .shrinkMemory = _ShrinkMemory, |
1599 | #endif | 1601 | #endif |
1600 | }; | 1602 | }; |
1601 | 1603 | ||
1602 | static struct soc_platform imx_platform = | 1604 | static struct soc_platform imx_platform = |
1603 | { | 1605 | { |
1604 | .name = __FILE__, | 1606 | .name = __FILE__, |
1605 | .ops = &imx_platform_ops, | 1607 | .ops = &imx_platform_ops, |
1606 | }; | 1608 | }; |
1607 | 1609 | ||
1608 | int soc_platform_init(struct platform_driver *pdrv, | 1610 | int soc_platform_init(struct platform_driver *pdrv, |
1609 | struct soc_platform **platform) | 1611 | struct soc_platform **platform) |
1610 | { | 1612 | { |
1611 | #ifdef IMX_GPU_SUBSYSTEM | 1613 | #ifdef IMX_GPU_SUBSYSTEM |
1612 | if (of_find_compatible_node(NULL, NULL, "fsl,imx8-gpu-ss")) { | 1614 | if (of_find_compatible_node(NULL, NULL, "fsl,imx8-gpu-ss")) { |
1613 | use_imx_gpu_subsystem = 1; | 1615 | use_imx_gpu_subsystem = 1; |
1614 | } | ||
1615 | 1616 | ||
1616 | if (of_find_compatible_node(NULL, NULL, "fsl,imx8x-gpu")) { | 1617 | if (!of_find_compatible_node(NULL, NULL, "fsl,imx8-gpu")) { |
1617 | printk(KERN_ERR "Incorrect device-tree, please update dtb."); | 1618 | printk(KERN_ERR "Incorrect device-tree, please update dtb."); |
1618 | return -EINVAL; | 1619 | return -EINVAL; |
1620 | } | ||
1619 | } | 1621 | } |
1620 | #endif | 1622 | #endif |
1621 | 1623 | ||
1622 | adjust_platform_driver(pdrv); | 1624 | adjust_platform_driver(pdrv); |
1623 | init_priv(); | 1625 | init_priv(); |
1624 | 1626 | ||
1625 | #ifdef IMX_GPU_SUBSYSTEM | 1627 | #ifdef IMX_GPU_SUBSYSTEM |
1626 | register_mxc_gpu_sub_driver(); | 1628 | register_mxc_gpu_sub_driver(); |
1627 | #endif | 1629 | #endif |
1628 | 1630 | ||
1629 | *platform = &imx_platform; | 1631 | *platform = &imx_platform; |
1630 | return 0; | 1632 | return 0; |
1631 | } | 1633 | } |
1632 | 1634 | ||
/*
 * soc_platform_terminate - undo soc_platform_init.
 *
 * Unregisters the GPU subsystem sub-driver (when built with
 * IMX_GPU_SUBSYSTEM) and frees the private platform state.
 * Always returns 0.
 */
int soc_platform_terminate(struct soc_platform *platform)
{
#ifdef IMX_GPU_SUBSYSTEM
    unregister_mxc_gpu_sub_driver();
#endif

    free_priv();

    return 0;
}
1642 | 1644 |