Commit 22191ac353445ad8fafc5a78aefcd94e78963041

Authored by Bryan O'Donoghue
Committed by Stefano Babic
1 parent a1ffd9e2e6

drivers/crypto/fsl: assign job-rings to non-TrustZone

After enabling TrustZone various parts of the CAAM silicon become
inaccessible to non TrustZone contexts. The job-ring registers are designed
to allow non TrustZone contexts like Linux to still submit jobs to CAAM
even after TrustZone has been enabled.

The default job-ring permissions after the BootROM look like this for
job-ring zero.

ms=0x00008001 ls=0x00008001

The MS field is JRaMIDR_MS (job ring MID most significant).

Referring to "Security Reference Manual for i.MX 7Dual and 7Solo
Applications Processors, Rev. 0, 03/2017" section 8.10.4 we see that
JROWN_NS controls whether or not a job-ring is accessible from non
TrustZone.

Bit 15 (TrustZone) is the logical inverse of bit 3 hence the above value of
0x8001 shows that JROWN_NS=0 and TrustZone=1.

Clearly then as soon as TrustZone becomes active the job-ring registers are
no longer accessible from Linux, which is not what we want.

This patch explicitly sets all job-ring registers to JROWN_NS=1 (non
TrustZone) by default and to the Non-Secure MID 001. Both settings are
required to successfully assign a job-ring to non-secure mode. If a piece
of TrustZone firmware requires ownership of job-ring registers it can unset
the JROWN_NS bit itself.

This patch, in conjunction with a modification of the Linux kernel to skip
HWRNG initialisation, makes CAAM usable by Linux with TrustZone enabled.

Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
Cc: Fabio Estevam <fabio.estevam@nxp.com>
Cc: Peng Fan <peng.fan@nxp.com>
Cc: Alex Porosanu <alexandru.porosanu@nxp.com>
Cc: Ruchika Gupta <ruchika.gupta@nxp.com>
Cc: Aneesh Bansal <aneesh.bansal@nxp.com>
Link: https://github.com/OP-TEE/optee_os/issues/1408
Link: https://tinyurl.com/yam5gv9a
Tested-by: Lukas Auer <lukas.auer@aisec.fraunhofer.de>

Showing 2 changed files with 11 additions and 0 deletions Inline Diff

drivers/crypto/fsl/jr.c
1 /* 1 /*
2 * Copyright 2008-2014 Freescale Semiconductor, Inc. 2 * Copyright 2008-2014 Freescale Semiconductor, Inc.
3 * 3 *
4 * SPDX-License-Identifier: GPL-2.0+ 4 * SPDX-License-Identifier: GPL-2.0+
5 * 5 *
6 * Based on CAAM driver in drivers/crypto/caam in Linux 6 * Based on CAAM driver in drivers/crypto/caam in Linux
7 */ 7 */
8 8
9 #include <common.h> 9 #include <common.h>
10 #include <malloc.h> 10 #include <malloc.h>
11 #include "fsl_sec.h" 11 #include "fsl_sec.h"
12 #include "jr.h" 12 #include "jr.h"
13 #include "jobdesc.h" 13 #include "jobdesc.h"
14 #include "desc_constr.h" 14 #include "desc_constr.h"
15 #ifdef CONFIG_FSL_CORENET 15 #ifdef CONFIG_FSL_CORENET
16 #include <asm/fsl_pamu.h> 16 #include <asm/fsl_pamu.h>
17 #endif 17 #endif
18 18
/* Circular-ring arithmetic; 'size' must be a power of two. */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & (size - 1))
#define CIRC_SPACE(head, tail, size)	CIRC_CNT((tail), (head) + 1, (size))

/* Per-SEC-instance offset from the base SEC address (C29X has three SECs). */
uint32_t sec_offset[CONFIG_SYS_FSL_MAX_NUM_OF_SEC] = {
	0,
#if defined(CONFIG_ARCH_C29X)
	CONFIG_SYS_FSL_SEC_IDX_OFFSET,
	2 * CONFIG_SYS_FSL_SEC_IDX_OFFSET
#endif
};

/* Base address of SEC instance 'idx'. */
#define SEC_ADDR(idx)	\
	((CONFIG_SYS_FSL_SEC_ADDR + sec_offset[idx]))

/* Address of job ring 0 within SEC instance 'idx'. */
#define SEC_JR0_ADDR(idx)	\
	(SEC_ADDR(idx) +	\
	 (CONFIG_SYS_FSL_JR0_OFFSET - CONFIG_SYS_FSL_SEC_OFFSET))

/* Software job-ring state, one per SEC instance (only JR0 is used). */
struct jobring jr0[CONFIG_SYS_FSL_MAX_NUM_OF_SEC];
39 static inline void start_jr0(uint8_t sec_idx) 39 static inline void start_jr0(uint8_t sec_idx)
40 { 40 {
41 ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx); 41 ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
42 u32 ctpr_ms = sec_in32(&sec->ctpr_ms); 42 u32 ctpr_ms = sec_in32(&sec->ctpr_ms);
43 u32 scfgr = sec_in32(&sec->scfgr); 43 u32 scfgr = sec_in32(&sec->scfgr);
44 44
45 if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_INCL) { 45 if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_INCL) {
46 /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or 46 /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
47 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SEC_SCFGR_VIRT_EN = 1 47 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SEC_SCFGR_VIRT_EN = 1
48 */ 48 */
49 if ((ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR) || 49 if ((ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR) ||
50 (scfgr & SEC_SCFGR_VIRT_EN)) 50 (scfgr & SEC_SCFGR_VIRT_EN))
51 sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0); 51 sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0);
52 } else { 52 } else {
53 /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */ 53 /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
54 if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR) 54 if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR)
55 sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0); 55 sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0);
56 } 56 }
57 } 57 }
58 58
/* Clear the LIODN (DMA isolation ID) for job ring 0 of this SEC instance. */
static inline void jr_reset_liodn(uint8_t sec_idx)
{
	ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
	sec_out32(&sec->jrliodnr[0].ls, 0);
}
64 64
65 static inline void jr_disable_irq(uint8_t sec_idx) 65 static inline void jr_disable_irq(uint8_t sec_idx)
66 { 66 {
67 struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx); 67 struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
68 uint32_t jrcfg = sec_in32(&regs->jrcfg1); 68 uint32_t jrcfg = sec_in32(&regs->jrcfg1);
69 69
70 jrcfg = jrcfg | JR_INTMASK; 70 jrcfg = jrcfg | JR_INTMASK;
71 71
72 sec_out32(&regs->jrcfg1, jrcfg); 72 sec_out32(&regs->jrcfg1, jrcfg);
73 } 73 }
74 74
/*
 * Program the job-ring hardware with the physical addresses and sizes
 * of the software-allocated input/output rings, then mask interrupts
 * if polling mode is in use.
 */
static void jr_initregs(uint8_t sec_idx)
{
	struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
	struct jobring *jr = &jr0[sec_idx];
	phys_addr_t ip_base = virt_to_phys((void *)jr->input_ring);
	phys_addr_t op_base = virt_to_phys((void *)jr->output_ring);

	/* Input ring base: high word only meaningful on 64-bit phys */
#ifdef CONFIG_PHYS_64BIT
	sec_out32(&regs->irba_h, ip_base >> 32);
#else
	sec_out32(&regs->irba_h, 0x0);
#endif
	sec_out32(&regs->irba_l, (uint32_t)ip_base);
	/* Output ring base, same split */
#ifdef CONFIG_PHYS_64BIT
	sec_out32(&regs->orba_h, op_base >> 32);
#else
	sec_out32(&regs->orba_h, 0x0);
#endif
	sec_out32(&regs->orba_l, (uint32_t)op_base);
	/* Ring sizes (number of entries) */
	sec_out32(&regs->ors, JR_SIZE);
	sec_out32(&regs->irs, JR_SIZE);

	if (!jr->irq)
		jr_disable_irq(sec_idx);
}
100 100
101 static int jr_init(uint8_t sec_idx) 101 static int jr_init(uint8_t sec_idx)
102 { 102 {
103 struct jobring *jr = &jr0[sec_idx]; 103 struct jobring *jr = &jr0[sec_idx];
104 104
105 memset(jr, 0, sizeof(struct jobring)); 105 memset(jr, 0, sizeof(struct jobring));
106 106
107 jr->jq_id = DEFAULT_JR_ID; 107 jr->jq_id = DEFAULT_JR_ID;
108 jr->irq = DEFAULT_IRQ; 108 jr->irq = DEFAULT_IRQ;
109 109
110 #ifdef CONFIG_FSL_CORENET 110 #ifdef CONFIG_FSL_CORENET
111 jr->liodn = DEFAULT_JR_LIODN; 111 jr->liodn = DEFAULT_JR_LIODN;
112 #endif 112 #endif
113 jr->size = JR_SIZE; 113 jr->size = JR_SIZE;
114 jr->input_ring = (dma_addr_t *)memalign(ARCH_DMA_MINALIGN, 114 jr->input_ring = (dma_addr_t *)memalign(ARCH_DMA_MINALIGN,
115 JR_SIZE * sizeof(dma_addr_t)); 115 JR_SIZE * sizeof(dma_addr_t));
116 if (!jr->input_ring) 116 if (!jr->input_ring)
117 return -1; 117 return -1;
118 118
119 jr->op_size = roundup(JR_SIZE * sizeof(struct op_ring), 119 jr->op_size = roundup(JR_SIZE * sizeof(struct op_ring),
120 ARCH_DMA_MINALIGN); 120 ARCH_DMA_MINALIGN);
121 jr->output_ring = 121 jr->output_ring =
122 (struct op_ring *)memalign(ARCH_DMA_MINALIGN, jr->op_size); 122 (struct op_ring *)memalign(ARCH_DMA_MINALIGN, jr->op_size);
123 if (!jr->output_ring) 123 if (!jr->output_ring)
124 return -1; 124 return -1;
125 125
126 memset(jr->input_ring, 0, JR_SIZE * sizeof(dma_addr_t)); 126 memset(jr->input_ring, 0, JR_SIZE * sizeof(dma_addr_t));
127 memset(jr->output_ring, 0, jr->op_size); 127 memset(jr->output_ring, 0, jr->op_size);
128 128
129 start_jr0(sec_idx); 129 start_jr0(sec_idx);
130 130
131 jr_initregs(sec_idx); 131 jr_initregs(sec_idx);
132 132
133 return 0; 133 return 0;
134 } 134 }
135 135
136 static int jr_sw_cleanup(uint8_t sec_idx) 136 static int jr_sw_cleanup(uint8_t sec_idx)
137 { 137 {
138 struct jobring *jr = &jr0[sec_idx]; 138 struct jobring *jr = &jr0[sec_idx];
139 139
140 jr->head = 0; 140 jr->head = 0;
141 jr->tail = 0; 141 jr->tail = 0;
142 jr->read_idx = 0; 142 jr->read_idx = 0;
143 jr->write_idx = 0; 143 jr->write_idx = 0;
144 memset(jr->info, 0, sizeof(jr->info)); 144 memset(jr->info, 0, sizeof(jr->info));
145 memset(jr->input_ring, 0, jr->size * sizeof(dma_addr_t)); 145 memset(jr->input_ring, 0, jr->size * sizeof(dma_addr_t));
146 memset(jr->output_ring, 0, jr->size * sizeof(struct op_ring)); 146 memset(jr->output_ring, 0, jr->size * sizeof(struct op_ring));
147 147
148 return 0; 148 return 0;
149 } 149 }
150 150
151 static int jr_hw_reset(uint8_t sec_idx) 151 static int jr_hw_reset(uint8_t sec_idx)
152 { 152 {
153 struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx); 153 struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
154 uint32_t timeout = 100000; 154 uint32_t timeout = 100000;
155 uint32_t jrint, jrcr; 155 uint32_t jrint, jrcr;
156 156
157 sec_out32(&regs->jrcr, JRCR_RESET); 157 sec_out32(&regs->jrcr, JRCR_RESET);
158 do { 158 do {
159 jrint = sec_in32(&regs->jrint); 159 jrint = sec_in32(&regs->jrint);
160 } while (((jrint & JRINT_ERR_HALT_MASK) == 160 } while (((jrint & JRINT_ERR_HALT_MASK) ==
161 JRINT_ERR_HALT_INPROGRESS) && --timeout); 161 JRINT_ERR_HALT_INPROGRESS) && --timeout);
162 162
163 jrint = sec_in32(&regs->jrint); 163 jrint = sec_in32(&regs->jrint);
164 if (((jrint & JRINT_ERR_HALT_MASK) != 164 if (((jrint & JRINT_ERR_HALT_MASK) !=
165 JRINT_ERR_HALT_INPROGRESS) && timeout == 0) 165 JRINT_ERR_HALT_INPROGRESS) && timeout == 0)
166 return -1; 166 return -1;
167 167
168 timeout = 100000; 168 timeout = 100000;
169 sec_out32(&regs->jrcr, JRCR_RESET); 169 sec_out32(&regs->jrcr, JRCR_RESET);
170 do { 170 do {
171 jrcr = sec_in32(&regs->jrcr); 171 jrcr = sec_in32(&regs->jrcr);
172 } while ((jrcr & JRCR_RESET) && --timeout); 172 } while ((jrcr & JRCR_RESET) && --timeout);
173 173
174 if (timeout == 0) 174 if (timeout == 0)
175 return -1; 175 return -1;
176 176
177 return 0; 177 return 0;
178 } 178 }
179 179
/* -1 --- error, can't enqueue -- no space available */
/*
 * Submit one descriptor to the job ring: byte-swap it to SEC
 * endianness, record completion bookkeeping at the current head,
 * write its physical address into the input ring (flushing caches so
 * the SEC DMA sees it), and ring the doorbell.
 */
static int jr_enqueue(uint32_t *desc_addr,
		      void (*callback)(uint32_t status, void *arg),
		      void *arg, uint8_t sec_idx)
{
	struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
	struct jobring *jr = &jr0[sec_idx];
	int head = jr->head;
	uint32_t desc_word;
	int length = desc_len(desc_addr);
	int i;
#ifdef CONFIG_PHYS_64BIT
	uint32_t *addr_hi, *addr_lo;
#endif

	/* The descriptor must be submitted to SEC block as per endianness
	 * of the SEC Block.
	 * So, if the endianness of Core and SEC block is different, each word
	 * of the descriptor will be byte-swapped.
	 */
	for (i = 0; i < length; i++) {
		desc_word = desc_addr[i];
		sec_out32((uint32_t *)&desc_addr[i], desc_word);
	}

	phys_addr_t desc_phys_addr = virt_to_phys(desc_addr);

	/* Remember the job at the head slot so jr_dequeue() can match it */
	jr->info[head].desc_phys_addr = desc_phys_addr;
	jr->info[head].callback = (void *)callback;
	jr->info[head].arg = arg;
	jr->info[head].op_done = 0;

	/* Flush the info entry (rounded to cache lines) to memory */
	unsigned long start = (unsigned long)&jr->info[head] &
					~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN((unsigned long)&jr->info[head] +
				  sizeof(struct jr_info), ARCH_DMA_MINALIGN);
	flush_dcache_range(start, end);

#ifdef CONFIG_PHYS_64BIT
	/* Write the 64 bit Descriptor address on Input Ring.
	 * The 32 bit high and low part of the address will
	 * depend on endianness of SEC block.
	 */
#ifdef CONFIG_SYS_FSL_SEC_LE
	addr_lo = (uint32_t *)(&jr->input_ring[head]);
	addr_hi = (uint32_t *)(&jr->input_ring[head]) + 1;
#elif defined(CONFIG_SYS_FSL_SEC_BE)
	addr_hi = (uint32_t *)(&jr->input_ring[head]);
	addr_lo = (uint32_t *)(&jr->input_ring[head]) + 1;
#endif /* ifdef CONFIG_SYS_FSL_SEC_LE */

	sec_out32(addr_hi, (uint32_t)(desc_phys_addr >> 32));
	sec_out32(addr_lo, (uint32_t)(desc_phys_addr));

#else
	/* Write the 32 bit Descriptor address on Input Ring. */
	sec_out32(&jr->input_ring[head], desc_phys_addr);
#endif /* ifdef CONFIG_PHYS_64BIT */

	/* Flush the input-ring slot so the SEC sees the new address */
	start = (unsigned long)&jr->input_ring[head] & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)&jr->input_ring[head] +
		    sizeof(dma_addr_t), ARCH_DMA_MINALIGN);
	flush_dcache_range(start, end);

	jr->head = (head + 1) & (jr->size - 1);

	/* Invalidate output ring */
	start = (unsigned long)jr->output_ring &
					~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)jr->output_ring + jr->op_size,
		    ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);

	/* Tell the hardware one new job is available on the input ring */
	sec_out32(&regs->irja, 1);

	return 0;
}
257 257
/*
 * Drain completed jobs from the output ring. Jobs may complete out of
 * order; each completion is matched back to its jr->info[] entry by
 * descriptor physical address, the tail is advanced past every
 * contiguous finished job, and the job's callback is invoked with the
 * hardware status word.
 *
 * Returns 0 on success, -1 if a completed descriptor cannot be matched
 * to any outstanding job.
 */
static int jr_dequeue(int sec_idx)
{
	struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
	struct jobring *jr = &jr0[sec_idx];
	int head = jr->head;
	int tail = jr->tail;
	int idx, i, found;
	void (*callback)(uint32_t status, void *arg);
	void *arg = NULL;
#ifdef CONFIG_PHYS_64BIT
	uint32_t *addr_hi, *addr_lo;
#else
	uint32_t *addr;
#endif

	/* Loop while hardware reports completed jobs AND we have jobs
	 * outstanding in software.
	 */
	while (sec_in32(&regs->orsf) && CIRC_CNT(jr->head, jr->tail,
						 jr->size)) {

		found = 0;

		phys_addr_t op_desc;
#ifdef CONFIG_PHYS_64BIT
		/* Read the 64 bit Descriptor address from Output Ring.
		 * The 32 bit high and low part of the address will
		 * depend on endianness of SEC block.
		 */
#ifdef CONFIG_SYS_FSL_SEC_LE
		addr_lo = (uint32_t *)(&jr->output_ring[jr->tail].desc);
		addr_hi = (uint32_t *)(&jr->output_ring[jr->tail].desc) + 1;
#elif defined(CONFIG_SYS_FSL_SEC_BE)
		addr_hi = (uint32_t *)(&jr->output_ring[jr->tail].desc);
		addr_lo = (uint32_t *)(&jr->output_ring[jr->tail].desc) + 1;
#endif /* ifdef CONFIG_SYS_FSL_SEC_LE */

		op_desc = ((u64)sec_in32(addr_hi) << 32) |
			  ((u64)sec_in32(addr_lo));

#else
		/* Read the 32 bit Descriptor address from Output Ring. */
		addr = (uint32_t *)&jr->output_ring[jr->tail].desc;
		op_desc = sec_in32(addr);
#endif /* ifdef CONFIG_PHYS_64BIT */

		uint32_t status = sec_in32(&jr->output_ring[jr->tail].status);

		/* Match the completed descriptor to an outstanding job */
		for (i = 0; CIRC_CNT(head, tail + i, jr->size) >= 1; i++) {
			idx = (tail + i) & (jr->size - 1);
			if (op_desc == jr->info[idx].desc_phys_addr) {
				found = 1;
				break;
			}
		}

		/* Error condition if match not found */
		if (!found)
			return -1;

		jr->info[idx].op_done = 1;
		callback = (void *)jr->info[idx].callback;
		arg = jr->info[idx].arg;

		/* When the job on tail idx gets done, increment
		 * tail till the point where job completed out of order has
		 * been taken into account
		 */
		if (idx == tail)
			do {
				tail = (tail + 1) & (jr->size - 1);
			} while (jr->info[tail].op_done);

		jr->tail = tail;
		jr->read_idx = (jr->read_idx + 1) & (jr->size - 1);

		/* Release one output-ring entry back to the hardware */
		sec_out32(&regs->orjr, 1);
		jr->info[idx].op_done = 0;

		callback(status, arg);
	}

	return 0;
}
339 339
340 static void desc_done(uint32_t status, void *arg) 340 static void desc_done(uint32_t status, void *arg)
341 { 341 {
342 struct result *x = arg; 342 struct result *x = arg;
343 x->status = status; 343 x->status = status;
344 #ifndef CONFIG_SPL_BUILD 344 #ifndef CONFIG_SPL_BUILD
345 caam_jr_strstatus(status); 345 caam_jr_strstatus(status);
346 #endif 346 #endif
347 x->done = 1; 347 x->done = 1;
348 } 348 }
349 349
350 static inline int run_descriptor_jr_idx(uint32_t *desc, uint8_t sec_idx) 350 static inline int run_descriptor_jr_idx(uint32_t *desc, uint8_t sec_idx)
351 { 351 {
352 unsigned long long timeval = get_ticks(); 352 unsigned long long timeval = get_ticks();
353 unsigned long long timeout = usec2ticks(CONFIG_SEC_DEQ_TIMEOUT); 353 unsigned long long timeout = usec2ticks(CONFIG_SEC_DEQ_TIMEOUT);
354 struct result op; 354 struct result op;
355 int ret = 0; 355 int ret = 0;
356 356
357 memset(&op, 0, sizeof(op)); 357 memset(&op, 0, sizeof(op));
358 358
359 ret = jr_enqueue(desc, desc_done, &op, sec_idx); 359 ret = jr_enqueue(desc, desc_done, &op, sec_idx);
360 if (ret) { 360 if (ret) {
361 debug("Error in SEC enq\n"); 361 debug("Error in SEC enq\n");
362 ret = JQ_ENQ_ERR; 362 ret = JQ_ENQ_ERR;
363 goto out; 363 goto out;
364 } 364 }
365 365
366 timeval = get_ticks(); 366 timeval = get_ticks();
367 timeout = usec2ticks(CONFIG_SEC_DEQ_TIMEOUT); 367 timeout = usec2ticks(CONFIG_SEC_DEQ_TIMEOUT);
368 while (op.done != 1) { 368 while (op.done != 1) {
369 ret = jr_dequeue(sec_idx); 369 ret = jr_dequeue(sec_idx);
370 if (ret) { 370 if (ret) {
371 debug("Error in SEC deq\n"); 371 debug("Error in SEC deq\n");
372 ret = JQ_DEQ_ERR; 372 ret = JQ_DEQ_ERR;
373 goto out; 373 goto out;
374 } 374 }
375 375
376 if ((get_ticks() - timeval) > timeout) { 376 if ((get_ticks() - timeval) > timeout) {
377 debug("SEC Dequeue timed out\n"); 377 debug("SEC Dequeue timed out\n");
378 ret = JQ_DEQ_TO_ERR; 378 ret = JQ_DEQ_TO_ERR;
379 goto out; 379 goto out;
380 } 380 }
381 } 381 }
382 382
383 if (op.status) { 383 if (op.status) {
384 debug("Error %x\n", op.status); 384 debug("Error %x\n", op.status);
385 ret = op.status; 385 ret = op.status;
386 } 386 }
387 out: 387 out:
388 return ret; 388 return ret;
389 } 389 }
390 390
/* Public wrapper: run a descriptor on the first (default) SEC instance. */
int run_descriptor_jr(uint32_t *desc)
{
	return run_descriptor_jr_idx(desc, 0);
}
395 395
396 static inline int jr_reset_sec(uint8_t sec_idx) 396 static inline int jr_reset_sec(uint8_t sec_idx)
397 { 397 {
398 if (jr_hw_reset(sec_idx) < 0) 398 if (jr_hw_reset(sec_idx) < 0)
399 return -1; 399 return -1;
400 400
401 /* Clean up the jobring structure maintained by software */ 401 /* Clean up the jobring structure maintained by software */
402 jr_sw_cleanup(sec_idx); 402 jr_sw_cleanup(sec_idx);
403 403
404 return 0; 404 return 0;
405 } 405 }
406 406
/* Public wrapper: reset the job ring of the first (default) SEC instance. */
int jr_reset(void)
{
	return jr_reset_sec(0);
}
411 411
/*
 * Full software reset of one SEC instance via MCFGR: assert SWRST,
 * then DMA_RST, and wait for each self-clearing bit in turn.
 * Returns 0 on success, -1 if either bit fails to clear in time.
 */
static inline int sec_reset_idx(uint8_t sec_idx)
{
	ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
	uint32_t mcfgr = sec_in32(&sec->mcfgr);
	uint32_t timeout = 100000;

	mcfgr |= MCFGR_SWRST;
	sec_out32(&sec->mcfgr, mcfgr);

	mcfgr |= MCFGR_DMA_RST;
	sec_out32(&sec->mcfgr, mcfgr);
	/* DMA_RST self-clears when the DMA reset completes */
	do {
		mcfgr = sec_in32(&sec->mcfgr);
	} while ((mcfgr & MCFGR_DMA_RST) == MCFGR_DMA_RST && --timeout);

	if (timeout == 0)
		return -1;

	/* SWRST self-clears when the software reset completes */
	timeout = 100000;
	do {
		mcfgr = sec_in32(&sec->mcfgr);
	} while ((mcfgr & MCFGR_SWRST) == MCFGR_SWRST && --timeout);

	if (timeout == 0)
		return -1;

	return 0;
}
/* Public wrapper: reset the first (default) SEC instance. */
int sec_reset(void)
{
	return sec_reset_idx(0);
}
444 #ifndef CONFIG_SPL_BUILD 444 #ifndef CONFIG_SPL_BUILD
445 static int instantiate_rng(uint8_t sec_idx) 445 static int instantiate_rng(uint8_t sec_idx)
446 { 446 {
447 struct result op; 447 struct result op;
448 u32 *desc; 448 u32 *desc;
449 u32 rdsta_val; 449 u32 rdsta_val;
450 int ret = 0; 450 int ret = 0;
451 ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx); 451 ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx);
452 struct rng4tst __iomem *rng = 452 struct rng4tst __iomem *rng =
453 (struct rng4tst __iomem *)&sec->rng; 453 (struct rng4tst __iomem *)&sec->rng;
454 454
455 memset(&op, 0, sizeof(struct result)); 455 memset(&op, 0, sizeof(struct result));
456 456
457 desc = memalign(ARCH_DMA_MINALIGN, sizeof(uint32_t) * 6); 457 desc = memalign(ARCH_DMA_MINALIGN, sizeof(uint32_t) * 6);
458 if (!desc) { 458 if (!desc) {
459 printf("cannot allocate RNG init descriptor memory\n"); 459 printf("cannot allocate RNG init descriptor memory\n");
460 return -1; 460 return -1;
461 } 461 }
462 462
463 inline_cnstr_jobdesc_rng_instantiation(desc); 463 inline_cnstr_jobdesc_rng_instantiation(desc);
464 int size = roundup(sizeof(uint32_t) * 6, ARCH_DMA_MINALIGN); 464 int size = roundup(sizeof(uint32_t) * 6, ARCH_DMA_MINALIGN);
465 flush_dcache_range((unsigned long)desc, 465 flush_dcache_range((unsigned long)desc,
466 (unsigned long)desc + size); 466 (unsigned long)desc + size);
467 467
468 ret = run_descriptor_jr_idx(desc, sec_idx); 468 ret = run_descriptor_jr_idx(desc, sec_idx);
469 469
470 if (ret) 470 if (ret)
471 printf("RNG: Instantiation failed with error %x\n", ret); 471 printf("RNG: Instantiation failed with error %x\n", ret);
472 472
473 rdsta_val = sec_in32(&rng->rdsta); 473 rdsta_val = sec_in32(&rng->rdsta);
474 if (op.status || !(rdsta_val & RNG_STATE0_HANDLE_INSTANTIATED)) 474 if (op.status || !(rdsta_val & RNG_STATE0_HANDLE_INSTANTIATED))
475 return -1; 475 return -1;
476 476
477 return ret; 477 return ret;
478 } 478 }
479 479
480 static u8 get_rng_vid(uint8_t sec_idx) 480 static u8 get_rng_vid(uint8_t sec_idx)
481 { 481 {
482 ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx); 482 ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
483 u32 cha_vid = sec_in32(&sec->chavid_ls); 483 u32 cha_vid = sec_in32(&sec->chavid_ls);
484 484
485 return (cha_vid & SEC_CHAVID_RNG_LS_MASK) >> SEC_CHAVID_LS_RNG_SHIFT; 485 return (cha_vid & SEC_CHAVID_RNG_LS_MASK) >> SEC_CHAVID_LS_RNG_SHIFT;
486 } 486 }
487 487
/*
 * By default, the TRNG runs for 200 clocks per sample;
 * 1200 clocks per sample generates better entropy.
 */
static void kick_trng(int ent_delay, uint8_t sec_idx)
{
	ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx);
	struct rng4tst __iomem *rng =
			(struct rng4tst __iomem *)&sec->rng;
	u32 val;

	/* put RNG4 into program mode */
	sec_setbits32(&rng->rtmctl, RTMCTL_PRGM);
	/* rtsdctl bits 0-15 contain "Entropy Delay", which defines the
	 * length (in system clocks) of each Entropy sample taken
	 */
	val = sec_in32(&rng->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	sec_out32(&rng->rtsdctl, val);
	/* min. freq. count, equal to 1/4 of the entropy sample length */
	sec_out32(&rng->rtfreqmin, ent_delay >> 2);
	/* disable maximum frequency count */
	sec_out32(&rng->rtfreqmax, RTFRQMAX_DISABLE);
	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker
	 */
	sec_setbits32(&rng->rtmctl, RTMCTL_SAMP_MODE_RAW_ES_SC);
	/* put RNG4 into run mode */
	sec_clrbits32(&rng->rtmctl, RTMCTL_PRGM);
}
520 520
/*
 * Instantiate RNG4 state handle 0 if not already done, retrying with
 * progressively longer entropy-delay values until instantiation
 * succeeds or the delay ceiling is reached. On success, enable the
 * Random Data Buffer (RDB) for faster RNG operation.
 *
 * Returns 0 on success (or if already instantiated), non-zero on
 * failure.
 */
static int rng_init(uint8_t sec_idx)
{
	int ret, ent_delay = RTSDCTL_ENT_DLY_MIN;
	ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx);
	struct rng4tst __iomem *rng =
			(struct rng4tst __iomem *)&sec->rng;

	u32 rdsta = sec_in32(&rng->rdsta);

	/* Check if RNG state 0 handler is already instantiated */
	if (rdsta & RNG_STATE0_HANDLE_INSTANTIATED)
		return 0;

	do {
		/*
		 * If either of the SH's were instantiated by somebody else
		 * then it is assumed that the entropy
		 * parameters are properly set and thus the function
		 * setting these (kick_trng(...)) is skipped.
		 * Also, if a handle was instantiated, do not change
		 * the TRNG parameters.
		 */
		kick_trng(ent_delay, sec_idx);
		ent_delay += 400;
		/*
		 * if instantiate_rng(...) fails, the loop will rerun
		 * and the kick_trng(...) function will modify the
		 * upper and lower limits of the entropy sampling
		 * interval, leading to a successful initialization of
		 * the RNG.
		 */
		ret = instantiate_rng(sec_idx);
	} while ((ret == -1) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
	if (ret) {
		printf("RNG: Failed to instantiate RNG\n");
		return ret;
	}

	/* Enable RDB bit so that RNG works faster */
	sec_setbits32(&sec->scfgr, SEC_SCFGR_RDBENABLE);

	return ret;
}
564 #endif 564 #endif
/*
 * sec_init_idx() - initialise SEC (CAAM) engine number @sec_idx.
 *
 * Programs the master configuration (AXI cache attributes, pointer
 * size), assigns every job ring to the non-secure (non-TrustZone)
 * world, initialises the job-ring driver and - outside of SPL - runs
 * the RNG instantiation for RNG hardware version >= 4.
 *
 * Return: 0 on success, -1 on failure.
 */
int sec_init_idx(uint8_t sec_idx)
{
	ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
	uint32_t mcr = sec_in32(&sec->mcfgr);
	uint32_t jrown_ns;
	int i;
	int ret = 0;

#ifdef CONFIG_FSL_CORENET
	uint32_t liodnr;
	uint32_t liodn_ns;
	uint32_t liodn_s;
#endif

	/* Reject indices beyond the SoC's SEC instance count */
	if (!(sec_idx < CONFIG_SYS_FSL_MAX_NUM_OF_SEC)) {
		printf("SEC initialization failed\n");
		return -1;
	}

	/*
	 * Modifying CAAM Read/Write Attributes
	 * For LS2080A
	 * For AXI Write - Cacheable, Write Back, Write allocate
	 * For AXI Read - Cacheable, Read allocate
	 * Only For LS2080a, to solve CAAM coherency issues
	 */
#ifdef CONFIG_ARCH_LS2080A
	mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0xb << MCFGR_AWCACHE_SHIFT);
	mcr = (mcr & ~MCFGR_ARCACHE_MASK) | (0x6 << MCFGR_ARCACHE_SHIFT);
#else
	mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT);
#endif

#ifdef CONFIG_PHYS_64BIT
	/* Enable the large pointer-size mode for 64-bit physical addresses */
	mcr |= (1 << MCFGR_PS_SHIFT);
#endif
	sec_out32(&sec->mcfgr, mcr);

#ifdef CONFIG_FSL_CORENET
#ifdef CONFIG_SPL_BUILD
	/*
	 * For SPL Build, Set the Liodns in SEC JR0 for
	 * creating PAMU entries corresponding to these.
	 * For normal build, these are set in set_liodns().
	 */
	liodn_ns = CONFIG_SPL_JR0_LIODN_NS & JRNSLIODN_MASK;
	liodn_s = CONFIG_SPL_JR0_LIODN_S & JRSLIODN_MASK;

	liodnr = sec_in32(&sec->jrliodnr[0].ls) &
		 ~(JRNSLIODN_MASK | JRSLIODN_MASK);
	liodnr = liodnr |
		 (liodn_ns << JRNSLIODN_SHIFT) |
		 (liodn_s << JRSLIODN_SHIFT);
	sec_out32(&sec->jrliodnr[0].ls, liodnr);
#else
	/* Normal build: read back the LIODNs programmed by set_liodns() */
	liodnr = sec_in32(&sec->jrliodnr[0].ls);
	liodn_ns = (liodnr & JRNSLIODN_MASK) >> JRNSLIODN_SHIFT;
	liodn_s = (liodnr & JRSLIODN_MASK) >> JRSLIODN_SHIFT;
#endif
#endif

	/*
	 * Set ownership of job rings to non-TrustZone mode by default.
	 * Both JROWN_NS and the non-secure MID (JRMID_NS) must be set for
	 * the rings to remain accessible from the normal world once
	 * TrustZone is enabled; secure firmware that wants a ring can
	 * clear JROWN_NS for itself later.
	 */
	for (i = 0; i < ARRAY_SIZE(sec->jrliodnr); i++) {
		jrown_ns = sec_in32(&sec->jrliodnr[i].ms);
		jrown_ns |= JROWN_NS | JRMID_NS;
		sec_out32(&sec->jrliodnr[i].ms, jrown_ns);
	}

	ret = jr_init(sec_idx);
	if (ret < 0) {
		printf("SEC initialization failed\n");
		return -1;
	}

#ifdef CONFIG_FSL_CORENET
	/* Map the job-ring LIODNs through the PAMU (IOMMU) */
	ret = sec_config_pamu_table(liodn_ns, liodn_s);
	if (ret < 0)
		return -1;

	pamu_enable();
#endif
#ifndef CONFIG_SPL_BUILD
	if (get_rng_vid(sec_idx) >= 4) {
		if (rng_init(sec_idx) < 0) {
			printf("SEC%u: RNG instantiation failed\n", sec_idx);
			return -1;
		}
		printf("SEC%u: RNG instantiated\n", sec_idx);
	}
#endif
	return ret;
}
648 657
/*
 * sec_init() - initialise the first (index 0) SEC engine.
 *
 * Convenience wrapper around sec_init_idx() for the common
 * single-SEC case.  Return: 0 on success, -1 on failure.
 */
int sec_init(void)
{
	return sec_init_idx(0);
}
653 662
drivers/crypto/fsl/jr.h
1 /* 1 /*
2 * Copyright 2008-2014 Freescale Semiconductor, Inc. 2 * Copyright 2008-2014 Freescale Semiconductor, Inc.
3 * 3 *
4 * SPDX-License-Identifier: GPL-2.0+ 4 * SPDX-License-Identifier: GPL-2.0+
5 * 5 *
6 */ 6 */
7 7
8 #ifndef __JR_H 8 #ifndef __JR_H
9 #define __JR_H 9 #define __JR_H
10 10
11 #include <linux/compiler.h> 11 #include <linux/compiler.h>
12 12
/* Number of entries in each software-managed job ring */
#define JR_SIZE 4
/* Timeout currently defined as 90 sec */
#define CONFIG_SEC_DEQ_TIMEOUT	90000000U

#define DEFAULT_JR_ID		0
#define DEFAULT_JR_LIODN	0
#define DEFAULT_IRQ		0	/* Interrupts not to be configured */

#define MCFGR_SWRST       ((uint32_t)(1)<<31) /* Software Reset */
#define MCFGR_DMA_RST     ((uint32_t)(1)<<28) /* DMA Reset */
#define MCFGR_PS_SHIFT          16
#define MCFGR_AWCACHE_SHIFT	8
#define MCFGR_AWCACHE_MASK	(0xf << MCFGR_AWCACHE_SHIFT)
#define MCFGR_ARCACHE_SHIFT	12
#define MCFGR_ARCACHE_MASK	(0xf << MCFGR_ARCACHE_SHIFT)

#define JR_INTMASK	  0x00000001
#define JRCR_RESET                  0x01
#define JRINT_ERR_HALT_INPROGRESS   0x4
#define JRINT_ERR_HALT_MASK         0xc
#define JRNSLIODN_SHIFT		16
#define JRNSLIODN_MASK		0x0fff0000
#define JRSLIODN_SHIFT		0
#define JRSLIODN_MASK		0x00000fff
/*
 * JRaMIDR_MS bits: JROWN_NS assigns the job ring to the non-TrustZone
 * world, JRMID_NS selects the non-secure MID 001.  Both must be set for
 * a ring to be usable from non-secure contexts (e.g. Linux) once
 * TrustZone is enabled.
 */
#define JROWN_NS	0x00000008
#define JRMID_NS	0x00000001

#define JQ_DEQ_ERR		-1
#define JQ_DEQ_TO_ERR		-2
#define JQ_ENQ_ERR		-3
41 43
/*
 * One output (completion) ring entry: the descriptor address and the
 * status word.  __packed because the layout is shared with the SEC
 * hardware and must not contain compiler padding.
 */
struct op_ring {
	phys_addr_t desc;	/* physical address of completed descriptor */
	uint32_t status;	/* CAAM completion status word */
} __packed;
46 48
/* Per-slot software bookkeeping for a descriptor submitted to a ring */
struct jr_info {
	/* invoked on completion with the CAAM status and @arg */
	void (*callback)(uint32_t status, void *arg);
	phys_addr_t desc_phys_addr;	/* physical address of the descriptor */
	uint32_t desc_len;		/* descriptor length */
	/* NOTE(review): presumably flags job completion - confirm in jr.c */
	uint32_t op_done;
	void *arg;			/* opaque context passed to callback */
};
54 56
/* Software state for one CAAM job ring (input ring + output ring) */
struct jobring {
	int jq_id;	/* job queue/ring id */
	int irq;	/* interrupt number (DEFAULT_IRQ = not configured) */
	int liodn;	/* logical I/O device number for this ring */
	/* Head is the index where software would enq the descriptor in
	 * the i/p ring
	 */
	int head;
	/* Tail index would be used by s/w while enqueuing to determine if
	 * there is any space left in the s/w maintained i/p rings
	 */
	/* Also in case of deq tail will be incremented only in case of
	 * in-order job completion
	 */
	int tail;
	/* Read index of the output ring. It may not match with tail in case
	 * of out of order completion
	 */
	int read_idx;
	/* Write index to input ring. Would be always equal to head */
	int write_idx;
	/* Size of the rings. */
	int size;
	/* Op ring size aligned to cache line size */
	int op_size;
	/* The i/p and output rings have to be accessed by SEC. So the
	 * pointers will have to point to the housekeeping region provided
	 * by SEC
	 */
	/* Circular ring of i/p descriptors */
	dma_addr_t *input_ring;
	/* Circular ring of o/p descriptors */
	/* Circular ring containing info regarding descriptors in i/p
	 * and o/p ring
	 */
	/* This ring can be on the stack */
	struct jr_info info[JR_SIZE];
	struct op_ring *output_ring;
	/* Offset in CCSR to the SEC engine to which this JR belongs */
	uint32_t sec_offset;

};
97 99
/* Outcome of a synchronously run descriptor */
struct result {
	int done;		/* set non-zero when the job has finished */
	uint32_t status;	/* CAAM status word reported for the job */
};
102 104
/* NOTE(review): decodes/prints a CAAM status word - confirm in jr.c */
void caam_jr_strstatus(u32 status);
/* Submit @desc to a job ring and wait for completion; returns JQ_* error */
int run_descriptor_jr(uint32_t *desc);
105 107
106 #endif 108 #endif
107 109