Commit 625d4faf5c173b0e6f81b9efded32ad4466a4c09

Authored by Breno Lima
Committed by Ye Li
1 parent 57fa56f3ce

MLK-21386 Revert "drivers/crypto/fsl: assign job-rings to non-TrustZone"

Commit 22191ac35344 ("drivers/crypto/fsl: assign job-rings to
 non-TrustZone") breaks HABv4 encrypted boot support in the
following i.MX devices:

- i.MX6UL
- i.MX7S
- i.MX7D
- i.MX7ULP

For preparing a HABv4 encrypted boot image it's necessary to
encapsulate the generated DEK in a blob. The blob generation
function takes into consideration the Job Ring TrustZone
ownership configuration (JROWN_NS) and can be only decapsulated
by the same configuration.

The ROM code expects DEK blobs encapsulated by the Secure World
environments which commonly have JROWN_NS = 0.

As U-Boot is running in Secure World we must have JROWN_NS=0
so the blobs generated by dek_blob tool can be decapsulated
by the ROM code.

As the NXP BSP does not require all job-rings to be assigned to the
non-Secure world, this commit can be safely reverted.

This reverts commit 22191ac353445ad8fafc5a78aefcd94e78963041.

Reviewed-by: Silvano Di Ninno <silvano.dininno@nxp.com>
Signed-off-by: Breno Lima <breno.lima@nxp.com>
(cherry picked from commit 3eebc76f5571f7ce74d385235019e8eb4a6718f6)

Showing 2 changed files with 0 additions and 11 deletions Inline Diff

drivers/crypto/fsl/jr.c
/*
 * Copyright 2008-2014 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Based on CAAM driver in drivers/crypto/caam in Linux
 */
9 9
10 #include <common.h> 10 #include <common.h>
11 #include <malloc.h> 11 #include <malloc.h>
12 #include "fsl_sec.h" 12 #include "fsl_sec.h"
13 #include "jr.h" 13 #include "jr.h"
14 #include "jobdesc.h" 14 #include "jobdesc.h"
15 #include "desc_constr.h" 15 #include "desc_constr.h"
16 #ifdef CONFIG_FSL_CORENET 16 #ifdef CONFIG_FSL_CORENET
17 #include <asm/fsl_pamu.h> 17 #include <asm/fsl_pamu.h>
18 #endif 18 #endif
19 19
/*
 * Number of occupied entries in a circular ring of power-of-two size
 * 'size', given producer index 'head' and consumer index 'tail'.
 * Fix: parenthesize the 'size' argument as well, so an expression
 * argument (e.g. CIRC_CNT(h, t, n + 1)) expands correctly.
 */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))
/* Free entries remaining in the ring (one slot is kept unused). */
#define CIRC_SPACE(head, tail, size)	CIRC_CNT((tail), (head) + 1, (size))
22 22
/*
 * Per-instance register offsets of each SEC engine relative to
 * CONFIG_SYS_FSL_SEC_ADDR. Only C29x parts have more than one SEC block.
 */
uint32_t sec_offset[CONFIG_SYS_FSL_MAX_NUM_OF_SEC] = {
	0,
#if defined(CONFIG_ARCH_C29X)
	CONFIG_SYS_FSL_SEC_IDX_OFFSET,
	2 * CONFIG_SYS_FSL_SEC_IDX_OFFSET
#endif
};
30 30
/* CCSR base address of SEC engine instance 'idx'. */
#define SEC_ADDR(idx)	\
	(ulong)((CONFIG_SYS_FSL_SEC_ADDR + sec_offset[idx]))

/* Base address of job ring 0 inside SEC engine instance 'idx'. */
#define SEC_JR0_ADDR(idx)	\
	(ulong)(SEC_ADDR(idx) +	\
	 (CONFIG_SYS_FSL_JR0_OFFSET - CONFIG_SYS_FSL_SEC_OFFSET))
37 37
38 struct jobring jr0[CONFIG_SYS_FSL_MAX_NUM_OF_SEC]; 38 struct jobring jr0[CONFIG_SYS_FSL_MAX_NUM_OF_SEC];
39 39
40 static inline void start_jr0(uint8_t sec_idx) 40 static inline void start_jr0(uint8_t sec_idx)
41 { 41 {
42 ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx); 42 ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
43 u32 ctpr_ms = sec_in32(&sec->ctpr_ms); 43 u32 ctpr_ms = sec_in32(&sec->ctpr_ms);
44 u32 scfgr = sec_in32(&sec->scfgr); 44 u32 scfgr = sec_in32(&sec->scfgr);
45 45
46 if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_INCL) { 46 if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_INCL) {
47 /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or 47 /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
48 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SEC_SCFGR_VIRT_EN = 1 48 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SEC_SCFGR_VIRT_EN = 1
49 */ 49 */
50 if ((ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR) || 50 if ((ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR) ||
51 (scfgr & SEC_SCFGR_VIRT_EN)) 51 (scfgr & SEC_SCFGR_VIRT_EN))
52 sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0); 52 sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0);
53 } else { 53 } else {
54 /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */ 54 /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
55 if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR) 55 if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR)
56 sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0); 56 sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0);
57 } 57 }
58 } 58 }
59 59
/* Clear the LIODN (least significant half) for job ring 0. */
static inline void jr_reset_liodn(uint8_t sec_idx)
{
	ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);

	sec_out32(&sec->jrliodnr[0].ls, 0);
}
65 65
66 static inline void jr_disable_irq(uint8_t sec_idx) 66 static inline void jr_disable_irq(uint8_t sec_idx)
67 { 67 {
68 struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx); 68 struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
69 uint32_t jrcfg = sec_in32(&regs->jrcfg1); 69 uint32_t jrcfg = sec_in32(&regs->jrcfg1);
70 70
71 jrcfg = jrcfg | JR_INTMASK; 71 jrcfg = jrcfg | JR_INTMASK;
72 72
73 sec_out32(&regs->jrcfg1, jrcfg); 73 sec_out32(&regs->jrcfg1, jrcfg);
74 } 74 }
75 75
/*
 * Program the job ring hardware with the physical addresses and sizes of
 * the software-allocated input and output rings, then mask interrupts if
 * the ring is configured for polled operation.
 */
static void jr_initregs(uint8_t sec_idx)
{
	struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
	struct jobring *jr = &jr0[sec_idx];
	phys_addr_t ip_base = virt_to_phys((void *)jr->input_ring);
	phys_addr_t op_base = virt_to_phys((void *)jr->output_ring);

	/* High halves of the base addresses are only meaningful on 64-bit
	 * physical-address parts (and not on i.MX8M). */
#if defined(CONFIG_PHYS_64BIT) && !defined(CONFIG_IMX8M)
	sec_out32(&regs->irba_h, ip_base >> 32);
#else
	sec_out32(&regs->irba_h, 0x0);
#endif
	sec_out32(&regs->irba_l, (uint32_t)ip_base);
#if defined(CONFIG_PHYS_64BIT) && !defined(CONFIG_IMX8M)
	sec_out32(&regs->orba_h, op_base >> 32);
#else
	sec_out32(&regs->orba_h, 0x0);
#endif
	sec_out32(&regs->orba_l, (uint32_t)op_base);
	sec_out32(&regs->ors, JR_SIZE);
	sec_out32(&regs->irs, JR_SIZE);

	if (!jr->irq)
		jr_disable_irq(sec_idx);
}
101 101
102 static int jr_init(uint8_t sec_idx) 102 static int jr_init(uint8_t sec_idx)
103 { 103 {
104 struct jobring *jr = &jr0[sec_idx]; 104 struct jobring *jr = &jr0[sec_idx];
105 105
106 memset(jr, 0, sizeof(struct jobring)); 106 memset(jr, 0, sizeof(struct jobring));
107 107
108 jr->jq_id = DEFAULT_JR_ID; 108 jr->jq_id = DEFAULT_JR_ID;
109 jr->irq = DEFAULT_IRQ; 109 jr->irq = DEFAULT_IRQ;
110 110
111 #ifdef CONFIG_FSL_CORENET 111 #ifdef CONFIG_FSL_CORENET
112 jr->liodn = DEFAULT_JR_LIODN; 112 jr->liodn = DEFAULT_JR_LIODN;
113 #endif 113 #endif
114 jr->size = JR_SIZE; 114 jr->size = JR_SIZE;
115 jr->input_ring = (uint32_t *)memalign(ARCH_DMA_MINALIGN, 115 jr->input_ring = (uint32_t *)memalign(ARCH_DMA_MINALIGN,
116 JR_SIZE * sizeof(dma_addr_t)); 116 JR_SIZE * sizeof(dma_addr_t));
117 if (!jr->input_ring) 117 if (!jr->input_ring)
118 return -1; 118 return -1;
119 119
120 jr->op_size = roundup(JR_SIZE * sizeof(struct op_ring), 120 jr->op_size = roundup(JR_SIZE * sizeof(struct op_ring),
121 ARCH_DMA_MINALIGN); 121 ARCH_DMA_MINALIGN);
122 jr->output_ring = 122 jr->output_ring =
123 (struct op_ring *)memalign(ARCH_DMA_MINALIGN, jr->op_size); 123 (struct op_ring *)memalign(ARCH_DMA_MINALIGN, jr->op_size);
124 if (!jr->output_ring) 124 if (!jr->output_ring)
125 return -1; 125 return -1;
126 126
127 memset(jr->input_ring, 0, JR_SIZE * sizeof(dma_addr_t)); 127 memset(jr->input_ring, 0, JR_SIZE * sizeof(dma_addr_t));
128 memset(jr->output_ring, 0, jr->op_size); 128 memset(jr->output_ring, 0, jr->op_size);
129 129
130 start_jr0(sec_idx); 130 start_jr0(sec_idx);
131 131
132 jr_initregs(sec_idx); 132 jr_initregs(sec_idx);
133 133
134 return 0; 134 return 0;
135 } 135 }
136 136
/*
 * Reset the software view of the job ring: indices back to zero and all
 * ring/bookkeeping memory cleared. The allocated ring buffers are reused,
 * not freed. Always returns 0.
 */
static int jr_sw_cleanup(uint8_t sec_idx)
{
	struct jobring *jr = &jr0[sec_idx];

	jr->head = 0;
	jr->tail = 0;
	jr->read_idx = 0;
	jr->write_idx = 0;
	memset(jr->info, 0, sizeof(jr->info));
	memset(jr->input_ring, 0, jr->size * sizeof(dma_addr_t));
	memset(jr->output_ring, 0, jr->size * sizeof(struct op_ring));

	return 0;
}
151 151
/*
 * Two-step hardware reset of job ring 0, per the CAAM reset protocol:
 * the first JRCR_RESET write flushes/halts the ring (poll JRINT until the
 * halt is no longer in progress), the second write performs the actual
 * reset (poll JRCR until the reset bit self-clears).
 *
 * Returns 0 on success, -1 on timeout.
 */
static int jr_hw_reset(uint8_t sec_idx)
{
	struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
	uint32_t timeout = 100000;
	uint32_t jrint, jrcr;

	sec_out32(&regs->jrcr, JRCR_RESET);
	do {
		jrint = sec_in32(&regs->jrint);
	} while (((jrint & JRINT_ERR_HALT_MASK) ==
		  JRINT_ERR_HALT_INPROGRESS) && --timeout);

	jrint = sec_in32(&regs->jrint);
	/* NOTE(review): this condition looks inverted — if the loop above
	 * exhausted 'timeout', the halt is typically still IN PROGRESS, in
	 * which case the '!=' test is false and no error is reported.
	 * Verify against the SEC reference manual before changing. */
	if (((jrint & JRINT_ERR_HALT_MASK) !=
	     JRINT_ERR_HALT_INPROGRESS) && timeout == 0)
		return -1;

	timeout = 100000;
	sec_out32(&regs->jrcr, JRCR_RESET);
	do {
		jrcr = sec_in32(&regs->jrcr);
	} while ((jrcr & JRCR_RESET) && --timeout);

	if (timeout == 0)
		return -1;

	return 0;
}
180 180
/* -1 --- error, can't enqueue -- no space available */
/*
 * Submit one descriptor to the job ring input ring and notify the
 * hardware via IRJA. Records the callback/arg so jr_dequeue() can match
 * and complete the job later.
 *
 * NOTE(review): despite the comment above, no free-space check is
 * performed here and the function always returns 0.
 */
static int jr_enqueue(uint32_t *desc_addr,
		      void (*callback)(uint32_t status, void *arg),
		      void *arg, uint8_t sec_idx)
{
	struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
	struct jobring *jr = &jr0[sec_idx];
	int head = jr->head;
	uint32_t desc_word;
	int length = desc_len(desc_addr);
	int i;
#if defined(CONFIG_PHYS_64BIT) && !defined(CONFIG_IMX8M)
	uint32_t *addr_hi, *addr_lo;
#endif

	/* The descriptor must be submitted to SEC block as per endianness
	 * of the SEC Block.
	 * So, if the endianness of Core and SEC block is different, each word
	 * of the descriptor will be byte-swapped.
	 */
	for (i = 0; i < length; i++) {
		desc_word = desc_addr[i];
		sec_out32((uint32_t *)&desc_addr[i], desc_word);
	}

	phys_addr_t desc_phys_addr = virt_to_phys(desc_addr);

	/* Bookkeeping so the completion in jr_dequeue() can be matched
	 * back to this submission. */
	jr->info[head].desc_phys_addr = desc_phys_addr;
	jr->info[head].callback = (void *)callback;
	jr->info[head].arg = arg;
	jr->info[head].op_done = 0;

	/* Flush the info entry to memory (cache-line aligned range) so the
	 * state is consistent before the hardware starts the job. */
	unsigned long start = (unsigned long)&jr->info[head] &
					~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN((unsigned long)&jr->info[head] +
				  sizeof(struct jr_info), ARCH_DMA_MINALIGN);
	flush_dcache_range(start, end);

#if defined(CONFIG_PHYS_64BIT) && !defined(CONFIG_IMX8M)
	/* Write the 64 bit Descriptor address on Input Ring.
	 * The 32 bit high and low part of the address will
	 * depend on endianness of SEC block.
	 */
#ifdef CONFIG_SYS_FSL_SEC_LE
	addr_lo = (uint32_t *)(&jr->input_ring[head]);
	addr_hi = (uint32_t *)(&jr->input_ring[head]) + 1;
#elif defined(CONFIG_SYS_FSL_SEC_BE)
	addr_hi = (uint32_t *)(&jr->input_ring[head]);
	addr_lo = (uint32_t *)(&jr->input_ring[head]) + 1;
#endif /* ifdef CONFIG_SYS_FSL_SEC_LE */

	sec_out32(addr_hi, (uint32_t)(desc_phys_addr >> 32));
	sec_out32(addr_lo, (uint32_t)(desc_phys_addr));

#else
	/* Write the 32 bit Descriptor address on Input Ring. */
	sec_out32(&jr->input_ring[head], desc_phys_addr);
#endif /* ifdef CONFIG_PHYS_64BIT */

	/* Flush the just-written input ring slot to memory for DMA. */
	start = (unsigned long)&jr->input_ring[head] & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)&jr->input_ring[head] +
		    sizeof(dma_addr_t), ARCH_DMA_MINALIGN);
	flush_dcache_range(start, end);

	jr->head = (head + 1) & (jr->size - 1);

	/* Invalidate output ring */
	start = (unsigned long)jr->output_ring &
					~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)jr->output_ring + jr->op_size,
		    ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);

	/* Tell the hardware one new job has been added to the input ring. */
	sec_out32(&regs->irja, 1);

	return 0;
}
258 258
/*
 * Drain completed jobs from the output ring. For each completion entry,
 * find the matching submission (jobs may complete out of order), advance
 * the software tail past all finished jobs, acknowledge the entry to the
 * hardware (ORJR), and invoke the recorded callback with the job status.
 *
 * Returns 0 on success, -1 if a completion entry matches no known
 * submission.
 */
static int jr_dequeue(int sec_idx)
{
	struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
	struct jobring *jr = &jr0[sec_idx];
	int head = jr->head;
	int tail = jr->tail;
	int idx, i, found;
	void (*callback)(uint32_t status, void *arg);
	void *arg = NULL;
#if defined(CONFIG_PHYS_64BIT) && !defined(CONFIG_IMX8M)
	uint32_t *addr_hi, *addr_lo;
#else
	uint32_t *addr;
#endif

	/* Loop while the hardware reports output entries (ORSF) and we
	 * still have jobs outstanding. */
	while (sec_in32(&regs->orsf) && CIRC_CNT(jr->head, jr->tail,
						 jr->size)) {

		found = 0;

		phys_addr_t op_desc;
#if defined(CONFIG_PHYS_64BIT) && !defined(CONFIG_IMX8M)
		/* Read the 64 bit Descriptor address from Output Ring.
		 * The 32 bit high and low part of the address will
		 * depend on endianness of SEC block.
		 */
#ifdef CONFIG_SYS_FSL_SEC_LE
		addr_lo = (uint32_t *)(&jr->output_ring[jr->tail].desc);
		addr_hi = (uint32_t *)(&jr->output_ring[jr->tail].desc) + 1;
#elif defined(CONFIG_SYS_FSL_SEC_BE)
		addr_hi = (uint32_t *)(&jr->output_ring[jr->tail].desc);
		addr_lo = (uint32_t *)(&jr->output_ring[jr->tail].desc) + 1;
#endif /* ifdef CONFIG_SYS_FSL_SEC_LE */

		op_desc = ((u64)sec_in32(addr_hi) << 32) |
			  ((u64)sec_in32(addr_lo));

#else
		/* Read the 32 bit Descriptor address from Output Ring. */
		addr = (uint32_t *)&jr->output_ring[jr->tail].desc;
		op_desc = sec_in32(addr);
#endif /* ifdef CONFIG_PHYS_64BIT */

		uint32_t status = sec_in32(&jr->output_ring[jr->tail].status);

		/* Match the completed descriptor against outstanding jobs
		 * between tail and head (completion may be out of order). */
		for (i = 0; CIRC_CNT(head, tail + i, jr->size) >= 1; i++) {
			idx = (tail + i) & (jr->size - 1);
			if (op_desc == jr->info[idx].desc_phys_addr) {
				found = 1;
				break;
			}
		}

		/* Error condition if match not found */
		if (!found)
			return -1;

		jr->info[idx].op_done = 1;
		callback = (void *)jr->info[idx].callback;
		arg = jr->info[idx].arg;

		/* When the job on tail idx gets done, increment
		 * tail till the point where job completed out of order has
		 * been taken into account
		 */
		if (idx == tail)
			do {
				tail = (tail + 1) & (jr->size - 1);
			} while (jr->info[tail].op_done);

		jr->tail = tail;
		jr->read_idx = (jr->read_idx + 1) & (jr->size - 1);

		/* Acknowledge removal of one entry from the output ring. */
		sec_out32(&regs->orjr, 1);
		jr->info[idx].op_done = 0;

		callback(status, arg);
	}

	return 0;
}
340 340
341 static void desc_done(uint32_t status, void *arg) 341 static void desc_done(uint32_t status, void *arg)
342 { 342 {
343 struct result *x = arg; 343 struct result *x = arg;
344 x->status = status; 344 x->status = status;
345 #ifndef CONFIG_SPL_BUILD 345 #ifndef CONFIG_SPL_BUILD
346 caam_jr_strstatus(status); 346 caam_jr_strstatus(status);
347 #endif 347 #endif
348 x->done = 1; 348 x->done = 1;
349 } 349 }
350 350
351 static inline int run_descriptor_jr_idx(uint32_t *desc, uint8_t sec_idx) 351 static inline int run_descriptor_jr_idx(uint32_t *desc, uint8_t sec_idx)
352 { 352 {
353 unsigned long long timeval = 0; 353 unsigned long long timeval = 0;
354 unsigned long long timeout = CONFIG_USEC_DEQ_TIMEOUT; 354 unsigned long long timeout = CONFIG_USEC_DEQ_TIMEOUT;
355 struct result op; 355 struct result op;
356 int ret = 0; 356 int ret = 0;
357 357
358 memset(&op, 0, sizeof(op)); 358 memset(&op, 0, sizeof(op));
359 359
360 ret = jr_enqueue(desc, desc_done, &op, sec_idx); 360 ret = jr_enqueue(desc, desc_done, &op, sec_idx);
361 if (ret) { 361 if (ret) {
362 debug("Error in SEC enq\n"); 362 debug("Error in SEC enq\n");
363 ret = JQ_ENQ_ERR; 363 ret = JQ_ENQ_ERR;
364 goto out; 364 goto out;
365 } 365 }
366 366
367 while (op.done != 1) { 367 while (op.done != 1) {
368 udelay(1); 368 udelay(1);
369 timeval += 1; 369 timeval += 1;
370 370
371 ret = jr_dequeue(sec_idx); 371 ret = jr_dequeue(sec_idx);
372 if (ret) { 372 if (ret) {
373 debug("Error in SEC deq\n"); 373 debug("Error in SEC deq\n");
374 ret = JQ_DEQ_ERR; 374 ret = JQ_DEQ_ERR;
375 goto out; 375 goto out;
376 } 376 }
377 377
378 if (timeval > timeout) { 378 if (timeval > timeout) {
379 debug("SEC Dequeue timed out\n"); 379 debug("SEC Dequeue timed out\n");
380 ret = JQ_DEQ_TO_ERR; 380 ret = JQ_DEQ_TO_ERR;
381 goto out; 381 goto out;
382 } 382 }
383 } 383 }
384 384
385 if (op.status) { 385 if (op.status) {
386 debug("Error %x\n", op.status); 386 debug("Error %x\n", op.status);
387 ret = op.status; 387 ret = op.status;
388 } 388 }
389 out: 389 out:
390 return ret; 390 return ret;
391 } 391 }
392 392
/* Run a descriptor on the first (default) SEC instance. */
int run_descriptor_jr(uint32_t *desc)
{
	return run_descriptor_jr_idx(desc, 0);
}
397 397
/*
 * Reset job ring 0 of SEC instance 'sec_idx': hardware reset first, then
 * reset the software ring state. Returns -1 if the hardware reset failed.
 */
static inline int jr_reset_sec(uint8_t sec_idx)
{
	if (jr_hw_reset(sec_idx) < 0)
		return -1;

	/* Clean up the jobring structure maintained by software */
	jr_sw_cleanup(sec_idx);

	return 0;
}
408 408
/* Reset the job ring of the first (default) SEC instance. */
int jr_reset(void)
{
	return jr_reset_sec(0);
}
413 413
/*
 * Software-reset the whole SEC block 'sec_idx' via MCFGR: assert SWRST,
 * then additionally assert DMA_RST and poll until each bit self-clears.
 *
 * Returns 0 on success, -1 if either reset fails to complete within the
 * polling budget.
 */
static inline int sec_reset_idx(uint8_t sec_idx)
{
	ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
	uint32_t mcfgr = sec_in32(&sec->mcfgr);
	uint32_t timeout = 100000;

	mcfgr |= MCFGR_SWRST;
	sec_out32(&sec->mcfgr, mcfgr);

	mcfgr |= MCFGR_DMA_RST;
	sec_out32(&sec->mcfgr, mcfgr);
	do {
		mcfgr = sec_in32(&sec->mcfgr);
	} while ((mcfgr & MCFGR_DMA_RST) == MCFGR_DMA_RST && --timeout);

	if (timeout == 0)
		return -1;

	timeout = 100000;
	do {
		mcfgr = sec_in32(&sec->mcfgr);
	} while ((mcfgr & MCFGR_SWRST) == MCFGR_SWRST && --timeout);

	if (timeout == 0)
		return -1;

	return 0;
}
/* Software-reset the first (default) SEC instance. */
int sec_reset(void)
{
	return sec_reset_idx(0);
}
#ifndef CONFIG_SPL_BUILD
/*
 * Instantiate every RNG4 state handle that is not already instantiated,
 * by building and running one instantiation descriptor per handle.
 * Handles already initialized (per the RDSTA register) are skipped.
 *
 * Returns 0 on success, -1 on allocation failure or if a handle is still
 * uninstantiated after its descriptor ran; otherwise the last descriptor
 * run status.
 */
static int instantiate_rng(uint8_t sec_idx)
{
	u32 *desc;
	u32 rdsta_val;
	int ret = 0, sh_idx, size;
	ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx);
	struct rng4tst __iomem *rng =
			(struct rng4tst __iomem *)&sec->rng;

	desc = memalign(ARCH_DMA_MINALIGN, sizeof(uint32_t) * 6);
	if (!desc) {
		printf("cannot allocate RNG init descriptor memory\n");
		return -1;
	}

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, this state handle
		 * was initialized by somebody else, so it's left alone.
		 */
		rdsta_val = sec_in32(&rng->rdsta) & RNG_STATE_HANDLE_MASK;
		if (rdsta_val & (1 << sh_idx))
			continue;

		inline_cnstr_jobdesc_rng_instantiation(desc, sh_idx);
		/* Flush the descriptor so the SEC DMA sees it. */
		size = roundup(sizeof(uint32_t) * 6, ARCH_DMA_MINALIGN);
		flush_dcache_range((unsigned long)desc,
				   (unsigned long)desc + size);

		ret = run_descriptor_jr_idx(desc, sec_idx);

		if (ret)
			printf("RNG: Instantiation failed with error 0x%x\n",
			       ret);

		/* Verify the handle actually came up before continuing. */
		rdsta_val = sec_in32(&rng->rdsta) & RNG_STATE_HANDLE_MASK;
		if (!(rdsta_val & (1 << sh_idx))) {
			free(desc);
			return -1;
		}

		memset(desc, 0, sizeof(uint32_t) * 6);
	}

	free(desc);

	return ret;
}
495 495
496 static u8 get_rng_vid(uint8_t sec_idx) 496 static u8 get_rng_vid(uint8_t sec_idx)
497 { 497 {
498 ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx); 498 ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
499 u32 cha_vid = sec_in32(&sec->chavid_ls); 499 u32 cha_vid = sec_in32(&sec->chavid_ls);
500 500
501 return (cha_vid & SEC_CHAVID_RNG_LS_MASK) >> SEC_CHAVID_LS_RNG_SHIFT; 501 return (cha_vid & SEC_CHAVID_RNG_LS_MASK) >> SEC_CHAVID_LS_RNG_SHIFT;
502 } 502 }
503 503
/*
 * By default, the TRNG runs for 200 clocks per sample;
 * 1200 clocks per sample generates better entropy.
 */
/*
 * Reprogram the TRNG entropy-sampling parameters: enter program mode,
 * set the entropy delay (and derived min frequency count), disable the
 * max frequency check, select raw sampling, then return to run mode.
 */
static void kick_trng(int ent_delay, uint8_t sec_idx)
{
	ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx);
	struct rng4tst __iomem *rng =
			(struct rng4tst __iomem *)&sec->rng;
	u32 val;

	/* put RNG4 into program mode */
	sec_setbits32(&rng->rtmctl, RTMCTL_PRGM);
	/* rtsdctl bits 0-15 contain "Entropy Delay", which defines the
	 * length (in system clocks) of each Entropy sample taken
	 */
	val = sec_in32(&rng->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	sec_out32(&rng->rtsdctl, val);
	/* min. freq. count, equal to 1/4 of the entropy sample length */
	sec_out32(&rng->rtfreqmin, ent_delay >> 2);
	/* disable maximum frequency count */
	sec_out32(&rng->rtfreqmax, RTFRQMAX_DISABLE);
	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker
	 */
	sec_setbits32(&rng->rtmctl, RTMCTL_SAMP_MODE_RAW_ES_SC);
	/* put RNG4 into run mode */
	sec_clrbits32(&rng->rtmctl, RTMCTL_PRGM);
}
536 536
/*
 * Initialize the RNG: repeatedly (re)tune the TRNG entropy delay and try
 * to instantiate the RNG state handles, increasing the delay by 400
 * clocks per attempt until instantiation succeeds or the delay exceeds
 * RTSDCTL_ENT_DLY_MAX. Finally enable the RDB bit for faster operation.
 *
 * Returns 0 on success, or the failing instantiate_rng() result.
 */
static int rng_init(uint8_t sec_idx)
{
	int ret, ent_delay = RTSDCTL_ENT_DLY_MIN;
	ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx);
	struct rng4tst __iomem *rng =
			(struct rng4tst __iomem *)&sec->rng;
	u32 inst_handles;

	do {
		inst_handles = sec_in32(&rng->rdsta) & RNG_STATE_HANDLE_MASK;

		/*
		 * If either of the SH's were instantiated by somebody else
		 * then it is assumed that the entropy
		 * parameters are properly set and thus the function
		 * setting these (kick_trng(...)) is skipped.
		 * Also, if a handle was instantiated, do not change
		 * the TRNG parameters.
		 */
		if (!inst_handles) {
			kick_trng(ent_delay, sec_idx);
			ent_delay += 400;
		}
		/*
		 * if instantiate_rng(...) fails, the loop will rerun
		 * and the kick_trng(...) function will modify the
		 * upper and lower limits of the entropy sampling
		 * interval, leading to a successful initialization of
		 * the RNG.
		 */
		ret = instantiate_rng(sec_idx);
	} while ((ret == -1) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
	if (ret) {
		printf("RNG: Failed to instantiate RNG\n");
		return ret;
	}

	/* Enable RDB bit so that RNG works faster */
	sec_setbits32(&sec->scfgr, SEC_SCFGR_RDBENABLE);

	return ret;
}
#endif
/*
 * Initialize one SEC (CAAM) instance: program the DMA read/write cache
 * attributes, (on CORENET) set up the JR0 LIODNs and PAMU, bring up the
 * job ring, and instantiate the RNG when the block has RNG version >= 4.
 *
 * NOTE(review): job-ring TrustZone ownership (JROWN_NS) is deliberately
 * left at its reset value here. U-Boot runs in the Secure World, and the
 * HABv4 ROM expects DEK blobs encapsulated with JROWN_NS = 0, so job
 * rings must not be reassigned to the non-Secure world.
 *
 * @sec_idx: index of the SEC instance; must be less than
 *           CONFIG_SYS_FSL_MAX_NUM_OF_SEC
 * Return: 0 on success, -1 on failure
 */
int sec_init_idx(uint8_t sec_idx)
{
	ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
	uint32_t mcr = sec_in32(&sec->mcfgr);
	int ret = 0;

#ifdef CONFIG_FSL_CORENET
	uint32_t liodnr;
	uint32_t liodn_ns;
	uint32_t liodn_s;
#endif

	if (!(sec_idx < CONFIG_SYS_FSL_MAX_NUM_OF_SEC)) {
		printf("SEC initialization failed\n");
		return -1;
	}

	/*
	 * Modifying CAAM Read/Write Attributes
	 * For LS2080A
	 * For AXI Write - Cacheable, Write Back, Write allocate
	 * For AXI Read - Cacheable, Read allocate
	 * Only For LS2080a, to solve CAAM coherency issues
	 */
#ifdef CONFIG_ARCH_LS2080A
	mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0xb << MCFGR_AWCACHE_SHIFT);
	mcr = (mcr & ~MCFGR_ARCACHE_MASK) | (0x6 << MCFGR_ARCACHE_SHIFT);
#else
	mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT);
#endif

	/* enable large (64-bit) pointer size in CAAM DMA where supported */
#if defined(CONFIG_PHYS_64BIT) && !defined(CONFIG_IMX8M)
	mcr |= (1 << MCFGR_PS_SHIFT);
#endif
	sec_out32(&sec->mcfgr, mcr);

#ifdef CONFIG_FSL_CORENET
#ifdef CONFIG_SPL_BUILD
	/*
	 * For SPL Build, Set the Liodns in SEC JR0 for
	 * creating PAMU entries corresponding to these.
	 * For normal build, these are set in set_liodns().
	 */
	liodn_ns = CONFIG_SPL_JR0_LIODN_NS & JRNSLIODN_MASK;
	liodn_s = CONFIG_SPL_JR0_LIODN_S & JRSLIODN_MASK;

	liodnr = sec_in32(&sec->jrliodnr[0].ls) &
		 ~(JRNSLIODN_MASK | JRSLIODN_MASK);
	liodnr = liodnr |
		 (liodn_ns << JRNSLIODN_SHIFT) |
		 (liodn_s << JRSLIODN_SHIFT);
	sec_out32(&sec->jrliodnr[0].ls, liodnr);
#else
	/* normal build: read back the LIODNs already set by set_liodns() */
	liodnr = sec_in32(&sec->jrliodnr[0].ls);
	liodn_ns = (liodnr & JRNSLIODN_MASK) >> JRNSLIODN_SHIFT;
	liodn_s = (liodnr & JRSLIODN_MASK) >> JRSLIODN_SHIFT;
#endif
#endif

	ret = jr_init(sec_idx);
	if (ret < 0) {
		printf("SEC initialization failed\n");
		return -1;
	}

#ifdef CONFIG_FSL_CORENET
	ret = sec_config_pamu_table(liodn_ns, liodn_s);
	if (ret < 0)
		return -1;

	pamu_enable();
#endif
#ifndef CONFIG_SPL_BUILD
	/* RNG instantiation is only required/supported on RNG version >= 4 */
	if (get_rng_vid(sec_idx) >= 4) {
		if (rng_init(sec_idx) < 0) {
			printf("SEC%u: RNG instantiation failed\n", sec_idx);
			return -1;
		}
		printf("SEC%u: RNG instantiated\n", sec_idx);
	}
#endif
	return ret;
}
672 663
/*
 * Initialize the default (first) SEC instance.
 *
 * Return: 0 on success, -1 on failure (see sec_init_idx()).
 */
int sec_init(void)
{
	return sec_init_idx(0);
}
677 668
drivers/crypto/fsl/jr.h
1 /* 1 /*
2 * Copyright 2008-2014 Freescale Semiconductor, Inc. 2 * Copyright 2008-2014 Freescale Semiconductor, Inc.
3 * Copyright 2018 NXP 3 * Copyright 2018 NXP
4 * 4 *
5 * SPDX-License-Identifier: GPL-2.0+ 5 * SPDX-License-Identifier: GPL-2.0+
6 * 6 *
7 */ 7 */
8 8
9 #ifndef __JR_H 9 #ifndef __JR_H
10 #define __JR_H 10 #define __JR_H
11 11
12 #include <linux/compiler.h> 12 #include <linux/compiler.h>
13 13
/* Number of slots in each software-maintained job ring */
#define JR_SIZE 4
/* Timeout currently defined as 10 sec */
#define CONFIG_USEC_DEQ_TIMEOUT 10000000U

#define DEFAULT_JR_ID 0
#define DEFAULT_JR_LIODN 0
#define DEFAULT_IRQ 0 /* Interrupts not to be configured */

#define MCFGR_SWRST ((uint32_t)(1)<<31) /* Software Reset */
#define MCFGR_DMA_RST ((uint32_t)(1)<<28) /* DMA Reset */
#define MCFGR_PS_SHIFT 16 /* Pointer Size (large pointers) bit position */
#define MCFGR_AWCACHE_SHIFT 8 /* AXI write cache attributes */
#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)
#define MCFGR_ARCACHE_SHIFT 12 /* AXI read cache attributes */
#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)

#define JR_INTMASK 0x00000001
#define JRCR_RESET 0x01
#define JRINT_ERR_HALT_INPROGRESS 0x4
#define JRINT_ERR_HALT_MASK 0xc
/* Job Ring LIODN register fields: non-secure and secure LIODN values */
#define JRNSLIODN_SHIFT 16
#define JRNSLIODN_MASK 0x0fff0000
#define JRSLIODN_SHIFT 0
#define JRSLIODN_MASK 0x00000fff

/* Job queue error codes */
#define JQ_DEQ_ERR -1
#define JQ_DEQ_TO_ERR -2
#define JQ_ENQ_ERR -3

/* Number of RNG4 state handles per SEC instance */
#define RNG4_MAX_HANDLES 2
46 44
/*
 * One entry of the output (completion) ring. This memory is written by
 * the SEC hardware, hence the packed layout.
 */
struct op_ring {
	u32 desc;	/* completed descriptor, as written back by SEC */
	u32 status;	/* completion status word reported by SEC */
} __packed;
51 49
/* Software bookkeeping for one in-flight job descriptor */
struct jr_info {
	/* invoked when the job completes, with its status and @arg */
	void (*callback)(uint32_t status, void *arg);
	phys_addr_t desc_phys_addr;	/* physical address of the descriptor */
	uint32_t desc_len;		/* descriptor length */
	uint32_t op_done;		/* completion flag for this job */
	void *arg;			/* opaque argument for @callback */
};
59 57
/* Software state of one CAAM job ring (input ring + output ring) */
struct jobring {
	int jq_id;	/* job queue identifier */
	int irq;	/* interrupt number (DEFAULT_IRQ = not configured) */
	int liodn;	/* LIODN associated with this job ring */
	/* Head is the index where software would enq the descriptor in
	 * the i/p ring
	 */
	int head;
	/* Tail index would be used by s/w while enqueuing to determine if
	 * there is any space left in the s/w maintained i/p rings
	 */
	/* Also in case of deq tail will be incremented only in case of
	 * in-order job completion
	 */
	int tail;
	/* Read index of the output ring. It may not match with tail in case
	 * of out of order completion
	 */
	int read_idx;
	/* Write index to input ring. Would be always equal to head */
	int write_idx;
	/* Size of the rings. */
	int size;
	/* Op ring size aligned to cache line size */
	int op_size;
	/* The ip and output rings have to be accessed by SEC. So the
	 * pointers will have to point to the housekeeping region provided
	 * by SEC
	 */
	/* Circular Ring of i/p descriptors */
	u32 *input_ring;
	/* Circular Ring of o/p descriptors */
	/* Circular Ring containing info regarding descriptors in i/p
	 * and o/p ring
	 */
	/* This ring can be on the stack */
	struct jr_info info[JR_SIZE];
	struct op_ring *output_ring;
	/* Offset in CCSR to the SEC engine to which this JR belongs */
	uint32_t sec_offset;

};
102 100
/* Completion record filled in when a descriptor finishes executing */
struct result {
	int done;		/* set once the job has completed */
	uint32_t status;	/* SEC status word for the job */
};
107 105
/* Decode a SEC job status word and print a human-readable message */
void caam_jr_strstatus(u32 status);
/* Submit a descriptor to the job ring; returns 0 on success, negative
 * JQ_* error code otherwise
 */
int run_descriptor_jr(uint32_t *desc);
110 108
111 #endif 109 #endif
112 110