Commit e884a4b1def5d46debbb68b3228522b8bfbd88c8

Authored by Vitaly Andrianov
Committed by Hongmei Gou
1 parent ad6e0b7583

crypto: keystone: keystone sideband crypto driver

This commit adds driver support for the Keystone SA in sideband mode.
The driver registers algorithm implementations in the Kernel crypto
framework. Since the primary use case in kernel is to enable HW offload
of IPSec ESP crypto operations, the driver is currently supporting only
authenticated encryption (AEAD) mode. The following algorithms are currently
supported:

 - authenc(hmac(sha1),cbc(aes))
 - authenc(hmac(sha1),cbc(des3_ede))
 - authenc(hmac(sha1),ecb(cipher_null))

Signed-off-by: Sandeep Nair <sandeep_n@ti.com>
Signed-off-by: Tinku Mannan <tmannan@ti.com>
Signed-off-by: Hao Zhang <hzhang@ti.com>
Signed-off-by: Vitaly Andrianov <vitalya@ti.com>

Showing 8 changed files with 4418 additions and 0 deletions Side-by-side Diff

drivers/crypto/Kconfig
... ... @@ -273,6 +273,22 @@
273 273 the ECB and CBC modes of operation supported by the driver. Also
274 274 accesses made on unaligned boundaries are also supported.
275 275  
  276 +config CRYPTO_DEV_KEYSTONE
  277 + tristate "Support for TI Keystone security accelerator"
  278 + depends on ARCH_KEYSTONE
  279 + select CRYPTO_AES
  280 + select CRYPTO_AES_ARM
  281 + select CRYPTO_SHA1
  282 + select CRYPTO_MD5
  283 + select CRYPTO_ALGAPI
  284 + select CRYPTO_AUTHENC
  285 + select HW_RANDOM
  286 + default y if ARCH_KEYSTONE
  287 + help
  288 + Keystone devices include a security accelerator engine that may be
  289 + used for crypto offload. Select this if you want to use hardware
  290 + acceleration for cryptographic algorithms on these devices.
  291 +
276 292 config CRYPTO_DEV_PICOXCELL
277 293 tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
278 294 depends on ARCH_PICOXCELL && HAVE_CLK
drivers/crypto/Makefile
... ... @@ -8,6 +8,9 @@
8 8 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
9 9 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
10 10 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
  11 +obj-$(CONFIG_CRYPTO_DEV_KEYSTONE) += keystone-sa-driver.o
  12 +keystone-sa-driver-objs := keystone-sa.o keystone-sa-utils.o \
  13 + keystone-sa-lld.o keystone-sa-tbls.o
11 14 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
12 15 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
13 16 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
drivers/crypto/keystone-sa-hlp.h
  1 +/*
  2 + * Keystone crypto accelerator driver
  3 + *
  4 + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
  5 + *
  6 + * Authors: Sandeep Nair
  7 + * Vitaly Andrianov
  8 + *
  9 + * This program is free software; you can redistribute it and/or
  10 + * modify it under the terms of the GNU General Public License
  11 + * version 2 as published by the Free Software Foundation.
  12 + *
  13 + * This program is distributed in the hope that it will be useful, but
  14 + * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16 + * General Public License for more details.
  17 + */
  18 +
  19 +#ifndef _KEYSTONE_SA_HLP_
  20 +#define _KEYSTONE_SA_HLP_
  21 +
  22 +#include <linux/soc/ti/knav_dma.h>
  23 +#include <linux/soc/ti/knav_qmss.h>
  24 +#include <linux/interrupt.h>
  25 +#include <linux/hw_random.h>
  26 +#include <linux/skbuff.h>
  27 +
  28 +/* Enable the below macro for testing with run-time
  29 + * self tests in the cryptographic algorithm manager
  30 + * framework */
  31 +/* #define TEST */
  32 +
  33 +/* For enabling debug prints */
  34 +/* #define DEBUG */
  35 +
  36 +/* Algorithm constants */
  37 +#define MD5_BLOCK_SIZE 64
  38 +#define AES_XCBC_DIGEST_SIZE 16
  39 +
  40 +/* Values for NULL algorithms */
  41 +#define NULL_KEY_SIZE 0
  42 +#define NULL_BLOCK_SIZE 1
  43 +#define NULL_DIGEST_SIZE 0
  44 +#define NULL_IV_SIZE 0
  45 +
  46 +/* Number of 32 bit words in EPIB */
  47 +#define SA_DMA_NUM_EPIB_WORDS 4
  48 +
  49 +/* Number of 32 bit words in PS data */
  50 +#define SA_DMA_NUM_PS_WORDS 16
  51 +
  52 +/* Number of meta data elements passed in descriptor to SA */
  53 +#define SA_NUM_DMA_META_ELEMS 2
  54 +
  55 +/* Maximum number of simultaneous security contexts
  56 + * supported by the driver */
  57 +#define SA_MAX_NUM_CTX 512
  58 +
  59 +/* Encoding used to identify the type of crypto operation
  60 + * performed on the packet when the packet is returned
  61 + * by SA
  62 + */
  63 +#define SA_REQ_SUBTYPE_ENC 0x0001
  64 +#define SA_REQ_SUBTYPE_DEC 0x0002
  65 +#define SA_REQ_SUBTYPE_SHIFT 16
  66 +#define SA_REQ_SUBTYPE_MASK 0xffff
  67 +
  68 +/* Maximum size of authentication tag
  69 + * NOTE: update this macro as we start supporting
  70 + * algorithms with bigger digest size
  71 + */
  72 +#define SA_MAX_AUTH_TAG_SZ SHA1_DIGEST_SIZE
  73 +
/* Memory map of the SA register set.
 * Field order and the RSVD0 gap mirror the hardware layout
 * (8 x 32-bit registers, then a 224-byte reserved hole); do not
 * reorder or repack.
 */
struct sa_mmr_regs {
	u32 PID;		/* peripheral ID / revision */
	u32 EFUSE_EN;
	u32 CMD_STATUS;
	u32 BLKMGR_PA_BLKS;
	u32 PA_FLOWID;
	u32 CDMA_FLOWID;
	u32 PA_ENG_ID;
	u32 CDMA_ENG_ID;
	u8 RSVD0[224];		/* reserved gap up to the context cache regs */
	u32 CTXCACH_CTRL;	/* security context cache control */
	u32 CTXCACH_SC_PTR;
	u32 CTXCACH_SC_ID;
	u32 CTXCACH_MISSCNT;	/* context cache miss counter */
};
  90 +
/*
 * Register overlay structure for the TRNG (true random number
 * generator) module.  Field order mirrors the hardware layout;
 * do not reorder.
 */
struct sa_trng_regs {
	u32 TRNG_OUTPUT_L;	/* random output, low 32 bits */
	u32 TRNG_OUTPUT_H;	/* random output, high 32 bits */
	u32 TRNG_STATUS;
	u32 TRNG_INTMASK;
	u32 TRNG_INTACK;
	u32 TRNG_CONTROL;
	u32 TRNG_CONFIG;
	u32 TRNG_ALARMCNT;
	u32 TRNG_FROENABLE;
	u32 TRNG_FRODETUNE;
	u32 TRNG_ALARMMASK;
	u32 TRNG_ALARMSTOP;
	u32 TRNG_LFSR_L;
	u32 TRNG_LFSR_M;
	u32 TRNG_LFSR_H;
	u32 TRNG_COUNT;
	u32 TRNG_TEST;
};
  113 +
/* Top-level SA register overlay; currently only wraps the MMR block */
struct sa_regs {
	struct sa_mmr_regs mmr;
};
  117 +
/* Driver statistics.  All counters are atomic_t so they can be
 * updated from multiple contexts without additional locking.
 */
struct sa_drv_stats {
	/* Number of data pkts dropped while submitting to CP_ACE */
	atomic_t tx_dropped;
	/* Number of tear-down pkts dropped while submitting to CP_ACE */
	atomic_t sc_tear_dropped;
	/* Number of crypto requests sent to CP_ACE */
	atomic_t tx_pkts;
	/* Number of crypto request completions received from CP_ACE */
	atomic_t rx_pkts;
};
  129 +
/* Crypto driver instance data - per-device state for the SA */
struct keystone_crypto_data {
	struct platform_device *pdev;
	struct clk *clk;
	struct tasklet_struct rx_task;	/* Rx completion processing */
	struct tasklet_struct tx_task;	/* Tx completion processing */
	struct dma_pool *sc_pool;	/* pool for security context buffers */
	struct kmem_cache *dma_req_ctx_cache; /* presumably sa_dma_req_ctx allocations */
	struct sa_regs *regs;		/* mapped SA MMRs */
	struct sa_trng_regs *trng_regs;	/* mapped TRNG registers */

	/* Navigator DMA channels, free-descriptor and completion queues */
	void *rx_chan;
	void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
	void *rx_compl_q;
	void *tx_chan;
	void *tx_submit_q;
	void *tx_compl_q;
	u32 tx_submit_qid;
	u32 tx_compl_qid;
	u32 rx_compl_qid;
	const char *rx_chan_name;
	const char *tx_chan_name;
	u32 tx_queue_depth;
	u32 rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN];
	u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
	u32 rx_pool_size;
	u32 rx_pool_region_id;
	void *rx_pool;
	u32 tx_pool_size;
	u32 tx_pool_region_id;
	void *tx_pool;

	struct hwrng rng;	/* hw_random interface (backed by the TRNG) */

	spinlock_t scid_lock; /* lock for SC-ID allocation */
	spinlock_t trng_lock; /* reading random data from TRNG */

	struct kobject stats_kobj;	/* kobject for exposing driver stats */

	/* Security context data: ID allocation range and next-ID cursor */
	u16 sc_id_start;
	u16 sc_id_end;
	u16 sc_id;

	/* Bitmap to keep track of Security context ID's */
	unsigned long ctx_bm[DIV_ROUND_UP(SA_MAX_NUM_CTX,
					  BITS_PER_LONG)];
	/* Driver stats */
	struct sa_drv_stats stats;
	atomic_t rx_dma_page_cnt; /* N buf from 2nd pool available */
	atomic_t tx_dma_desc_cnt; /* Tx DMA desc-s available */
};
  182 +
  183 +
  184 +
  185 +
/* Packet structure used in Rx.
 * The scatterlist has room for a full skb's fragments plus the
 * meta-data elements (SA_NUM_DMA_META_ELEMS) passed to the SA.
 */
#define SA_SGLIST_SIZE	(MAX_SKB_FRAGS + SA_NUM_DMA_META_ELEMS)
struct sa_packet {
	struct scatterlist sg[SA_SGLIST_SIZE];
	int sg_ents;				/* valid entries in sg[] */
	struct keystone_crypto_data *priv;	/* owning device instance */
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	u32 epib[SA_DMA_NUM_EPIB_WORDS];	/* EPIB words for descriptor */
	u32 psdata[SA_DMA_NUM_PS_WORDS];	/* PS data words for descriptor */
	struct completion complete;
	void *data;
};
  200 +
/* Command label update info: location of one per-request patchable
 * field inside a prebuilt command label.
 */
struct sa_cmdl_param_info {
	u16 index;	/* position within the command label */
	u16 offset;	/* byte offset of the field */
	u16 size;	/* field size in bytes */
};
  207 +
/* Maximum length of Auxiliary data in 32bit words */
#define SA_MAX_AUX_DATA_WORDS	8

/* Describes which command-label fields (sizes, offsets, IVs, AAD,
 * aux key) must be patched for every request on this context.
 */
struct sa_cmdl_upd_info {
	u16 flags;	/* presumably a bitmask of valid entries below - confirm in utils */
	u16 submode;	/* NOTE(review): looks like an enum sa_submode value - confirm */
	struct sa_cmdl_param_info enc_size;
	struct sa_cmdl_param_info enc_size2;
	struct sa_cmdl_param_info enc_offset;
	struct sa_cmdl_param_info enc_iv;
	struct sa_cmdl_param_info enc_iv2;
	struct sa_cmdl_param_info aad;
	struct sa_cmdl_param_info payload;
	struct sa_cmdl_param_info auth_size;
	struct sa_cmdl_param_info auth_size2;
	struct sa_cmdl_param_info auth_offset;
	struct sa_cmdl_param_info auth_iv;
	struct sa_cmdl_param_info aux_key_info;
	u32 aux_key[SA_MAX_AUX_DATA_WORDS];	/* e.g. AES-XCBC subkeys */
};
  228 +
/* Cipher sub-modes referenced by sa_cmdl_upd_info::submode */
enum sa_submode {
	SA_MODE_GEN = 0,	/* generic (non-combined) mode */
	SA_MODE_CCM,
	SA_MODE_GCM,
	SA_MODE_GMAC
};
  235 +
/* TFM Context info */

/* Number of 32bit words appended after the command label
 * in PSDATA to identify the crypto request context.
 * word-0: Request type
 * word-1: pointer to request
 * NOTE(review): only two words are described but four are reserved;
 * the remainder is presumably padding/reserved - confirm against the
 * request-submission code.
 */
#define SA_NUM_PSDATA_CTX_WORDS 4

/* Maximum size of the command label in 32-bit words */
#define SA_MAX_CMDL_WORDS (SA_DMA_NUM_PS_WORDS - SA_NUM_PSDATA_CTX_WORDS)

/* Per-direction security context state kept inside the TFM context */
struct sa_ctx_info {
	u8 *sc;			/* CPU address of the security context */
	dma_addr_t sc_phys;	/* DMA address of the security context */
	u16 sc_id;		/* allocated security context ID */
	u16 cmdl_size;		/* size of the prebuilt command label */
	u32 cmdl[SA_MAX_CMDL_WORDS];	/* prebuilt command label */
	struct sa_cmdl_upd_info cmdl_upd_info;	/* per-request patch info */
	/* Store Auxiliary data such as K2/K3 subkeys in AES-XCBC */
	u32 epib[SA_DMA_NUM_EPIB_WORDS];
	u32 rx_flow;		/* Rx DMA flow for responses */
	u32 rx_compl_qid;	/* Rx completion queue ID */
};
  260 +
/* Per-transform (TFM) context: one security context per operation
 * direction plus a standalone authentication context.
 */
struct sa_tfm_ctx {
	struct keystone_crypto_data *dev_data;	/* owning device */
	struct sa_ctx_info enc;		/* encrypt direction */
	struct sa_ctx_info dec;		/* decrypt direction */
	struct sa_ctx_info auth;	/* authentication only */
};
  267 +
/* Tx DMA callback param: per-request state carried through the Tx
 * submit/complete path (presumably allocated from dma_req_ctx_cache).
 */
struct sa_dma_req_ctx {
	struct keystone_crypto_data *dev_data;
	u32 cmdl[SA_MAX_CMDL_WORDS + SA_NUM_PSDATA_CTX_WORDS];	/* cmdl + ctx words */
	unsigned map_idx;
	struct sg_table sg_tbl;		/* DMA-mapped scatterlist */
	dma_cookie_t cookie;
	struct dma_chan *tx_chan;
	bool pkt;
};
  278 +
  279 +/* Encryption algorithms */
  280 +enum sa_ealg_id {
  281 + SA_EALG_ID_NONE = 0, /* No encryption */
  282 + SA_EALG_ID_NULL, /* NULL encryption */
  283 + SA_EALG_ID_AES_CTR, /* AES Counter mode */
  284 + SA_EALG_ID_AES_F8, /* AES F8 mode */
  285 + SA_EALG_ID_AES_CBC, /* AES CBC mode */
  286 + SA_EALG_ID_DES_CBC, /* DES CBC mode */
  287 + SA_EALG_ID_3DES_CBC, /* 3DES CBC mode */
  288 + SA_EALG_ID_CCM, /* Counter with CBC-MAC mode */
  289 + SA_EALG_ID_GCM, /* Galois Counter mode */
  290 + SA_EALG_ID_LAST
  291 +};
  292 +
  293 +/* Authentication algorithms */
  294 +enum sa_aalg_id {
  295 + SA_AALG_ID_NONE = 0, /* No Authentication */
  296 + SA_AALG_ID_NULL = SA_EALG_ID_LAST, /* NULL Authentication */
  297 + SA_AALG_ID_MD5, /* MD5 mode */
  298 + SA_AALG_ID_SHA1, /* SHA1 mode */
  299 + SA_AALG_ID_SHA2_224, /* 224-bit SHA2 mode */
  300 + SA_AALG_ID_SHA2_256, /* 256-bit SHA2 mode */
  301 + SA_AALG_ID_HMAC_MD5, /* HMAC with MD5 mode */
  302 + SA_AALG_ID_HMAC_SHA1, /* HMAC with SHA1 mode */
  303 + SA_AALG_ID_HMAC_SHA2_224, /* HMAC with 224-bit SHA2 mode */
  304 + SA_AALG_ID_HMAC_SHA2_256, /* HMAC with 256-bit SHA2 mode */
  305 + SA_AALG_ID_GMAC, /* Galois Message
  306 + Authentication Code mode */
  307 + SA_AALG_ID_CMAC, /* Cipher-based Message
  308 + Authentication Code mode */
  309 + SA_AALG_ID_CBC_MAC, /* Cipher Block Chaining */
  310 + SA_AALG_ID_AES_XCBC /* AES Extended
  311 + Cipher Block Chaining */
  312 +};
  313 +
  314 +/* Mode control engine algorithms used to index the
  315 + * mode control instruction tables
  316 + */
  317 +enum sa_eng_algo_id {
  318 + SA_ENG_ALGO_ECB = 0,
  319 + SA_ENG_ALGO_CBC,
  320 + SA_ENG_ALGO_CFB,
  321 + SA_ENG_ALGO_OFB,
  322 + SA_ENG_ALGO_CTR,
  323 + SA_ENG_ALGO_F8,
  324 + SA_ENG_ALGO_GCM,
  325 + SA_ENG_ALGO_GMAC,
  326 + SA_ENG_ALGO_CCM,
  327 + SA_ENG_ALGO_CMAC,
  328 + SA_ENG_ALGO_CBCMAC,
  329 + SA_NUM_ENG_ALGOS
  330 +};
  331 +
/* Per-algorithm engine selection, filled by sa_get_engine_info() */
struct sa_eng_info {
	u8 eng_id;	/* SA engine to route the request to */
	u16 sc_size;	/* security context size for that engine */
};
  336 +
  337 +#define DMA_HAS_PSINFO BIT(31)
  338 +#define DMA_HAS_EPIB BIT(30)
  339 +
  340 +void sa_register_algos(const struct device *dev);
  341 +void sa_unregister_algos(const struct device *dev);
  342 +void sa_tx_completion_process(struct keystone_crypto_data *dev_data);
  343 +void sa_rx_completion_process(struct keystone_crypto_data *dev_data);
  344 +
  345 +void sa_set_sc_auth(u16 alg_id, const u8 *key, u16 key_sz, u8 *sc_buf);
  346 +int sa_set_sc_enc(u16 alg_id, const u8 *key, u16 key_sz,
  347 + u16 aad_len, u8 enc, u8 *sc_buf);
  348 +
  349 +void sa_swiz_128(u8 *in, u8 *out, u16 len);
  350 +void sa_conv_calg_to_salg(const char *cra_name, int *ealg_id, int *aalg_id);
  351 +void sa_get_engine_info(int alg_id, struct sa_eng_info *info);
  352 +int sa_get_hash_size(u16 aalg_id);
  353 +
/* Maximum number of AES rounds (AES-256 uses 14) */
#define AES_MAXNR 14
/* Expanded-key layout expected by the ARM assembly AES routines */
struct asm_aes_key {
	unsigned int rd_key[4 * (AES_MAXNR + 1)];	/* round keys */
	int rounds;					/* rounds for this key size */
};

/* AES encryption functions defined in aes-armv4.S */
asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct asm_aes_key *key);
asmlinkage int private_AES_set_encrypt_key(const unsigned char *user_key,
					   const int bits, struct asm_aes_key *key);
  364 +/*
  365 + * Derive sub-key k1, k2 and k3 used in the AES XCBC MAC mode
  366 + * detailed in RFC 3566
  367 + */
  368 +static inline int sa_aes_xcbc_subkey(u8 *sub_key1, u8 *sub_key2,
  369 + u8 *sub_key3, const u8 *key,
  370 + u16 key_sz)
  371 +{
  372 + struct asm_aes_key enc_key;
  373 +
  374 + if (private_AES_set_encrypt_key(key, (key_sz * 8), &enc_key) == -1) {
  375 + pr_err("%s: failed to set enc key\n", __func__);
  376 + return -1;
  377 + }
  378 +
  379 + if (sub_key1) {
  380 + memset(sub_key1, 0x01, AES_BLOCK_SIZE);
  381 + AES_encrypt(sub_key1, sub_key1, &enc_key);
  382 + }
  383 +
  384 + if (sub_key2) {
  385 + memset(sub_key2, 0x02, AES_BLOCK_SIZE);
  386 + AES_encrypt(sub_key2, sub_key2, &enc_key);
  387 + }
  388 +
  389 + if (sub_key3) {
  390 + memset(sub_key3, 0x03, AES_BLOCK_SIZE);
  391 + AES_encrypt(sub_key3, sub_key3, &enc_key);
  392 + }
  393 +
  394 + return 0;
  395 +}
  396 +
  397 +
  398 +extern const uint8_t sa_eng_aes_enc_mci_tbl[11][3][27];
  399 +extern const uint8_t sa_eng_aes_dec_mci_tbl[11][3][27];
  400 +extern const uint8_t sa_eng_3des_enc_mci_tbl[4][27];
  401 +extern const uint8_t sa_eng_3des_dec_mci_tbl[4][27];
  402 +extern struct device *sa_ks2_dev;
  403 +
  404 +#endif /* _KEYSTONE_SA_HLP_ */
drivers/crypto/keystone-sa-lld.c
  1 +/*
  2 + * Keystone crypto accelerator driver
  3 + *
  4 + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
  5 + *
  6 + * Authors: Sandeep Nair
  7 + * Vitaly Andrianov
  8 + *
  9 + * Contributors:Tinku Mannan
  10 + * Hao Zhang
  11 + *
  12 + * This program is free software; you can redistribute it and/or
  13 + * modify it under the terms of the GNU General Public License
  14 + * version 2 as published by the Free Software Foundation.
  15 + *
  16 + * This program is distributed in the hope that it will be useful, but
  17 + * WITHOUT ANY WARRANTY; without even the implied warranty of
  18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19 + * General Public License for more details.
  20 + */
  21 +
  22 +#include <linux/types.h>
  23 +#include <linux/crypto.h>
  24 +#include <linux/cryptohash.h>
  25 +
  26 +#include <crypto/algapi.h>
  27 +#include <crypto/aead.h>
  28 +#include <crypto/authenc.h>
  29 +#include <crypto/hash.h>
  30 +#include <crypto/internal/hash.h>
  31 +#include <crypto/aes.h>
  32 +#include <crypto/des.h>
  33 +#include <crypto/sha.h>
  34 +#include <crypto/md5.h>
  35 +
  36 +#include "keystone-sa.h"
  37 +#include "keystone-sa-hlp.h"
  38 +
  39 +/* Perform 16 byte swizzling */
  40 +void sa_swiz_128(u8 *in, u8 *out, u16 len)
  41 +{
  42 + u8 data[16];
  43 + int i, j;
  44 +
  45 + for (i = 0; i < len; i += 16) {
  46 + memcpy(data, &in[i], 16);
  47 + for (j = 0; j < 16; j++)
  48 + out[i + j] = data[15 - j];
  49 + }
  50 +}
  51 +
  52 +/* Convert CRA name to internal algorithm ID */
  53 +void sa_conv_calg_to_salg(const char *cra_name, int *ealg_id, int *aalg_id)
  54 +{
  55 + *ealg_id = SA_EALG_ID_NONE;
  56 + *aalg_id = SA_AALG_ID_NONE;
  57 +
  58 + if (!strcmp(cra_name, "authenc(hmac(sha1),cbc(aes))")) {
  59 + *ealg_id = SA_EALG_ID_AES_CBC;
  60 + *aalg_id = SA_AALG_ID_HMAC_SHA1;
  61 + } else if (!strcmp(cra_name, "authenc(hmac(sha1),ecb(cipher_null))")) {
  62 + *ealg_id = SA_EALG_ID_NULL;
  63 + *aalg_id = SA_AALG_ID_HMAC_SHA1;
  64 + } else if (!strcmp(cra_name, "authenc(hmac(sha1),cbc(des3_ede))")) {
  65 + *ealg_id = SA_EALG_ID_3DES_CBC;
  66 + *aalg_id = SA_AALG_ID_HMAC_SHA1;
  67 + } else if (!strcmp(cra_name, "authenc(xcbc(aes),cbc(aes))")) {
  68 + *ealg_id = SA_EALG_ID_AES_CBC;
  69 + *aalg_id = SA_AALG_ID_AES_XCBC;
  70 + } else if (!strcmp(cra_name, "authenc(xcbc(aes),cbc(des3_ede))")) {
  71 + *ealg_id = SA_EALG_ID_3DES_CBC;
  72 + *aalg_id = SA_AALG_ID_AES_XCBC;
  73 + } else if (!strcmp(cra_name, "cbc(aes)")) {
  74 + *ealg_id = SA_EALG_ID_AES_CBC;
  75 + } else if (!strcmp(cra_name, "cbc(des3_ede)")) {
  76 + *ealg_id = SA_EALG_ID_3DES_CBC;
  77 + } else if (!strcmp(cra_name, "hmac(sha1)")) {
  78 + *aalg_id = SA_AALG_ID_HMAC_SHA1;
  79 + } else if (!strcmp(cra_name, "xcbc(aes)")) {
  80 + *aalg_id = SA_AALG_ID_AES_XCBC;
  81 + }
  82 +}
  83 +
  84 +/* Given an algorithm ID get the engine details */
  85 +void sa_get_engine_info(int alg_id, struct sa_eng_info *info)
  86 +{
  87 + switch (alg_id) {
  88 + case SA_EALG_ID_AES_CBC:
  89 + case SA_EALG_ID_3DES_CBC:
  90 + case SA_EALG_ID_DES_CBC:
  91 + info->eng_id = SA_ENG_ID_EM1;
  92 + info->sc_size = SA_CTX_ENC_TYPE1_SZ;
  93 + break;
  94 +
  95 + case SA_EALG_ID_NULL:
  96 + info->eng_id = SA_ENG_ID_NONE;
  97 + info->sc_size = 0;
  98 + break;
  99 +
  100 + case SA_AALG_ID_HMAC_SHA1:
  101 + case SA_AALG_ID_HMAC_MD5:
  102 + info->eng_id = SA_ENG_ID_AM1;
  103 + info->sc_size = SA_CTX_AUTH_TYPE2_SZ;
  104 + break;
  105 +
  106 + case SA_AALG_ID_AES_XCBC:
  107 + case SA_AALG_ID_CMAC:
  108 + info->eng_id = SA_ENG_ID_EM1;
  109 + info->sc_size = SA_CTX_AUTH_TYPE1_SZ;
  110 + break;
  111 +
  112 + default:
  113 + pr_err("%s: unsupported algo\n", __func__);
  114 + info->eng_id = SA_ENG_ID_NONE;
  115 + info->sc_size = 0;
  116 + break;
  117 + }
  118 +}
  119 +
  120 +/* Given an algorithm get the hash size */
  121 +int sa_get_hash_size(u16 aalg_id)
  122 +{
  123 + int hash_size = 0;
  124 +
  125 + switch (aalg_id) {
  126 + case SA_AALG_ID_MD5:
  127 + case SA_AALG_ID_HMAC_MD5:
  128 + hash_size = MD5_DIGEST_SIZE;
  129 + break;
  130 +
  131 + case SA_AALG_ID_SHA1:
  132 + case SA_AALG_ID_HMAC_SHA1:
  133 + hash_size = SHA1_DIGEST_SIZE;
  134 + break;
  135 +
  136 + case SA_AALG_ID_SHA2_224:
  137 + case SA_AALG_ID_HMAC_SHA2_224:
  138 + hash_size = SHA224_DIGEST_SIZE;
  139 + break;
  140 +
  141 + case SA_AALG_ID_SHA2_256:
  142 + case SA_AALG_ID_HMAC_SHA2_256:
  143 + hash_size = SHA256_DIGEST_SIZE;
  144 + break;
  145 +
  146 + case SA_AALG_ID_AES_XCBC:
  147 + case SA_AALG_ID_CMAC:
  148 + hash_size = AES_BLOCK_SIZE;
  149 + break;
  150 +
  151 + default:
  152 + pr_err("%s: unsupported hash\n", __func__);
  153 + break;
  154 + }
  155 +
  156 + return hash_size;
  157 +}
  158 +
/* Initialize an MD5 digest state with the standard RFC 1321
 * initialization vector (A, B, C, D in little-endian word order).
 */
static inline void md5_init(u32 *hash)
{
	/* Load magic initialization constants */
	hash[0] = 0x67452301;
	hash[1] = 0xefcdab89;
	hash[2] = 0x98badcfe;
	hash[3] = 0x10325476;
}
  168 +
  169 +/* Generate HMAC-MD5 intermediate Hash */
  170 +void sa_hmac_md5_get_pad(const u8 *key, u16 key_sz, u32 *ipad, u32 *opad)
  171 +{
  172 + u8 k_ipad[MD5_MESSAGE_BYTES];
  173 + u8 k_opad[MD5_MESSAGE_BYTES];
  174 + int i;
  175 +
  176 + for (i = 0; i < key_sz; i++) {
  177 + k_ipad[i] = key[i] ^ 0x36;
  178 + k_opad[i] = key[i] ^ 0x5c;
  179 + }
  180 + /* Instead of XOR with 0 */
  181 + for (; i < SHA_MESSAGE_BYTES; i++) {
  182 + k_ipad[i] = 0x36;
  183 + k_opad[i] = 0x5c;
  184 + }
  185 +
  186 + /* SHA-1 on k_ipad */
  187 + md5_init(ipad);
  188 + md5_transform(ipad, (u32 *)k_ipad);
  189 +
  190 + /* SHA-1 on k_opad */
  191 + md5_init(opad);
  192 + md5_transform(ipad, (u32 *)k_opad);
  193 +}
  194 +
  195 +/* Generate HMAC-SHA1 intermediate Hash */
  196 +void sa_hmac_sha1_get_pad(const u8 *key, u16 key_sz, u32 *ipad, u32 *opad)
  197 +{
  198 + u32 ws[SHA_WORKSPACE_WORDS];
  199 + u8 k_ipad[SHA_MESSAGE_BYTES];
  200 + u8 k_opad[SHA_MESSAGE_BYTES];
  201 + int i;
  202 +
  203 + for (i = 0; i < key_sz; i++) {
  204 + k_ipad[i] = key[i] ^ 0x36;
  205 + k_opad[i] = key[i] ^ 0x5c;
  206 + }
  207 + /* Instead of XOR with 0 */
  208 + for (; i < SHA_MESSAGE_BYTES; i++) {
  209 + k_ipad[i] = 0x36;
  210 + k_opad[i] = 0x5c;
  211 + }
  212 +
  213 + /* SHA-1 on k_ipad */
  214 + sha_init(ipad);
  215 + sha_transform(ipad, k_ipad, ws);
  216 +
  217 + for (i = 0; i < SHA_DIGEST_WORDS; i++)
  218 + ipad[i] = cpu_to_be32(ipad[i]);
  219 +
  220 + /* SHA-1 on k_opad */
  221 + sha_init(opad);
  222 + sha_transform(opad, k_opad, ws);
  223 +
  224 + for (i = 0; i < SHA_DIGEST_WORDS; i++)
  225 + opad[i] = cpu_to_be32(opad[i]);
  226 +}
  227 +
/* Derive the GHASH key to be used in the GCM algorithm.
 * NOTE(review): not implemented yet - the body is empty, so callers
 * (the GCM/GMAC path in sa_set_sc_enc) copy an uninitialized buffer
 * into the security context.  Must be implemented before enabling
 * GCM/GMAC support.
 */
void sa_calc_ghash(const u8 *key, u16 key_sz, u8 *ghash)
{
}
  232 +
/* Generate HMAC-SHA224 intermediate Hash.
 * NOTE(review): stub - ipad/opad are left untouched, so any
 * HMAC-SHA2-224 context built from this is not functional yet.
 */
void sa_hmac_sha224_get_pad(const u8 *key, u16 key_sz, u32 *ipad, u32 *opad)
{
}
  237 +
/* Generate HMAC-SHA256 intermediate Hash.
 * NOTE(review): stub - ipad/opad are left untouched, so any
 * HMAC-SHA2-256 context built from this is not functional yet.
 */
void sa_hmac_sha256_get_pad(const u8 *key, u16 key_sz, u32 *ipad, u32 *opad)
{
}
  242 +
  243 +
/* Derive the inverse key used in the AES-CBC decryption operation:
 * expands the user key and copies key material from the tail of the
 * expanded schedule (the hardware consumes this instead of the raw
 * key for decryption).  Returns 0 on success, -1 on a bad key size.
 */
static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
{
	struct crypto_aes_ctx ctx;
	int key_pos;

	if (crypto_aes_expand_key(&ctx, key, key_sz)) {
		pr_err("%s: bad key len(%d)\n", __func__, key_sz);
		return -1;
	}

	/* Refer the implementation of crypto_aes_expand_key()
	 * to understand the below logic: key_pos indexes into
	 * ctx.key_enc so that the copy picks up the final round-key
	 * material of the expanded schedule.
	 */
	switch (key_sz) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
		key_pos = key_sz + 24;
		break;

	case AES_KEYSIZE_256:
		/* AES-256 schedule places it 4 entries earlier */
		key_pos = key_sz + 24 - 4;
		break;

	default:
		pr_err("%s: bad key len(%d)\n", __func__, key_sz);
		return -1;
	}

	memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
	return 0;
}
  276 +
  277 +
/* Set Security context for the encryption engine.
 *
 * Fills the encryption-engine portion of a security context buffer:
 * byte 0 is the mode selector, bytes 1..27 the mode control
 * instructions (MCI) chosen from the generated tables by algorithm
 * and key-size index, and the key (or derived key material) is
 * placed at SC_ENC_KEY_OFFSET.
 *
 * @alg_id:  algorithm ID (enum sa_ealg_id / sa_aalg_id)
 * @key:     cipher key
 * @key_sz:  key size in bytes (16/24/32 for AES)
 * @aad_len: AAD length, used only in the GCM case
 * @enc:     non-zero for the encryption direction
 * @sc_buf:  security context buffer to fill
 *
 * Returns 0 on success, -1 if key derivation fails.
 */
int sa_set_sc_enc(u16 alg_id, const u8 *key, u16 key_sz,
		  u16 aad_len, u8 enc, u8 *sc_buf)
{
/* Byte offset for key in encryption security context */
#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
/* Byte offset for Aux-1 in encryption security context */
#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)

	u8 ghash[16]; /* AES block size */
	const u8 *mci = NULL;
	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
	int key_idx = (key_sz >> 3) - 2;

	/* Set Encryption mode selector to crypto processing */
	sc_buf[0] = 0;

	/* Select the mode control instruction */
	switch (alg_id) {
	case SA_EALG_ID_AES_CBC:
		mci = (enc) ? sa_eng_aes_enc_mci_tbl[SA_ENG_ALGO_CBC][key_idx] :
			sa_eng_aes_dec_mci_tbl[SA_ENG_ALGO_CBC][key_idx];
		break;

	case SA_EALG_ID_CCM:
		mci = (enc) ? sa_eng_aes_enc_mci_tbl[SA_ENG_ALGO_CCM][key_idx] :
			sa_eng_aes_dec_mci_tbl[SA_ENG_ALGO_CCM][key_idx];
		break;

	case SA_EALG_ID_AES_F8:
		/* F8 uses the encryption table for both directions */
		mci = sa_eng_aes_enc_mci_tbl[SA_ENG_ALGO_F8][key_idx];
		break;

	case SA_EALG_ID_AES_CTR:
		/* CTR uses the encryption table for both directions */
		mci = sa_eng_aes_enc_mci_tbl[SA_ENG_ALGO_CTR][key_idx];
		break;

	case SA_EALG_ID_GCM:
		mci = (enc) ? sa_eng_aes_enc_mci_tbl[SA_ENG_ALGO_GCM][key_idx] :
			sa_eng_aes_dec_mci_tbl[SA_ENG_ALGO_GCM][key_idx];
		/* Set AAD length at byte offset 23 in Aux-1 */
		sc_buf[SC_ENC_AUX1_OFFSET + 23] = (aad_len << 3);
		/* fall through to GMAC */

	case SA_AALG_ID_GMAC:
		/* NOTE(review): sa_calc_ghash() is currently an empty
		 * stub, so ghash is copied uninitialized here - must be
		 * resolved before GCM/GMAC is enabled.
		 */
		sa_calc_ghash(key, (key_sz << 3), ghash);
		/* copy GCM Hash in Aux-1 */
		memcpy(&sc_buf[SC_ENC_AUX1_OFFSET], ghash, 16);
		break;

	case SA_AALG_ID_AES_XCBC:
	case SA_AALG_ID_CMAC:
		mci = sa_eng_aes_enc_mci_tbl[SA_ENG_ALGO_CMAC][key_idx];
		break;

	case SA_AALG_ID_CBC_MAC:
		mci = sa_eng_aes_enc_mci_tbl[SA_ENG_ALGO_CBCMAC][key_idx];
		break;

	case SA_EALG_ID_3DES_CBC:
		/* 3DES tables are not indexed by key size */
		mci = (enc) ? sa_eng_3des_enc_mci_tbl[SA_ENG_ALGO_CBC] :
			sa_eng_3des_dec_mci_tbl[SA_ENG_ALGO_CBC];
		break;
	}

	/* Set the mode control instructions in security context */
	if (mci)
		memcpy(&sc_buf[1], mci, 27);

	/* For AES-CBC decryption get the inverse key */
	if ((alg_id == SA_EALG_ID_AES_CBC) && !enc) {
		if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
			return -1;
	}
	/* For AES-XCBC-MAC get the subkey */
	else if (alg_id == SA_AALG_ID_AES_XCBC) {
		if (sa_aes_xcbc_subkey(&sc_buf[SC_ENC_KEY_OFFSET], NULL,
				       NULL, key, key_sz))
			return -1;
	}
	/* For all other cases: key is used */
	else
		memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);

	return 0;
}
  364 +
  365 +/* Set Security context for the authentication engine */
  366 +void sa_set_sc_auth(u16 alg_id, const u8 *key, u16 key_sz, u8 *sc_buf)
  367 +{
  368 + u32 ipad[8], opad[8];
  369 + u8 mac_sz, keyed_mac = 0;
  370 +
  371 + /* Set Authentication mode selector to hash processing */
  372 + sc_buf[0] = 0;
  373 +
  374 + /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
  375 + sc_buf[1] = 0x40;
  376 +
  377 + switch (alg_id) {
  378 + case SA_AALG_ID_MD5:
  379 + /* Auth SW ctrl word: bit[4]=1 (basic hash)
  380 + * bit[3:0]=1 (MD5 operation)*/
  381 + sc_buf[1] |= (0x10 | 0x1);
  382 + break;
  383 +
  384 + case SA_AALG_ID_SHA1:
  385 + /* Auth SW ctrl word: bit[4]=1 (basic hash)
  386 + * bit[3:0]=2 (SHA1 operation)*/
  387 + sc_buf[1] |= (0x10 | 0x2);
  388 + break;
  389 +
  390 + case SA_AALG_ID_SHA2_224:
  391 + /* Auth SW ctrl word: bit[4]=1 (basic hash)
  392 + * bit[3:0]=3 (SHA2-224 operation)*/
  393 + sc_buf[1] |= (0x10 | 0x3);
  394 + break;
  395 +
  396 + case SA_AALG_ID_SHA2_256:
  397 + /* Auth SW ctrl word: bit[4]=1 (basic hash)
  398 + * bit[3:0]=4 (SHA2-256 operation)*/
  399 + sc_buf[1] |= (0x10 | 0x4);
  400 + break;
  401 +
  402 + case SA_AALG_ID_HMAC_MD5:
  403 + /* Auth SW ctrl word: bit[4]=0 (HMAC)
  404 + * bit[3:0]=1 (MD5 operation)*/
  405 + sc_buf[1] |= 0x1;
  406 + keyed_mac = 1;
  407 + mac_sz = MD5_DIGEST_SIZE;
  408 + sa_hmac_md5_get_pad(key, key_sz, ipad, opad);
  409 + break;
  410 +
  411 + case SA_AALG_ID_HMAC_SHA1:
  412 + /* Auth SW ctrl word: bit[4]=0 (HMAC)
  413 + * bit[3:0]=2 (SHA1 operation)*/
  414 + sc_buf[1] |= 0x2;
  415 + keyed_mac = 1;
  416 + mac_sz = SHA1_DIGEST_SIZE;
  417 + sa_hmac_sha1_get_pad(key, key_sz, ipad, opad);
  418 + break;
  419 +
  420 + case SA_AALG_ID_HMAC_SHA2_224:
  421 + /* Auth SW ctrl word: bit[4]=0 (HMAC)
  422 + * bit[3:0]=3 (SHA2-224 operation)*/
  423 + sc_buf[1] |= 0x3;
  424 + keyed_mac = 1;
  425 + mac_sz = SHA224_DIGEST_SIZE;
  426 + sa_hmac_sha224_get_pad(key, key_sz, ipad, opad);
  427 + break;
  428 +
  429 + case SA_AALG_ID_HMAC_SHA2_256:
  430 + /* Auth SW ctrl word: bit[4]=0 (HMAC)
  431 + * bit[3:0]=4 (SHA2-256 operation)*/
  432 + sc_buf[1] |= 0x4;
  433 + keyed_mac = 1;
  434 + mac_sz = SHA256_DIGEST_SIZE;
  435 + sa_hmac_sha256_get_pad(key, key_sz, ipad, opad);
  436 + break;
  437 + }
  438 +
  439 + /* Copy the keys or ipad/opad */
  440 + if (keyed_mac) {
  441 + /* Copy ipad to AuthKey */
  442 + memcpy(&sc_buf[32], ipad, mac_sz);
  443 + /* Copy opad to Aux-1 */
  444 + memcpy(&sc_buf[64], opad, mac_sz);
  445 + }
  446 +}
drivers/crypto/keystone-sa-tbls.c
  1 +/*
  2 + * Keystone crypto accelerator driver
  3 + *
  4 + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
  5 + *
  6 + * Authors: Sandeep Nair
  7 + * Vitaly Andrianov
  8 + *
  9 + * This program is free software; you can redistribute it and/or
  10 + * modify it under the terms of the GNU General Public License
  11 + * version 2 as published by the Free Software Foundation.
  12 + *
  13 + * This program is distributed in the hope that it will be useful, but
  14 + * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16 + * General Public License for more details.
  17 + */
  18 +
  19 +#include <linux/types.h>
  20 +
  21 +/************************************************************
  22 + * Note: The below tables are generated.
  23 + * Do not update it manually.
  24 + *
  25 + * Note: This is a special version of MCI file with
  26 + * 3GPP standard modes disabled.
  27 +************************************************************/
/*
 * Mode Control Instructions for the AES encryption path.
 * Generated data (see note above) -- do not edit by hand.
 * NOTE(review): shape is [11][3][27]; the middle index appears to select
 * the AES key size variant (the only bytes that change are 0x80/0x84/0x88
 * and 0x0a/0x4a/0x8a nibbles) -- confirm against the SA engine-info code
 * that indexes this table before relying on that interpretation.
 */
const uint8_t sa_eng_aes_enc_mci_tbl[11][3][27] = {
	{
		{
			0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x21, 0x00, 0x00, 0x80, 0x9a, 0x09, 0x94, 0x7c, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x84, 0x9a, 0x09, 0x94, 0x7c, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x88, 0x9a, 0x09, 0x94, 0x7c, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x21, 0x00, 0x00, 0x80, 0x9a, 0xa5, 0xb4, 0x60, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x84, 0x9a, 0xa5, 0xb4, 0x60, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x88, 0x9a, 0xa5, 0xb4, 0x60, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x21, 0x00, 0x00, 0x80, 0x9a, 0x8f, 0x54, 0x1b, 0x82,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x84, 0x9a, 0x8f, 0x54, 0x1b, 0x82,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x88, 0x9a, 0x8f, 0x54, 0x1b, 0x82,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x21, 0x00, 0x22, 0x3b, 0xa3, 0xfb, 0x19, 0x31, 0x91,
			0x80, 0xa5, 0xc3, 0xa8, 0x89, 0x9e, 0x10, 0x2c, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x22, 0x3b, 0xa3, 0xfb, 0x19, 0x31, 0x91,
			0x84, 0xa5, 0xc3, 0xa8, 0x89, 0x9e, 0x10, 0x2c, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x22, 0x3b, 0xa3, 0xfb, 0x19, 0x31, 0x91,
			0x88, 0xa5, 0xc3, 0xa8, 0x89, 0x9e, 0x10, 0x2c, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x61, 0x00, 0x44, 0x80, 0xa9, 0xfe, 0x83, 0x99, 0x7e,
			0x58, 0x2e, 0x0a, 0x90, 0x71, 0x41, 0x83, 0x9d, 0x63,
			0xaa, 0x0b, 0x7e, 0x9a, 0x78, 0x3a, 0xa3, 0x8b, 0x1e
		},
		{
			0x61, 0x00, 0x44, 0x84, 0xa9, 0xfe, 0x83, 0x99, 0x7e,
			0x58, 0x2e, 0x4a, 0x90, 0x71, 0x41, 0x83, 0x9d, 0x63,
			0xaa, 0x0b, 0x7e, 0x9a, 0x78, 0x3a, 0xa3, 0x8b, 0x1e
		},
		{
			0x61, 0x00, 0x44, 0x88, 0xa9, 0xfe, 0x83, 0x99, 0x7e,
			0x58, 0x2e, 0x8a, 0x90, 0x71, 0x41, 0x83, 0x9d, 0x63,
			0xaa, 0x0b, 0x7e, 0x9a, 0x78, 0x3a, 0xa3, 0x8b, 0x1e
		}
	},
	{
		{
			0x41, 0x00, 0x44, 0x80, 0xa9, 0xfe, 0x83, 0x99, 0x7e,
			0x14, 0x18, 0x39, 0xd4, 0xba, 0xa0, 0xb7, 0xe9, 0xa7,
			0x83, 0xaa, 0x38, 0xb5, 0xe0, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x41, 0x00, 0x44, 0x84, 0xa9, 0xfe, 0x83, 0x99, 0x7e,
			0x14, 0x18, 0x39, 0xd4, 0xba, 0xa0, 0xb7, 0xe9, 0xa7,
			0x83, 0xaa, 0x38, 0xb5, 0xe0, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x41, 0x00, 0x44, 0x88, 0xa9, 0xfe, 0x83, 0x99, 0x7e,
			0x14, 0x18, 0x39, 0xd4, 0xba, 0xa0, 0xb7, 0xe9, 0xa7,
			0x83, 0xaa, 0x38, 0xb5, 0xe0, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x61, 0x00, 0x66, 0x80, 0xa9, 0x8f, 0x80, 0xa9, 0xbe,
			0x80, 0xb9, 0x7e, 0x18, 0x28, 0x0a, 0x9b, 0xe5, 0xc3,
			0x80, 0xbd, 0x6c, 0x15, 0x1a, 0x8e, 0xb0, 0x00, 0x00
		},
		{
			0x61, 0x00, 0x66, 0x84, 0xa9, 0x8f, 0x84, 0xa9, 0xbe,
			0x84, 0xb9, 0x7e, 0x18, 0x28, 0x4a, 0x9b, 0xe5, 0xc3,
			0x84, 0xbd, 0x6c, 0x15, 0x1a, 0x8e, 0xb0, 0x00, 0x00
		},
		{
			0x61, 0x00, 0x66, 0x88, 0xa9, 0x8f, 0x88, 0xa9, 0xbe,
			0x88, 0xb9, 0x7e, 0x18, 0x28, 0x8a, 0x9b, 0xe5, 0xc3,
			0x88, 0xbd, 0x6c, 0x15, 0x1a, 0x8e, 0xb0, 0x00, 0x00
		}
	},
	{
		{
			0x41, 0x00, 0x00, 0xf1, 0x0d, 0x19, 0x10, 0x8d, 0x2c,
			0x12, 0x88, 0x08, 0xa6, 0x4b, 0x7e, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x41, 0x00, 0x00, 0xf1, 0x0d, 0x19, 0x10, 0x8d, 0x2c,
			0x12, 0x88, 0x48, 0xa6, 0x4b, 0x7e, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x41, 0x00, 0x00, 0xf1, 0x0d, 0x19, 0x10, 0x8d, 0x2c,
			0x12, 0x88, 0x88, 0xa6, 0x4b, 0x7e, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x01, 0x00, 0x11, 0x37, 0x91, 0x41, 0x80, 0x9a, 0x4c,
			0x97, 0xec, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x01, 0x00, 0x11, 0x37, 0x91, 0x41, 0x84, 0x9a, 0x4c,
			0x97, 0xec, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x01, 0x00, 0x11, 0x37, 0x91, 0x41, 0x88, 0x9a, 0x4c,
			0x97, 0xec, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	}
};
  217 +
/*
 * Mode Control Instructions for the AES decryption path.
 * Generated data (see note above) -- do not edit by hand.
 * Several groups are all-zero: presumably modes that need no separate
 * decrypt MCI (or are unsupported in this build) -- confirm against the
 * code that selects entries from this table.
 */
const uint8_t sa_eng_aes_dec_mci_tbl[11][3][27] = {
	{
		{
			0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40,
			0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40,
			0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40,
			0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x21, 0x00, 0x00, 0x80, 0x9a, 0xc7, 0x44, 0x0b, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x84, 0x9a, 0xc7, 0x44, 0x0b, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x88, 0x9a, 0xc7, 0x44, 0x0b, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x21, 0x00, 0x00, 0x80, 0x9a, 0xa5, 0xb4, 0x60, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x84, 0x9a, 0xa5, 0xb4, 0x60, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x21, 0x00, 0x00, 0x88, 0x9a, 0xa5, 0xb4, 0x60, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x61, 0x00, 0x44, 0x80, 0xa9, 0xfe, 0x83, 0x99, 0x7e,
			0x58, 0x2e, 0x0a, 0x14, 0x19, 0x07, 0x83, 0x9d, 0x63,
			0xaa, 0x0b, 0x7e, 0x9a, 0x78, 0x3a, 0xa3, 0x8b, 0x1e
		},
		{
			0x61, 0x00, 0x44, 0x84, 0xa9, 0xfe, 0x83, 0x99, 0x7e,
			0x58, 0x2e, 0x4a, 0x14, 0x19, 0x07, 0x83, 0x9d, 0x63,
			0xaa, 0x0b, 0x7e, 0x9a, 0x78, 0x3a, 0xa3, 0x8b, 0x1e
		},
		{
			0x61, 0x00, 0x44, 0x88, 0xa9, 0xfe, 0x83, 0x99, 0x7e,
			0x58, 0x2e, 0x8a, 0x14, 0x19, 0x07, 0x83, 0x9d, 0x63,
			0xaa, 0x0b, 0x7e, 0x9a, 0x78, 0x3a, 0xa3, 0x8b, 0x1e
		}
	},
	{
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x61, 0x00, 0x66, 0x80, 0xa9, 0x8f, 0x80, 0xa9, 0xbe,
			0x80, 0xb9, 0x7e, 0x5c, 0x3e, 0x0b, 0x90, 0x71, 0x82,
			0x80, 0xaa, 0x88, 0x9b, 0xed, 0x7c, 0x14, 0xac, 0x00
		},
		{
			0x61, 0x00, 0x66, 0x84, 0xa9, 0x8f, 0x84, 0xa9, 0xbe,
			0x84, 0xb9, 0x7e, 0x5c, 0x3e, 0x4b, 0x90, 0x71, 0x82,
			0x84, 0xaa, 0x88, 0x9b, 0xed, 0x7c, 0x14, 0xac, 0x00
		},
		{
			0x61, 0x00, 0x66, 0x88, 0xa9, 0x8f, 0x88, 0xa9, 0xbe,
			0x88, 0xb9, 0x7e, 0x5c, 0x3e, 0x8b, 0x90, 0x71, 0x82,
			0x88, 0xaa, 0x88, 0x9b, 0xed, 0x7c, 0x14, 0xac, 0x00
		}
	},
	{
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	},
	{
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		},
		{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
		}
	}
};
  407 +
/*
 * Mode Control Instructions for 3DES encryption.
 * Generated data (see note above) -- do not edit by hand.
 * 4 entries of one 27-byte MCI each (no per-key-size variants, since
 * 3DES has a single key size).
 */
const uint8_t sa_eng_3des_enc_mci_tbl[4][27] = {
	{
		0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	},
	{
		0x20, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	},
	{
		0x20, 0x00, 0x00, 0x85, 0x1a, 0x09, 0x94, 0x7c, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	},
	{
		0x20, 0x00, 0x00, 0x85, 0x1a, 0xa5, 0xb4, 0x60, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	}
};
  430 +
/*
 * Mode Control Instructions for 3DES decryption.
 * Generated data (see note above) -- do not edit by hand.
 */
const uint8_t sa_eng_3des_dec_mci_tbl[4][27] = {
	{
		0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	},
	{
		0x30, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	},
	{
		0x20, 0x00, 0x00, 0x85, 0x1a, 0xc7, 0x44, 0x0b, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	},
	{
		0x20, 0x00, 0x00, 0x85, 0x1a, 0xa5, 0xb4, 0x60, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	}
};
drivers/crypto/keystone-sa-utils.c
Changes suppressed. Click to show
  1 +/*
  2 + * Keystone crypto accelerator driver
  3 + *
  4 + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
  5 + *
  6 + * Authors: Sandeep Nair
  7 + * Vitaly Andrianov
  8 + *
  9 + * Contributors:Tinku Mannan
  10 + * Hao Zhang
  11 + *
  12 + * This program is free software; you can redistribute it and/or
  13 + * modify it under the terms of the GNU General Public License
  14 + * version 2 as published by the Free Software Foundation.
  15 + *
  16 + * This program is distributed in the hope that it will be useful, but
  17 + * WITHOUT ANY WARRANTY; without even the implied warranty of
  18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19 + * General Public License for more details.
  20 + */
  21 +
  22 +#include <linux/clk.h>
  23 +#include <linux/err.h>
  24 +#include <linux/init.h>
  25 +#include <linux/slab.h>
  26 +#include <linux/module.h>
  27 +#include <linux/interrupt.h>
  28 +#include <linux/dmapool.h>
  29 +#include <linux/of.h>
  30 +#include <linux/of_address.h>
  31 +#include <linux/rtnetlink.h>
  32 +#include <linux/dma-mapping.h>
  33 +#include <linux/platform_device.h>
  34 +#include <linux/soc/ti/knav_dma.h>
  35 +#include <linux/soc/ti/knav_qmss.h>
  36 +
  37 +#include <linux/crypto.h>
  38 +#include <linux/hw_random.h>
  39 +#include <linux/cryptohash.h>
  40 +#include <crypto/algapi.h>
  41 +#include <crypto/aead.h>
  42 +#include <crypto/authenc.h>
  43 +#include <crypto/hash.h>
  44 +#include <crypto/internal/hash.h>
  45 +#include <crypto/aes.h>
  46 +#include <crypto/des.h>
  47 +#include <crypto/sha.h>
  48 +#include <crypto/md5.h>
  49 +#include <crypto/scatterwalk.h>
  50 +
  51 +#include "keystone-sa.h"
  52 +#include "keystone-sa-hlp.h"
  53 +
  54 +struct device *sa_ks2_dev;
  55 +
  56 +/* Number of elements in scatterlist */
  57 +static int sg_count(struct scatterlist *sg, int len)
  58 +{
  59 + int sg_nents = 0;
  60 +
  61 + while (sg && (len > 0)) {
  62 + sg_nents++;
  63 + len -= sg->length;
  64 + sg = sg_next(sg);
  65 + }
  66 + return sg_nents;
  67 +}
  68 +
  69 +/* buffer capacity of scatterlist */
  70 +static int sg_len(struct scatterlist *sg)
  71 +{
  72 + int len = 0;
  73 +
  74 + while (sg) {
  75 + len += sg->length;
  76 + sg = sg_next(sg);
  77 + }
  78 + return len;
  79 +}
  80 +
  81 +/* Clone SG list without copying the buffer */
  82 +static inline void sa_clone_sg(struct scatterlist *src,
  83 + struct scatterlist *dst, unsigned int nbytes)
  84 +{
  85 + while ((nbytes > 0) && src && dst) {
  86 + struct page *pg = sg_page(src);
  87 + unsigned int len = min(nbytes, src->length);
  88 +
  89 + sg_set_page(dst, pg, len, src->offset);
  90 + src = sg_next(src);
  91 + dst = sg_next(dst);
  92 + nbytes -= len;
  93 + }
  94 +}
  95 +
  96 +static inline unsigned int sa_scatterwalk_sglen(struct scatter_walk *walk)
  97 +{
  98 + return walk->sg->offset + walk->sg->length - walk->offset;
  99 +}
  100 +
  101 +static inline void *sa_scatterwalk_vaddr(struct scatter_walk *walk)
  102 +{
  103 + return sg_virt(walk->sg) + (walk->offset - walk->sg->offset);
  104 +}
  105 +
/*
 * Move the walk to the next SG entry once the current one has been
 * fully consumed. NOTE(review): @len is unused here; it appears to be
 * kept only for signature symmetry with scatterwalk_done() -- confirm.
 */
static inline void sa_scatterwalk_sgdone(struct scatter_walk *walk, size_t len)
{
	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
}
  111 +
/*
 * scatterwalk_copychunks() equivalent for an already-mapped SG list.
 * Copies @nbytes between @buf and the walk position, crossing SG entry
 * boundaries as needed. @out != 0 copies buf -> SG; @out == 0 copies
 * SG -> buf. The caller must guarantee the walk has at least @nbytes
 * remaining; there is no bounds check here.
 */
static inline void
sa_scatterwalk_copychunks(void *buf,
			  struct scatter_walk *walk, unsigned int nbytes,
			  int out)
{
	unsigned int len_this_sg;

	for (;;) {
		/* Copy at most to the end of the current SG entry */
		len_this_sg = sa_scatterwalk_sglen(walk);

		if (len_this_sg > nbytes)
			len_this_sg = nbytes;

		if (out)
			memcpy(sa_scatterwalk_vaddr(walk), buf,
			       len_this_sg);
		else
			memcpy(buf, sa_scatterwalk_vaddr(walk),
			       len_this_sg);

		scatterwalk_advance(walk, len_this_sg);

		if (nbytes == len_this_sg)
			break;

		buf += len_this_sg;
		nbytes -= len_this_sg;

		/* Step to the next SG entry if this one is exhausted */
		sa_scatterwalk_sgdone(walk, len_this_sg);
	}
}
  144 +
  145 +/* Copy buffer content from list of hwdesc-s to DST SG list */
  146 +static int sa_hwdesc2sg_copy(struct knav_dma_desc **hwdesc,
  147 + struct scatterlist *dst,
  148 + unsigned int src_offset, unsigned int dst_offset,
  149 + size_t len, int num)
  150 +{
  151 + struct scatter_walk walk;
  152 + int sglen, cplen;
  153 + int j = 0;
  154 +
  155 + sglen = hwdesc[0]->desc_info & KNAV_DMA_DESC_PKT_LEN_MASK;
  156 + if (unlikely(len + src_offset > sglen)) {
  157 + pr_err("[%s] src len(%d) less than (%d)\n", __func__,
  158 + sglen, len + src_offset);
  159 + return -1;
  160 + }
  161 +
  162 + sglen = sg_len(dst);
  163 + if (unlikely(len + dst_offset > sglen)) {
  164 + pr_err("[%s] dst len(%d) less than (%d)\n", __func__,
  165 + sglen, len + dst_offset);
  166 + return -1;
  167 + }
  168 +
  169 + scatterwalk_start(&walk, dst);
  170 + scatterwalk_advance(&walk, dst_offset);
  171 + while ((j < num) && (len > 0)) {
  172 + cplen = min((int)len, (int)(hwdesc[j]->buff_len - src_offset));
  173 + if (likely(cplen)) {
  174 + sa_scatterwalk_copychunks(((char *)hwdesc[j]->pad[0] +
  175 + src_offset),
  176 + &walk, cplen, 1);
  177 + }
  178 + len -= cplen;
  179 + j++;
  180 + src_offset = 0;
  181 + }
  182 + return 0;
  183 +}
  184 +
  185 +void sa_scatterwalk_copy(void *buf, struct scatterlist *sg,
  186 + unsigned int start, unsigned int nbytes, int out)
  187 +{
  188 + struct scatter_walk walk;
  189 + unsigned int offset = 0;
  190 +
  191 + if (!nbytes)
  192 + return;
  193 +
  194 + for (;;) {
  195 + scatterwalk_start(&walk, sg);
  196 +
  197 + if (start < offset + sg->length)
  198 + break;
  199 +
  200 + offset += sg->length;
  201 + sg = sg_next(sg);
  202 + }
  203 +
  204 + scatterwalk_advance(&walk, start - offset);
  205 + sa_scatterwalk_copychunks(buf, &walk, nbytes, out);
  206 +}
  207 +
/******************************************************************************
 * Command Label Definitions and utility functions
 ******************************************************************************/

/* Configuration used to build a command label for one transform */
struct sa_cmdl_cfg {
	int enc1st;		/* non-zero: encryption pass runs first */
	int aalg;		/* authentication algorithm ID (SA_AALG_ID_*) */
	u8 enc_eng_id;		/* encryption engine ID, SA_ENG_ID_NONE if unused */
	u8 auth_eng_id;		/* authentication engine ID, SA_ENG_ID_NONE if unused */
	u8 iv_size;		/* cipher IV size in bytes (0 = no IV) */
	const u8 *akey;		/* authentication key (for subkey derivation) */
	u16 akey_len;		/* authentication key length in bytes */
};

/* Bits in sa_cmdl_upd_info.flags: which fields need per-packet update */
#define SA_CMDL_UPD_ENC		0x0001
#define SA_CMDL_UPD_AUTH	0x0002
#define SA_CMDL_UPD_ENC_IV	0x0004
#define SA_CMDL_UPD_AUTH_IV	0x0008
#define SA_CMDL_UPD_AUX_KEY	0x0010
  226 +
/*
 * Format a general command label into @cmdl and record, in @upd_info,
 * which fields must be patched per packet by sa_update_cmdl().
 *
 * The label contains an encryption section and/or an authentication
 * section; @cfg->enc1st selects their order. Returns the label length
 * in bytes (rounded up to 8), or -1 if subkey derivation fails.
 */
static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
			      struct sa_cmdl_upd_info *upd_info)
{
	u8 offset = 0;
	u32 *word_ptr = (u32 *)cmdl;
	int i;

	/* Clear the command label */
	memset(cmdl, 0, (SA_MAX_CMDL_WORDS * sizeof(u32)));

	/* Initialize the command update structure */
	memset(upd_info, 0, sizeof(*upd_info));
	upd_info->enc_size.offset = 2;
	upd_info->enc_size.size = 2;
	upd_info->enc_offset.size = 1;
	upd_info->enc_size2.size = 4;
	upd_info->auth_size.offset = 2;
	upd_info->auth_size.size = 2;
	upd_info->auth_offset.size = 1;

	if (cfg->aalg == SA_AALG_ID_AES_XCBC) {

		/* Derive K2/K3 subkeys */
		if (sa_aes_xcbc_subkey(NULL, (u8 *)&upd_info->aux_key[0],
			(u8 *)&upd_info->aux_key[AES_BLOCK_SIZE/sizeof(u32)],
			cfg->akey, cfg->akey_len))
			return -1;

		/*
		 * Format the key into 32bit CPU words
		 * from a big-endian stream
		 */
		for (i = 0; i < SA_MAX_AUX_DATA_WORDS; i++)
			upd_info->aux_key[i] =
				be32_to_cpu(upd_info->aux_key[i]);
	}

	if (cfg->enc1st) {
		if (cfg->enc_eng_id != SA_ENG_ID_NONE) {
			upd_info->flags |= SA_CMDL_UPD_ENC;
			upd_info->enc_size.index = 0;
			upd_info->enc_offset.index = 1;

			/* Both passes on EM1: push auth to EM2 */
			if ((cfg->enc_eng_id == SA_ENG_ID_EM1) &&
			    (cfg->auth_eng_id == SA_ENG_ID_EM1))
				cfg->auth_eng_id = SA_ENG_ID_EM2;

			/* Encryption command label */
			if (cfg->auth_eng_id != SA_ENG_ID_NONE)
				cmdl[SA_CMDL_OFFSET_NESC] = cfg->auth_eng_id;
			else
				cmdl[SA_CMDL_OFFSET_NESC] = SA_ENG_ID_OUTPORT2;

			/* Encryption modes requiring IV */
			if (cfg->iv_size) {
				upd_info->flags |= SA_CMDL_UPD_ENC_IV;
				upd_info->enc_iv.index =
					SA_CMDL_HEADER_SIZE_BYTES >> 2;
				upd_info->enc_iv.size = cfg->iv_size;

				cmdl[SA_CMDL_OFFSET_LABEL_LEN] =
					SA_CMDL_HEADER_SIZE_BYTES +
					cfg->iv_size;

				cmdl[SA_CMDL_OFFSET_OPTION_CTRL1] =
					(SA_CTX_ENC_AUX2_OFFSET |
					 (cfg->iv_size >> 3));

				offset = SA_CMDL_HEADER_SIZE_BYTES +
						cfg->iv_size;
			} else {
				cmdl[SA_CMDL_OFFSET_LABEL_LEN] =
					SA_CMDL_HEADER_SIZE_BYTES;
				offset = SA_CMDL_HEADER_SIZE_BYTES;
			}
		}

		if (cfg->auth_eng_id != SA_ENG_ID_NONE) {
			upd_info->flags |= SA_CMDL_UPD_AUTH;
			upd_info->auth_size.index = offset >> 2;
			upd_info->auth_offset.index =
				upd_info->auth_size.index + 1;

			cmdl[offset + SA_CMDL_OFFSET_NESC] = SA_ENG_ID_OUTPORT2;

			/* Algorithm with subkeys */
			if ((cfg->aalg == SA_AALG_ID_AES_XCBC) ||
			    (cfg->aalg == SA_AALG_ID_CMAC)) {
				upd_info->flags |= SA_CMDL_UPD_AUX_KEY;
				upd_info->aux_key_info.index =
					(offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;

				cmdl[offset + SA_CMDL_OFFSET_LABEL_LEN] =
					SA_CMDL_HEADER_SIZE_BYTES + 16;
				cmdl[offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
					(SA_CTX_ENC_AUX1_OFFSET | (16 >> 3));

				offset += SA_CMDL_HEADER_SIZE_BYTES + 16;
			} else {
				cmdl[offset + SA_CMDL_OFFSET_LABEL_LEN] =
					SA_CMDL_HEADER_SIZE_BYTES;
				offset += SA_CMDL_HEADER_SIZE_BYTES;
			}
		}
	} else {
		/* Auth first */
		if (cfg->auth_eng_id != SA_ENG_ID_NONE) {
			upd_info->flags |= SA_CMDL_UPD_AUTH;
			upd_info->auth_size.index = 0;
			upd_info->auth_offset.index = 1;

			/* Both passes on EM1: push enc to EM2 */
			if ((cfg->auth_eng_id == SA_ENG_ID_EM1) &&
			    (cfg->enc_eng_id == SA_ENG_ID_EM1))
				cfg->enc_eng_id = SA_ENG_ID_EM2;

			/* Authentication command label */
			if (cfg->enc_eng_id != SA_ENG_ID_NONE)
				cmdl[SA_CMDL_OFFSET_NESC] = cfg->enc_eng_id;
			else
				cmdl[SA_CMDL_OFFSET_NESC] = SA_ENG_ID_OUTPORT2;

			/* Algorithm with subkeys */
			if ((cfg->aalg == SA_AALG_ID_AES_XCBC) ||
			    (cfg->aalg == SA_AALG_ID_CMAC)) {
				upd_info->flags |= SA_CMDL_UPD_AUX_KEY;
				upd_info->aux_key_info.index =
					(SA_CMDL_HEADER_SIZE_BYTES) >> 2;

				cmdl[SA_CMDL_OFFSET_LABEL_LEN] =
					SA_CMDL_HEADER_SIZE_BYTES + 16;
				/*
				 * NOTE(review): 'offset' is still 0 here, so
				 * this is equivalent to indexing without it;
				 * the sibling writes above omit 'offset'.
				 */
				cmdl[offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
					(SA_CTX_ENC_AUX1_OFFSET | (16 >> 3));

				offset = SA_CMDL_HEADER_SIZE_BYTES + 16;
			} else {
				cmdl[SA_CMDL_OFFSET_LABEL_LEN] =
					SA_CMDL_HEADER_SIZE_BYTES;
				offset = SA_CMDL_HEADER_SIZE_BYTES;
			}
		}

		if (cfg->enc_eng_id != SA_ENG_ID_NONE) {
			upd_info->flags |= SA_CMDL_UPD_ENC;
			upd_info->enc_size.index = offset >> 2;
			upd_info->enc_offset.index =
				upd_info->enc_size.index + 1;

			cmdl[offset + SA_CMDL_OFFSET_NESC] = SA_ENG_ID_OUTPORT2;

			/* Encryption modes requiring IV */
			if (cfg->iv_size) {
				upd_info->flags |= SA_CMDL_UPD_ENC_IV;
				upd_info->enc_iv.index =
					(offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
				upd_info->enc_iv.size = cfg->iv_size;

				cmdl[offset + SA_CMDL_OFFSET_LABEL_LEN] =
					SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;

				cmdl[offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
					(SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));

				offset += SA_CMDL_HEADER_SIZE_BYTES +
						cfg->iv_size;
			} else {
				cmdl[offset + SA_CMDL_OFFSET_LABEL_LEN] =
					SA_CMDL_HEADER_SIZE_BYTES;
				offset += SA_CMDL_HEADER_SIZE_BYTES;
			}
		}
	}

	/* Label length must be a multiple of 8 bytes */
	offset = roundup(offset, 8);

	/* Convert the label into 32-bit CPU words for sa_update_cmdl() */
	for (i = 0; i < offset/4; i++)
		word_ptr[i] = be32_to_cpu(word_ptr[i]);

	return offset;
}
  407 +
/* Make 32-bit word from 4 bytes (b0 is the most significant byte) */
#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
				   ((b2) << 8) | (b3))

/*
 * Patch the per-packet fields of a previously formatted command label:
 * encryption/authentication sizes and offsets, IVs, and (for XCBC/CMAC)
 * the auxiliary subkey. The field positions come from @upd_info, filled
 * in by sa_format_cmdl_gen(). Only SA_MODE_GEN is implemented; other
 * submodes just log an error.
 */
static inline void
sa_update_cmdl(struct device *dev, u8 enc_offset, u16 enc_size,	u8 *enc_iv,
	       u8 auth_offset, u16 auth_size, u8 *auth_iv, u8 aad_size,
	       u8 *aad, struct sa_cmdl_upd_info *upd_info, u32 *cmdl)
{
	switch (upd_info->submode) {
	case SA_MODE_GEN:
		if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
			/* Size in the low half-word, offset in the top byte */
			cmdl[upd_info->enc_size.index] &= 0xffff0000;
			cmdl[upd_info->enc_size.index] |= enc_size;
			cmdl[upd_info->enc_offset.index] &= 0x00ffffff;
			cmdl[upd_info->enc_offset.index] |=
				((u32)enc_offset << 24);

			if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
				u32 *data = &cmdl[upd_info->enc_iv.index];

				/* Pack the IV big-endian into label words */
				data[0] = SA_MK_U32(enc_iv[0], enc_iv[1],
						    enc_iv[2], enc_iv[3]);
				data[1] = SA_MK_U32(enc_iv[4], enc_iv[5],
						    enc_iv[6], enc_iv[7]);

				if (upd_info->enc_iv.size > 8) {
					data[2] = SA_MK_U32(enc_iv[8],
							    enc_iv[9],
							    enc_iv[10],
							    enc_iv[11]);
					data[3] = SA_MK_U32(enc_iv[12],
							    enc_iv[13],
							    enc_iv[14],
							    enc_iv[15]);
				}
			}
		}

		if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
			cmdl[upd_info->auth_size.index] &= 0xffff0000;
			cmdl[upd_info->auth_size.index] |= auth_size;
			cmdl[upd_info->auth_offset.index] &= 0x00ffffff;
			cmdl[upd_info->auth_offset.index] |=
				((u32)auth_offset << 24);

			if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
				u32 *data = &cmdl[upd_info->auth_iv.index];

				data[0] = SA_MK_U32(auth_iv[0], auth_iv[1],
						    auth_iv[2], auth_iv[3]);
				data[1] = SA_MK_U32(auth_iv[4], auth_iv[5],
						    auth_iv[6], auth_iv[7]);

				if (upd_info->auth_iv.size > 8) {
					data[2] = SA_MK_U32(auth_iv[8],
						auth_iv[9], auth_iv[10], auth_iv[11]);
					data[3] = SA_MK_U32(auth_iv[12],
						auth_iv[13], auth_iv[14], auth_iv[15]);
				}
			}

			if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
				/* K2 for block-aligned data, K3 otherwise */
				int offset = (auth_size & 0xF) ? 4 : 0;

				memcpy(&cmdl[upd_info->aux_key_info.index],
				       &upd_info->aux_key[offset], 16);
			}
		}
		break;

	case SA_MODE_CCM:
	case SA_MODE_GCM:
	case SA_MODE_GMAC:
	default:
		dev_err(dev, "unsupported mode(%d)\n", upd_info->submode);
		break;

	}
}
  489 +
  490 +/* Format SWINFO words to be sent to SA */
  491 +static void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
  492 + u8 cmdl_present, u8 cmdl_offset, u8 flags, u16 queue_id,
  493 + u8 flow_id, u8 hash_size, u32 *swinfo)
  494 +{
  495 + swinfo[0] = sc_id;
  496 + swinfo[0] |= (flags << 16);
  497 + if (likely(cmdl_present))
  498 + swinfo[0] |= ((cmdl_offset | 0x10) << 20);
  499 + swinfo[0] |= (eng_id << 25);
  500 + swinfo[0] |= 0x40000000;
  501 + swinfo[1] = sc_phys;
  502 + swinfo[2] = (queue_id | (flow_id << 16) | (hash_size << 24));
  503 +}
  504 +
  505 +/******************************************************************************
  506 + * Security context creation functions
  507 + ******************************************************************************/
  508 +
/*
 * Dump the security context buffer as hex (DEBUG builds only; compiles
 * to an empty function otherwise). @dma_addr is printed for correlation
 * with the hardware view of the context.
 */
static void sa_dump_sc(u8 *buf, u32 dma_addr)
{
#ifdef DEBUG
	dev_info(sa_ks2_dev, "Security context dump for %p:\n",
		 (void *)dma_addr);
	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
		       16, 1, buf, SA_CTX_MAX_SZ, false);
#endif
}
  519 +
  520 +/* size of SCCTL structure in bytes */
  521 +#define SA_SCCTL_SZ 8
  522 +
  523 +/* Initialize Security context */
  524 +static int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
  525 + u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
  526 + const char *cra_name, u8 enc,
  527 + u32 *swinfo)
  528 +{
  529 + struct sa_eng_info enc_eng, auth_eng;
  530 + int ealg_id, aalg_id, use_enc = 0;
  531 + int enc_sc_offset, auth_sc_offset;
  532 + u8 php_f, php_e, eng0_f, eng1_f;
  533 + u8 *sc_buf = ctx->sc;
  534 + u16 sc_id = ctx->sc_id;
  535 + u16 aad_len = 0; /* Currently not supporting AEAD algo */
  536 + u8 first_engine;
  537 + u8 hash_size;
  538 +
  539 + memset(sc_buf, 0, SA_CTX_MAX_SZ);
  540 + sa_conv_calg_to_salg(cra_name, &ealg_id, &aalg_id);
  541 + sa_get_engine_info(ealg_id, &enc_eng);
  542 + sa_get_engine_info(aalg_id, &auth_eng);
  543 +
  544 + if (!enc_eng.sc_size && !auth_eng.sc_size)
  545 + return -1;
  546 +
  547 + if (auth_eng.eng_id <= SA_ENG_ID_EM2)
  548 + use_enc = 1;
  549 +
  550 + /* Determine the order of encryption & Authentication contexts */
  551 + if (enc || !use_enc) {
  552 + eng0_f = SA_CTX_SIZE_TO_DMA_SIZE(enc_eng.sc_size);
  553 + eng1_f = SA_CTX_SIZE_TO_DMA_SIZE(auth_eng.sc_size);
  554 + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
  555 + auth_sc_offset = enc_sc_offset + enc_eng.sc_size;
  556 + } else {
  557 + eng0_f = SA_CTX_SIZE_TO_DMA_SIZE(auth_eng.sc_size);
  558 + eng1_f = SA_CTX_SIZE_TO_DMA_SIZE(enc_eng.sc_size);
  559 + auth_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
  560 + enc_sc_offset = auth_sc_offset + auth_eng.sc_size;
  561 + }
  562 +
  563 + php_f = php_e = SA_CTX_DMA_SIZE_64;
  564 +
  565 + /* SCCTL Owner info: 0=host, 1=CP_ACE */
  566 + sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
  567 + /* SCCTL F/E control */
  568 + sc_buf[1] = SA_CTX_SCCTL_MK_DMA_INFO(php_f, eng0_f, eng1_f, php_e);
  569 + memcpy(&sc_buf[2], &sc_id, 2); /*(optional)
  570 + Filled here for reference only */
  571 + memcpy(&sc_buf[4], &ctx->sc_phys, 4); /*(optional)
  572 + Filled here for reference only */
  573 +
  574 + /* Initialize the rest of PHP context */
  575 + memset(sc_buf + SA_SCCTL_SZ, 0, SA_CTX_PHP_PE_CTX_SZ - SA_SCCTL_SZ);
  576 +
  577 + /* Prepare context for encryption engine */
  578 + if (enc_eng.sc_size) {
  579 + if (sa_set_sc_enc(ealg_id, enc_key, enc_key_sz, aad_len,
  580 + enc, &sc_buf[enc_sc_offset]))
  581 + return -1;
  582 + }
  583 +
  584 + /* Prepare context for authentication engine */
  585 + if (auth_eng.sc_size) {
  586 + if (use_enc) {
  587 + if (sa_set_sc_enc(aalg_id, auth_key, auth_key_sz,
  588 + aad_len, 0, &sc_buf[auth_sc_offset]))
  589 + return -1;
  590 + } else
  591 + sa_set_sc_auth(aalg_id, auth_key, auth_key_sz,
  592 + &sc_buf[auth_sc_offset]);
  593 + }
  594 +
  595 + /* Set the ownership of context to CP_ACE */
  596 + sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
  597 +
  598 + /* swizzle the security context */
  599 + sa_swiz_128(sc_buf, sc_buf, SA_CTX_MAX_SZ);
  600 +
  601 + /* Setup SWINFO */
  602 + if (ealg_id == SA_EALG_ID_NULL)
  603 + first_engine = auth_eng.eng_id;
  604 + else
  605 + first_engine = enc ? enc_eng.eng_id : auth_eng.eng_id;
  606 +
  607 + /* TODO: take care of AEAD algorithms */
  608 + hash_size = sa_get_hash_size(aalg_id);
  609 + if (!hash_size)
  610 + return -1;
  611 + /* Round up the tag size to multiple of 8 */
  612 + hash_size = roundup(hash_size, 8);
  613 +
  614 +#ifndef TEST
  615 + sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
  616 + 0, ctx->rx_compl_qid, ctx->rx_flow, hash_size, swinfo);
  617 +#else
  618 + /* For run-time self tests in the cryptographic
  619 + * algorithm manager framework */
  620 + sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
  621 + SA_SW_INFO_FLAG_EVICT, ctx->rx_compl_qid, ctx->rx_flow,
  622 + hash_size, swinfo);
  623 +#endif
  624 + sa_dump_sc(sc_buf, ctx->sc_phys);
  625 +
  626 + return 0;
  627 +}
  628 +
  629 +/* Tear down the Security Context */
  630 +#define SA_SC_TEAR_RETRIES 5
  631 +#define SA_SC_TEAR_DELAY 20 /* msecs */
  632 +static int sa_tear_sc(struct sa_ctx_info *ctx,
  633 + struct keystone_crypto_data *pdata)
  634 +{
  635 + struct device *dev = &pdata->pdev->dev;
  636 + int own_off, cnt = SA_SC_TEAR_RETRIES;
  637 + struct knav_dma_desc *hwdesc;
  638 + struct sa_dma_req_ctx *dma_ctx;
  639 + int ret = 0;
  640 + u32 packet_info;
  641 + int j;
  642 + dma_addr_t dma_addr;
  643 + u32 dma_sz;
  644 +
  645 + dma_ctx = kmem_cache_alloc(pdata->dma_req_ctx_cache, GFP_KERNEL);
  646 + if (!dma_ctx) {
  647 + ret = -ENOMEM;
  648 + goto err;
  649 + }
  650 +
  651 + dma_ctx->dev_data = pdata;
  652 + dma_ctx->pkt = false;
  653 +
  654 + sa_set_swinfo(SA_ENG_ID_OUTPORT2, ctx->sc_id, ctx->sc_phys, 0, 0,
  655 + (SA_SW_INFO_FLAG_TEAR | SA_SW_INFO_FLAG_EVICT |
  656 + SA_SW_INFO_FLAG_NOPD), ctx->rx_compl_qid, ctx->rx_flow, 0,
  657 + &ctx->epib[1]);
  658 +
  659 + ctx->epib[0] = 0;
  660 +
  661 + /* map the packet */
  662 + packet_info = KNAV_DMA_DESC_HAS_EPIB |
  663 + (pdata->tx_compl_qid << KNAV_DMA_DESC_RETQ_SHIFT);
  664 +
  665 + hwdesc = knav_pool_desc_get(pdata->tx_pool);
  666 + if (IS_ERR_OR_NULL(hwdesc)) {
  667 + dev_dbg(dev, "out of tx pool desc\n");
  668 + ret = -ENOBUFS;
  669 + goto err;
  670 + }
  671 +
  672 + memset(hwdesc, 0, sizeof(struct knav_dma_desc));
  673 + for (j = 0; j < 4; j++)
  674 + hwdesc->epib[j] = ctx->epib[j];
  675 +
  676 + hwdesc->packet_info = packet_info;
  677 +
  678 + knav_pool_desc_map(pdata->tx_pool, hwdesc, sizeof(hwdesc),
  679 + &dma_addr, &dma_sz);
  680 +
  681 + hwdesc->pad[0] = (u32)dma_addr;
  682 + hwdesc->pad[1] = dma_sz;
  683 + hwdesc->pad[2] = (u32)dma_ctx;
  684 +
  685 + knav_queue_push(pdata->tx_submit_q, dma_addr,
  686 + sizeof(struct knav_dma_desc), 0);
  687 +
  688 + /*
  689 + * Check that CP_ACE has released the context
  690 + * by making sure that the owner bit is 0
  691 + */
  692 + /*
  693 + * Security context had been swizzled by 128 bits
  694 + * before handing to CP_ACE
  695 + */
  696 + own_off = ((SA_CTX_SCCTL_OWNER_OFFSET/16) * 16)
  697 + + (15 - (SA_CTX_SCCTL_OWNER_OFFSET % 16));
  698 + while (__raw_readb(&ctx->sc[own_off])) {
  699 + if (!--cnt)
  700 + return -EAGAIN;
  701 + msleep_interruptible(SA_SC_TEAR_DELAY);
  702 + }
  703 + return 0;
  704 +
  705 +err:
  706 + atomic_inc(&pdata->stats.sc_tear_dropped);
  707 + if (dma_ctx)
  708 + kmem_cache_free(pdata->dma_req_ctx_cache, dma_ctx);
  709 + return ret;
  710 +}
  711 +
  712 +/************************************************************/
  713 +/* Algorithm interface functions & templates */
  714 +/************************************************************/
/*
 * Algorithm template: couples a crypto framework descriptor with the
 * type used to select it and a registration flag.
 */
struct sa_alg_tmpl {
	u32 type;		/* CRYPTO_ALG_TYPE from <linux/crypto.h> */
	union {
		struct crypto_alg crypto;	/* AEAD / ablkcipher descriptor */
		struct ahash_alg hash;		/* ahash descriptor */
	} alg;
	int registered;		/* non-zero once registered with the crypto API */
};
  723 +
/* Free the per direction context memory */
static void sa_free_ctx_info(struct sa_ctx_info *ctx,
			     struct keystone_crypto_data *data)
{
	unsigned long bn;

	/*
	 * Ask CP_ACE to release the context first; if teardown fails the
	 * hardware may still own the memory, so intentionally leak it
	 * rather than risk reuse while it is still in flight.
	 */
	if (sa_tear_sc(ctx, data)) {
		dev_err(sa_ks2_dev,
			"Failed to tear down context id(%x)\n", ctx->sc_id);
		return;
	}

	/* Return the context id to the bitmap allocator */
	bn = ctx->sc_id - data->sc_id_start;
	spin_lock(&data->scid_lock);
	__clear_bit(bn, data->ctx_bm);
	data->sc_id--;
	spin_unlock(&data->scid_lock);

	/* Release the DMA-coherent context buffer */
	if (ctx->sc) {
		dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
		ctx->sc = NULL;
	}
}
  747 +
  748 +/* Initialize the per direction context memory */
  749 +static int sa_init_ctx_info(struct sa_ctx_info *ctx,
  750 + struct keystone_crypto_data *data)
  751 +{
  752 + unsigned long bn;
  753 + int err;
  754 +
  755 + spin_lock(&data->scid_lock);
  756 + if (data->sc_id > data->sc_id_end) {
  757 + spin_unlock(&data->scid_lock);
  758 + dev_err(&data->pdev->dev, "Out of SC IDs\n");
  759 + return -1;
  760 + }
  761 + bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
  762 + __set_bit(bn, data->ctx_bm);
  763 + data->sc_id++;
  764 + spin_unlock(&data->scid_lock);
  765 +
  766 + ctx->sc_id = (u16)(data->sc_id_start + bn);
  767 +
  768 + ctx->rx_flow = knav_dma_get_flow(data->rx_chan);
  769 + ctx->rx_compl_qid = data->rx_compl_qid;
  770 +
  771 + ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
  772 + if (!ctx->sc) {
  773 + dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
  774 + err = -ENOMEM;
  775 + goto scid_rollback;
  776 + }
  777 +
  778 + return 0;
  779 +
  780 +scid_rollback:
  781 + spin_lock(&data->scid_lock);
  782 + __clear_bit(bn, data->ctx_bm);
  783 + data->sc_id--;
  784 + spin_unlock(&data->scid_lock);
  785 +
  786 + return err;
  787 +}
  788 +
  789 +/* Initialize TFM context */
  790 +static int sa_init_tfm(struct crypto_tfm *tfm)
  791 +{
  792 + struct crypto_alg *alg = tfm->__crt_alg;
  793 + struct sa_alg_tmpl *sa_alg;
  794 + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
  795 + struct keystone_crypto_data *data = dev_get_drvdata(sa_ks2_dev);
  796 + int ret;
  797 +
  798 + if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
  799 + sa_alg = container_of(__crypto_ahash_alg(alg),
  800 + struct sa_alg_tmpl, alg.hash);
  801 + else
  802 + sa_alg = container_of(alg, struct sa_alg_tmpl, alg.crypto);
  803 +
  804 + memset(ctx, 0, sizeof(*ctx));
  805 + ctx->dev_data = data;
  806 +
  807 + if (sa_alg->type == CRYPTO_ALG_TYPE_AHASH) {
  808 + ret = sa_init_ctx_info(&ctx->auth, data);
  809 + if (ret)
  810 + return ret;
  811 + } else if (sa_alg->type == CRYPTO_ALG_TYPE_AEAD) {
  812 + ret = sa_init_ctx_info(&ctx->enc, data);
  813 + if (ret)
  814 + return ret;
  815 + ret = sa_init_ctx_info(&ctx->dec, data);
  816 + if (ret) {
  817 + sa_free_ctx_info(&ctx->enc, data);
  818 + return ret;
  819 + }
  820 + } else if (sa_alg->type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
  821 + ret = sa_init_ctx_info(&ctx->enc, data);
  822 + if (ret)
  823 + return ret;
  824 + ret = sa_init_ctx_info(&ctx->dec, data);
  825 + if (ret) {
  826 + sa_free_ctx_info(&ctx->enc, data);
  827 + return ret;
  828 + }
  829 + }
  830 +
  831 + dev_dbg(sa_ks2_dev, "%s(0x%p) sc-ids(0x%x(0x%x), 0x%x(0x%x))\n",
  832 + __func__, tfm, ctx->enc.sc_id, ctx->enc.sc_phys,
  833 + ctx->dec.sc_id, ctx->dec.sc_phys);
  834 + return 0;
  835 +}
  836 +
/* crypto_tfm init hook for AEAD algorithms; defers to the common init */
static int sa_cra_init_aead(struct crypto_tfm *tfm)
{
	return sa_init_tfm(tfm);
}
  842 +
/* crypto_tfm init hook for ablkcipher algorithms; defers to the common init */
static int sa_cra_init_ablkcipher(struct crypto_tfm *tfm)
{
	return sa_init_tfm(tfm);
}
  848 +
/* crypto_tfm init hook for ahash algorithms; defers to the common init */
static int sa_cra_init_ahash(struct crypto_tfm *tfm)
{
	return sa_init_tfm(tfm);
}
  854 +
  855 +/* Algorithm context teardown */
  856 +static void sa_exit_tfm(struct crypto_tfm *tfm)
  857 +{
  858 + struct crypto_alg *alg = tfm->__crt_alg;
  859 + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
  860 + struct keystone_crypto_data *data = dev_get_drvdata(sa_ks2_dev);
  861 +
  862 + dev_dbg(sa_ks2_dev, "%s(0x%p) sc-ids(0x%x(0x%x), 0x%x(0x%x))\n",
  863 + __func__, tfm, ctx->enc.sc_id, ctx->enc.sc_phys,
  864 + ctx->dec.sc_id, ctx->dec.sc_phys);
  865 +
  866 + if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK)
  867 + == CRYPTO_ALG_TYPE_AEAD) {
  868 + sa_free_ctx_info(&ctx->enc, data);
  869 + sa_free_ctx_info(&ctx->dec, data);
  870 + } else if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK)
  871 + == CRYPTO_ALG_TYPE_AHASH) {
  872 + sa_free_ctx_info(&ctx->auth, data);
  873 + } else if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK)
  874 + == CRYPTO_ALG_TYPE_ABLKCIPHER) {
  875 + sa_free_ctx_info(&ctx->enc, data);
  876 + sa_free_ctx_info(&ctx->dec, data);
  877 + }
  878 +}
  879 +
  880 +/* AEAD algorithm configuration interface function */
  881 +static int sa_aead_setkey(struct crypto_aead *authenc,
  882 + const u8 *key, unsigned int keylen)
  883 +{
  884 + struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
  885 + unsigned int enckey_len, authkey_len, auth_size;
  886 + struct rtattr *rta = (struct rtattr *)key;
  887 + struct crypto_authenc_key_param *param;
  888 + struct sa_eng_info enc_eng, auth_eng;
  889 + int ealg_id, aalg_id, cmdl_len;
  890 + struct sa_cmdl_cfg cfg;
  891 + u8 const *enc_key;
  892 + u8 const *auth_key;
  893 + const char *cra_name;
  894 +
  895 + if (!RTA_OK(rta, keylen))
  896 + goto badkey;
  897 + if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
  898 + goto badkey;
  899 + if (RTA_PAYLOAD(rta) < sizeof(*param))
  900 + goto badkey;
  901 +
  902 + param = RTA_DATA(rta);
  903 + enckey_len = be32_to_cpu(param->enckeylen);
  904 +
  905 + key += RTA_ALIGN(rta->rta_len);
  906 + keylen -= RTA_ALIGN(rta->rta_len);
  907 +
  908 + if (keylen < enckey_len)
  909 + goto badkey;
  910 +
  911 + authkey_len = keylen - enckey_len;
  912 + auth_size = crypto_aead_authsize(authenc);
  913 +
  914 + enc_key = key + authkey_len;
  915 + auth_key = key;
  916 +
  917 + cra_name = crypto_tfm_alg_name(crypto_aead_tfm(authenc));
  918 +
  919 + sa_conv_calg_to_salg(cra_name, &ealg_id, &aalg_id);
  920 + sa_get_engine_info(ealg_id, &enc_eng);
  921 + sa_get_engine_info(aalg_id, &auth_eng);
  922 +
  923 + memset(&cfg, 0, sizeof(cfg));
  924 + cfg.enc1st = 1;
  925 + cfg.aalg = aalg_id;
  926 + cfg.enc_eng_id = enc_eng.eng_id;
  927 + cfg.auth_eng_id = auth_eng.eng_id;
  928 + cfg.iv_size = crypto_aead_ivsize(authenc);
  929 + cfg.akey = auth_key;
  930 + cfg.akey_len = authkey_len;
  931 +
  932 + /* Setup Encryption Security Context & Command label template */
  933 + if (sa_init_sc(&ctx->enc, enc_key, enckey_len, auth_key,
  934 + authkey_len, cra_name, 1, &ctx->enc.epib[1]))
  935 + goto badkey;
  936 +
  937 + cmdl_len = sa_format_cmdl_gen(&cfg,
  938 + (u8 *)ctx->enc.cmdl, &ctx->enc.cmdl_upd_info);
  939 + if ((cmdl_len <= 0) || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  940 + goto badkey;
  941 +
  942 + ctx->enc.cmdl_size = cmdl_len;
  943 +
  944 + /* Setup Decryption Security Context & Command label template */
  945 + if (sa_init_sc(&ctx->dec, enc_key, enckey_len, auth_key,
  946 + authkey_len, cra_name, 0, &ctx->dec.epib[1]))
  947 + goto badkey;
  948 +
  949 + cfg.enc1st = 0;
  950 + cfg.enc_eng_id = enc_eng.eng_id;
  951 + cfg.auth_eng_id = auth_eng.eng_id;
  952 + cmdl_len = sa_format_cmdl_gen(&cfg,
  953 + (u8 *)ctx->dec.cmdl,
  954 + &ctx->dec.cmdl_upd_info);
  955 +
  956 + if ((cmdl_len <= 0) || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  957 + goto badkey;
  958 +
  959 + ctx->dec.cmdl_size = cmdl_len;
  960 + return 0;
  961 +
  962 +badkey:
  963 + dev_err(sa_ks2_dev, "%s: badkey\n", __func__);
  964 + crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
  965 + return -EINVAL;
  966 +}
  967 +
  968 +/* AEAD algorithm configuration interface function */
  969 +static int sa_aead_setauthsize(struct crypto_aead *tfm,
  970 + unsigned int auth_size)
  971 +{
  972 + if (auth_size > crypto_aead_alg(tfm)->maxauthsize)
  973 + return -EINVAL;
  974 + return 0;
  975 +}
  976 +
  977 +dma_addr_t
  978 +sa_prepare_tx_desc(struct keystone_crypto_data *pdata, struct scatterlist *_sg,
  979 + int num_sg, u32 pslen, u32 *psdata,
  980 + u32 epiblen, u32 *epib, struct sa_dma_req_ctx *ctx)
  981 +{
  982 + struct device *dev = &pdata->pdev->dev;
  983 + struct knav_dma_desc *hwdesc = NULL;
  984 + struct scatterlist *sg = _sg;
  985 + u32 packet_len = 0;
  986 + u32 nsg;
  987 + u32 next_desc = 0;
  988 + u32 packet_info;
  989 +
  990 + packet_info = KNAV_DMA_DESC_HAS_EPIB |
  991 + ((pslen / sizeof(u32)) << KNAV_DMA_DESC_PSLEN_SHIFT) |
  992 + (pdata->tx_compl_qid << KNAV_DMA_DESC_RETQ_SHIFT);
  993 +
  994 + for (sg += num_sg - 1, nsg = num_sg; nsg > 0; sg--, nsg--) {
  995 + u32 buflen, orig_len;
  996 + int i;
  997 + dma_addr_t dma_addr;
  998 + u32 dma_sz;
  999 + u32 *out, *in;
  1000 +
  1001 + hwdesc = knav_pool_desc_get(pdata->tx_pool);
  1002 + if (IS_ERR_OR_NULL(hwdesc)) {
  1003 + dev_dbg(dev, "out of tx pool desc\n");
  1004 + /* TODO: we need to return all pooped descriptors */
  1005 + return 0;
  1006 + }
  1007 +
  1008 + buflen = sg_dma_len(sg) & MASK(22);
  1009 + orig_len = (pdata->tx_submit_qid << 28) | buflen;
  1010 + packet_len += buflen;
  1011 +
  1012 + if (nsg == 1) { /* extra fileds for packed descriptor */
  1013 + for (out = hwdesc->epib, in = epib, i = 0;
  1014 + i < epiblen / sizeof(u32); i++)
  1015 + *out++ = *in++;
  1016 + for (out = hwdesc->psdata, in = psdata, i = 0;
  1017 + i < pslen / sizeof(u32); i++)
  1018 + *out++ = *in++;
  1019 + }
  1020 +
  1021 + hwdesc->desc_info = packet_len;
  1022 + hwdesc->tag_info = 0;
  1023 + hwdesc->packet_info = packet_info;
  1024 + hwdesc->buff_len = buflen;
  1025 + hwdesc->buff = sg_dma_address(sg);
  1026 + hwdesc->next_desc = next_desc;
  1027 + hwdesc->orig_len = orig_len;
  1028 + hwdesc->orig_buff = sg_dma_address(sg);
  1029 +
  1030 + knav_pool_desc_map(pdata->tx_pool, hwdesc, sizeof(hwdesc),
  1031 + &dma_addr, &dma_sz);
  1032 +
  1033 + hwdesc->pad[0] = (u32)dma_addr;
  1034 + hwdesc->pad[1] = dma_sz;
  1035 + hwdesc->pad[2] = (u32)ctx;
  1036 +
  1037 + next_desc = (u32)dma_addr;
  1038 +
  1039 + }
  1040 +
  1041 + return (unlikely(hwdesc == NULL)) ? 0 : hwdesc->pad[0];
  1042 +}
  1043 +
  1044 +void sa_tx_completion_process(struct keystone_crypto_data *dev_data)
  1045 +{
  1046 + struct knav_dma_desc *hwdesc = NULL;
  1047 + dma_addr_t dma;
  1048 + struct sa_dma_req_ctx *ctx = NULL;
  1049 + u32 pkt_len;
  1050 + u32 calc_pkt_len;
  1051 +
  1052 + for (;;) {
  1053 + dma = knav_queue_pop(dev_data->tx_compl_q, NULL);
  1054 + if (!dma) {
  1055 + dev_dbg(sa_ks2_dev, "no desc in the queue %d\n",
  1056 + dev_data->tx_compl_qid);
  1057 + break;
  1058 + }
  1059 +
  1060 + ctx = NULL;
  1061 + pkt_len = 0;
  1062 + calc_pkt_len = 0;
  1063 +
  1064 + do {
  1065 + hwdesc = knav_pool_desc_unmap(dev_data->tx_pool, dma,
  1066 + sizeof(hwdesc));
  1067 + if (!hwdesc) {
  1068 + pr_err("failed to unmap descriptor 0x%08x\n",
  1069 + dma);
  1070 + break;
  1071 + }
  1072 + /* take the req_ctx from the first descriptor */
  1073 + if (!ctx) {
  1074 + ctx = (struct sa_dma_req_ctx
  1075 + *)hwdesc->pad[2];
  1076 + pkt_len = hwdesc->desc_info &
  1077 + KNAV_DMA_DESC_PKT_LEN_MASK;
  1078 + }
  1079 + calc_pkt_len += hwdesc->buff_len;
  1080 + /* do we need to unmap buffer here, or will do it
  1081 + * later
  1082 + */
  1083 +
  1084 + dma = hwdesc->next_desc;
  1085 +
  1086 + knav_pool_desc_put(dev_data->tx_pool, hwdesc);
  1087 + } while (dma);
  1088 +
  1089 + if (pkt_len != calc_pkt_len)
  1090 + pr_err("[%s] calculated packet length doesn't match %d/%d\n",
  1091 + __func__, calc_pkt_len, pkt_len);
  1092 +
  1093 + if ((pkt_len > 0) && ctx) {
  1094 + dma_unmap_sg(&ctx->dev_data->pdev->dev, ctx->sg_tbl.sgl,
  1095 + ctx->sg_tbl.nents, DMA_TO_DEVICE);
  1096 +
  1097 + if (likely(ctx->pkt)) {
  1098 + atomic_add(ctx->sg_tbl.nents,
  1099 + &ctx->dev_data->tx_dma_desc_cnt);
  1100 + atomic_inc(&ctx->dev_data->stats.tx_pkts);
  1101 + }
  1102 +
  1103 + if (likely(ctx->sg_tbl.sgl))
  1104 + sg_free_table(&ctx->sg_tbl);
  1105 + }
  1106 +
  1107 + if (ctx)
  1108 + kmem_cache_free(ctx->dev_data->dma_req_ctx_cache, ctx);
  1109 + }
  1110 +}
  1111 +
/*
 * Process one received packet (its full descriptor chain).
 *
 * PSDATA word 0 of the first descriptor carries the crypto alg type and
 * request sub-type; word 1 carries the originating request pointer.
 * For AEAD: on encrypt, append the engine-computed tag to the output;
 * on decrypt, compare the computed tag against the one in the source.
 * Finally copy the payload into req->dst and free the RX buffers.
 */
static void sa_rx_desc_process(struct keystone_crypto_data *dev_data,
			       struct knav_dma_desc **hwdesc, int num)
{
	int j;
	unsigned int alg_type;
	u32 req_sub_type;

	alg_type = hwdesc[0]->psdata[0] & CRYPTO_ALG_TYPE_MASK;
	req_sub_type = hwdesc[0]->psdata[0] >> SA_REQ_SUBTYPE_SHIFT;

	if (likely(alg_type == CRYPTO_ALG_TYPE_AEAD)) {
		int auth_words, auth_size, iv_size, enc_len, enc_offset, i;
		struct aead_request *req;
		struct crypto_aead *tfm;
		int enc, err = 0;

		req = (struct aead_request *)hwdesc[0]->psdata[1];
		tfm = crypto_aead_reqtfm(req);
		auth_size = crypto_aead_authsize(tfm);
		iv_size = crypto_aead_ivsize(tfm);
		enc_offset = req->assoclen + iv_size;

		if (req_sub_type == SA_REQ_SUBTYPE_ENC) {
			enc_len = req->cryptlen;
			enc = 1;
		} else if (req_sub_type == SA_REQ_SUBTYPE_DEC) {
			/* on decrypt, cryptlen includes the tag */
			enc_len = req->cryptlen - auth_size;
			enc = 0;
		} else {
			err = -EBADMSG;
			goto aead_err;
		}

		/* NOTE: We receive the tag as host endian 32bit words;
		 * convert to big-endian byte order for comparison/copy.
		 */
		auth_words = auth_size/sizeof(u32);

		for (i = 2; i < (auth_words + SA_NUM_PSDATA_CTX_WORDS); i++)
			hwdesc[0]->psdata[i] = htonl(hwdesc[0]->psdata[i]);

		/* if encryption, copy the authentication tag */
		if (enc) {
			sa_scatterwalk_copy(
				&(hwdesc[0]->psdata[SA_NUM_PSDATA_CTX_WORDS]),
				req->dst, enc_len, auth_size, 1);
#ifdef DEBUG
			dev_info(sa_ks2_dev, "computed tag:\n");
			print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
				       16, 1,
				       &(hwdesc[0]->psdata[SA_NUM_PSDATA_CTX_WORDS]),
				       auth_size, false);
#endif
		} else {
			/* Verify the authentication tag */
			u8 auth_tag[SA_MAX_AUTH_TAG_SZ];

			sa_scatterwalk_copy(auth_tag, req->src, enc_len,
					    auth_size, 0);

#ifdef DEBUG
			dev_info(sa_ks2_dev, "expected tag:\n");
			print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
				       16, 1, auth_tag, auth_size, false);
			dev_info(sa_ks2_dev, "computed tag:\n");
			print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
				       16, 1,
				       &(hwdesc[0]->psdata[SA_NUM_PSDATA_CTX_WORDS]),
				       auth_size, false);
#endif

			/* NOTE(review): non-constant-time tag compare;
			 * consider crypto_memneq to avoid timing leaks.
			 */
			err = memcmp(&(hwdesc[0]->psdata[SA_NUM_PSDATA_CTX_WORDS]),
				     auth_tag, auth_size) ? -EBADMSG : 0;
			if (unlikely(err))
				goto aead_err;
		}

		/* Copy the encrypted/decrypted data */
		if (unlikely(sa_hwdesc2sg_copy(hwdesc, req->dst,
					       enc_offset, 0, enc_len, num)))
			err = -EBADMSG;

aead_err:
		aead_request_complete(req, err);
	}

	/* free buffers here: page-sized buffers came from the page pool,
	 * anything else from kmalloc
	 */
	for (j = 0; j < num; j++) {
		if (hwdesc[j]->orig_len == PAGE_SIZE) {
			__free_page((struct page *)hwdesc[j]->pad[1]);
			atomic_dec(&dev_data->rx_dma_page_cnt);
		} else
			kfree((void *)hwdesc[j]->pad[0]);
	}

	atomic_inc(&dev_data->stats.rx_pkts);
}
  1205 +
  1206 +void sa_rx_completion_process(struct keystone_crypto_data *dev_data)
  1207 +{
  1208 + struct knav_dma_desc *hwdesc[MAX_SKB_FRAGS];
  1209 + int j, desc_num;
  1210 + dma_addr_t dma;
  1211 + u32 pkt_len;
  1212 + u32 calc_pkt_len;
  1213 + int wait4pkt = 1;
  1214 +
  1215 + for (;;) {
  1216 + dma = knav_queue_pop(dev_data->rx_compl_q, NULL);
  1217 + if (!dma) {
  1218 + dev_dbg(sa_ks2_dev, "no desc in the queue %d\n",
  1219 + dev_data->rx_compl_qid);
  1220 + break;
  1221 + }
  1222 +
  1223 + pkt_len = 0;
  1224 + calc_pkt_len = 0;
  1225 + wait4pkt = 1;
  1226 + desc_num = 0;
  1227 +
  1228 + do {
  1229 + hwdesc[desc_num] = knav_pool_desc_unmap(dev_data->rx_pool, dma,
  1230 + sizeof(hwdesc));
  1231 + if (!hwdesc[desc_num]) {
  1232 + pr_err("failed to unmap descriptor 0x%08x\n",
  1233 + dma);
  1234 + break;
  1235 + }
  1236 +
  1237 + if (hwdesc[desc_num]->orig_len == PAGE_SIZE) {
  1238 + dma_unmap_page(sa_ks2_dev,
  1239 + hwdesc[desc_num]->orig_buff,
  1240 + PAGE_SIZE,
  1241 + DMA_FROM_DEVICE);
  1242 + } else {
  1243 + dma_unmap_single(sa_ks2_dev,
  1244 + hwdesc[desc_num]->orig_buff,
  1245 + dev_data->rx_buffer_sizes[0],
  1246 + DMA_FROM_DEVICE);
  1247 + }
  1248 +
  1249 + /* take the req_ctx from the first descriptor */
  1250 + if (wait4pkt) {
  1251 + pkt_len = hwdesc[desc_num]->desc_info &
  1252 + KNAV_DMA_DESC_PKT_LEN_MASK;
  1253 + wait4pkt = 0;
  1254 + }
  1255 + calc_pkt_len += hwdesc[desc_num]->buff_len;
  1256 +
  1257 + dma = hwdesc[desc_num]->next_desc;
  1258 + desc_num++;
  1259 + } while (dma);
  1260 +
  1261 + if (pkt_len != calc_pkt_len)
  1262 + pr_err("[%s] calculated packet length doesn't match %d/%d\n",
  1263 + __func__, calc_pkt_len, pkt_len);
  1264 +
  1265 + /* retrieve data and copy it to the destination sg list */
  1266 + sa_rx_desc_process(dev_data, hwdesc, desc_num);
  1267 +
  1268 + /* return descriptor to the pool */
  1269 + for (j = 0; j < desc_num; j++)
  1270 + knav_pool_desc_put(dev_data->tx_pool, hwdesc[j]);
  1271 +
  1272 + /* increment rx packet counter */
  1273 + }
  1274 +}
  1275 +
/*
 * sa_aead_perform() - common AEAD encrypt/decrypt submission path
 * @req: AEAD request from the crypto framework
 * @iv:  initialization vector placed between assoc data and payload
 * @enc: non-zero for encryption, zero for decryption
 *
 * Builds a scatterlist of assoc data + IV + payload, patches the
 * pre-formatted command label, stores the request pointer in PSDATA and
 * pushes the packet to the SA submit queue.  Completion is signalled
 * asynchronously from the RX completion path.
 *
 * Returns -EINPROGRESS once queued, or a negative error code.
 */
static int sa_aead_perform(struct aead_request *req, u8 *iv, int enc)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
	struct sa_ctx_info *sa_ctx = enc ? &ctx->enc : &ctx->dec;
	dma_addr_t desc_dma_addr;
	struct keystone_crypto_data *pdata = dev_get_drvdata(sa_ks2_dev);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	u8 enc_offset = req->assoclen + ivsize;
	struct sa_dma_req_ctx *req_ctx = NULL;
	int sg_nents;
	int assoc_sgents, src_sgents;
	int psdata_offset, ret = 0;
	u8 auth_offset = 0;
	u8 *auth_iv = NULL;
	u8 *aad = NULL;
	u8 aad_len = 0;
	int sg_idx = 0;
	u16 enc_len;
	u16 auth_len;
	u32 req_type;
	int n_bufs;

	/* May only sleep when the framework allows it */
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		GFP_KERNEL : GFP_ATOMIC;

	enc_len = req->cryptlen;

	/* req->cryptlen includes authsize when decrypting */
	if (!enc)
		enc_len -= crypto_aead_authsize(tfm);

	auth_len = req->assoclen + ivsize + enc_len;

	/* Count scatterlist entries for assoc data and payload */
	assoc_sgents = sg_count(req->assoc, req->assoclen);
	sg_nents = assoc_sgents;
	src_sgents = sg_count(req->src, enc_len);
	sg_nents += src_sgents;

	/* the IV occupies one extra scatterlist entry of its own */
	if (likely(ivsize))
		sg_nents += 1;

	/* Reserve TX descriptors up front; back off if exhausted */
	if (unlikely(atomic_sub_return(sg_nents, &pdata->tx_dma_desc_cnt)
		     < 0)) {
		ret = -EBUSY;
		goto err_0;
	}

	/* Estimate the extra RX pages the reply will need beyond the
	 * first (smaller) RX buffer
	 */
	n_bufs = auth_len - pdata->rx_buffer_sizes[0];

	n_bufs = (n_bufs <= 0) ? 0 :
		DIV_ROUND_UP(n_bufs, pdata->rx_buffer_sizes[1]);

	if (unlikely(atomic_read(&pdata->rx_dma_page_cnt) < n_bufs)) {
		ret = -EBUSY;
		goto err_0;
	}

	req_ctx = kmem_cache_alloc(pdata->dma_req_ctx_cache, flags);

	if (unlikely(req_ctx == NULL)) {
		ret = -ENOMEM;
		goto err_0;
	}

	if (unlikely(sg_alloc_table(&req_ctx->sg_tbl, sg_nents, flags))) {
		ret = -ENOMEM;
		goto err_1;
	}

	memcpy(req_ctx->cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
	/* Update Command Label with this request's offsets/lengths/IV */
	sa_update_cmdl(sa_ks2_dev, enc_offset, enc_len,
		       iv, auth_offset, auth_len,
		       auth_iv, aad_len, aad,
		       &sa_ctx->cmdl_upd_info, req_ctx->cmdl);

	/* Last 2 words in PSDATA will have the crypto alg type &
	 * crypto request pointer
	 */
	req_type = CRYPTO_ALG_TYPE_AEAD;
	if (enc)
		req_type |= (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
	else
		req_type |= (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
	psdata_offset = sa_ctx->cmdl_size/sizeof(u32);
	/* Append the type of request */
	req_ctx->cmdl[psdata_offset++] = req_type;
	/* Append the pointer to request (32-bit platform assumption —
	 * NOTE(review): truncates on 64-bit; confirm keystone is LPAE/32-bit
	 * only)
	 */
	req_ctx->cmdl[psdata_offset] = (u32)req;

#ifdef DEBUG
	dev_info(sa_ks2_dev, "cmdl:\n");
	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
		       16, 4, req_ctx->cmdl, sa_ctx->cmdl_size + 8, false);
#endif
	/* clone the assoc sg list */
	if (likely(req->assoclen)) {
		sa_clone_sg(req->assoc, &req_ctx->sg_tbl.sgl[sg_idx],
			    req->assoclen);
		sg_idx += assoc_sgents;
	}

	if (likely(ivsize))
		sg_set_buf(&req_ctx->sg_tbl.sgl[sg_idx++], iv, ivsize);

	/* clone the src sg list */
	if (likely(enc_len)) {
		sa_clone_sg(req->src, &req_ctx->sg_tbl.sgl[sg_idx], enc_len);
		sg_idx += src_sgents;
	}

	/* map the packet */
	req_ctx->sg_tbl.nents = dma_map_sg(sa_ks2_dev, req_ctx->sg_tbl.sgl,
					   sg_nents, DMA_TO_DEVICE);

	if (unlikely(req_ctx->sg_tbl.nents != sg_nents)) {
		dev_warn_ratelimited(sa_ks2_dev, "failed to map tx pkt\n");
		ret = -EIO;
		goto err;
	}

	req_ctx->dev_data = pdata;
	req_ctx->pkt = true;

	/*
	 * here we have the req_ctx->sg_tbl with a chain of packets ready
	 * to go.  Let's start filling HW descriptors and submit packet
	 * to the queue.
	 */
	desc_dma_addr = sa_prepare_tx_desc(pdata, req_ctx->sg_tbl.sgl,
					   sg_nents,
					   (sa_ctx->cmdl_size +
					    (SA_NUM_PSDATA_CTX_WORDS *
					     sizeof(u32))),
					   req_ctx->cmdl,
					   sizeof(sa_ctx->epib),
					   sa_ctx->epib,
					   req_ctx);

	if (desc_dma_addr == 0) {
		ret = -EIO;
		goto err;
	}

	knav_queue_push(pdata->tx_submit_q, desc_dma_addr,
			sizeof(struct knav_dma_desc), 0);

	return -EINPROGRESS;

err:
	if (req_ctx && req_ctx->sg_tbl.sgl)
		sg_free_table(&req_ctx->sg_tbl);
err_1:
	if (req_ctx)
		kmem_cache_free(pdata->dma_req_ctx_cache, req_ctx);
err_0:
	/* NOTE(review): sg_nents was reserved above, but only
	 * (sg_nents - SA_NUM_DMA_META_ELEMS) is restored here — verify the
	 * accounting against the TX completion path, otherwise the counter
	 * leaks on every failed submission.
	 */
	atomic_add((sg_nents - SA_NUM_DMA_META_ELEMS), &pdata->tx_dma_desc_cnt);
	atomic_inc(&pdata->stats.tx_dropped);
	return ret;
}
  1437 +
/* AEAD encrypt entry point: submit with the request-supplied IV */
static int sa_aead_encrypt(struct aead_request *req)
{
	return sa_aead_perform(req, req->iv, 1);
}
  1443 +
/* AEAD decrypt entry point: submit with the request-supplied IV */
static int sa_aead_decrypt(struct aead_request *req)
{
	return sa_aead_perform(req, req->iv, 0);
}
  1449 +
/* AEAD givencrypt: generate a random IV into req->giv, then encrypt */
static int sa_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);

	get_random_bytes(req->giv, crypto_aead_ivsize(tfm));
	return sa_aead_perform(&req->areq, req->giv, 1);
}
  1458 +
/* ablkcipher setkey stub — ABLKCIPHER support is not implemented yet;
 * accepts any key and reports success.
 */
static int sa_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
				const u8 *key, unsigned int keylen)
{
	return 0;
}
  1464 +
/* ablkcipher encrypt stub — NOTE(review): returns success without doing
 * any work; these algs must not be registered until implemented.
 */
static int sa_ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	return 0;
}
  1469 +
/* ablkcipher decrypt stub — see note on sa_ablkcipher_encrypt */
static int sa_ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	return 0;
}
  1474 +
/* ahash init stub — AHASH support is not implemented yet */
static int sa_ahash_init(struct ahash_request *areq)
{
	return 0;
}
  1479 +
/* ahash update stub — AHASH support is not implemented yet */
static int sa_ahash_update(struct ahash_request *areq)
{
	return 0;
}
  1484 +
/* ahash final stub — AHASH support is not implemented yet */
static int sa_ahash_final(struct ahash_request *areq)
{
	return 0;
}
  1489 +
/* ahash finup stub — AHASH support is not implemented yet */
static int sa_ahash_finup(struct ahash_request *areq)
{
	return 0;
}
  1494 +
/* ahash digest stub — NOTE(review): silently reports success without
 * hashing; do not register ahash algs until this is implemented.
 */
static int sa_ahash_digest(struct ahash_request *areq)
{
	return 0;
}
  1499 +
/* ahash setkey stub — accepts any key; AHASH support not implemented */
static int sa_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			   unsigned int keylen)
{
	return 0;
}
  1505 +
/*
 * Template table of algorithms offered by the SA hardware.
 *
 * Only the AEAD entries outside the "#ifdef TODO" guard are live; the
 * MD5/xcbc AEADs, the plain ablkciphers and the ahash entries are kept
 * here for future enablement but are compiled out.  sa_register_algos()
 * fills in the function pointers, flags and sizes before registration,
 * so each entry only carries the data that differs per algorithm.
 */
static struct sa_alg_tmpl sa_algs[] = {
	/* AEAD algorithms */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name =
				"authenc-hmac-sha1-cbc-aes-keystone-sa",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_aead = {
				.geniv = "custom",
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name =
				"authenc-hmac-sha1-cbc-3des-keystone-sa",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_aead = {
				.geniv = "custom",
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(xcbc(aes),cbc(aes))",
			.cra_driver_name =
				"authenc-aes-xcbc-mac-cbc-aes-keystone-sa",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_aead = {
				.geniv = "custom",
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = AES_XCBC_DIGEST_SIZE,
			}
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(xcbc(aes),cbc(des3_ede))",
			.cra_driver_name =
				"authenc-aes-xcbc-mac-cbc-3des-keystone-sa",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_aead = {
				.geniv = "custom",
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = AES_XCBC_DIGEST_SIZE,
			}
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
			.cra_driver_name =
				"authenc-hmac-sha1-cipher_null-keystone-sa",
			.cra_blocksize = NULL_BLOCK_SIZE,
			.cra_aead = {
				.geniv = "custom",
				.ivsize = NULL_IV_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		}
	},

#ifdef TODO	/* entries below are not yet enabled */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name =
				"authenc-hmac-md5-cbc-aes-keystone-sa",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_aead = {
				.geniv = "custom",
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name =
				"authenc-hmac-md5-cbc-3des-keystone-sa",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_aead = {
				.geniv = "custom",
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		}
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-keystone-sa",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ablkcipher = {
				.geniv = "custom",
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		}
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-keystone-sa",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ablkcipher = {
				.geniv = "custom",
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		}
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = AES_XCBC_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "xcbc(aes)",
				.cra_driver_name =
					"aes-xcbc-mac-keystone-sa",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name =
					"hmac-md5-keystone-sa",
				.cra_blocksize = MD5_BLOCK_SIZE,
			}
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name =
					"hmac-sha1-keystone-sa",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	}
#endif
};
  1664 +
/*
 * Register the algorithms in crypto framework.
 *
 * Walks sa_algs[], finishes each template (flags, type ops, callbacks,
 * common sizes) and registers it.  Entries whose generated driver name
 * would overflow CRYPTO_MAX_ALG_NAME are silently skipped.  Failures to
 * register are logged; successfully registered entries are flagged so
 * sa_unregister_algos() knows what to undo.
 *
 * NOTE(review): the snprintf below overwrites the hand-written
 * cra_driver_name from sa_algs[] with "<cra_name>-keystone-sa", which
 * embeds parentheses/commas in the driver name — confirm this is
 * intended rather than using the names already in the table.
 */
void sa_register_algos(const struct device *dev)
{
	struct crypto_alg *cra;
	struct ahash_alg *hash = NULL;
	char *alg_name;
	u32 type;
	int i, err, num_algs = ARRAY_SIZE(sa_algs);

	for (i = 0; i < num_algs; i++) {
		type = sa_algs[i].type;
		if (type == CRYPTO_ALG_TYPE_AEAD) {
			cra = &sa_algs[i].alg.crypto;
			alg_name = cra->cra_name;
			/* skip entry if the composed name would truncate */
			if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s-keystone-sa", alg_name) >= CRYPTO_MAX_ALG_NAME) {
				continue;
			}
			cra->cra_type = &crypto_aead_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC;
			cra->cra_aead.setkey = sa_aead_setkey;
			cra->cra_aead.setauthsize = sa_aead_setauthsize;
			cra->cra_aead.encrypt = sa_aead_encrypt;
			cra->cra_aead.decrypt = sa_aead_decrypt;
			cra->cra_aead.givencrypt = sa_aead_givencrypt;
			cra->cra_init = sa_cra_init_aead;
		} else if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
			cra = &sa_algs[i].alg.crypto;
			alg_name = cra->cra_name;
			if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s-keystone-sa", alg_name) >= CRYPTO_MAX_ALG_NAME) {
				continue;
			}
			cra->cra_type = &crypto_ablkcipher_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC;
			cra->cra_ablkcipher.setkey = sa_ablkcipher_setkey;
			cra->cra_ablkcipher.encrypt = sa_ablkcipher_encrypt;
			cra->cra_ablkcipher.decrypt = sa_ablkcipher_decrypt;
			cra->cra_init = sa_cra_init_ablkcipher;
		} else if (type == CRYPTO_ALG_TYPE_AHASH) {
			hash = &sa_algs[i].alg.hash;
			alg_name = hash->halg.base.cra_name;
			if (snprintf(hash->halg.base.cra_driver_name,
				     CRYPTO_MAX_ALG_NAME, "%s-keystone-sa",
				     alg_name) >= CRYPTO_MAX_ALG_NAME) {
				continue;
			}
			hash->init = sa_ahash_init;
			hash->update = sa_ahash_update;
			hash->final = sa_ahash_final;
			hash->finup = sa_ahash_finup;
			hash->digest = sa_ahash_digest;
			hash->setkey = sa_ahash_setkey;
			cra = &hash->halg.base;
			cra->cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC;
			cra->cra_type = &crypto_ahash_type;
			cra->cra_init = sa_cra_init_ahash;
		} else {
			dev_err(dev,
				"un-supported crypto algorithm (%d)", type);
			continue;
		}

		/* fields common to every algorithm type */
		cra->cra_ctxsize = sizeof(struct sa_tfm_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 0;
		cra->cra_priority = 3000;
		cra->cra_exit = sa_exit_tfm;

		if (type == CRYPTO_ALG_TYPE_AHASH)
			err = crypto_register_ahash(hash);
		else
			err = crypto_register_alg(cra);

		if (err)
			dev_err(dev, "Failed to register '%s'\n", alg_name);
		else
			sa_algs[i].registered = 1;
	}
}
  1751 +
  1752 +/* un-register the algorithms from crypto framework */
  1753 +void sa_unregister_algos(const struct device *dev)
  1754 +{
  1755 + u32 type;
  1756 + char *alg_name;
  1757 + int err = 0, i, num_algs = ARRAY_SIZE(sa_algs);
  1758 +
  1759 + for (i = 0; i < num_algs; i++) {
  1760 + type = sa_algs[i].type;
  1761 + if ((type == CRYPTO_ALG_TYPE_AHASH) &&
  1762 + (sa_algs[i].registered)) {
  1763 + alg_name = sa_algs[i].alg.hash.halg.base.cra_name;
  1764 + err = crypto_unregister_ahash(&sa_algs[i].alg.hash);
  1765 + } else if (sa_algs[i].registered) {
  1766 + alg_name = sa_algs[i].alg.crypto.cra_name;
  1767 + err = crypto_unregister_alg(&sa_algs[i].alg.crypto);
  1768 + }
  1769 +
  1770 + if (err)
  1771 + dev_err(dev, "Failed to unregister '%s'", alg_name);
  1772 + }
  1773 +}
drivers/crypto/keystone-sa.c
Changes suppressed. Click to show
  1 +/*
  2 + * Keystone crypto accelerator driver
  3 + *
  4 + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
  5 + *
  6 + * Authors: Sandeep Nair
  7 + * Vitaly Andrianov
  8 + *
  9 + * Contributors:Tinku Mannan
  10 + * Hao Zhang
  11 + *
  12 + * This program is free software; you can redistribute it and/or
  13 + * modify it under the terms of the GNU General Public License
  14 + * version 2 as published by the Free Software Foundation.
  15 + *
  16 + * This program is distributed in the hope that it will be useful, but
  17 + * WITHOUT ANY WARRANTY; without even the implied warranty of
  18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19 + * General Public License for more details.
  20 + */
  21 +
  22 +#include <linux/clk.h>
  23 +#include <linux/err.h>
  24 +#include <linux/init.h>
  25 +#include <linux/slab.h>
  26 +#include <linux/module.h>
  27 +#include <linux/interrupt.h>
  28 +#include <linux/dmapool.h>
  29 +#include <linux/of.h>
  30 +#include <linux/of_address.h>
  31 +#include <linux/rtnetlink.h>
  32 +#include <linux/dma-mapping.h>
  33 +#include <linux/platform_device.h>
  34 +#include <linux/soc/ti/knav_dma.h>
  35 +#include <linux/soc/ti/knav_qmss.h>
  36 +
  37 +#include <linux/crypto.h>
  38 +#include <linux/hw_random.h>
  39 +#include <linux/cryptohash.h>
  40 +#include <crypto/algapi.h>
  41 +#include <crypto/aead.h>
  42 +#include <crypto/authenc.h>
  43 +#include <crypto/hash.h>
  44 +#include <crypto/internal/hash.h>
  45 +#include <crypto/aes.h>
  46 +#include <crypto/des.h>
  47 +#include <crypto/sha.h>
  48 +#include <crypto/md5.h>
  49 +#include <crypto/scatterwalk.h>
  50 +
  51 +#include "keystone-sa.h"
  52 +#include "keystone-sa-hlp.h"
  53 +
/*
 * Convenience wrappers around knav_queue_device_control() for the queue
 * operations used by this driver; they only hide the command code and
 * the unused argument.
 */
#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
  67 +
  68 +
  69 +/**********************************************************************/
  70 +/* Allocate ONE receive buffer for Rx descriptors */
  71 +static void sa_allocate_rx_buf(struct keystone_crypto_data *dev_data,
  72 + int fdq)
  73 +{
  74 + struct device *dev = &dev_data->pdev->dev;
  75 + struct knav_dma_desc *hwdesc;
  76 + unsigned int buf_len, dma_sz;
  77 + u32 desc_info, pkt_info;
  78 + void *bufptr;
  79 + struct page *page;
  80 + dma_addr_t dma;
  81 + u32 pad[2];
  82 +
  83 + /* Allocate descriptor */
  84 + hwdesc = knav_pool_desc_get(dev_data->rx_pool);
  85 + if (IS_ERR_OR_NULL(hwdesc)) {
  86 + dev_dbg(dev, "out of rx pool desc\n");
  87 + return;
  88 + }
  89 +
  90 + if (fdq == 0) {
  91 + buf_len = dev_data->rx_buffer_sizes[0]; /* TODO is that size
  92 + enough */
  93 + bufptr = kmalloc(buf_len, GFP_ATOMIC | GFP_DMA | __GFP_COLD);
  94 + if (unlikely(!bufptr)) {
  95 + dev_warn_ratelimited(dev, "Primary RX buffer alloc failed\n");
  96 + goto fail;
  97 + }
  98 + dma = dma_map_single(dev, bufptr, buf_len, DMA_TO_DEVICE);
  99 + pad[0] = (u32)bufptr;
  100 + pad[1] = 0;
  101 + } else {
  102 + /* Allocate a secondary receive queue entry */
  103 + page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
  104 + if (unlikely(!page)) {
  105 + dev_warn_ratelimited(dev, "Secondary page alloc failed\n");
  106 + goto fail;
  107 + }
  108 + buf_len = PAGE_SIZE;
  109 + dma = dma_map_page(dev, page, 0, buf_len, DMA_TO_DEVICE);
  110 + pad[0] = (u32)page_address(page);
  111 + pad[1] = (u32)page;
  112 +
  113 + atomic_inc(&dev_data->rx_dma_page_cnt);
  114 + }
  115 +
  116 + desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
  117 + desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
  118 + pkt_info = KNAV_DMA_DESC_HAS_EPIB;
  119 + pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
  120 + pkt_info |= (dev_data->rx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
  121 + KNAV_DMA_DESC_RETQ_SHIFT;
  122 + hwdesc->orig_buff = dma;
  123 + hwdesc->orig_len = buf_len;
  124 + hwdesc->pad[0] = pad[0];
  125 + hwdesc->pad[1] = pad[1];
  126 + hwdesc->desc_info = desc_info;
  127 + hwdesc->packet_info = pkt_info;
  128 +
  129 + /* Push to FDQs */
  130 + knav_pool_desc_map(dev_data->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
  131 + &dma_sz);
  132 + knav_queue_push(dev_data->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
  133 +
  134 + return;
  135 +fail:
  136 + knav_pool_desc_put(dev_data->rx_pool, hwdesc);
  137 +}
  138 +
  139 +/* Refill Rx FDQ with descriptors & attached buffers */
  140 +static void sa_rxpool_refill(struct keystone_crypto_data *dev_data)
  141 +{
  142 + u32 fdq_deficit;
  143 + int i;
  144 +
  145 + /* Calculate the FDQ deficit and refill */
  146 + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && dev_data->rx_fdq[i]; i++) {
  147 + fdq_deficit = dev_data->rx_queue_depths[i] -
  148 + knav_queue_get_count(dev_data->rx_fdq[i]);
  149 + while (fdq_deficit--)
  150 + sa_allocate_rx_buf(dev_data, i);
  151 + } /* end for fdqs */
  152 +}
  153 +
  154 +/* Release ALL descriptors and attached buffers from Rx FDQ */
  155 +static void sa_free_rx_buf(struct keystone_crypto_data *dev_data,
  156 + int fdq)
  157 +{
  158 + struct device *dev = &dev_data->pdev->dev;
  159 +
  160 + struct knav_dma_desc *desc;
  161 + unsigned int buf_len, dma_sz;
  162 + dma_addr_t dma;
  163 + void *buf_ptr;
  164 +
  165 + /* Allocate descriptor */
  166 + while ((dma = knav_queue_pop(dev_data->rx_fdq[fdq], &dma_sz))) {
  167 + desc = knav_pool_desc_unmap(dev_data->rx_pool, dma, dma_sz);
  168 + if (unlikely(!desc)) {
  169 + dev_err(dev, "failed to unmap Rx desc\n");
  170 + continue;
  171 + }
  172 + dma = desc->orig_buff;
  173 + buf_len = desc->orig_len;
  174 + buf_ptr = (void *)desc->pad[0];
  175 +
  176 + if (unlikely(!dma)) {
  177 + dev_err(dev, "NULL orig_buff in desc\n");
  178 + knav_pool_desc_put(dev_data->rx_pool, desc);
  179 + continue;
  180 + }
  181 +
  182 + if (unlikely(!buf_ptr)) {
  183 + dev_err(dev, "NULL bufptr in desc\n");
  184 + knav_pool_desc_put(dev_data->rx_pool, desc);
  185 + continue;
  186 + }
  187 +
  188 + if (fdq == 0) {
  189 + dma_unmap_single(dev, dma, buf_len, DMA_FROM_DEVICE);
  190 + kfree(buf_ptr);
  191 + } else {
  192 + dma_unmap_page(dev, dma, buf_len, DMA_FROM_DEVICE);
  193 + __free_page(buf_ptr);
  194 + }
  195 +
  196 + knav_pool_desc_put(dev_data->rx_pool, desc);
  197 + }
  198 +}
  199 +
  200 +static void sa_rxpool_free(struct keystone_crypto_data *dev_data)
  201 +{
  202 + struct device *dev = &dev_data->pdev->dev;
  203 + int i;
  204 +
  205 + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
  206 + !IS_ERR_OR_NULL(dev_data->rx_fdq[i]); i++)
  207 + sa_free_rx_buf(dev_data, i);
  208 +
  209 + if (knav_pool_count(dev_data->rx_pool) != dev_data->rx_pool_size)
  210 + dev_err(dev, "Lost Rx (%d) descriptors\n",
  211 + dev_data->rx_pool_size -
  212 + knav_pool_count(dev_data->rx_pool));
  213 +
  214 + knav_pool_destroy(dev_data->rx_pool);
  215 + dev_data->rx_pool = NULL;
  216 +}
  217 +
  218 +/* DMA channel rx notify callback */
  219 +static void sa_dma_notify_rx_compl(void *arg)
  220 +{
  221 + struct keystone_crypto_data *dev_data = arg;
  222 +
  223 + knav_queue_disable_notify(dev_data->rx_compl_q);
  224 + tasklet_schedule(&dev_data->rx_task);
  225 +}
  226 +
  227 +/* Rx tast tasklet code */
  228 +static void sa_rx_task(unsigned long data)
  229 +{
  230 + struct keystone_crypto_data *dev_data =
  231 + (struct keystone_crypto_data *)data;
  232 +
  233 + sa_rx_completion_process(dev_data);
  234 +
  235 + sa_rxpool_refill(dev_data);
  236 + knav_queue_enable_notify(dev_data->rx_compl_q);
  237 +}
  238 +
  239 +/* DMA channel tx notify callback */
  240 +static void sa_dma_notify_tx_compl(void *arg)
  241 +{
  242 + struct keystone_crypto_data *dev_data = arg;
  243 +
  244 + knav_queue_disable_notify(dev_data->tx_compl_q);
  245 + tasklet_schedule(&dev_data->tx_task);
  246 +}
  247 +
  248 +/* Tx task tasklet code */
  249 +static void sa_tx_task(unsigned long data)
  250 +{
  251 + struct keystone_crypto_data *dev_data =
  252 + (struct keystone_crypto_data *)data;
  253 +
  254 + sa_tx_completion_process(dev_data);
  255 + knav_queue_enable_notify(dev_data->tx_compl_q);
  256 +}
  257 +
/*
 * Release every knav resource this driver may hold: the Rx pool (with
 * its buffers), both descriptor pools, DMA channels, completion queues
 * and the Rx free-descriptor queues.  Safe to call on a partially
 * initialized state — each resource is checked before being released
 * and NULLed afterwards, so sa_setup_resources()/sa_setup_dma() use it
 * as their common error path.
 */
static void sa_free_resources(struct keystone_crypto_data *dev_data)
{
	int i;

	if (!IS_ERR_OR_NULL(dev_data->rx_pool))
		sa_rxpool_free(dev_data);

	if (!IS_ERR_OR_NULL(dev_data->tx_pool)) {
		knav_pool_destroy(dev_data->tx_pool);
		dev_data->tx_pool = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->tx_chan)) {
		knav_dma_close_channel(dev_data->tx_chan);
		dev_data->tx_chan = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->rx_chan)) {
		knav_dma_close_channel(dev_data->rx_chan);
		dev_data->rx_chan = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->tx_submit_q)) {
		knav_queue_close(dev_data->tx_submit_q);
		dev_data->tx_submit_q = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->tx_compl_q)) {
		knav_queue_close(dev_data->tx_compl_q);
		dev_data->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->rx_compl_q)) {
		knav_queue_close(dev_data->rx_compl_q);
		dev_data->rx_compl_q = NULL;
	}

	/* FDQ array is populated contiguously; stop at the first hole */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(dev_data->rx_fdq[i]) ; ++i) {
		knav_queue_close(dev_data->rx_fdq[i]);
		dev_data->rx_fdq[i] = NULL;
	}
}
  301 +
  302 +static int sa_setup_resources(struct keystone_crypto_data *dev_data)
  303 +{
  304 + struct device *dev = &dev_data->pdev->dev;
  305 + u8 name[20];
  306 + int ret = 0;
  307 + int i;
  308 +
  309 + snprintf(name, sizeof(name), "rx-pool-%s", dev_name(dev));
  310 + dev_data->rx_pool = knav_pool_create(name, dev_data->rx_pool_size,
  311 + dev_data->rx_pool_region_id);
  312 + if (IS_ERR_OR_NULL(dev_data->rx_pool)) {
  313 + dev_err(dev, "Couldn't create rx pool\n");
  314 + ret = PTR_ERR(dev_data->rx_pool);
  315 + goto fail;
  316 + }
  317 +
  318 + snprintf(name, sizeof(name), "tx-pool-%s", dev_name(dev));
  319 + dev_data->tx_pool = knav_pool_create(name, dev_data->tx_pool_size,
  320 + dev_data->tx_pool_region_id);
  321 + if (IS_ERR_OR_NULL(dev_data->tx_pool)) {
  322 + dev_err(dev, "Couldn't create tx pool\n");
  323 + ret = PTR_ERR(dev_data->tx_pool);
  324 + goto fail;
  325 + }
  326 +
  327 + snprintf(name, sizeof(name), "tx-subm_q-%s", dev_name(dev));
  328 + dev_data->tx_submit_q = knav_queue_open(name,
  329 + dev_data->tx_submit_qid, 0);
  330 + if (IS_ERR(dev_data->tx_submit_q)) {
  331 + ret = PTR_ERR(dev_data->tx_submit_q);
  332 + dev_err(dev, "Could not open \"%s\": %d\n", name, ret);
  333 + goto fail;
  334 + }
  335 +
  336 + snprintf(name, sizeof(name), "tx-compl-q-%s", dev_name(dev));
  337 + dev_data->tx_compl_q = knav_queue_open(name, dev_data->tx_compl_qid, 0);
  338 + if (IS_ERR(dev_data->tx_compl_q)) {
  339 + ret = PTR_ERR(dev_data->tx_compl_q);
  340 + dev_err(dev, "Could not open \"%s\": %d\n", name, ret);
  341 + goto fail;
  342 + }
  343 +
  344 + snprintf(name, sizeof(name), "rx-compl-q-%s", dev_name(dev));
  345 + dev_data->rx_compl_q = knav_queue_open(name, dev_data->rx_compl_qid, 0);
  346 + if (IS_ERR(dev_data->rx_compl_q)) {
  347 + ret = PTR_ERR(dev_data->rx_compl_q);
  348 + dev_err(dev, "Could not open \"%s\": %d\n", name, ret);
  349 + goto fail;
  350 + }
  351 +
  352 + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
  353 + dev_data->rx_queue_depths[i] && dev_data->rx_buffer_sizes[i];
  354 + i++) {
  355 + snprintf(name, sizeof(name), "rx-fdq%d-%s", i, dev_name(dev));
  356 + dev_data->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
  357 + if (IS_ERR_OR_NULL(dev_data->rx_fdq[i])) {
  358 + ret = PTR_ERR(dev_data->rx_fdq[i]);
  359 + goto fail;
  360 + }
  361 + }
  362 + sa_rxpool_refill(dev_data);
  363 +
  364 + return 0;
  365 +
  366 +fail:
  367 + sa_free_resources(dev_data);
  368 + return ret;
  369 +}
  370 +
  371 +static int sa_setup_dma(struct keystone_crypto_data *dev_data)
  372 +{
  373 + struct device *dev = &dev_data->pdev->dev;
  374 + struct knav_queue_notify_config notify_cfg;
  375 + struct knav_dma_cfg config;
  376 + int error = 0;
  377 + int i;
  378 + u32 last_fdq = 0;
  379 + u8 name[16];
  380 +
  381 + error = sa_setup_resources(dev_data);
  382 + if (error)
  383 + goto fail;
  384 +
  385 + /* Setup Tx DMA channel */
  386 + memset(&config, 0, sizeof(config));
  387 + config.direction = DMA_MEM_TO_DEV;
  388 + config.u.tx.filt_einfo = false;
  389 + config.u.tx.filt_pswords = false;
  390 + config.u.tx.priority = DMA_PRIO_MED_L;
  391 +
  392 + dev_data->tx_chan = knav_dma_open_channel(dev, dev_data->tx_chan_name,
  393 + &config);
  394 + if (IS_ERR_OR_NULL(dev_data->tx_chan)) {
  395 + dev_err(dev, "(%s) failed to open dmachan\n",
  396 + dev_data->tx_chan_name);
  397 + error = -ENODEV;
  398 + goto fail;
  399 + }
  400 +
  401 + notify_cfg.fn = sa_dma_notify_tx_compl;
  402 + notify_cfg.fn_arg = dev_data;
  403 + error = knav_queue_device_control(dev_data->tx_compl_q,
  404 + KNAV_QUEUE_SET_NOTIFIER,
  405 + (unsigned long)&notify_cfg);
  406 + if (error)
  407 + goto fail;
  408 +
  409 + knav_queue_enable_notify(dev_data->tx_compl_q);
  410 +
  411 + dev_dbg(dev, "opened tx channel %s\n", name);
  412 +
  413 + /* Set notification for Rx completion */
  414 + notify_cfg.fn = sa_dma_notify_rx_compl;
  415 + notify_cfg.fn_arg = dev_data;
  416 + error = knav_queue_device_control(dev_data->rx_compl_q,
  417 + KNAV_QUEUE_SET_NOTIFIER,
  418 + (unsigned long)&notify_cfg);
  419 + if (error)
  420 + goto fail;
  421 +
  422 + knav_queue_disable_notify(dev_data->rx_compl_q);
  423 +
  424 + /* Setup Rx DMA channel */
  425 + memset(&config, 0, sizeof(config));
  426 + config.direction = DMA_DEV_TO_MEM;
  427 + config.u.rx.einfo_present = true;
  428 + config.u.rx.psinfo_present = true;
  429 + config.u.rx.err_mode = DMA_DROP;
  430 + config.u.rx.desc_type = DMA_DESC_HOST;
  431 + config.u.rx.psinfo_at_sop = false;
  432 + config.u.rx.sop_offset = 0; /* NETCP_SOP_OFFSET */
  433 + config.u.rx.dst_q = dev_data->rx_compl_qid;
  434 + config.u.rx.thresh = DMA_THRESH_NONE;
  435 +
  436 + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
  437 + if (dev_data->rx_fdq[i])
  438 + last_fdq = knav_queue_get_id(dev_data->rx_fdq[i]);
  439 + config.u.rx.fdq[i] = last_fdq;
  440 + }
  441 +
  442 + dev_data->rx_chan = knav_dma_open_channel(dev, dev_data->rx_chan_name,
  443 + &config);
  444 + if (IS_ERR_OR_NULL(dev_data->rx_chan)) {
  445 + dev_err(dev, "(%s) failed to open dmachan\n",
  446 + dev_data->rx_chan_name);
  447 + error = -ENODEV;
  448 + goto fail;
  449 + }
  450 +
  451 + knav_queue_enable_notify(dev_data->rx_compl_q);
  452 +
  453 + return 0;
  454 +
  455 +fail:
  456 + sa_free_resources(dev_data);
  457 +
  458 + return error;
  459 +}
  460 +
  461 +/* Teardown DMA channels */
  462 +static void sa_teardown_dma(struct keystone_crypto_data *dev_data)
  463 +{
  464 + if (dev_data->tx_chan) {
  465 + knav_dma_close_channel(dev_data->tx_chan);
  466 + dev_data->tx_chan = NULL;
  467 + }
  468 +
  469 + if (dev_data->rx_chan) {
  470 + knav_dma_close_channel(dev_data->rx_chan);
  471 + dev_data->rx_chan = NULL;
  472 + }
  473 +}
  474 +/******************************************************************************/
  475 +/************************************************************/
  476 +/* SYSFS interface functions */
  477 +/************************************************************/
/*
 * Typed sysfs attribute: show/store handlers receive the driver's
 * keystone_crypto_data directly instead of a raw kobject.
 */
struct sa_kobj_attribute {
	struct attribute attr;
	ssize_t (*show)(struct keystone_crypto_data *crypto,
			struct sa_kobj_attribute *attr, char *buf);
	ssize_t	(*store)(struct keystone_crypto_data *crypto,
			 struct sa_kobj_attribute *attr, const char *, size_t);
};

/* Declare a named sa_kobj_attribute with the given mode and handlers */
#define SA_ATTR(_name, _mode, _show, _store) \
	struct sa_kobj_attribute sa_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
  489 +
  490 +static ssize_t sa_stats_show_tx_pkts(struct keystone_crypto_data *crypto,
  491 + struct sa_kobj_attribute *attr, char *buf)
  492 +{
  493 + return scnprintf(buf, PAGE_SIZE, "%d\n",
  494 + atomic_read(&crypto->stats.tx_pkts));
  495 +}
  496 +
  497 +static ssize_t sa_stats_reset_tx_pkts(struct keystone_crypto_data *crypto,
  498 + struct sa_kobj_attribute *attr, const char *buf, size_t len)
  499 +{
  500 + atomic_set(&crypto->stats.tx_pkts, 0);
  501 + return len;
  502 +}
  503 +
  504 +static ssize_t sa_stats_show_rx_pkts(struct keystone_crypto_data *crypto,
  505 + struct sa_kobj_attribute *attr, char *buf)
  506 +{
  507 + return scnprintf(buf, PAGE_SIZE, "%d\n",
  508 + atomic_read(&crypto->stats.rx_pkts));
  509 +}
  510 +
  511 +static ssize_t sa_stats_reset_rx_pkts(struct keystone_crypto_data *crypto,
  512 + struct sa_kobj_attribute *attr, const char *buf, size_t len)
  513 +{
  514 + atomic_set(&crypto->stats.rx_pkts, 0);
  515 + return len;
  516 +}
  517 +
  518 +static ssize_t sa_stats_show_tx_drop_pkts(struct keystone_crypto_data *crypto,
  519 + struct sa_kobj_attribute *attr, char *buf)
  520 +{
  521 + return scnprintf(buf, PAGE_SIZE, "%d\n",
  522 + atomic_read(&crypto->stats.tx_dropped));
  523 +}
  524 +
  525 +static ssize_t sa_stats_reset_tx_drop_pkts(struct keystone_crypto_data *crypto,
  526 + struct sa_kobj_attribute *attr, const char *buf, size_t len)
  527 +{
  528 + atomic_set(&crypto->stats.tx_dropped, 0);
  529 + return len;
  530 +}
  531 +
  532 +static ssize_t
  533 +sa_stats_show_sc_tear_drop_pkts(struct keystone_crypto_data *crypto,
  534 + struct sa_kobj_attribute *attr, char *buf)
  535 +{
  536 + return scnprintf(buf, PAGE_SIZE, "%d\n",
  537 + atomic_read(&crypto->stats.sc_tear_dropped));
  538 +}
  539 +
/*
 * Per-counter attributes: a read returns the count, a write resets it;
 * sc_tear_drop_pkts is read-only (no store handler).
 */
static SA_ATTR(tx_pkts, S_IRUGO | S_IWUSR,
	       sa_stats_show_tx_pkts, sa_stats_reset_tx_pkts);
static SA_ATTR(rx_pkts, S_IRUGO | S_IWUSR,
	       sa_stats_show_rx_pkts, sa_stats_reset_rx_pkts);
static SA_ATTR(tx_drop_pkts, S_IRUGO | S_IWUSR,
	       sa_stats_show_tx_drop_pkts, sa_stats_reset_tx_drop_pkts);
static SA_ATTR(sc_tear_drop_pkts, S_IRUGO,
	       sa_stats_show_sc_tear_drop_pkts, NULL);

static struct attribute *sa_stats_attrs[] = {
	&sa_attr_tx_pkts.attr,
	&sa_attr_rx_pkts.attr,
	&sa_attr_tx_drop_pkts.attr,
	&sa_attr_sc_tear_drop_pkts.attr,
	NULL	/* sysfs requires NULL termination */
};

/* attribute -> sa_kobj_attribute wrapper */
#define to_sa_kobj_attr(_attr) \
	container_of(_attr, struct sa_kobj_attribute, attr)
/* stats kobject -> owning keystone_crypto_data */
#define to_crypto_data_from_stats_obj(obj) \
	container_of(obj, struct keystone_crypto_data, stats_kobj)
  561 +
  562 +static ssize_t sa_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
  563 + char *buf)
  564 +{
  565 + struct sa_kobj_attribute *sa_attr = to_sa_kobj_attr(attr);
  566 + struct keystone_crypto_data *crypto =
  567 + to_crypto_data_from_stats_obj(kobj);
  568 + ssize_t ret = -EIO;
  569 +
  570 + if (sa_attr->show)
  571 + ret = sa_attr->show(crypto, sa_attr, buf);
  572 + return ret;
  573 +}
  574 +
  575 +static ssize_t sa_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
  576 + const char *buf, size_t len)
  577 +{
  578 + struct sa_kobj_attribute *sa_attr = to_sa_kobj_attr(attr);
  579 + struct keystone_crypto_data *crypto =
  580 + to_crypto_data_from_stats_obj(kobj);
  581 + ssize_t ret = -EIO;
  582 +
  583 + if (sa_attr->store)
  584 + ret = sa_attr->store(crypto, sa_attr, buf, len);
  585 + return ret;
  586 +}
  587 +
/* Route generic sysfs ops to the typed sa_kobj_attribute handlers */
static const struct sysfs_ops sa_stats_sysfs_ops = {
	.show = sa_kobj_attr_show,
	.store = sa_kobj_attr_store,
};

/* kobject type backing the per-device "stats" directory */
static struct kobj_type sa_stats_ktype = {
	.sysfs_ops = &sa_stats_sysfs_ops,
	.default_attrs = sa_stats_attrs,
};
  597 +
/*
 * Create the "stats" kobject (and its attribute files) under the
 * device's sysfs directory.  Returns 0 or the kobject_init_and_add()
 * error; on failure both references taken here are dropped.
 *
 * NOTE(review): kobject_get(&dev->kobj) pins the device kobject, and
 * neither the success path here nor sa_delete_sysfs_entries() releases
 * it — confirm the reference is meant to be held for the device's
 * lifetime.
 */
static int sa_create_sysfs_entries(struct keystone_crypto_data *crypto)
{
	struct device *dev = &crypto->pdev->dev;
	int ret;

	ret = kobject_init_and_add(&crypto->stats_kobj, &sa_stats_ktype,
				   kobject_get(&dev->kobj), "stats");

	if (ret) {
		dev_err(dev, "failed to create sysfs entry\n");
		kobject_put(&crypto->stats_kobj);
		kobject_put(&dev->kobj);
	}
	return ret;
}
  613 +
/*
 * Remove the "stats" kobject from sysfs.
 * NOTE(review): only kobject_del() is called; the references taken in
 * sa_create_sysfs_entries() are not put here — verify against the
 * driver teardown path.
 */
static void sa_delete_sysfs_entries(struct keystone_crypto_data *crypto)
{
	kobject_del(&crypto->stats_kobj);
}
  618 +
  619 +/*
  620 + * HW RNG functions
  621 + */
  622 +
/*
 * hwrng init hook: power up and configure the SA TRNG block.
 *
 * Sequence (order matters for the hardware): enable the TRNG in the SA
 * CMD_STATUS register, disable the TRNG core, program startup/refill
 * timing and clock divider, mask its interrupts (the driver polls),
 * then enable the core.  Also initializes the spinlock serializing
 * TRNG register access in sa_rng_read().
 */
static int sa_rng_init(struct hwrng *rng)
{
	u32 value;
	struct device *dev = (struct device *)rng->priv;
	struct keystone_crypto_data *crypto = dev_get_drvdata(dev);
	u32 startup_cycles, min_refill_cycles, max_refill_cycles, clk_div;

	/* TRNG register block lives at a fixed offset inside the SA regs */
	crypto->trng_regs = (struct sa_trng_regs *)((void *)crypto->regs +
				SA_REG_MAP_TRNG_OFFSET);

	startup_cycles = SA_TRNG_DEF_STARTUP_CYCLES;
	min_refill_cycles = SA_TRNG_DEF_MIN_REFILL_CYCLES;
	max_refill_cycles = SA_TRNG_DEF_MAX_REFILL_CYCLES;
	clk_div = SA_TRNG_DEF_CLK_DIV_CYCLES;

	/* Enable RNG module */
	value = __raw_readl(&crypto->regs->mmr.CMD_STATUS);
	value |= SA_CMD_STATUS_REG_TRNG_ENABLE;
	__raw_writel(value, &crypto->regs->mmr.CMD_STATUS);

	/* Configure RNG module */
	__raw_writel(0, &crypto->trng_regs->TRNG_CONTROL); /* Disable RNG */
	value = startup_cycles << SA_TRNG_CONTROL_REG_STARTUP_CYCLES_SHIFT;
	__raw_writel(value, &crypto->trng_regs->TRNG_CONTROL);
	value =
	(min_refill_cycles << SA_TRNG_CONFIG_REG_MIN_REFILL_CYCLES_SHIFT) |
	(max_refill_cycles << SA_TRNG_CONFIG_REG_MAX_REFILL_CYCLES_SHIFT) |
	(clk_div << SA_TRNG_CONFIG_REG_SAMPLE_DIV_SHIFT);
	__raw_writel(value, &crypto->trng_regs->TRNG_CONFIG);
	/* Disable all interrupts from TRNG */
	__raw_writel(0, &crypto->trng_regs->TRNG_INTMASK);
	/* Enable RNG */
	value = __raw_readl(&crypto->trng_regs->TRNG_CONTROL);
	value |= SA_TRNG_CONTROL_REG_TRNG_ENABLE;
	__raw_writel(value, &crypto->trng_regs->TRNG_CONTROL);

	/* Initialize the TRNG access lock */
	spin_lock_init(&crypto->trng_lock);

	return 0;
}
  664 +
  665 +void sa_rng_cleanup(struct hwrng *rng)
  666 +{
  667 + u32 value;
  668 + struct device *dev = (struct device *)rng->priv;
  669 + struct keystone_crypto_data *crypto = dev_get_drvdata(dev);
  670 +
  671 + /* Disable RNG */
  672 + __raw_writel(0, &crypto->trng_regs->TRNG_CONTROL);
  673 + value = __raw_readl(&crypto->regs->mmr.CMD_STATUS);
  674 + value &= ~SA_CMD_STATUS_REG_TRNG_ENABLE;
  675 + __raw_writel(value, &crypto->regs->mmr.CMD_STATUS);
  676 +}
  677 +
/* Maximum size of RNG data available in one read */
#define	SA_MAX_RNG_DATA	8
/* Maximum retries to get rng data */
#define SA_MAX_RNG_DATA_RETRIES	5
/* Delay between retries (in usecs) */
#define SA_RNG_DATA_RETRY_DELAY	5

/*
 * hwrng read hook: poll the TRNG for up to 8 bytes of random data.
 *
 * Under the TRNG lock, checks the READY status; when set, latches the
 * 64-bit output (high then low word) and acks the status so the
 * hardware refills.  If 'wait' is set, retries up to
 * SA_MAX_RNG_DATA_RETRIES times with a small delay; otherwise a single
 * attempt.  Returns the number of bytes copied or -EAGAIN if no data
 * became ready.
 *
 * NOTE(review): the udelay() runs even on the success path (once per
 * loop pass, lock already dropped) — likely only intended for the
 * retry case; confirm before relying on read latency.
 */
static int sa_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u32 value;
	u32 st_ready;
	u32 rng_lo, rng_hi;
	int retries = SA_MAX_RNG_DATA_RETRIES;
	int data_sz = min_t(u32, max, SA_MAX_RNG_DATA);
	struct device *dev = (struct device *)rng->priv;
	struct keystone_crypto_data *crypto = dev_get_drvdata(dev);

	do {
		spin_lock(&crypto->trng_lock);
		value = __raw_readl(&crypto->trng_regs->TRNG_STATUS);
		st_ready = value & SA_TRNG_STATUS_REG_READY;
		if (st_ready) {
			/* Read random data */
			rng_hi = __raw_readl(&crypto->trng_regs->TRNG_OUTPUT_H);
			rng_lo = __raw_readl(&crypto->trng_regs->TRNG_OUTPUT_L);
			/* Clear ready status */
			__raw_writel(SA_TRNG_INTACK_REG_READY,
				     &crypto->trng_regs->TRNG_INTACK);
		}
		spin_unlock(&crypto->trng_lock);
		udelay(SA_RNG_DATA_RETRY_DELAY);
	} while (wait && !st_ready && retries--);

	if (!st_ready)
		return -EAGAIN;

	/* Copy low word first, then as much of the high word as requested */
	if (likely(data_sz > sizeof(rng_lo))) {
		memcpy(data, &rng_lo, sizeof(rng_lo));
		memcpy((data + sizeof(rng_lo)), &rng_hi,
		       (data_sz - sizeof(rng_lo)));
	} else {
		memcpy(data, &rng_lo, data_sz);
	}

	return data_sz;
}
  724 +
  725 +static int sa_register_rng(struct device *dev)
  726 +{
  727 + struct keystone_crypto_data *crypto = dev_get_drvdata(dev);
  728 +
  729 + crypto->rng.name = dev_driver_string(dev);
  730 + crypto->rng.init = sa_rng_init;
  731 + crypto->rng.cleanup = sa_rng_cleanup;
  732 + crypto->rng.read = sa_rng_read;
  733 + crypto->rng.priv = (unsigned long)dev;
  734 +
  735 + return hwrng_register(&crypto->rng);
  736 +}
  737 +
  738 +static void sa_unregister_rng(struct device *dev)
  739 +{
  740 + struct keystone_crypto_data *crypto = dev_get_drvdata(dev);
  741 +
  742 + hwrng_unregister(&crypto->rng);
  743 +}
  744 +
  745 +/************************************************************/
  746 +/* Driver registration functions */
  747 +/************************************************************/
/*
 * Read a scalar DT property into @var. On failure, log the missing
 * property and return from the *enclosing* function (note the hidden
 * control flow), propagating the real OF error code instead of a
 * bare -1 (-EPERM) as before. Relies on `ret` and `dev` being in scope.
 */
#define OF_PROP_READ(type, node, prop, var) \
	do { \
		ret = of_property_read_##type(node, prop, &var); \
		if (ret < 0) { \
			dev_err(dev, "missing \""prop"\" parameter\n"); \
			return ret; \
		} \
	} while (0)
  756 +
/*
 * Read a fixed-size u32-array DT property into @array. On failure, log
 * the missing property and return from the *enclosing* function,
 * propagating the real OF error code instead of a bare -1 (-EPERM).
 * Relies on `ret` and `dev` being in scope.
 */
#define OF_PROP_READ_U32_ARRAY(node, prop, array, size) \
	do { \
		ret = of_property_read_u32_array(node, prop, array, size); \
		if (ret < 0) { \
			dev_err(dev, "missing \""prop"\" parameter\n"); \
			return ret; \
		} \
	} while (0)
  765 +
/*
 * Populate @dev_data from the device-tree node: Navigator DMA channel
 * names and queue IDs, RX/TX descriptor-pool geometry, the
 * security-context ID range, and the SA MMIO register mapping.
 *
 * Returns 0 on success. The OF_PROP_READ* macros return directly from
 * this function (logging the property name) when a required property is
 * missing; -ENOMEM is returned if the register region cannot be mapped.
 */
static int sa_read_dtb(struct device_node *node,
		       struct keystone_crypto_data *dev_data)
{
	int i, ret = 0;
	struct device *dev = &dev_data->pdev->dev;
	u32 temp[2];

	/* TX side: channel name, queue depth, submit/completion queues */
	OF_PROP_READ(string, node, "tx-channel", dev_data->tx_chan_name);
	OF_PROP_READ(u32, node, "tx-queue-depth", dev_data->tx_queue_depth);
	/* Descriptor budget starts out at the full TX queue depth */
	atomic_set(&dev_data->tx_dma_desc_cnt, dev_data->tx_queue_depth);
	OF_PROP_READ(u32, node, "tx-submit-queue", dev_data->tx_submit_qid);
	OF_PROP_READ(u32, node, "tx-compl-queue", dev_data->tx_compl_qid);
	/* RX side: channel name plus one depth/buffer-size per FDQ */
	OF_PROP_READ(string, node, "rx-channel", dev_data->rx_chan_name);

	OF_PROP_READ_U32_ARRAY(node, "rx-queue-depth",
			       dev_data->rx_queue_depths,
			       KNAV_DMA_FDQ_PER_CHAN);

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
		dev_dbg(dev, "rx-queue-depth[%d]= %u\n", i,
			dev_data->rx_queue_depths[i]);

	OF_PROP_READ_U32_ARRAY(node, "rx-buffer-size",
			       dev_data->rx_buffer_sizes,
			       KNAV_DMA_FDQ_PER_CHAN);

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
		dev_dbg(dev, "rx-buffer-size[%d]= %u\n", i,
			dev_data->rx_buffer_sizes[i]);

	atomic_set(&dev_data->rx_dma_page_cnt, 0);

	OF_PROP_READ(u32, node, "rx-compl-queue", dev_data->rx_compl_qid);

	/* "tx-pool"/"rx-pool" are <size region-id> pairs */
	OF_PROP_READ_U32_ARRAY(node, "tx-pool", temp, 2);
	dev_data->tx_pool_size = temp[0];
	dev_data->tx_pool_region_id = temp[1];

	OF_PROP_READ_U32_ARRAY(node, "rx-pool", temp, 2);
	dev_data->rx_pool_size = temp[0];
	dev_data->rx_pool_region_id = temp[1];

	/* "sc-id" is the <first last> security-context ID range;
	 * allocation starts from the first ID */
	OF_PROP_READ_U32_ARRAY(node, "sc-id", temp, 2);
	dev_data->sc_id_start = temp[0];
	dev_data->sc_id_end = temp[1];
	dev_data->sc_id = dev_data->sc_id_start;

	/* Map the SA register region (first "reg" entry of the node) */
	dev_data->regs = of_iomap(node, 0);
	if (!dev_data->regs) {
		dev_err(dev, "failed to of_iomap\n");
		return -ENOMEM;
	}

	return 0;
}
  821 +
  822 +static int sa_init_mem(struct keystone_crypto_data *dev_data)
  823 +{
  824 + struct device *dev = &dev_data->pdev->dev;
  825 + /* Setup dma pool for security context buffers */
  826 + dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
  827 + SA_CTX_MAX_SZ, 64, 0);
  828 + if (!dev_data->sc_pool) {
  829 + dev_err(dev, "Failed to create dma pool");
  830 + return -1;
  831 + }
  832 +
  833 + /* Create a cache for Tx DMA request context */
  834 + dev_data->dma_req_ctx_cache = KMEM_CACHE(sa_dma_req_ctx, 0);
  835 + if (!dev_data->dma_req_ctx_cache) {
  836 + dev_err(dev, "Failed to create dma req cache");
  837 + return -1;
  838 + }
  839 + return 0;
  840 +}
  841 +
  842 +static void sa_free_mem(struct keystone_crypto_data *dev_data)
  843 +{
  844 + if (dev_data->sc_pool)
  845 + dma_pool_destroy(dev_data->sc_pool);
  846 + if (dev_data->dma_req_ctx_cache)
  847 + kmem_cache_destroy(dev_data->dma_req_ctx_cache);
  848 +}
  849 +static int keystone_crypto_remove(struct platform_device *pdev)
  850 +{
  851 + struct keystone_crypto_data *dev_data = platform_get_drvdata(pdev);
  852 +
  853 + /* un-register crypto algorithms */
  854 + sa_unregister_algos(&pdev->dev);
  855 + /* un-register HW RNG */
  856 + sa_unregister_rng(&pdev->dev);
  857 + /* Delete SYSFS entries */
  858 + sa_delete_sysfs_entries(dev_data);
  859 + /* Release DMA channels */
  860 + sa_teardown_dma(dev_data);
  861 + /* Kill tasklets */
  862 + tasklet_kill(&dev_data->rx_task);
  863 + /* Free memory pools used by the driver */
  864 + sa_free_mem(dev_data);
  865 +
  866 + clk_disable_unprepare(dev_data->clk);
  867 + clk_put(dev_data->clk);
  868 + kfree(dev_data);
  869 + platform_set_drvdata(pdev, NULL);
  870 + return 0;
  871 +}
  872 +
  873 +static int keystone_crypto_probe(struct platform_device *pdev)
  874 +{
  875 + struct device *dev = &pdev->dev;
  876 + struct device_node *node = pdev->dev.of_node;
  877 + struct keystone_crypto_data *dev_data;
  878 + u32 value;
  879 + int ret;
  880 +
  881 + sa_ks2_dev = dev;
  882 + dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
  883 + if (!dev_data)
  884 + return -ENOMEM;
  885 +
  886 + dev_data->clk = clk_get(dev, NULL);
  887 + if (IS_ERR_OR_NULL(dev_data->clk)) {
  888 + dev_err(dev, "Couldn't get clock\n");
  889 + ret = -ENODEV;
  890 + goto err;
  891 + }
  892 +
  893 + ret = clk_prepare_enable(dev_data->clk);
  894 + if (ret < 0) {
  895 + dev_err(dev, "Couldn't enable clock\n");
  896 + clk_put(dev_data->clk);
  897 + ret = -ENODEV;
  898 + goto err;
  899 + }
  900 +
  901 + dev_data->pdev = pdev;
  902 + platform_set_drvdata(pdev, dev_data);
  903 +
  904 + /* Read configuration from device tree */
  905 + ret = sa_read_dtb(node, dev_data);
  906 + if (ret) {
  907 + dev_err(dev, "Failed to get all relevant configurations from DTB...\n");
  908 + goto err;
  909 + }
  910 +
  911 + /* Enable the required sub-modules in SA */
  912 + value = __raw_readl(&dev_data->regs->mmr.CMD_STATUS);
  913 +
  914 + value |= (0x00000001u /* Enc SS */
  915 + | 0x00000002u /* Auth SS */
  916 + | 0x00000080u /* Context Cache */
  917 + | 0x00000100u /* PA in port */
  918 + | 0x00000200u /* CDMA in port */
  919 + | 0x00000400u /* PA out port */
  920 + | 0x00000800u /* CDMA out port */
  921 + | 0x00001000u /* Enc SS1 */
  922 + | 0x00002000u); /* Auth SS1 */
  923 +
  924 + __raw_writel(value, &dev_data->regs->mmr.CMD_STATUS);
  925 +
  926 + tasklet_init(&dev_data->rx_task, sa_rx_task,
  927 + (unsigned long) dev_data);
  928 +
  929 + tasklet_init(&dev_data->tx_task, sa_tx_task, (unsigned long) dev_data);
  930 +
  931 + /* Initialize statistic counters */
  932 + atomic_set(&dev_data->stats.tx_dropped, 0);
  933 + atomic_set(&dev_data->stats.sc_tear_dropped, 0);
  934 + atomic_set(&dev_data->stats.tx_pkts, 0);
  935 + atomic_set(&dev_data->stats.rx_pkts, 0);
  936 +
  937 + /* Initialize memory pools used by the driver */
  938 + if (sa_init_mem(dev_data)) {
  939 + dev_err(dev, "Failed to create dma pool");
  940 + ret = -ENOMEM;
  941 + goto err;
  942 + }
  943 +
  944 + /* Setup DMA channels */
  945 + if (sa_setup_dma(dev_data)) {
  946 + dev_err(dev, "Failed to set DMA channels");
  947 + ret = -ENODEV;
  948 + goto err;
  949 + }
  950 +
  951 + /* Initialize the SC-ID allocation lock */
  952 + spin_lock_init(&dev_data->scid_lock);
  953 +
  954 + /* Create sysfs entries */
  955 + ret = sa_create_sysfs_entries(dev_data);
  956 + if (ret)
  957 + goto err;
  958 +
  959 + /* Register HW RNG support */
  960 + ret = sa_register_rng(dev);
  961 + if (ret) {
  962 + dev_err(dev, "Failed to register HW RNG");
  963 + goto err;
  964 + }
  965 +
  966 + /* Register crypto algorithms */
  967 + sa_register_algos(dev);
  968 + dev_info(dev, "crypto accelerator enabled\n");
  969 + return 0;
  970 +
  971 +err:
  972 + keystone_crypto_remove(pdev);
  973 + return ret;
  974 +}
  975 +
/* Devices this driver binds to (DT binding "ti,keystone-crypto") */
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,keystone-crypto", },
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, of_match);
  981 +
/* Platform-driver glue; matching is done via the OF table above. */
static struct platform_driver keystone_crypto_driver = {
	.probe = keystone_crypto_probe,
	.remove = keystone_crypto_remove,
	.driver = {
		.name = "keystone-crypto",
		.owner = THIS_MODULE, /* redundant on newer kernels, harmless */
		.of_match_table = of_match,
	},
};
  991 +
/*
 * The init/exit functions did nothing beyond (un)registering the
 * platform driver, so replace the open-coded module_init()/module_exit()
 * boilerplate with the equivalent module_platform_driver() helper.
 */
module_platform_driver(keystone_crypto_driver);
  1004 +
  1005 +MODULE_DESCRIPTION("Keystone crypto acceleration support.");
  1006 +MODULE_LICENSE("GPL v2");
  1007 +MODULE_AUTHOR("Sandeep Nair");
  1008 +MODULE_AUTHOR("Vitaly Andrianov");
drivers/crypto/keystone-sa.h
  1 +/*
  2 + * Keystone crypto accelerator driver
  3 + *
  4 + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
  5 + * Contact: Sandeep Nair <sandeep_n@ti.com>
  6 + *
  7 + * This program is free software; you can redistribute it and/or
  8 + * modify it under the terms of the GNU General Public License
  9 + * version 2 as published by the Free Software Foundation.
  10 + *
  11 + * This program is distributed in the hope that it will be useful, but
  12 + * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14 + * General Public License for more details.
  15 + */
  16 +
  17 +/*
  18 + *
  19 + * 0 1 2 3
  20 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  21 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -----
  22 + * | |
  23 + * | Software only section (not fetched by CP_ACE) |
  24 + * | (optional) |
  25 + * | |
  26 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<-Must be
  27 + * | SCCTL | 64 byte
  28 + * | (8 bytes) | aligned
  29 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  30 + * | |
  31 + * | PHP module specific section (fetched by CP_ACE) |
  32 + * | (56 bytes) |
  33 + * | |
  34 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -----
  35 + * | |
  36 + * | Encryption module specific section (fetched by CP_ACE) |
  37 + * | (variable size) |
  38 + * | |
  39 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<-64 byte
  40 + * | | aligned
  41 + * | Authentication module specific section (fetched by CP_ACE) |
  42 + * | (variable size) |
  43 + * | |
  44 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -----
  45 + *
  46 + * Figure: Security Context memory layout
  47 + *
  48 + *
  49 + * 0 1 2 3
  50 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  51 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -----
  52 + * |O|Evict done | F/E control | SCID |
  53 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  54 + * | SCPTR (Security Context Pointer) |
  55 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -----
  56 + * O : Owner
  57 + * SCID & SCPTR are filled by hardware
  58 + * Figure: Security Context control word (SCCTL)
  59 + *
  60 + *
  61 + * 0 1 2 3
  62 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  63 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  64 + * |D| Pkt Type | Flow Index | Dest Queue ID |
  65 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  66 + * | SWINFO-0 (4 bytes) |
  67 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  68 + * | SWINFO-1 (4 bytes) |
  69 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  70 + * | PktID (16 bits) | |
  71 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  72 + * | |
  73 + * | Protocol Specific Parameters |
 * |              (Variable Size up to 116 bytes)                  |
  75 + * ... ...
  76 + * | |
  77 + * | |
  78 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  79 + * D : Direction
  80 + * Figure: PHP engine Security Context Format
  81 + *
  82 + *
  83 + * 0 1 2 3
  84 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  85 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  86 + * |M| R |nEngineID| |
  87 + * +-+-+-+-+-+-+-+-+ +
  88 + * | encryption mode ctrl word |
  89 + * ... ...
  90 + * | (27 bytes) |
  91 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  92 + * | Reserved (4 bytes) (must initialize to 0) |
  93 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  94 + * | Encryption Key value (32 bytes) |
  95 + * ... ...
  96 + * | |
  97 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  98 + * | Encryption Aux-1 (32 bytes) (optional) |
  99 + * ... ...
  100 + * | |
  101 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  102 + * | Encryption Aux-2 (16 bytes) (optional) |
  103 + * ... ...
  104 + * | |
  105 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  106 + * | Encryption Aux-3 (16 bytes) (optional) |
  107 + * ... ...
  108 + * | |
  109 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  110 + * | Encryption Aux-4 (16 bytes) |
  111 + * ... ...
  112 + * | |
  113 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  114 + * | Pre-crypto data store (15 bytes) |
  115 + * | |
  116 + * | +-+-+-+-+-+-+-+-|
  117 + * | | |
  118 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  119 + * M : Encryption Mode selector (0=crypto processing, 1=NULL)
  120 + * R : Reserved
  121 + * Figure: Encryption engine Security Context Format
  122 + *
  123 + *
  124 + * 0 1 2 3
  125 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  126 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  127 + * |M| R |nEngineID| Auth SW Ctrl | |
  128 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +
  129 + * | Reserved (6 bytes) |
  130 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  131 + * | Authentication length (8 bytes) |
  132 + * | ( 0 = let h/w calculate the length ) |
  133 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  134 + * | Reserved (12 bytes) |
  135 + * ... ...
  136 + * | |
  137 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
 * |       HW ctrl word (4 bytes) must be set to 0 by SW           |
  139 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  140 + * | Authentication Key value (32 bytes) |
  141 + * ... Master Key or pre-computed inner digest for HMAC ...
  142 + * | |
  143 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  144 + * | Authentication Aux-1 (32 bytes) (optional) |
  145 + * ... Pre-computed outer opad for HMAC ...
  146 + * | |
  147 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  148 + * | Authentication Aux-2 (32 bytes) (optional) |
  149 + * ... ...
  150 + * | |
  151 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  152 + * | Pre-crypto data store (32 bytes) |
  153 + * ... ( HW access only) ...
  154 + * | |
  155 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  156 + * | Pre-crypto data store (32 bytes) |
  157 + * ... ( HW access only) ...
  158 + * | |
  159 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ------
  160 + * M : Authentication Mode selector (0=hash processing, 1=NULL)
  161 + * R : Reserved
  162 + * Figure: Authentication engine Security Context Format
  163 + *
  164 + */
  165 +
  166 +/******************************************************************************
  167 + * This type represents the various packet types to be processed
  168 + * by the PHP engine in SA.
  169 + * It is used to identify the corresponding PHP processing function.
  170 + ******************************************************************************/
/* NOTE(review): an all-caps typedef of a bare u8 is unconventional for
 * kernel code; kept as-is. Values below select the PHP processing
 * function for a packet. */
typedef u8 SA_CTX_PE_PKT_TYPE_T;
#define SA_CTX_PE_PKT_TYPE_3GPP_AIR	0	/* 3GPP Air Cipher */
#define SA_CTX_PE_PKT_TYPE_SRTP		1	/* SRTP */
#define SA_CTX_PE_PKT_TYPE_IPSEC_AH	2	/* IPSec Authentication Header */
#define SA_CTX_PE_PKT_TYPE_IPSEC_ESP	3	/* IPSec Encapsulating
						   Security Payload */
#define SA_CTX_PE_PKT_TYPE_NONE		4	/* Indicates that it is in
						   data mode, it may not be
						   used by PHP */
  180 +
  181 +
  182 +#define SA_CTX_ENC_TYPE1_SZ 64 /* Encryption SC with Key only */
  183 +#define SA_CTX_ENC_TYPE2_SZ 96 /* Encryption SC with Key and Aux1 */
  184 +
  185 +#define SA_CTX_AUTH_TYPE1_SZ 64 /* Auth SC with Key only */
  186 +#define SA_CTX_AUTH_TYPE2_SZ 96 /* Auth SC with Key and Aux1 */
  187 +
  188 +#define SA_CTX_PHP_PE_CTX_SZ 64 /* Size of security ctx for
  189 + PHP engine */
  190 +
  191 +#define SA_CTX_MAX_SZ (64 + SA_CTX_ENC_TYPE2_SZ + SA_CTX_AUTH_TYPE2_SZ)
  192 +
  193 +/*
  194 + * Encoding of F/E control in SCCTL
  195 + * Bit 0-1: Fetch PHP Bytes
  196 + * Bit 2-3: Fetch Encryption/Air Ciphering Bytes
  197 + * Bit 4-5: Fetch Authentication Bytes or Encr pass 2
  198 + * Bit 6-7: Evict PHP Bytes
  199 + *
  200 + * where 00 = 0 bytes
  201 + * 01 = 64 bytes
  202 + * 10 = 96 bytes
  203 + * 11 = 128 bytes
  204 + */
  205 +#define SA_CTX_DMA_SIZE_0 0
  206 +#define SA_CTX_DMA_SIZE_64 1
  207 +#define SA_CTX_DMA_SIZE_96 2
  208 +#define SA_CTX_DMA_SIZE_128 3
  209 +
/* Pack the four 2-bit DMA-size codes into the SCCTL F/E control byte
 * (encoding table above): bits 0-1 PHP fetch, 2-3 engine-0 fetch,
 * 4-5 engine-1 fetch, 6-7 PHP evict. */
#define SA_CTX_SCCTL_MK_DMA_INFO(php_f, eng0_f, eng1_f, php_e) \
	((php_f) | \
	 ((eng0_f) << 2) | \
	 ((eng1_f) << 4) | \
	 ((php_e) << 6))
  215 +
  216 +/*
  217 + * Byte offset of the owner word in SCCTL
  218 + * in the security context
  219 + */
  220 +#define SA_CTX_SCCTL_OWNER_OFFSET 0
  221 +
/*
 * Convert a security-context section size in bytes to its 2-bit DMA
 * size code: 0 -> 0, 64 -> 1, 96 -> 2, 128 -> 3.
 * Assumption: CTX size is multiple of 32
 */
#define SA_CTX_SIZE_TO_DMA_SIZE(ctx_sz) \
	((ctx_sz) ? ((ctx_sz)/32 - 1) : 0)
  227 +
  228 +#define SA_CTX_ENC_KEY_OFFSET 32
  229 +#define SA_CTX_ENC_AUX1_OFFSET 64
  230 +#define SA_CTX_ENC_AUX2_OFFSET 96
  231 +#define SA_CTX_ENC_AUX3_OFFSET 112
  232 +#define SA_CTX_ENC_AUX4_OFFSET 128
  233 +
  234 +/* Next Engine Select code in CP_ACE */
  235 +#define SA_ENG_ID_EM1 2 /* Encryption/Decryption engine
  236 + with AES/DES core */
#define SA_ENG_ID_EM2	3	/* Encryption/Decryption engine for pass 2 */
  238 +#define SA_ENG_ID_AM1 4 /* Authentication engine with
  239 + SHA1/MD5/SHA2 core */
  240 +#define SA_ENG_ID_AM2 5 /* Authentication engine for pass 2 */
  241 +#define SA_ENG_ID_OUTPORT2 20 /* Egress module 2 */
  242 +#define SA_ENG_ID_NONE 0xff
  243 +
  244 +/*
  245 + * Command Label Definitions
  246 + */
  247 +#define SA_CMDL_OFFSET_NESC 0 /* Next Engine Select Code */
  248 +#define SA_CMDL_OFFSET_LABEL_LEN 1 /* Engine Command Label Length */
  249 +#define SA_CMDL_OFFSET_DATA_LEN 2 /* 16-bit Length of Data to be
  250 + processed */
#define SA_CMDL_OFFSET_DATA_OFFSET	4	/* Start Data Offset */
  252 +#define SA_CMDL_OFFSET_OPTION_CTRL1 5 /* Option Control Byte 1 */
  253 +#define SA_CMDL_OFFSET_OPTION_CTRL2 6 /* Option Control Byte 2 */
  254 +#define SA_CMDL_OFFSET_OPTION_CTRL3 7 /* Option Control Byte 3 */
  255 +#define SA_CMDL_OFFSET_OPTION_BYTE 8
  256 +
  257 +#define SA_CMDL_HEADER_SIZE_BYTES 8
  258 +
  259 +#define SA_CMDL_OPTION_BYTES_MAX_SIZE 72
  260 +#define SA_CMDL_MAX_SIZE_BYTES (SA_CMDL_HEADER_SIZE_BYTES + \
  261 + SA_CMDL_OPTION_BYTES_MAX_SIZE)
  262 +
  263 +/* SWINFO word-0 flags */
  264 +#define SA_SW_INFO_FLAG_EVICT 0x0001
  265 +#define SA_SW_INFO_FLAG_TEAR 0x0002
  266 +#define SA_SW_INFO_FLAG_NOPD 0x0004
  267 +
  268 +/*
  269 + * TRNG module definitions
  270 + */
  271 +
  272 +/* Offset to TRNG module in CP_ACE memory map */
  273 +#define SA_REG_MAP_TRNG_OFFSET 0x24000
  274 +
  275 +/* TRNG enable control in CP_ACE */
  276 +#define SA_CMD_STATUS_REG_TRNG_ENABLE BIT(3)
  277 +
  278 +/* TRNG start control in TRNG module */
  279 +#define SA_TRNG_CONTROL_REG_TRNG_ENABLE BIT(10)
  280 +
  281 +/* Data ready indicator in STATUS register */
  282 +#define SA_TRNG_STATUS_REG_READY BIT(0)
  283 +
  284 +/* Data ready clear control in INTACK register */
  285 +#define SA_TRNG_INTACK_REG_READY BIT(0)
  286 +
  287 +/* Number of samples taken to gather entropy during startup.
  288 + * If value is 0, the number of samples is 2^24 else
  289 + * equals value times 2^8.
  290 + */
  291 +#define SA_TRNG_DEF_STARTUP_CYCLES 0
  292 +#define SA_TRNG_CONTROL_REG_STARTUP_CYCLES_SHIFT 16
  293 +
  294 +/* Minimum number of samples taken to regenerate entropy
  295 + * If value is 0, the number of samples is 2^24 else
  296 + * equals value times 2^6.
  297 + */
  298 +#define SA_TRNG_DEF_MIN_REFILL_CYCLES 1
  299 +#define SA_TRNG_CONFIG_REG_MIN_REFILL_CYCLES_SHIFT 0
  300 +
  301 +/* Maximum number of samples taken to regenerate entropy
  302 + * If value is 0, the number of samples is 2^24 else
  303 + * equals value times 2^8.
  304 + */
  305 +#define SA_TRNG_DEF_MAX_REFILL_CYCLES 0
  306 +#define SA_TRNG_CONFIG_REG_MAX_REFILL_CYCLES_SHIFT 16
  307 +
  308 +/* Number of CLK input cycles between samples */
  309 +#define SA_TRNG_DEF_CLK_DIV_CYCLES 0
  310 +#define SA_TRNG_CONFIG_REG_SAMPLE_DIV_SHIFT 8