author     Linus Torvalds <torvalds@linux-foundation.org>    2016-12-14 13:31:29 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-12-14 13:31:29 -0800
commit     0f1d6dfe03ca4e36132221b918499c6f0b0f048d (patch)
tree       0de8e9330610190a23e173ca7d7f3fb74a517aa2 /drivers/crypto
parent     d05c5f7ba164aed3db02fb188c26d0dd94f5455b (diff)
parent     04b46fbdea5e31ffd745a34fa61269a69ba9f47a (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.10:

  API:
   - add skcipher walk interface
   - add asynchronous compression (acomp) interface
   - fix algif_aead AIO handling of zero buffer

  Algorithms:
   - fix unaligned access in poly1305
   - fix DRBG output to large buffers

  Drivers:
   - add support for iMX6UL to caam
   - fix givenc descriptors (used by IPsec) in caam
   - accelerated SHA256/SHA512 for ARM64 from OpenSSL
   - add SSE CRCT10DIF and CRC32 to ARM/ARM64
   - add AEAD support to Chelsio chcr
   - add Armada 8K support to omap-rng"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (148 commits)
  crypto: testmgr - fix overlap in chunked tests again
  crypto: arm/crc32 - accelerated support based on x86 SSE implementation
  crypto: arm64/crc32 - accelerated support based on x86 SSE implementation
  crypto: arm/crct10dif - port x86 SSE implementation to ARM
  crypto: arm64/crct10dif - port x86 SSE implementation to arm64
  crypto: testmgr - add/enhance test cases for CRC-T10DIF
  crypto: testmgr - avoid overlap in chunked tests
  crypto: chcr - checking for IS_ERR() instead of NULL
  crypto: caam - check caam_emi_slow instead of re-lookup platform
  crypto: algif_aead - fix AIO handling of zero buffer
  crypto: aes-ce - Make aes_simd_algs static
  crypto: algif_skcipher - set error code when kcalloc fails
  crypto: caam - make aamalg_desc a proper module
  crypto: caam - pass key buffers with typesafe pointers
  crypto: arm64/aes-ce-ccm - Fix AEAD decryption length
  MAINTAINERS: add crypto headers to crypto entry
  crypt: doc - remove misleading mention of async API
  crypto: doc - fix header file name
  crypto: api - fix comment typo
  crypto: skcipher - Add separate walker for AEAD decryption
  ...
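As rough orientation for the asynchronous compression (acomp) interface named in the pull message, here is a minimal sketch of how a kernel caller might drive it. The function name example_compress, the choice of "deflate", and the trimmed error handling are illustrative assumptions, not part of this merge:

#include <crypto/acompress.h>
#include <linux/scatterlist.h>

static int example_compress(struct scatterlist *src, unsigned int slen,
                            struct scatterlist *dst, unsigned int dlen)
{
        struct crypto_acomp *tfm;
        struct acomp_req *req;
        int ret;

        /* assumes "deflate" is exposed through acomp on this kernel */
        tfm = crypto_alloc_acomp("deflate", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = acomp_request_alloc(tfm);
        if (!req) {
                crypto_free_acomp(tfm);
                return -ENOMEM;
        }

        /* src/dst scatterlists and their lengths describe the data buffers */
        acomp_request_set_params(req, src, dst, slen, dlen);

        /* -EINPROGRESS/-EBUSY would be reported through the request callback */
        ret = crypto_acomp_compress(req);

        acomp_request_free(req);
        crypto_free_acomp(tfm);
        return ret;
}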
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c  |     3
-rw-r--r--  drivers/crypto/atmel-aes-regs.h       |     4
-rw-r--r--  drivers/crypto/atmel-aes.c            |   189
-rw-r--r--  drivers/crypto/caam/Kconfig           |    11
-rw-r--r--  drivers/crypto/caam/Makefile          |     1
-rw-r--r--  drivers/crypto/caam/caamalg.c         |  1505
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c    |  1306
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h    |    97
-rw-r--r--  drivers/crypto/caam/caamhash.c        |   227
-rw-r--r--  drivers/crypto/caam/caampkc.c         |     4
-rw-r--r--  drivers/crypto/caam/caamrng.c         |    10
-rw-r--r--  drivers/crypto/caam/ctrl.c            |    75
-rw-r--r--  drivers/crypto/caam/desc.h            |    22
-rw-r--r--  drivers/crypto/caam/desc_constr.h     |   133
-rw-r--r--  drivers/crypto/caam/error.c           |     5
-rw-r--r--  drivers/crypto/caam/intern.h          |     1
-rw-r--r--  drivers/crypto/caam/jr.c              |    27
-rw-r--r--  drivers/crypto/caam/key_gen.c         |    62
-rw-r--r--  drivers/crypto/caam/key_gen.h         |     6
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h      |     6
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c       |     4
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c       |    30
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c          |     6
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h          |    45
-rw-r--r--  drivers/crypto/chelsio/Kconfig        |     1
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c    |  2001
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h    |   103
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c    |     8
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h    |    18
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h  |   115
-rw-r--r--  drivers/crypto/marvell/cesa.c         |     4
-rw-r--r--  drivers/crypto/marvell/cesa.h         |     5
-rw-r--r--  drivers/crypto/marvell/cipher.c       |     8
-rw-r--r--  drivers/crypto/marvell/hash.c         |    65
-rw-r--r--  drivers/crypto/marvell/tdma.c         |    33
-rw-r--r--  drivers/crypto/mv_cesa.c              |     4
-rw-r--r--  drivers/crypto/nx/nx.c                |     1
-rw-r--r--  drivers/crypto/sahara.c               |     2
-rw-r--r--  drivers/crypto/talitos.c              |     2
-rw-r--r--  drivers/crypto/vmx/Makefile           |    12
40 files changed, 4122 insertions(+), 2039 deletions(-)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index dae1e39139e9..d10b4ae5e0da 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -135,8 +135,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
&ctx->sa_out_dma_addr, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
- dma_free_coherent(ctx->dev->core_dev->device,
- ctx->sa_len * 4,
+ dma_free_coherent(ctx->dev->core_dev->device, size * 4,
ctx->sa_in, ctx->sa_in_dma_addr);
return -ENOMEM;
}
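The crypto4xx hunk above fixes an error path that released the sa_in buffer with a stale length (ctx->sa_len * 4) instead of the size that had just been allocated. A minimal sketch of the rule being enforced, with hypothetical names rather than the driver's:

#include <linux/dma-mapping.h>

static int example_alloc_pair(struct device *dev, u32 size,
                              void **in, dma_addr_t *in_dma,
                              void **out, dma_addr_t *out_dma)
{
        *in = dma_alloc_coherent(dev, size * 4, in_dma, GFP_ATOMIC);
        if (!*in)
                return -ENOMEM;

        *out = dma_alloc_coherent(dev, size * 4, out_dma, GFP_ATOMIC);
        if (!*out) {
                /* free the first buffer with the same size it was allocated
                 * with, never a cached or previous length */
                dma_free_coherent(dev, size * 4, *in, *in_dma);
                return -ENOMEM;
        }

        return 0;
}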
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
index 6c2951bb70b1..0ec04407b533 100644
--- a/drivers/crypto/atmel-aes-regs.h
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -28,6 +28,7 @@
#define AES_MR_OPMOD_CFB (0x3 << 12)
#define AES_MR_OPMOD_CTR (0x4 << 12)
#define AES_MR_OPMOD_GCM (0x5 << 12)
+#define AES_MR_OPMOD_XTS (0x6 << 12)
#define AES_MR_LOD (0x1 << 15)
#define AES_MR_CFBS_MASK (0x7 << 16)
#define AES_MR_CFBS_128b (0x0 << 16)
@@ -67,6 +68,9 @@
#define AES_CTRR 0x98
#define AES_GCMHR(x) (0x9c + ((x) * 0x04))
+#define AES_TWR(x) (0xc0 + ((x) * 0x04))
+#define AES_ALPHAR(x) (0xd0 + ((x) * 0x04))
+
#define AES_HW_VERSION 0xFC
#endif /* __ATMEL_AES_REGS_H__ */
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index e3d40a8dfffb..0e3d0d655b96 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
@@ -68,6 +69,7 @@
#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
+#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
#define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \
AES_FLAGS_ENCRYPT | \
@@ -89,6 +91,7 @@ struct atmel_aes_caps {
bool has_cfb64;
bool has_ctr32;
bool has_gcm;
+ bool has_xts;
u32 max_burst_size;
};
@@ -135,6 +138,12 @@ struct atmel_aes_gcm_ctx {
atmel_aes_fn_t ghash_resume;
};
+struct atmel_aes_xts_ctx {
+ struct atmel_aes_base_ctx base;
+
+ u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
struct atmel_aes_reqctx {
unsigned long mode;
};
@@ -282,6 +291,20 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
break;
+ case AES_TWR(0):
+ case AES_TWR(1):
+ case AES_TWR(2):
+ case AES_TWR(3):
+ snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
+ break;
+
+ case AES_ALPHAR(0):
+ case AES_ALPHAR(1):
+ case AES_ALPHAR(2):
+ case AES_ALPHAR(3):
+ snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
+ break;
+
default:
snprintf(tmp, sz, "0x%02x", offset);
break;
@@ -317,7 +340,7 @@ static inline void atmel_aes_write(struct atmel_aes_dev *dd,
char tmp[16];
dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
- atmel_aes_reg_name(offset, tmp));
+ atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
}
#endif /* VERBOSE_DEBUG */
@@ -453,15 +476,15 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
return err;
}
-static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv)
+static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv, const u32 *key, int keylen)
{
u32 valmr = 0;
/* MR register must be set before IV registers */
- if (dd->ctx->keylen == AES_KEYSIZE_128)
+ if (keylen == AES_KEYSIZE_128)
valmr |= AES_MR_KEYSIZE_128;
- else if (dd->ctx->keylen == AES_KEYSIZE_192)
+ else if (keylen == AES_KEYSIZE_192)
valmr |= AES_MR_KEYSIZE_192;
else
valmr |= AES_MR_KEYSIZE_256;
@@ -478,13 +501,19 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
atmel_aes_write(dd, AES_MR, valmr);
- atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
- SIZE_IN_WORDS(dd->ctx->keylen));
+ atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
atmel_aes_write_block(dd, AES_IVR(0), iv);
}
+static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv)
+
+{
+ atmel_aes_write_ctrl_key(dd, use_dma, iv,
+ dd->ctx->key, dd->ctx->keylen);
+}
/* CPU transfer */
@@ -1769,6 +1798,137 @@ static struct aead_alg aes_gcm_alg = {
};
+/* xts functions */
+
+static inline struct atmel_aes_xts_ctx *
+atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_xts_ctx, base);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
+
+static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ unsigned long flags;
+ int err;
+
+ atmel_aes_set_mode(dd, rctx);
+
+ err = atmel_aes_hw_init(dd);
+ if (err)
+ return atmel_aes_complete(dd, err);
+
+ /* Compute the tweak value from req->info with ecb(aes). */
+ flags = dd->flags;
+ dd->flags &= ~AES_FLAGS_MODE_MASK;
+ dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+ atmel_aes_write_ctrl_key(dd, false, NULL,
+ ctx->key2, ctx->base.keylen);
+ dd->flags = flags;
+
+ atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+ return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+ u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
+ static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+ u8 *tweak_bytes = (u8 *)tweak;
+ int i;
+
+ /* Read the computed ciphered tweak value. */
+ atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
+ /*
+ * Hardware quirk:
+ * the order of the ciphered tweak bytes need to be reversed before
+ * writing them into the ODATARx registers.
+ */
+ for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
+ u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
+
+ tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
+ tweak_bytes[i] = tmp;
+ }
+
+ /* Process the data. */
+ atmel_aes_write_ctrl(dd, use_dma, NULL);
+ atmel_aes_write_block(dd, AES_TWR(0), tweak);
+ atmel_aes_write_block(dd, AES_ALPHAR(0), one);
+ if (use_dma)
+ return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+}
+
+static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int err;
+
+ err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+ if (err)
+ return err;
+
+ memcpy(ctx->base.key, key, keylen/2);
+ memcpy(ctx->key2, key + keylen/2, keylen/2);
+ ctx->base.keylen = keylen/2;
+
+ return 0;
+}
+
+static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS);
+}
+
+static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ ctx->base.start = atmel_aes_xts_start;
+
+ return 0;
+}
+
+static struct crypto_alg aes_xts_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "atmel-xts-aes",
+ .cra_priority = ATMEL_AES_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_xts_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_xts_setkey,
+ .encrypt = atmel_aes_xts_encrypt,
+ .decrypt = atmel_aes_xts_decrypt,
+ }
+};
+
+
/* Probe functions */
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
@@ -1877,6 +2037,9 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
int i;
+ if (dd->caps.has_xts)
+ crypto_unregister_alg(&aes_xts_alg);
+
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
@@ -1909,8 +2072,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_gcm_alg;
}
+ if (dd->caps.has_xts) {
+ err = crypto_register_alg(&aes_xts_alg);
+ if (err)
+ goto err_aes_xts_alg;
+ }
+
return 0;
+err_aes_xts_alg:
+ crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
crypto_unregister_alg(&aes_cfb64_alg);
err_aes_cfb64_alg:
@@ -1928,6 +2099,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 0;
dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
+ dd->caps.has_xts = 0;
dd->caps.max_burst_size = 1;
/* keep only major version number */
@@ -1937,6 +2109,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
+ dd->caps.has_xts = 1;
dd->caps.max_burst_size = 4;
break;
case 0x200:
@@ -2138,7 +2311,7 @@ aes_dd_err:
static int atmel_aes_remove(struct platform_device *pdev)
{
- static struct atmel_aes_dev *aes_dd;
+ struct atmel_aes_dev *aes_dd;
aes_dd = platform_get_drvdata(pdev);
if (!aes_dd)
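Since the atmel-aes changes above register an "xts(aes)" ablkcipher, here is a minimal sketch of exercising that algorithm through the generic skcipher API. The function name, the AES-128 key-length choice, and the single-buffer handling are illustrative assumptions, not part of the patch:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_xts_encrypt(u8 *buf, unsigned int len,
                               const u8 *key, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        int ret;

        tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* XTS takes two AES keys back to back: 32 bytes here for AES-128 */
        ret = crypto_skcipher_setkey(tfm, key, 32);
        if (ret)
                goto out;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out;
        }

        /* buf must be scatterlist-addressable (e.g. kmalloc'd, not stack) */
        sg_init_one(&sg, buf, len);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);

        /* -EINPROGRESS/-EBUSY would be completed via the request callback */
        ret = crypto_skcipher_encrypt(req);

        skcipher_request_free(req);
out:
        crypto_free_skcipher(tfm);
        return ret;
}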
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 64bf3024b680..bc0d3569f8d9 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -74,7 +74,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -89,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -101,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -113,7 +113,7 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -134,3 +134,6 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
help
Selecting this will enable printing of various debug
information in the CAAM driver.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 08bf5515ae8a..6554742f357e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -8,6 +8,7 @@ endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 954a64c7757b..662fe94cb2f8 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,6 +2,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
*
* Based on talitos crypto API driver.
*
@@ -53,6 +54,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamalg_desc.h"
/*
* crypto alg
@@ -62,8 +64,6 @@
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define CAAM_MAX_IV_LENGTH 16
#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
@@ -71,37 +71,6 @@
#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 5)
-/* length of descriptors text */
-#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
-
-/* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
-
-#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
- 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
- 15 * CAAM_CMD_SZ)
-
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -117,8 +86,7 @@
static void dbg_dump_sg(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
- struct scatterlist *sg, size_t tlen, bool ascii,
- bool may_sleep)
+ struct scatterlist *sg, size_t tlen, bool ascii)
{
struct scatterlist *it;
void *it_page;
@@ -152,7 +120,6 @@ static struct list_head alg_list;
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
- int alg_op;
bool rfc3686;
bool geniv;
};
@@ -163,52 +130,6 @@ struct caam_aead_alg {
bool registered;
};
-/* Set DK bit in class 1 operation if shared */
-static inline void append_dec_op1(u32 *desc, u32 type)
-{
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* DK bit is valid only for AES */
- if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- return;
- }
-
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
-}
-
-/*
- * For aead functions, read payload and write payload,
- * both of which are specified in req->src and req->dst
- */
-static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
-{
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
- KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-}
-
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
-{
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
- KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-}
-
/*
* per-session context
*/
@@ -220,147 +141,36 @@ struct caam_ctx {
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
- u32 class1_alg_type;
- u32 class2_alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
- unsigned int enckeylen;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
+ struct alginfo cdata;
unsigned int authsize;
};
-static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *nonce;
- unsigned int enckeylen = ctx->enckeylen;
-
- /*
- * RFC3686 specific:
- * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
- * | enckeylen = encryption key size + nonce size
- */
- if (is_rfc3686)
- enckeylen -= CTR_RFC3686_NONCE_SIZE;
-
- if (keys_fit_inline) {
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key_as_imm(desc, (void *)ctx->key +
- ctx->split_key_pad_len, enckeylen,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
- enckeylen);
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc,
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-}
-
-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *key_jump_cmd;
-
- /* Note: Context registers are saved. */
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-}
-
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+ ctx->adata.keylen_pad;
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -368,84 +178,22 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- desc = ctx->sh_desc_dec;
+ if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH2 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /*
- * Insert a NOP here, since we need at least 4 instructions between
- * code patching the descriptor buffer and the location being patched.
- */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -453,12 +201,6 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -470,11 +212,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline;
- u32 geniv, moveiv;
u32 ctx1_iv_off = 0;
- u32 *desc;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ u32 *desc, *nonce = NULL;
+ u32 inl_mask;
+ unsigned int data_len[2];
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -482,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return 0;
/* NULL encryption / decryption */
- if (!ctx->enckeylen)
+ if (!ctx->cdata.keylen)
return aead_null_set_sh_desc(aead);
/*
@@ -497,8 +239,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* RFC3686 specific:
* CONTEXT1[255:128] = {NONCE, IV, COUNTER}
*/
- if (is_rfc3686)
+ if (is_rfc3686) {
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
if (alg->caam.geniv)
goto skip_enc;
@@ -507,54 +255,29 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_encrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- FIFOLDST_VLF);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ if (desc_inline_query(DESC_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
+ is_rfc3686, nonce, ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -562,79 +285,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_enc:
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (desc_inline_query(DESC_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (alg->caam.geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
+ ctx->adata.key_dma = ctx->key_dma;
- if (alg->caam.geniv) {
- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, alg->caam.geniv, is_rfc3686,
+ nonce, ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -642,11 +322,6 @@ skip_enc:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
if (!alg->caam.geniv)
goto skip_givenc;
@@ -655,93 +330,30 @@ skip_enc:
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- if (is_rfc3686)
- goto copy_iv;
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-copy_iv:
- /* Copy IV to class 1 context */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
-
- /* Return to encryption */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- /* Copy iv from outfifo to class 2 fifo */
- moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
- NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Will write ivsize + cryptlen */
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Not need to reload iv */
- append_seq_fifo_load(desc, ivsize,
- FIFOLD_CLASS_SKIP);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Will read cryptlen */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -749,11 +361,6 @@ copy_iv:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_givenc:
return 0;
@@ -774,12 +381,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *zero_payload_jump_cmd,
- *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -787,82 +393,16 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* if assoclen + cryptlen is ZERO, skip to ICV write */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqinlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* if cryptlen is ZERO jump to zero-payload commands */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* zero-payload commands */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
-
- /* There is no input data */
- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
- /* write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -870,80 +410,21 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD |
- JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* jump to zero-payload command if cryptlen is zero */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* store encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* zero-payload command */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -951,11 +432,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -974,11 +450,11 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -986,62 +462,16 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1049,73 +479,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* Will write cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read encrypted data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1123,11 +501,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -1147,12 +520,11 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
- u32 *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -1160,61 +532,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read and write assoclen + cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1222,77 +549,21 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* In-snoop assoclen + cryptlen data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1300,11 +571,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -1320,19 +586,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
return 0;
}
-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
- u32 authkeylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, authkeylen,
- ctx->alg_op);
-}
-
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
struct crypto_authenc_keys keys;
@@ -1341,33 +597,25 @@ static int aead_setkey(struct crypto_aead *aead,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
- if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- goto badkey;
-
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
if (ret) {
goto badkey;
}
/* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1376,14 +624,14 @@ static int aead_setkey(struct crypto_aead *aead,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + keys.enckeylen, 1);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
- ctx->enckeylen = keys.enckeylen;
+ ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
}
@@ -1412,11 +660,11 @@ static int gcm_setkey(struct crypto_aead *aead,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
ret = gcm_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1444,9 +692,9 @@ static int rfc4106_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1455,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
ret = rfc4106_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1483,9 +731,9 @@ static int rfc4543_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1494,7 +742,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
ret = rfc4543_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1505,21 +753,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
const char *alg_name = crypto_tfm_alg_name(tfm);
struct device *jrdev = ctx->jrdev;
- int ret = 0;
- u32 *key_jump_cmd;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc;
- u8 *nonce;
- u32 geniv;
u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
+ memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -1542,60 +787,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- memcpy(ctx->key, key, keylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Load iv */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1603,61 +808,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* load IV */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
- else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1666,76 +821,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX |
- (crt->ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, crt->ivsize,
- LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1743,14 +832,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
- return ret;
+ return 0;
}
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -1758,8 +841,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- u32 *key_jump_cmd, *desc;
- __be64 sector_size = cpu_to_be64(512);
+ u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
@@ -1774,88 +856,23 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 keys only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
@@ -1864,31 +881,22 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
return 0;
}
/*
* aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct aead_edesc {
- int assoc_nents;
int src_nents;
int dst_nents;
- dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -1900,9 +908,9 @@ struct aead_edesc {
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
@@ -2019,8 +1027,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2031,7 +1038,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc->src_nents > 1 ? 100 : ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2052,8 +1059,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2063,7 +1069,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2157,7 +1163,7 @@ static void init_gcm_job(struct aead_request *req,
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
/* Append Salt */
if (!generic_gcm)
- append_data(desc, ctx->key + ctx->enckeylen, 4);
+ append_data(desc, ctx->key + ctx->cdata.keylen, 4);
/* Append IV */
append_data(desc, req->iv, ivsize);
/* End of blank commands */
@@ -2172,7 +1178,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
@@ -2218,15 +1224,13 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2278,14 +1282,12 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2344,10 +1346,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/* Check if data are contiguous. */
all_contig = !src_nents;
- if (!all_contig) {
- src_nents = src_nents ? : 1;
+ if (!all_contig)
sec4_sg_len = src_nents;
- }
sec4_sg_len += dst_nents;
@@ -2556,11 +1556,9 @@ static int aead_decrypt(struct aead_request *req)
int ret = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1, may_sleep);
+ req->assoclen + req->cryptlen, 1);
#endif
/* allocate extended descriptor */
@@ -2618,16 +1616,33 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2647,6 +1662,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2673,6 +1690,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -2794,11 +1814,26 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
/*
@@ -2808,6 +1843,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2823,6 +1860,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2850,6 +1889,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->iv_dma = iv_dma;
@@ -2916,7 +1958,6 @@ struct caam_alg_template {
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
- u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
@@ -3101,7 +2142,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3123,7 +2163,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3145,7 +2184,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3167,7 +2205,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3189,7 +2226,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3211,7 +2247,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3233,7 +2268,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3256,7 +2290,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3279,7 +2312,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3302,7 +2334,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3325,7 +2356,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3348,7 +2378,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3371,7 +2400,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3394,7 +2422,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3417,7 +2444,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3440,7 +2466,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3463,7 +2488,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3486,7 +2510,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3509,7 +2532,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
}
},
{
@@ -3532,7 +2554,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
}
},
@@ -3556,7 +2577,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3580,7 +2600,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3604,7 +2623,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3628,7 +2646,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3652,7 +2669,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3676,7 +2692,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3700,7 +2715,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3724,7 +2738,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3748,7 +2761,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3772,7 +2784,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3795,7 +2806,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3818,7 +2828,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3841,7 +2850,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3864,7 +2872,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3887,7 +2894,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3910,7 +2916,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3933,7 +2938,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3956,7 +2960,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3979,7 +2982,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4002,7 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4025,7 +3026,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4048,7 +3048,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4073,7 +3072,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4098,7 +3096,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4124,7 +3121,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4149,7 +3145,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4175,7 +3170,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4200,7 +3194,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4226,7 +3219,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4251,7 +3243,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4277,7 +3268,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4302,7 +3292,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4328,7 +3317,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4353,7 +3341,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4375,9 +3362,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
}
/* copy descriptor header template value */
- ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
- ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
return 0;
}
@@ -4420,7 +3406,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
if (ctx->key_dma &&
!dma_mapping_error(ctx->jrdev, ctx->key_dma))
dma_unmap_single(ctx->jrdev, ctx->key_dma,
- ctx->enckeylen + ctx->split_key_pad_len,
+ ctx->cdata.keylen + ctx->adata.keylen_pad,
DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
@@ -4498,7 +3484,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
t_alg->caam.class1_alg_type = template->class1_alg_type;
t_alg->caam.class2_alg_type = template->class2_alg_type;
- t_alg->caam.alg_op = template->alg_op;
return t_alg;
}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 000000000000..f3f48c10b9d6
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1306 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamalg_desc.h"
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ /* DK bit is valid only for AES */
+ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ return;
+ }
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
+
+/**
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
+
+static void init_sh_desc_key_aead(u32 * const desc,
+ struct alginfo * const cdata,
+ struct alginfo * const adata,
+ const bool is_rfc3686, u32 *nonce)
+{
+ u32 *key_jump_cmd;
+ unsigned int enckeylen = cdata->keylen;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /*
+ * RFC3686 specific:
+ * | key = {AUTH_KEY, ENC_KEY, NONCE}
+ * | enckeylen = encryption key size + nonce size
+ */
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, enckeylen,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686) {
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc,
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ FIFOLDST_VLF);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
+
+/**
+ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ if (geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
+
+/**
+ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with HW-generated initialization
+ * vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off)
+{
+ u32 geniv, moveiv;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ if (is_rfc3686)
+ goto copy_iv;
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+copy_iv:
+ /* Copy IV to class 1 context */
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+
+ /* Return to encryption */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Copy iv from outfifo to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead givenc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
+
+/**
+ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
+ *zero_assoc_jump_cmd2;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD | JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* jump over the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ /* There is no input data */
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+ /* write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
+
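+/*
+ * Illustrative usage sketch (not part of the driver sources): a caller such
+ * as caamalg.c fills a struct alginfo with the GCM key details and constructs
+ * the descriptor into a buffer sized from DESC_GCM_ENC_LEN; ctx, jrdev,
+ * sh_desc_enc and authsize are assumed caller-side names.
+ *
+ *	struct alginfo cdata = { 0 };
+ *
+ *	cdata.algtype = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
+ *			OP_ALG_AAI_GCM;
+ *	cdata.key_virt = ctx->key;
+ *	cdata.keylen = ctx->keylen;
+ *	cdata.key_inline = true;	(or set cdata.key_dma instead)
+ *
+ *	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &cdata, ctx->authsize);
+ *	ctx->sh_desc_enc_dma = dma_map_single(jrdev, ctx->sh_desc_enc,
+ *					      desc_bytes(ctx->sh_desc_enc),
+ *					      DMA_TO_DEVICE);
+ *
+ * The decapsulation side follows the same pattern with
+ * cnstr_shdsc_gcm_decap() below.
+ */
+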
+/**
+ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* store encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* zero-payload command */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
+
+/**
+ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
+
+/**
+ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read encrypted data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
+
+/**
+ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read and write assoclen + cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
+
+/**
+ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* In-snoop assoclen + cryptlen data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/**
+ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Load iv */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+
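+/*
+ * Illustrative sketch (not part of the driver sources): for rfc3686(ctr(aes))
+ * the descriptors above and below expect the 4-byte nonce to follow the AES
+ * key in the buffer behind cdata->key_virt, with cdata->keylen covering the
+ * key only (see "nonce = cdata->key_virt + cdata->keylen" above).  A
+ * hypothetical setkey path could therefore do:
+ *
+ *	keylen -= CTR_RFC3686_NONCE_SIZE;
+ *	memcpy(ctx->key, key, keylen + CTR_RFC3686_NONCE_SIZE);
+ *	cdata.key_virt = ctx->key;
+ *	cdata.keylen = keylen;
+ *	cdata.key_inline = true;
+ *	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &cdata, ivsize,
+ *				     true, ctx1_iv_off);
+ *
+ * ctx, key, keylen, ivsize and ctx1_iv_off are assumed caller-side values.
+ */
+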
+/**
+ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* load IV */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+
+/**
+ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
+ * with HW-generated initialization vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd, geniv;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
+ (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to memory */
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ if (ctx1_iv_off)
+ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 keys only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size at CONTEXT1 offset 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+
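+/*
+ * Illustrative note (not part of the driver sources): from the caller's side
+ * the 16-byte XTS IV supplied with the request maps onto CONTEXT1 as follows:
+ *
+ *	IV bytes 0..7   -> CONTEXT1 offset 0x20 (used as the sector index)
+ *	IV bytes 8..15  -> skipped by the SEQ FIFO LOAD
+ *	sector size 512 -> preloaded at CONTEXT1 offset 0x28 (big-endian)
+ */
+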
+/**
+ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size at CONTEXT1 offset 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM descriptor support");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 000000000000..95551737333a
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,97 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+
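+/*
+ * Illustrative usage sketch (not part of the original header): a caller
+ * typically fills one alginfo for the block cipher and one for the
+ * (MDHA split key) authentication algorithm, then builds the three AEAD
+ * shared descriptors; ctx, ivsize, geniv, is_rfc3686, nonce and ctx1_iv_off
+ * are assumed caller-side values.
+ *
+ *	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ *			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off);
+ *	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+ *			       ivsize, ctx->authsize, geniv, is_rfc3686,
+ *			       nonce, ctx1_iv_off);
+ *	cnstr_shdsc_aead_givencap(ctx->sh_desc_givenc, &ctx->cdata,
+ *				  &ctx->adata, ivsize, ctx->authsize,
+ *				  is_rfc3686, nonce, ctx1_iv_off);
+ */
+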
+#endif /* _CAAMALG_DESC_H_ */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 660dc206969f..e58639ea53b1 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,7 +72,7 @@
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
/* length of descriptors text */
-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
@@ -103,20 +103,15 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
- dma_addr_t sh_desc_finup_dma;
struct device *jrdev;
- u32 alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
dma_addr_t key_dma;
int ctx_len;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
};
/* ahash state */
@@ -222,89 +217,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
return 0;
}
-/* Common shared descriptor commands */
-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-}
-
-/* Append key if it has been set */
-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- u32 *key_jump_cmd;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- if (ctx->split_key_len) {
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_ahash(desc, ctx);
-
- set_jump_tgt_here(desc, key_jump_cmd);
- }
-
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
+ * For ahash update, final and finup (import_ctx = true)
+ * import context, read and write to seqout
+ * For ahash update_first and digest (import_ctx = false)
+ * read and write to seqout
*/
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+ struct caam_hash_ctx *ctx, bool import_ctx)
{
- /* Calculate remaining bytes to read */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
+ u32 op = ctx->adata.algtype;
+ u32 *skip_key_load;
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize,
- struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* Append key if it has been set; ahash update excluded */
+ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
+ set_jump_tgt_here(desc, skip_key_load);
- /*
- * Load from buf and/or src and write to req->result or state->context
- */
- ahash_append_load_str(desc, digestsize);
-}
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize, struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
/* Class 2 operation */
append_operation(desc, op | state | OP_ALG_ENCRYPT);
/*
* Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
*/
- ahash_append_load_str(desc, digestsize);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
@@ -312,28 +272,11 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
- u32 have_key = 0;
u32 *desc;
- if (ctx->split_key_len)
- have_key = OP_ALG_AAI_HMAC_PRECOMP;
-
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
-
- /* Class 2 operation */
- append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
- OP_ALG_ENCRYPT);
-
- /* Load data and write to result or context */
- ahash_append_load_str(desc, ctx->ctx_len);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
@@ -348,10 +291,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
- ctx->ctx_len, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -367,10 +307,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
@@ -383,30 +320,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
desc_bytes(desc), 1);
#endif
- /* ahash_finup shared descriptor */
- desc = ctx->sh_desc_finup;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
- ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
- digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -424,14 +340,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 keylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, keylen,
- ctx->alg_op);
-}
-
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
@@ -467,7 +375,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Job descriptor to perform unkeyed hash on key_in */
- append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
@@ -511,8 +419,6 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
@@ -537,23 +443,12 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
-#ifdef DEBUG
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
-
- ret = gen_split_hash_key(ctx, key, keylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
+ CAAM_MAX_HASH_KEY_SIZE);
if (ret)
goto bad_free_key;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -563,14 +458,15 @@ static int ahash_setkey(struct crypto_ahash *ahash,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
+ ctx->adata.keylen_pad, 1);
#endif
ret = ahash_set_sh_desc(ahash);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
}
+
error_free_key:
kfree(hashed_key);
return ret;
@@ -639,8 +535,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -674,8 +569,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -709,8 +603,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -744,8 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -1078,7 +970,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
/* allocate space for base edesc and hw desc commands, link tables */
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -1683,7 +1575,6 @@ struct caam_hash_template {
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
- u32 alg_op;
};
/* ahash descriptors */
@@ -1709,7 +1600,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
@@ -1731,7 +1621,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
@@ -1753,7 +1642,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
@@ -1775,7 +1663,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
@@ -1797,7 +1684,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
}, {
.name = "md5",
.driver_name = "md5-caam",
@@ -1819,14 +1705,12 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
};
struct caam_hash_alg {
struct list_head entry;
int alg_type;
- int alg_op;
struct ahash_alg ahash_alg;
};
@@ -1859,10 +1743,10 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
/* copy descriptor header template value */
- ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -1893,10 +1777,6 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
desc_bytes(ctx->sh_desc_digest),
DMA_TO_DEVICE);
- if (ctx->sh_desc_finup_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
- desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
}
@@ -1956,7 +1836,6 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
- t_alg->alg_op = template->alg_op;
return t_alg;
}
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 851015e652b8..32100c4851dd 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -395,7 +395,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -441,7 +441,7 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 9b92af2c7241..41398da3edf4 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -52,7 +52,7 @@
/* length of descriptors */
#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
+#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
struct buf_data {
@@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
struct buf_data *bd;
- bd = (struct buf_data *)((char *)desc -
- offsetof(struct buf_data, hw_desc));
+ bd = container_of(desc, struct buf_data, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
init_sh_desc(desc, HDR_SHARE_SERIAL);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
/* Generate random bytes */
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
@@ -351,7 +347,7 @@ static int __init caam_rng_init(void)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
err = -ENOMEM;
goto free_caam_alloc;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e483b78c6343..755109841cfd 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -330,8 +330,8 @@ static int caam_remove(struct platform_device *pdev)
clk_disable_unprepare(ctrlpriv->caam_ipg);
clk_disable_unprepare(ctrlpriv->caam_mem);
clk_disable_unprepare(ctrlpriv->caam_aclk);
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
return 0;
}
@@ -365,11 +365,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
*/
val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
>> RTSDCTL_ENT_DLY_SHIFT;
- if (ent_delay <= val) {
- /* put RNG4 into run mode */
- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
- return;
- }
+ if (ent_delay <= val)
+ goto start_rng;
val = rd_reg32(&r4tst->rtsdctl);
val = (val & ~RTSDCTL_ENT_DLY_MASK) |
@@ -381,15 +378,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
/* read the control register */
val = rd_reg32(&r4tst->rtmctl);
+start_rng:
/*
* select raw sampling in both entropy shifter
- * and statistical checker
+ * and statistical checker; put RNG4 into run mode
*/
- clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
- /* put RNG4 into run mode */
- clrsetbits_32(&val, RTMCTL_PRGM, 0);
- /* write back the control register */
- wr_reg32(&r4tst->rtmctl, val);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
/**
@@ -482,14 +476,16 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_aclk = clk;
- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
- return ret;
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_emi_slow = clk;
}
- ctrlpriv->caam_emi_slow = clk;
ret = clk_prepare_enable(ctrlpriv->caam_ipg);
if (ret < 0) {
@@ -510,11 +506,13 @@ static int caam_probe(struct platform_device *pdev)
goto disable_caam_mem;
}
- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
- ret);
- goto disable_caam_aclk;
+ if (ctrlpriv->caam_emi_slow) {
+ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+ ret);
+ goto disable_caam_aclk;
+ }
}
/* Get configuration properties from device tree */
@@ -541,13 +539,13 @@ static int caam_probe(struct platform_device *pdev)
else
BLOCK_OFFSET = PG_SIZE_64K;
- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
- ctrlpriv->assure = (struct caam_assurance __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+ ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
);
- ctrlpriv->deco = (struct caam_deco __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->deco = (struct caam_deco __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * DECO_BLOCK_NUMBER
);
@@ -627,8 +625,8 @@ static int caam_probe(struct platform_device *pdev)
ring);
continue;
}
- ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
@@ -641,8 +639,8 @@ static int caam_probe(struct platform_device *pdev)
!!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
- ctrlpriv->qi = (struct caam_queue_if __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
);
/* This is all that's required to physically enable QI */
@@ -800,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
&caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
- ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IRUSR |
@@ -808,7 +806,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
- ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IRUSR |
@@ -816,7 +814,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
- ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IRUSR |
@@ -833,7 +831,8 @@ caam_remove:
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 513b6646bb36..2e6766a1573f 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -22,12 +22,6 @@
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
#define SEC4_SG_OFFSET_MASK 0x00001fff
-struct sec4_sg_entry {
- u64 ptr;
- u32 len;
- u32 bpid_offset;
-};
-
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
#define MAX_CAAM_DESCSIZE 64
@@ -90,8 +84,8 @@ struct sec4_sg_entry {
#define HDR_ZRO 0x00008000
/* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK 0x3f
#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
/* If shared descriptor header, 6-bit length */
#define HDR_DESCLEN_SHR_MASK 0x3f
@@ -121,10 +115,10 @@ struct sec4_sg_entry {
#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK 0x03
#define HDR_SD_SHARE_SHIFT 8
-#define HDR_JD_SHARE_MASK 0x07
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
@@ -235,7 +229,7 @@ struct sec4_sg_entry {
#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
@@ -400,7 +394,7 @@ struct sec4_sg_entry {
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
@@ -1107,8 +1101,8 @@ struct sec4_sg_entry {
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
-#define OP_ALG_TYPE_CLASS1 2
-#define OP_ALG_TYPE_CLASS2 4
+#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
@@ -1249,7 +1243,7 @@ struct sec4_sg_entry {
#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
#define OP_ALG_PKMODE_DST_REG_SHIFT 10
#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
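
A quick standalone check of the shifted-mask convention adopted in desc.h above: with the shift folded into the mask, a header word's start index can be inserted and extracted with the same two macros and no ad-hoc shifting at each call site. The macro values are copied from the hunk; the small program around them is only an illustration.

#include <stdio.h>

#define HDR_START_IDX_SHIFT     16
#define HDR_START_IDX_MASK      (0x3f << HDR_START_IDX_SHIFT)

int main(void)
{
        unsigned int hdr = 0;

        /* insert a start index of 5 into the header word */
        hdr |= (5 << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;

        /* extract it again: prints 5 */
        printf("start idx = %u\n",
               (hdr & HDR_START_IDX_MASK) >> HDR_START_IDX_SHIFT);
        return 0;
}
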
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index a8cd8a78ec1f..b9c8d98ef826 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -33,38 +33,39 @@
extern bool caam_little_end;
-static inline int desc_len(u32 *desc)
+static inline int desc_len(u32 * const desc)
{
return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
}
-static inline int desc_bytes(void *desc)
+static inline int desc_bytes(void * const desc)
{
return desc_len(desc) * CAAM_CMD_SZ;
}
-static inline u32 *desc_end(u32 *desc)
+static inline u32 *desc_end(u32 * const desc)
{
return desc + desc_len(desc);
}
-static inline void *sh_desc_pdb(u32 *desc)
+static inline void *sh_desc_pdb(u32 * const desc)
{
return desc + 1;
}
-static inline void init_desc(u32 *desc, u32 options)
+static inline void init_desc(u32 * const desc, u32 options)
{
*desc = cpu_to_caam32((options | HDR_ONE) + 1);
}
-static inline void init_sh_desc(u32 *desc, u32 options)
+static inline void init_sh_desc(u32 * const desc, u32 options)
{
PRINT_POS;
init_desc(desc, CMD_SHARED_DESC_HDR | options);
}
-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
@@ -72,19 +73,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
options);
}
-static inline void init_job_desc(u32 *desc, u32 options)
+static inline void init_job_desc(u32 * const desc, u32 options)
{
init_desc(desc, CMD_DESC_HDR | options);
}
-static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_job_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
}
-static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
@@ -94,8 +96,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
CAAM_PTR_SZ / CAAM_CMD_SZ);
}
-static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
- u32 options)
+static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
+ int len, u32 options)
{
PRINT_POS;
init_job_desc(desc, HDR_SHARED | options |
@@ -103,7 +105,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 *desc, void *data, int len)
+static inline void append_data(u32 * const desc, void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -114,7 +116,7 @@ static inline void append_data(u32 *desc, void *data, int len)
(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
}
-static inline void append_cmd(u32 *desc, u32 command)
+static inline void append_cmd(u32 * const desc, u32 command)
{
u32 *cmd = desc_end(desc);
@@ -125,7 +127,7 @@ static inline void append_cmd(u32 *desc, u32 command)
#define append_u32 append_cmd
-static inline void append_u64(u32 *desc, u64 data)
+static inline void append_u64(u32 * const desc, u64 data)
{
u32 *offset = desc_end(desc);
@@ -142,14 +144,14 @@ static inline void append_u64(u32 *desc, u64 data)
}
/* Write command without affecting header, and return pointer to next word */
-static inline u32 *write_cmd(u32 *desc, u32 command)
+static inline u32 *write_cmd(u32 * const desc, u32 command)
{
*desc = cpu_to_caam32(command);
return desc + 1;
}
-static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
u32 command)
{
append_cmd(desc, command | len);
@@ -157,7 +159,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
}
/* Write length after pointer, rather than inside command */
-static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
unsigned int len, u32 command)
{
append_cmd(desc, command);
@@ -166,7 +168,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 *desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -174,7 +176,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
}
#define APPEND_CMD_RET(cmd, op) \
-static inline u32 *append_##cmd(u32 *desc, u32 options) \
+static inline u32 *append_##cmd(u32 * const desc, u32 options) \
{ \
u32 *cmd = desc_end(desc); \
PRINT_POS; \
@@ -184,13 +186,13 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
-static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
(desc_len(desc) - (jump_cmd - desc)));
}
-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
{
u32 val = caam32_to_cpu(*move_cmd);
@@ -200,7 +202,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
}
#define APPEND_CMD(cmd, op) \
-static inline void append_##cmd(u32 *desc, u32 options) \
+static inline void append_##cmd(u32 * const desc, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
@@ -208,7 +210,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
APPEND_CMD(operation, OPERATION)
#define APPEND_CMD_LEN(cmd, op) \
-static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+static inline void append_##cmd(u32 * const desc, unsigned int len, \
+ u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
@@ -220,8 +223,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
#define APPEND_CMD_PTR(cmd, op) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
- u32 options) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
@@ -231,8 +234,8 @@ APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
- u32 options)
+static inline void append_store(u32 * const desc, dma_addr_t ptr,
+ unsigned int len, u32 options)
{
u32 cmd_src;
@@ -249,7 +252,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
}
#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
-static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
+ dma_addr_t ptr, \
unsigned int len, \
u32 options) \
{ \
@@ -263,7 +267,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -273,7 +277,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
-static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -287,7 +291,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
* the size of its type
*/
#define APPEND_CMD_PTR_LEN(cmd, op, type) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
type len, u32 options) \
{ \
PRINT_POS; \
@@ -304,7 +308,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -315,7 +319,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
APPEND_CMD_PTR_TO_IMM2(key, KEY);
#define APPEND_CMD_RAW_IMM(cmd, op, type) \
-static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
@@ -426,3 +430,64 @@ do { \
APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_inline
+ * is true, dma (bus) address if key_inline is false.
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ * referenced by the descriptor
+ */
+struct alginfo {
+ u32 algtype;
+ unsigned int keylen;
+ unsigned int keylen_pad;
+ union {
+ dma_addr_t key_dma;
+ void *key_virt;
+ };
+ bool key_inline;
+};
+
+/**
+ * desc_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define mentioning
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items trying to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int desc_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len, unsigned int *data_len,
+ u32 *inl_mask, unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
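
A minimal userspace sketch of the budgeting rule desc_inline_query() implements: walk the data items in order, inline an item only while the remaining descriptor space still covers it plus one pointer for every item left, otherwise charge a pointer instead. DESC_BYTES_MAX and PTR_SZ below are illustrative assumptions, not the driver's constants.

#include <stdio.h>

#define DESC_BYTES_MAX  256     /* assumed: 64 words x 4 bytes */
#define PTR_SZ          8       /* assumed 64-bit DMA pointer */

static int inline_query(unsigned int sd_base_len, unsigned int jd_len,
                        const unsigned int *data_len, unsigned int *inl_mask,
                        unsigned int count)
{
        int rem = (int)(DESC_BYTES_MAX - sd_base_len - jd_len);
        unsigned int i;

        *inl_mask = 0;
        for (i = 0; i < count && rem > 0; i++) {
                if (rem - (int)(data_len[i] + (count - i - 1) * PTR_SZ) >= 0) {
                        rem -= data_len[i];     /* item i fits inline */
                        *inl_mask |= 1u << i;
                } else {
                        rem -= PTR_SZ;          /* item i stays referenced */
                }
        }
        return rem >= 0 ? 0 : -1;
}

int main(void)
{
        /* e.g. an authentication split key and a cipher key */
        unsigned int lens[2] = { 64, 32 };
        unsigned int mask;

        if (!inline_query(72, 48, lens, &mask, 2))
                printf("inline mask: 0x%x\n", mask); /* bit i set => item i inlined */
        return 0;
}

With a 72-byte shared-descriptor base, a 48-byte job descriptor and item lengths {64, 32}, both items fit and the mask comes back as 0x3.
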
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 33e41ea83fcc..79a0cc70717f 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -146,10 +146,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
strlen(rng_err_id_list[err_id])) {
/* RNG-only error */
err_str = rng_err_id_list[err_id];
- } else if (err_id < ARRAY_SIZE(err_id_list))
+ } else {
err_str = err_id_list[err_id];
- else
- snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+ }
/*
* CCB ICV check failures are part of normal operation life;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c05074a5c..e2bcacc1a921 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
+ struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f9953d..c8604dfadbf5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
ret = caam_reset_hw_jr(dev);
+ tasklet_kill(&jrp->irqtask);
+
/* Release interrupt */
free_irq(jrp->irq, dev);
@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/*
* Check the output ring for ready responses, kick
- * the threaded irq if jobs done.
+ * tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
- return IRQ_WAKE_THREAD;
+ preempt_disable();
+ tasklet_schedule(&jrp->irqtask);
+ preempt_enable();
+
+ return IRQ_HANDLED;
}
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = st_dev;
+ struct device *dev = (struct device *)devarg;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
- return IRQ_HANDLED;
}
/**
@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
/* Connect job ring interrupt handler. */
- error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
- caam_jr_threadirq, IRQF_SHARED,
- dev_name(dev), dev);
+ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@ out_free_inpring:
out_free_irq:
free_irq(jrp->irq, dev);
out_kill_deq:
+ tasklet_kill(&jrp->irqtask);
return error;
}
@@ -489,7 +496,7 @@ static int caam_jr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
+ jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
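
The jr.c hunks above replace the threaded IRQ with a hardirq-plus-tasklet split. A minimal sketch of that pattern for a hypothetical ring driver (not the caam code): the hard handler only acknowledges the hardware and schedules the tasklet, the tasklet drains the ring in softirq context, and teardown kills the tasklet once no new schedules can arrive.

#include <linux/interrupt.h>

struct my_ring {
        int irq;
        struct tasklet_struct irqtask;
};

/* softirq half: walk the output ring, run user callbacks, unmask IRQs */
static void my_ring_dequeue(unsigned long data)
{
        struct my_ring *r = (struct my_ring *)data;

        /* ... drain completed jobs for r here ... */
        (void)r;
}

/* hardirq half: ack the hardware, defer the real work */
static irqreturn_t my_ring_isr(int irq, void *dev_id)
{
        struct my_ring *r = dev_id;

        tasklet_schedule(&r->irqtask);
        return IRQ_HANDLED;
}

static int my_ring_init(struct my_ring *r)
{
        tasklet_init(&r->irqtask, my_ring_dequeue, (unsigned long)r);
        return request_irq(r->irq, my_ring_isr, IRQF_SHARED, "my_ring", r);
}

static void my_ring_exit(struct my_ring *r)
{
        free_irq(r->irq, r);
        tasklet_kill(&r->irqtask);
}
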
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4ff9762..1bb2816a9b4d 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -10,6 +10,36 @@
#include "desc_constr.h"
#include "key_gen.h"
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
@@ -41,15 +71,29 @@ Split key generation-----------------------------------------------
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op)
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen)
{
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
int ret = -ENOMEM;
+ adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
+ adata->keylen_pad = split_key_pad_len(adata->algtype &
+ OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ adata->keylen, adata->keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+#endif
+
+ if (adata->keylen_pad > max_keylen)
+ return -EINVAL;
+
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
@@ -63,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
goto out_free;
}
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_out)) {
dev_err(jrdev, "unable to map key output memory\n");
@@ -74,7 +118,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
- append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+ OP_ALG_AS_INIT);
/*
* do a FIFO_LOAD of zero, this will trigger the internal key expansion
@@ -87,7 +133,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- append_fifo_store(desc, dma_addr_out, split_key_len,
+ append_fifo_store(desc, dma_addr_out, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
#ifdef DEBUG
@@ -108,11 +154,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_out,
- split_key_pad_len, 1);
+ adata->keylen_pad, 1);
#endif
}
- dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
DMA_FROM_DEVICE);
out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
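
A standalone check of the split key sizing introduced above: the MDHA split key is the pair of padded inner and outer hash states, so its raw length is twice the per-algorithm pad size and the padded length rounds that up to a 16-byte boundary. The table mirrors mdpadlen[] from the patch; the loop and names are only for illustration.

#include <stdio.h>

#define ALIGN16(x)      (((x) + 15) & ~15u)

int main(void)
{
        static const unsigned int mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        static const char *name[] = { "md5", "sha1", "sha224", "sha256",
                                      "sha384", "sha512" };
        unsigned int i;

        for (i = 0; i < 6; i++)
                printf("%-6s split key %3u bytes, padded %3u\n", name[i],
                       mdpadlen[i] * 2, ALIGN16(mdpadlen[i] * 2));
        return 0;
}

For SHA-1, for example, this gives a 40-byte split key padded to 48 bytes; for SHA-256, 64 bytes padded to 64.
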
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index c5588f6d8109..4628f389eb64 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@ struct split_key_result {
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op);
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen);
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 41cd5a356d05..6afa20c4a013 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -7,7 +7,11 @@
#include "regs.h"
-struct sec4_sg_entry;
+struct sec4_sg_entry {
+ u64 ptr;
+ u32 len;
+ u32 bpid_offset;
+};
/*
* convert single dma address to h/w link table format
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbacc6161..7bc09989e18a 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -404,10 +404,6 @@ static int ccp_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->sb_queue);
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Starting threads...\n");
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index faf3cb3ddce2..e2ce8190ecc9 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -21,6 +21,12 @@
#include "ccp-dev.h"
+/* Allocate the requested number of contiguous LSB slots
+ * from the LSB bitmap. Look in the private range for this
+ * queue first; failing that, check the public area.
+ * If no space is available, wait around.
+ * Return: first slot number
+ */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
struct ccp_device *ccp;
@@ -50,7 +56,7 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
bitmap_set(ccp->lsbmap, start, count);
mutex_unlock(&ccp->sb_mutex);
- return start * LSB_ITEM_SIZE;
+ return start;
}
ccp->sb_avail = 0;
@@ -63,17 +69,18 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
}
}
+/* Free a number of LSB slots from the bitmap, starting at
+ * the indicated starting slot number.
+ */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
unsigned int count)
{
- int lsbno = start / LSB_SIZE;
-
if (!start)
return;
- if (cmd_q->lsb == lsbno) {
+ if (cmd_q->lsb == start) {
/* An entry from the private LSB */
- bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ bitmap_clear(cmd_q->lsbmap, start, count);
} else {
/* From the shared LSBs */
struct ccp_device *ccp = cmd_q->ccp;
@@ -396,7 +403,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
- CCP_RSA_SIZE(&function) = op->u.rsa.mod_size;
+ CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;
CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -411,10 +418,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
- /* Key (Exponent) is in external memory */
- CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
- CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
- CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+ /* Exponent is in LSB memory */
+ CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
+ CCP5_CMD_KEY_HI(&desc) = 0;
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
return ccp5_do_cmd(&desc, op->cmd_q);
}
@@ -751,9 +758,6 @@ static int ccp5_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queue used to suspend */
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Loading LSB map...\n");
/* Copy the private LSB mask to the public registers */
status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
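
A toy userspace model of the unit change in ccp_lsb_alloc()/ccp_lsb_free() above: the allocator now deals purely in LSB slot indices, and only the point of use (for example the RSA exponent address in ccp5_perform_rsa()) multiplies by the slot size. NSLOTS and LSB_ITEM_SIZE below are assumed values for illustration, not the driver's constants.

#include <stdio.h>
#include <string.h>

#define NSLOTS          16
#define LSB_ITEM_SIZE   32      /* assumed bytes per LSB slot */

/* return first index of 'count' free contiguous slots, or -1 */
static int lsb_alloc(unsigned char *map, unsigned int count)
{
        unsigned int start, n;

        for (start = 0; start + count <= NSLOTS; start++) {
                for (n = 0; n < count && !map[start + n]; n++)
                        ;
                if (n == count) {
                        memset(map + start, 1, count);
                        return start;
                }
        }
        return -1;
}

int main(void)
{
        unsigned char map[NSLOTS] = { 0 };
        int slot;

        map[0] = 1;                     /* pretend slot 0 is already taken */
        slot = lsb_alloc(map, 2);
        if (slot >= 0)
                printf("slots %d-%d, key offset %d bytes\n",
                       slot, slot + 1, slot * LSB_ITEM_SIZE);
        return 0;
}
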
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cafa633aae10..511ab042b5e7 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -41,7 +41,7 @@ struct ccp_tasklet_data {
};
/* Human-readable error strings */
-char *ccp_error_codes[] = {
+static char *ccp_error_codes[] = {
"",
"ERR 01: ILLEGAL_ENGINE",
"ERR 02: ILLEGAL_KEY_ID",
@@ -478,6 +478,10 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
ccp->sb_count = KSB_COUNT;
ccp->sb_start = 0;
+ /* Initialize the wait queues */
+ init_waitqueue_head(&ccp->sb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
ccp->ord = ccp_increment_unit_ordinal();
snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index da5f4a678083..830f35e6005f 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -278,7 +278,7 @@ struct ccp_cmd_queue {
/* Private LSB that is assigned to this queue, or -1 if none.
* Bitmap for my private LSB, unused otherwise
*/
- unsigned int lsb;
+ int lsb;
DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
/* Queue processing thread */
@@ -515,7 +515,6 @@ struct ccp_op {
struct ccp_passthru_op passthru;
struct ccp_ecc_op ecc;
} u;
- struct ccp_mem key;
};
static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -541,23 +540,23 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
* word 7: upper 16 bits of key pointer; key memory type
*/
struct dword0 {
- __le32 soc:1;
- __le32 ioc:1;
- __le32 rsvd1:1;
- __le32 init:1;
- __le32 eom:1; /* AES/SHA only */
- __le32 function:15;
- __le32 engine:4;
- __le32 prot:1;
- __le32 rsvd2:7;
+ unsigned int soc:1;
+ unsigned int ioc:1;
+ unsigned int rsvd1:1;
+ unsigned int init:1;
+ unsigned int eom:1; /* AES/SHA only */
+ unsigned int function:15;
+ unsigned int engine:4;
+ unsigned int prot:1;
+ unsigned int rsvd2:7;
};
struct dword3 {
- __le32 src_hi:16;
- __le32 src_mem:2;
- __le32 lsb_cxt_id:8;
- __le32 rsvd1:5;
- __le32 fixed:1;
+ unsigned int src_hi:16;
+ unsigned int src_mem:2;
+ unsigned int lsb_cxt_id:8;
+ unsigned int rsvd1:5;
+ unsigned int fixed:1;
};
union dword4 {
@@ -567,18 +566,18 @@ union dword4 {
union dword5 {
struct {
- __le32 dst_hi:16;
- __le32 dst_mem:2;
- __le32 rsvd1:13;
- __le32 fixed:1;
+ unsigned int dst_hi:16;
+ unsigned int dst_mem:2;
+ unsigned int rsvd1:13;
+ unsigned int fixed:1;
} fields;
__le32 sha_len_hi;
};
struct dword7 {
- __le32 key_hi:16;
- __le32 key_mem:2;
- __le32 rsvd1:14;
+ unsigned int key_hi:16;
+ unsigned int key_mem:2;
+ unsigned int rsvd1:14;
};
struct ccp5_desc {
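
The dword0/dword3/dword5/dword7 edits above only change the declared base type of the bitfields: a bitfield has no byte order of its own, so the __le32 annotation was misleading (and trips sparse), while the field widths per word still sum to 32 bits. A small standalone check of that bookkeeping for the dword7 layout (on the usual GCC/Clang ABI the struct stays one 32-bit word):

#include <stdio.h>

struct dword7 {
        unsigned int key_hi:16;
        unsigned int key_mem:2;
        unsigned int rsvd1:14;
};

int main(void)
{
        /* 16 + 2 + 14 = 32 bits, i.e. one descriptor word */
        printf("sizeof(struct dword7) = %zu\n", sizeof(struct dword7));
        return 0;
}
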
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 4ce67fb9a880..3e104f5aa0c2 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_AUTHENC
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 56b153805462..2ed1e24b44a8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -54,6 +54,12 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include "t4fw_api.h"
@@ -62,6 +68,11 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
+static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->aeadctx;
+}
+
static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->ablkctx;
@@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
return ctx->crypto_ctx->hmacctx;
}
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->authenc;
+}
+
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
return ctx->dev->u_ctx;
@@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+ u8 temp[SHA512_DIGEST_SIZE];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int authsize = crypto_aead_authsize(tfm);
+ struct cpl_fw6_pld *fw6_pld;
+ int cmp = 0;
+
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+ (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+ cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+ } else {
+
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+ authsize, req->assoclen +
+ req->cryptlen - authsize);
+ cmp = memcmp(temp, (fw6_pld + 1), authsize);
+ }
+ if (cmp)
+ *err = -EBADMSG;
+ else
+ *err = 0;
+}
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
*/
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
- int error_status)
+ int err)
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
@@ -109,17 +155,33 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
unsigned int digestsize, updated_digestsize;
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ ctx_req.req.aead_req = (struct aead_request *)req;
+ ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
+ dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+ ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
+ if (ctx_req.ctx.reqctx->skb) {
+ kfree_skb(ctx_req.ctx.reqctx->skb);
+ ctx_req.ctx.reqctx->skb = NULL;
+ }
+ if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(ctx_req.req.aead_req, input,
+ &err);
+ ctx_req.ctx.reqctx->verify = VERIFY_HW;
+ }
+ break;
+
case CRYPTO_ALG_TYPE_BLKCIPHER:
ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
ctx_req.ctx.ablk_ctx =
ablkcipher_request_ctx(ctx_req.req.ablk_req);
- if (!error_status) {
+ if (!err) {
fw6_pld = (struct cpl_fw6_pld *)input;
memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
AES_BLOCK_SIZE);
}
dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
- ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+ ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
if (ctx_req.ctx.ablk_ctx->skb) {
kfree_skb(ctx_req.ctx.ablk_ctx->skb);
ctx_req.ctx.ablk_ctx->skb = NULL;
@@ -138,8 +200,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- if (ctx_req.ctx.ahash_ctx->skb)
+ if (ctx_req.ctx.ahash_ctx->skb) {
+ kfree_skb(ctx_req.ctx.ahash_ctx->skb);
ctx_req.ctx.ahash_ctx->skb = NULL;
+ }
if (ctx_req.ctx.ahash_ctx->result == 1) {
ctx_req.ctx.ahash_ctx->result = 0;
memcpy(ctx_req.req.ahash_req->result, input +
@@ -150,11 +214,9 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
sizeof(struct cpl_fw6_pld),
updated_digestsize);
}
- kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
- ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
break;
}
- return 0;
+ return err;
}
/*
@@ -178,40 +240,81 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
return flits + sgl_len(cnt);
}
-static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+static inline void get_aes_decrypt_key(unsigned char *dec_key,
+ const unsigned char *key,
+ unsigned int keylength)
+{
+ u32 temp;
+ u32 w_ring[MAX_NK];
+ int i, j, k;
+ u8 nr, nk;
+
+ switch (keylength) {
+ case AES_KEYLENGTH_128BIT:
+ nk = KEYLENGTH_4BYTES;
+ nr = NUMBER_OF_ROUNDS_10;
+ break;
+ case AES_KEYLENGTH_192BIT:
+ nk = KEYLENGTH_6BYTES;
+ nr = NUMBER_OF_ROUNDS_12;
+ break;
+ case AES_KEYLENGTH_256BIT:
+ nk = KEYLENGTH_8BYTES;
+ nr = NUMBER_OF_ROUNDS_14;
+ break;
+ default:
+ return;
+ }
+ for (i = 0; i < nk; i++)
+ w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+ i = 0;
+ temp = w_ring[nk - 1];
+ while (i + nk < (nr + 1) * 4) {
+ if (!(i % nk)) {
+ /* RotWord(temp) */
+ temp = (temp << 8) | (temp >> 24);
+ temp = aes_ks_subword(temp);
+ temp ^= round_constant[i / nk];
+ } else if (nk == 8 && (i % 4 == 0)) {
+ temp = aes_ks_subword(temp);
+ }
+ w_ring[i % nk] ^= temp;
+ temp = w_ring[i % nk];
+ i++;
+ }
+ i--;
+ for (k = 0, j = i % nk; k < nk; k++) {
+ *((u32 *)dec_key + k) = htonl(w_ring[j]);
+ j--;
+ if (j < 0)
+ j += nk;
+ }
+}
+
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
struct crypto_shash *base_hash = NULL;
- struct shash_desc *desc;
switch (ds) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha1", 0, 0);
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha224", 0, 0);
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha256", 0, 0);
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha384", 0, 0);
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha512", 0, 0);
break;
}
- if (IS_ERR(base_hash)) {
- pr_err("Can not allocate sha-generic algo.\n");
- return (void *)base_hash;
- }
- desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
- GFP_KERNEL);
- if (!desc)
- return ERR_PTR(-ENOMEM);
- desc->tfm = base_hash;
- desc->flags = crypto_shash_get_flags(base_hash);
- return desc;
+ return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
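
The chcr_alloc_shash() change above drops the "-generic" suffix so the best available sha* implementation is picked, and the later chcr_ahash_setkey() hunk replaces the heap-allocated shash_desc with SHASH_DESC_ON_STACK. A minimal, self-contained sketch of that calling pattern for a hypothetical helper (not chcr code), assuming the 4.10-era shash API:

#include <linux/err.h>
#include <crypto/hash.h>

/* one-shot digest helper: allocate a sync hash, run it, free it */
static int demo_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = crypto_shash_get_flags(tfm);
                err = crypto_shash_digest(desc, data, len, out);
                shash_desc_zero(desc);
        }

        crypto_free_shash(tfm);
        return err;
}
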
@@ -279,31 +382,18 @@ static inline int is_hmac(struct crypto_tfm *tfm)
struct chcr_alg_template *chcr_crypto_alg =
container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
alg.hash);
- if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
- CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+ if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
return 1;
return 0;
}
-static inline unsigned int ch_nents(struct scatterlist *sg,
- unsigned int *total_size)
-{
- unsigned int nents;
-
- for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
- nents++;
- *total_size += sg->length;
- }
- return nents;
-}
-
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
struct scatterlist *sg,
struct phys_sge_parm *sg_param)
{
struct phys_sge_pairs *to;
- unsigned int out_buf_size = sg_param->obsize;
- unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+ int out_buf_size = sg_param->obsize;
+ unsigned int nents = sg_param->nents, i, j = 0;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -321,25 +411,24 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
sizeof(struct cpl_rx_phys_dsgl));
for (i = 0; nents; to++) {
- for (j = i; (nents && (j < (8 + i))); j++, nents--) {
- to->len[j] = htons(sg->length);
+ for (j = 0; j < 8 && nents; j++, nents--) {
+ out_buf_size -= sg_dma_len(sg);
+ to->len[j] = htons(sg_dma_len(sg));
to->addr[j] = cpu_to_be64(sg_dma_address(sg));
- if (out_buf_size) {
- if (tot_len + sg_dma_len(sg) >= out_buf_size) {
- to->len[j] = htons(out_buf_size -
- tot_len);
- return;
- }
- tot_len += sg_dma_len(sg);
- }
sg = sg_next(sg);
}
}
+ if (out_buf_size) {
+ j--;
+ to--;
+ to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
+ }
}
-static inline unsigned
-int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg, struct phys_sge_parm *sg_param)
+static inline int map_writesg_phys_cpl(struct device *dev,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+ struct phys_sge_parm *sg_param)
{
if (!sg || !sg_param->nents)
return 0;
@@ -353,6 +442,14 @@ int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
return 0;
}
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -362,8 +459,23 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
+static inline void write_buffer_to_skb(struct sk_buff *skb,
+ unsigned int *frags,
+ char *bfr,
+ u8 bfr_len)
+{
+ skb->len += bfr_len;
+ skb->data_len += bfr_len;
+ skb->truesize += bfr_len;
+ get_page(virt_to_page(bfr));
+ skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+ offset_in_page(bfr), bfr_len);
+ (*frags)++;
+}
+
+
static inline void
-write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
struct scatterlist *sg, unsigned int count)
{
struct page *spage;
@@ -372,8 +484,9 @@ write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
skb->len += count;
skb->data_len += count;
skb->truesize += count;
+
while (count > 0) {
- if (sg && (!(sg->length)))
+ if (!sg || (!(sg->length)))
break;
spage = sg_page(sg);
get_page(spage);
@@ -389,29 +502,25 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
struct _key_ctx *key_ctx)
{
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- get_aes_decrypt_key(key_ctx->key, ablkctx->key,
- ablkctx->enckey_len << 3);
- memset(key_ctx->key + ablkctx->enckey_len, 0,
- CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+ memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
} else {
memcpy(key_ctx->key,
ablkctx->key + (ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
- ablkctx->key, ablkctx->enckey_len << 2);
+ memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->rrkey, ablkctx->enckey_len >> 1);
}
return 0;
}
static inline void create_wreq(struct chcr_context *ctx,
- struct fw_crypto_lookaside_wr *wreq,
+ struct chcr_wr *chcr_req,
void *req, struct sk_buff *skb,
int kctx_len, int hash_sz,
- unsigned int phys_dsgl)
+ int is_iv,
+ unsigned int sc_len)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
- struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
unsigned int immdatalen = 0, nr_frags = 0;
@@ -423,27 +532,27 @@ static inline void create_wreq(struct chcr_context *ctx,
nr_frags = skb_shinfo(skb)->nr_frags;
}
- wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- (kctx_len >> 4));
- wreq->pld_size_hash_size =
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+ ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.pld_size_hash_size =
htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
- wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+ chcr_req->wreq.len16_pkd =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
(calc_tx_flits_ofld(skb) * 8), 16)));
- wreq->cookie = cpu_to_be64((uintptr_t)req);
- wreq->rx_chid_to_rx_q_id =
+ chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+ chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
- (hash_sz) ? IV_NOP : iv_loc);
+ is_iv ? iv_loc : IV_NOP);
- ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
- ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(*wreq)) >> 4)));
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+ 16) - ((sizeof(chcr_req->wreq)) >> 4)));
- sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
- sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
- ((hash_sz) ? DUMMY_BYTES :
- (sizeof(struct cpl_rx_phys_dsgl) +
- phys_dsgl)) + immdatalen);
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(chcr_req->key_ctx) +
+ kctx_len + sc_len + immdatalen);
}
/**
@@ -454,86 +563,83 @@ static inline void create_wreq(struct chcr_context *ctx,
* @op_type: encryption or decryption
*/
static struct sk_buff
-*create_cipher_wr(struct crypto_async_request *req_base,
- struct chcr_context *ctx, unsigned short qid,
+*create_cipher_wr(struct ablkcipher_request *req,
+ unsigned short qid,
unsigned short op_type)
{
- struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+ unsigned int frags = 0, transhdr_len, phys_dsgl;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
if (!req->info)
return ERR_PTR(-EINVAL);
- ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
- ablkctx->enc = op_type;
-
+ reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AES:Invalid Destination sg lists\n");
+ return ERR_PTR(-EINVAL);
+ }
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+ (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+ pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+ ablkctx->enckey_len, req->nbytes, ivsize);
return ERR_PTR(-EINVAL);
+ }
- phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+ phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
- kctx_len = sizeof(*key_ctx) +
- (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
-
- sec_cpl->pldlen = htonl(ivsize + req->nbytes);
- sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
- ivsize + 1, 0);
-
- sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
- 0, 0);
- sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
+
+ chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1, 1);
- sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+ 0, 0, ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
0, 1, phys_dsgl);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+ chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if (op_type == CHCR_DECRYPT_OP) {
- if (generate_copy_rrkey(ablkctx, key_ctx))
- goto map_fail1;
+ generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key, ablkctx->key,
+ ablkctx->enckey_len);
} else {
- memcpy(key_ctx->key, ablkctx->key +
+ memcpy(chcr_req->key_ctx.key, ablkctx->key +
(ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- memcpy(key_ctx->key +
+ memcpy(chcr_req->key_ctx.key +
(ablkctx->enckey_len >> 1),
ablkctx->key,
ablkctx->enckey_len >> 1);
}
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-
- memcpy(ablkctx->iv, req->info, ivsize);
- sg_init_table(&ablkctx->iv_sg, 1);
- sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
- sg_param.nents = ablkctx->dst_nents;
- sg_param.obsize = dst_bufsize;
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->nbytes;
sg_param.qid = qid;
sg_param.align = 1;
if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
@@ -541,10 +647,12 @@ static struct sk_buff
goto map_fail1;
skb_set_transport_header(skb, transhdr_len);
- write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
- write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
- create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
- req_ctx->skb = skb;
+ memcpy(reqctx->iv, req->info, ivsize);
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+ reqctx->skb = skb;
skb_get(skb);
return skb;
map_fail1:
@@ -557,15 +665,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
unsigned int ck_size, context_size;
u16 alignment = 0;
- if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
- goto badkey_err;
-
- memcpy(ablkctx->key, key, keylen);
- ablkctx->enckey_len = keylen;
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
@@ -576,7 +678,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
} else {
goto badkey_err;
}
-
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
keylen + alignment) >> 4;
@@ -612,7 +716,6 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -622,8 +725,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx,
- u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -639,7 +741,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -649,7 +750,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -729,50 +830,33 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline int
-write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
- struct sk_buff *skb, unsigned int *frags, char *bfr,
- u8 bfr_len)
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
- void *page_ptr = NULL;
-
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
- if (!page_ptr)
- return -ENOMEM;
- get_page(virt_to_page(page_ptr));
- req_ctx->dummy_payload_ptr = page_ptr;
- memcpy(page_ptr, bfr, bfr_len);
- skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
- offset_in_page(page_ptr), bfr_len);
- (*frags)++;
- return 0;
+ crypto_free_shash(base_hash);
}
/**
- * create_final_hash_wr - Create hash work request
+ * create_hash_wr - Create hash work request
* @req - Cipher req base
*/
-static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
- struct hash_wr_param *param)
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
+ struct hash_wr_param *param)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = sizeof(*key_ctx);
+ unsigned int kctx_len = 0;
u8 hash_size_in_response = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
- kctx_len += param->alg_prm.result_size + iopad_alignment;
+ kctx_len = param->alg_prm.result_size + iopad_alignment;
if (param->opad_needed)
kctx_len += param->alg_prm.result_size + iopad_alignment;
@@ -781,54 +865,54 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return skb;
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
- memset(wreq, 0, transhdr_len);
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
- sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+ chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
- sec_cpl->aadstart_cipherstop_hi =
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
- sec_cpl->cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
- sec_cpl->seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
- param->opad_needed, 0, 0);
+ param->opad_needed, 0);
- sec_cpl->ivgen_hdrlen =
+ chcr_req->sec_cpl.ivgen_hdrlen =
FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+ memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+ param->alg_prm.result_size);
if (param->opad_needed)
- memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
- CHCR_HASH_MAX_DIGEST_SIZE),
+ memcpy(chcr_req->key_ctx.key +
+ ((param->alg_prm.result_size <= 32) ? 32 :
+ CHCR_HASH_MAX_DIGEST_SIZE),
hmacctx->opad, param->alg_prm.result_size);
- key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+ chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
- (kctx_len >> 4));
- sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+ ((kctx_len +
+ sizeof(chcr_req->key_ctx)) >> 4));
+ chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
skb_set_transport_header(skb, transhdr_len);
if (param->bfr_len != 0)
- write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
- param->bfr_len);
+ write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
+ param->bfr_len);
if (param->sg_len != 0)
- write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+ write_sg_to_skb(skb, &frags, req->src, param->sg_len);
- create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
- 0);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
+ DUMMY_BYTES);
req_ctx->skb = skb;
skb_get(skb);
return skb;
@@ -854,34 +938,40 @@ static int chcr_ahash_update(struct ahash_request *req)
return -EBUSY;
}
- if (nbytes + req_ctx->bfr_len >= bs) {
- remainder = (nbytes + req_ctx->bfr_len) % bs;
- nbytes = nbytes + req_ctx->bfr_len - remainder;
+ if (nbytes + req_ctx->reqlen >= bs) {
+ remainder = (nbytes + req_ctx->reqlen) % bs;
+ nbytes = nbytes + req_ctx->reqlen - remainder;
} else {
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
- req_ctx->bfr_len, nbytes, 0);
- req_ctx->bfr_len += nbytes;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+ + req_ctx->reqlen, nbytes, 0);
+ req_ctx->reqlen += nbytes;
return 0;
}
params.opad_needed = 0;
params.more = 1;
params.last = 0;
- params.sg_len = nbytes - req_ctx->bfr_len;
- params.bfr_len = req_ctx->bfr_len;
+ params.sg_len = nbytes - req_ctx->reqlen;
+ params.bfr_len = req_ctx->reqlen;
params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
- req_ctx->bfr_len = remainder;
- if (remainder)
+ if (remainder) {
+ u8 *temp;
+ /* Swap buffers */
+ temp = req_ctx->reqbfr;
+ req_ctx->reqbfr = req_ctx->skbfr;
+ req_ctx->skbfr = temp;
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- req_ctx->bfr, remainder, req->nbytes -
+ req_ctx->reqbfr, remainder, req->nbytes -
remainder);
+ }
+ req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -917,10 +1007,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.sg_len = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 1;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if (req_ctx->reqlen == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -931,7 +1021,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -963,12 +1056,12 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.opad_needed = 0;
params.sg_len = req->nbytes;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->data_len += params.bfr_len + params.sg_len;
req_ctx->result = 1;
- if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if ((req_ctx->reqlen + req->nbytes) == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -979,9 +1072,10 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -1023,13 +1117,13 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->result = 1;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && req->nbytes == 0) {
- create_last_hash_block(req_ctx->bfr, bs, 0);
+ if (req->nbytes == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, 0);
params.more = 1;
params.bfr_len = bs;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
@@ -1044,12 +1138,12 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = out;
- state->bfr_len = req_ctx->bfr_len;
+ state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
- memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -1057,10 +1151,11 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
- req_ctx->bfr_len = state->bfr_len;
+ req_ctx->reqlen = state->reqlen;
req_ctx->data_len = state->data_len;
- req_ctx->dummy_payload_ptr = NULL;
- memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
+ memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
return 0;
@@ -1075,15 +1170,16 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
- /*
- * use the key to calculate the ipad and opad. ipad will sent with the
+ SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+ /* use the key to calculate the ipad and opad. ipad will be sent with the
* first request's data. opad will be sent with the final hash result
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
- if (!hmacctx->desc)
- return -EINVAL;
+ shash->tfm = hmacctx->base_hash;
+ shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
- err = crypto_shash_digest(hmacctx->desc, key, keylen,
+ err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
if (err)
goto out;
@@ -1104,13 +1200,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+ err = chcr_compute_partial_hash(shash, hmacctx->ipad,
hmacctx->ipad, digestsize);
if (err)
goto out;
chcr_change_order(hmacctx->ipad, updated_digestsize);
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+ err = chcr_compute_partial_hash(shash, hmacctx->opad,
hmacctx->opad, digestsize);
if (err)
goto out;
@@ -1124,28 +1220,29 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- int status = 0;
unsigned short context_size = 0;
- if ((key_len == (AES_KEYSIZE_128 << 1)) ||
- (key_len == (AES_KEYSIZE_256 << 1))) {
- memcpy(ablkctx->key, key, key_len);
- ablkctx->enckey_len = key_len;
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
- FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
- CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
- CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
- CHCR_KEYCTX_NO_KEY, 1,
- 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
- } else {
+ if ((key_len != (AES_KEYSIZE_128 << 1)) &&
+ (key_len != (AES_KEYSIZE_256 << 1))) {
crypto_tfm_set_flags((struct crypto_tfm *)tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
- status = -EINVAL;
+ return -EINVAL;
+
}
- return status;
+
+ memcpy(ablkctx->key, key, key_len);
+ ablkctx->enckey_len = key_len;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+ return 0;
}
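
An XTS key is two AES keys of equal length concatenated (K1 || K2), which is why the setkey above only accepts 32- or 64-byte keys. A minimal usage sketch under that assumption (hypothetical buffer name and error handling, not taken from the patch):

    u8 xts_key[2 * AES_KEYSIZE_256];        /* K1 || K2, 64 bytes total */

    get_random_bytes(xts_key, sizeof(xts_key));
    err = crypto_ablkcipher_setkey(tfm, xts_key, sizeof(xts_key));
    if (err)
            pr_err("xts setkey failed: %d\n", err);
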
static int chcr_sha_init(struct ahash_request *areq)
@@ -1155,8 +1252,9 @@ static int chcr_sha_init(struct ahash_request *areq)
int digestsize = crypto_ahash_digestsize(tfm);
req_ctx->data_len = 0;
- req_ctx->dummy_payload_ptr = NULL;
- req_ctx->bfr_len = 0;
+ req_ctx->reqlen = 0;
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
@@ -1204,29 +1302,1184 @@ static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->desc = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->desc))
- return PTR_ERR(hmacctx->desc);
+ hmacctx->base_hash = chcr_alloc_shash(digestsize);
+ if (IS_ERR(hmacctx->base_hash))
+ return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_free_shash(struct shash_desc *desc)
-{
- crypto_free_shash(desc->tfm);
- kfree(desc);
-}
-
static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- if (hmacctx->desc) {
- chcr_free_shash(hmacctx->desc);
- hmacctx->desc = NULL;
+ if (hmacctx->base_hash) {
+ chcr_free_shash(hmacctx->base_hash);
+ hmacctx->base_hash = NULL;
+ }
+}
+
+static int chcr_copy_assoc(struct aead_request *req,
+ struct chcr_aead_ctx *ctx)
+{
+ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+
+ skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+ NULL);
+
+ return crypto_skcipher_encrypt(skreq);
+}
+
+static unsigned char get_hmac(unsigned int authsize)
+{
+ switch (authsize) {
+ case ICV_8:
+ return CHCR_SCMD_HMAC_CTRL_PL1;
+ case ICV_10:
+ return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ case ICV_12:
+ return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ }
+ return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+}
+
+
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
+ unsigned int kctx_len = 0;
+ unsigned short stop_offset = 0;
+ unsigned int assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ int null = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ null = 1;
+ assoclen = 0;
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AUTHENC:Invalid Destination sg entries\n");
+ goto err;
+ }
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+ - sizeof(chcr_req->key_ctx);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* LLD is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ /* Write WR */
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+ /*
+ * Input order is AAD, IV and Payload, where the IV is included as
+ * part of the authdata. All other fields are filled according
+ * to the hardware spec.
+ */
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
+ (ivsize ? (assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ assoclen ? 1 : 0, assoclen,
+ assoclen + ivsize + 1,
+ (stop_offset & 0x1F0) >> 4);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+ stop_offset & 0xF,
+ null ? 0 : assoclen + ivsize + 1,
+ stop_offset, stop_offset);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ actx->auth_mode, aeadctx->hmac_ctrl,
+ ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ if (op_type == CHCR_ENCRYPT_OP)
+ memcpy(chcr_req->key_ctx.key, aeadctx->key,
+ aeadctx->enckey_len);
+ else
+ memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+ aeadctx->enckey_len);
+
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
+ 4), actx->h_iopad, kctx_len -
+ (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ if (assoclen) {
+ /* AAD buffer in */
+ write_sg_to_skb(skb, &frags, req->src, assoclen);
+
+ }
+ write_buffer_to_skb(skb, &frags, req->iv, ivsize);
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+
+ return skb;
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+err:
+ return ERR_PTR(-EINVAL);
+}
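
To make the request layout built above concrete, here is an illustrative summary of what ends up in the work request, inferred from the write_*_to_skb() calls rather than taken from a hardware spec:

    /*
     *  chcr_wr (FW WR | ULPTX | SEC_CPL | key context) | cpl_rx_phys_dsgl (dst)
     *  followed by the input data in this order:
     *      [ AAD (assoclen bytes) ][ IV (ivsize bytes) ][ payload (cryptlen bytes) ]
     */
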
+
+static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
+ unsigned short offset)
+{
+ struct page *spage;
+ unsigned char *addr;
+
+ spage = sg_page(sg);
+ get_page(spage); /* so that it is not freed by NIC */
+#ifdef KMAP_ATOMIC_ARGS
+ addr = kmap_atomic(spage, KM_SOFTIRQ0);
+#else
+ addr = kmap_atomic(spage);
+#endif
+ memset(addr + sg->offset, 0, offset + 1);
+
+ kunmap_atomic(addr);
+}
+
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (unsigned int)(1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
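
set_msg_len() writes the payload length big-endian into the last l bytes of the B0 block (capped at 4 bytes). A small usage sketch, assuming a 4-byte length field and a 500-byte payload (values are illustrative only):

    u8 b0[16] = { 0 };
    unsigned int l = 4;                     /* L' = iv[0] = 3, so L = 4 */
    int err = set_msg_len(b0 + 16 - l, 500, l);
    /* on success b0[12..15] == 00 00 01 f4, i.e. 500 in big-endian */
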
+
+static void generate_b0(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned short op_type)
+{
+ unsigned int l, lp, m;
+ int rc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ u8 *b0 = reqctx->scratch_pad;
+
+ m = crypto_aead_authsize(aead);
+
+ memcpy(b0, reqctx->iv, 16);
+
+ lp = b0[0];
+ l = lp + 1;
+
+ /* set m, bits 3-5 */
+ *b0 |= (8 * ((m - 2) / 2));
+
+ /* set adata, bit 6, if associated data is used */
+ if (req->assoclen)
+ *b0 |= 64;
+ rc = set_msg_len(b0 + 16 - l,
+ (op_type == CHCR_DECRYPT_OP) ?
+ req->cryptlen - m : req->cryptlen, l);
+}
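
A quick sanity check of the flags arithmetic above (illustrative, following RFC 3610's B0 layout): with a 12-byte ICV, non-empty AAD, and L' = 3 already present in iv[0], the flags byte works out as:

    /*
     *  Adata bit : 0x40                      (req->assoclen != 0)
     *  M' field  : 8 * ((12 - 2) / 2) = 0x28
     *  L' field  : 0x03                      (from iv[0])
     *  b0[0]     : 0x40 | 0x28 | 0x03 = 0x6b
     */
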
+
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ccm_format_packet(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int rc = 0;
+
+ if (req->assoclen > T5_MAX_AAD_SIZE) {
+ pr_err("CCM: Unsupported AAD data. It should be < %d\n",
+ T5_MAX_AAD_SIZE);
+ return -EINVAL;
+ }
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ reqctx->iv[0] = 3;
+ memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ memset(reqctx->iv + 12, 0, 4);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen - 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 16);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen);
+ }
+ generate_b0(req, aeadctx, op_type);
+ /* zero the ctr value */
+ memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ return rc;
+}
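
For the RFC 4309 branch above, the 16-byte counter block is assembled from a fixed flags octet, the 3-byte salt captured at setkey time, the 8-byte per-packet IV, and a zeroed counter; the 2-byte AAD length field that precedes the AAD is staged in the scratch pad. An illustrative layout, derived from the assignments above:

    /*
     *  reqctx->iv[0]       = 0x03        (L' = 3, 4-byte length field)
     *  reqctx->iv[1..3]    = salt        (last 3 bytes of the key)
     *  reqctx->iv[4..11]   = req->iv     (8-byte per-packet IV)
     *  reqctx->iv[12..15]  = 0           (counter, zeroed)
     *  scratch_pad[16..17] = htons(assoclen - 8)
     */
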
+
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ unsigned int dst_size,
+ struct aead_request *req,
+ unsigned short op_type,
+ struct chcr_context *chcrctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+ unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+ unsigned int c_id = chcrctx->dev->tx_channel_id;
+ unsigned int ccm_xtra;
+ unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
+ unsigned int assoclen;
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen = req->assoclen - 8;
+ else
+ assoclen = req->assoclen;
+ ccm_xtra = CCM_B0_SIZE +
+ ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+ auth_offset = req->cryptlen ?
+ (assoclen + ivsize + 1 + ccm_xtra) : 0;
+ if (op_type == CHCR_DECRYPT_OP) {
+ if (crypto_aead_authsize(tfm) != req->cryptlen)
+ tag_offset = crypto_aead_authsize(tfm);
+ else
+ auth_offset = 0;
+ }
+
+
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+ 2, (ivsize ? (assoclen + 1) : 0) +
+ ccm_xtra);
+ sec_cpl->pldlen =
+ htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+ /* For CCM there will always be a B0 block, so the AAD start is always 1 */
+ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ 1, assoclen + ccm_xtra, assoclen
+ + ivsize + 1 + ccm_xtra, 0);
+
+ sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+ auth_offset, tag_offset,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 :
+ crypto_aead_authsize(tfm));
+ sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+ cipher_mode, mac_mode, hmac_ctrl,
+ ivsize >> 1);
+
+ sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+ 1, dst_size);
+}
+
+int aead_ccm_validate_input(unsigned short op_type,
+ struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type)
+{
+ if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ if (crypto_ccm_check_iv(req->iv)) {
+ pr_err("CCM: IV check fails\n");
+ return -EINVAL;
+ }
+ } else {
+ if (req->assoclen != 16 && req->assoclen != 20) {
+ pr_err("RFC4309: Invalid AAD length %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+ }
+ if (aeadctx->enckey_len == 0) {
+ pr_err("CCM: Encryption key not set\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+unsigned int fill_aead_req_fields(struct sk_buff *skb,
+ struct aead_request *req,
+ struct scatterlist *src,
+ unsigned int ivsize,
+ struct chcr_aead_ctx *aeadctx)
+{
+ unsigned int frags = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ /* B0 and AAD length (if available) */
+
+ write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
+ (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
+ if (req->assoclen) {
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ write_sg_to_skb(skb, &frags, req->src,
+ req->assoclen - 8);
+ else
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+ }
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ if (req->cryptlen)
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+
+ return frags;
+}
+
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned int sub_type;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ sub_type = get_aead_subtype(tfm);
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err) {
+ pr_err("AAD copy to destination buffer fails\n");
+ return ERR_PTR(err);
+ }
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("CCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+ goto err;
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+
+ if (!skb)
+ goto err;
+
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), aeadctx->key, aeadctx->enckey_len);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+ goto dstmap_fail;
+
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+ frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+dstmap_fail:
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return ERR_PTR(-EINVAL);
+}
+
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned char tag_offset = 0;
+ unsigned int crypt_len = 0;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ unsigned char hmac_ctrl = get_hmac(authsize);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+
+ if (!req->cryptlen)
+ /* A null payload is not supported by the hardware, so the
+ * driver sends one AES block instead.
+ */
+ crypt_len = AES_BLOCK_SIZE;
+ else
+ crypt_len = req->cryptlen;
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("GCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* NIC driver is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ req->assoclen -= 8;
+
+ tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+ ctx->dev->tx_channel_id, 2, (ivsize ?
+ (req->assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ req->assoclen ? 1 : 0, req->assoclen,
+ req->assoclen + ivsize + 1, 0);
+ if (req->cryptlen) {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+ tag_offset, tag_offset);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+ CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
+ ivsize >> 1);
+ } else {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ?
+ 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ 0, 0, ivsize >> 1);
+ }
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+ /* prepare a 16 byte iv */
+ /* S A L T | IV | 0x00000001 */
+ if (get_aead_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ memcpy(reqctx->iv, aeadctx->salt, 4);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 12);
+ }
+ *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+
+ if (req->cryptlen) {
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ } else {
+ aes_gcm_empty_pld_pad(req->dst, authsize - 1);
+ write_sg_to_skb(skb, &frags, dst, crypt_len);
+ }
+
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return skb;
+}
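
The 16-byte IV assembled above follows the usual GCM counter-block convention; an illustrative summary for the RFC 4106 case (field offsets inferred from the memcpy calls above):

    /*
     *  reqctx->iv[0..3]   = salt          (stored at setkey time)
     *  reqctx->iv[4..11]  = req->iv       (8-byte per-request IV)
     *  reqctx->iv[12..15] = 0x00000001    (initial counter value)
     */
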
+
+
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
+ aeadctx->null = crypto_get_default_null_skcipher();
+ if (IS_ERR(aeadctx->null))
+ return PTR_ERR(aeadctx->null);
+ return chcr_device_init(ctx);
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+ crypto_put_default_null_skcipher();
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+ aeadctx->mayverify = VERIFY_HW;
+ return 0;
+}
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+ /* The SHA1 authsize used in IPsec is 12 rather than 10, i.e. maxauthsize / 2
+ * does not hold for SHA1, so the authsize == 12 check must come before the
+ * authsize == (maxauth >> 1) check.
+ */
+ if (authsize == ICV_4) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_6) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_10) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_12) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_14) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == (maxauth >> 1)) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == maxauth) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ } else {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ }
+ return 0;
+}
+
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_13:
+ case ICV_15:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ break;
+ default:
+
+ crypto_tfm_set_flags((struct crypto_tfm *) tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_6:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_10:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ unsigned char ck_size, mk_size;
+ int key_ctx_size = 0;
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
+ if (keylen == AES_KEYSIZE_128) {
+ mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+ key_ctx_size >> 4);
+ return 0;
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ if (keylen < 3) {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ keylen -= 3;
+ memcpy(aeadctx->salt, key + keylen, 3);
+ return chcr_aead_ccm_setkey(aead, key, keylen);
+}
+
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+ struct blkcipher_desc h_desc;
+ struct scatterlist src[1];
+ unsigned int ck_size;
+ int ret = 0, key_ctx_size = 0;
+
+ if (get_aead_subtype(aead) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(aeadctx->salt, key + keylen, 4);
+ }
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ pr_err("GCM: Invalid key length %d", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+ /* Calculate H = CIPH(K, 0 repeated 16 times) using a sync AES
+ * blkcipher; it goes into the key context.
+ */
+ h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
+ if (IS_ERR(h_desc.tfm)) {
+ aeadctx->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+ h_desc.flags = 0;
+ ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
+ if (ret) {
+ aeadctx->enckey_len = 0;
+ goto out1;
+ }
+ memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+ sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
+ ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
+
+out1:
+ crypto_free_blkcipher(h_desc.tfm);
+out:
+ return ret;
+}
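
The hash subkey written to gctx->ghash_h above is H = E_K(0^16); encrypting a single zero block through CBC with an all-zero IV, as the path above effectively does, yields the same value as a raw single-block AES encryption. A minimal sketch of that computation with a plain cipher handle (an illustrative alternative, not what the patch uses):

    struct crypto_cipher *aes;
    u8 zeroes[AEAD_H_SIZE] = { 0 };

    aes = crypto_alloc_cipher("aes", 0, 0);
    if (IS_ERR(aes))
            return PTR_ERR(aes);
    ret = crypto_cipher_setkey(aes, key, keylen);
    if (!ret)
            crypto_cipher_encrypt_one(aes, gctx->ghash_h, zeroes);
    crypto_free_cipher(aes);
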
+
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ /* it contains both the auth and cipher keys */
+ struct crypto_authenc_keys keys;
+ unsigned int bs;
+ unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+ int err = 0, i, key_ctx_len = 0;
+ unsigned char ck_size = 0;
+ unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+ struct crypto_shash *base_hash = NULL;
+ struct algo_param param;
+ int align;
+ u8 *o_ptr = NULL;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+
+ if (get_alg_config(&param, max_authsize)) {
+ pr_err("chcr : Unsupported digest size\n");
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+
+ /* Copy only the encryption key. The authkey is used to generate h(ipad)
+ * and h(opad), so it is not needed again; authkeylen is the size of the
+ * hash digest.
+ */
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+
+ base_hash = chcr_alloc_shash(max_authsize);
+ if (IS_ERR(base_hash)) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
}
+ {
+ SHASH_DESC_ON_STACK(shash, base_hash);
+ shash->tfm = base_hash;
+ shash->flags = crypto_shash_get_flags(base_hash);
+ bs = crypto_shash_blocksize(base_hash);
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ o_ptr = actx->h_iopad + param.result_size + align;
+
+ if (keys.authkeylen > bs) {
+ err = crypto_shash_digest(shash, keys.authkey,
+ keys.authkeylen,
+ o_ptr);
+ if (err) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
+ }
+ keys.authkeylen = max_authsize;
+ } else
+ memcpy(o_ptr, keys.authkey, keys.authkeylen);
+
+ /* Compute the ipad-digest*/
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= IPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+ max_authsize))
+ goto out;
+ /* Compute the opad-digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= OPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+ goto out;
+
+ /* convert the ipad and opad digest to network order */
+ chcr_change_order(actx->h_iopad, param.result_size);
+ chcr_change_order(o_ptr, param.result_size);
+ key_ctx_len = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+ 0, 1, key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+ chcr_free_shash(base_hash);
+
+ return 0;
+ }
+out:
+ aeadctx->enckey_len = 0;
+ if (base_hash)
+ chcr_free_shash(base_hash);
+ return -EINVAL;
}
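
The two loops above are the standard HMAC key preprocessing: the zero-padded auth key is XORed with the ipad/opad constants before being partially hashed into h_iopad. The word-wise XOR with IPAD_DATA/OPAD_DATA is equivalent to the familiar byte-wise form, shown here only for clarity:

    /* pad[] holds the zero-padded auth key, bs is the hash block size */
    for (i = 0; i < bs; i++)
            pad[i] ^= 0x36;         /* ipad pass; use 0x5c for the opad pass */
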
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct crypto_authenc_keys keys;
+
+ /* it contains both the auth and cipher keys */
+ int key_ctx_len = 0;
+ unsigned char ck_size = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ key_ctx_len = sizeof(struct _key_ctx)
+ + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+ 0, key_ctx_len >> 4);
+ actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+ return 0;
+out:
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+}
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+ reqctx->verify = VERIFY_HW;
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int size;
+
+ if (aeadctx->mayverify == VERIFY_SW) {
+ size = crypto_aead_maxauthsize(tfm);
+ reqctx->verify = VERIFY_SW;
+ } else {
+ size = 0;
+ reqctx->verify = VERIFY_HW;
+ }
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct sk_buff *skb;
+
+ if (ctx && !ctx->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+ op_type);
+
+ if (IS_ERR(skb) || skb == NULL) {
+ pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+ return PTR_ERR(skb);
+ }
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -1234,7 +2487,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "cbc(aes)",
- .cra_driver_name = "cbc(aes-chcr)",
+ .cra_driver_name = "cbc-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1261,7 +2514,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "xts(aes)",
- .cra_driver_name = "xts(aes-chcr)",
+ .cra_driver_name = "xts-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1354,7 +2607,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
- .cra_driver_name = "hmac(sha1-chcr)",
+ .cra_driver_name = "hmac-sha1-chcr",
.cra_blocksize = SHA1_BLOCK_SIZE,
}
}
@@ -1366,7 +2619,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
- .cra_driver_name = "hmac(sha224-chcr)",
+ .cra_driver_name = "hmac-sha224-chcr",
.cra_blocksize = SHA224_BLOCK_SIZE,
}
}
@@ -1378,7 +2631,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
- .cra_driver_name = "hmac(sha256-chcr)",
+ .cra_driver_name = "hmac-sha256-chcr",
.cra_blocksize = SHA256_BLOCK_SIZE,
}
}
@@ -1390,7 +2643,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
- .cra_driver_name = "hmac(sha384-chcr)",
+ .cra_driver_name = "hmac-sha384-chcr",
.cra_blocksize = SHA384_BLOCK_SIZE,
}
}
@@ -1402,11 +2655,205 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
- .cra_driver_name = "hmac(sha512-chcr)",
+ .cra_driver_name = "hmac-sha512-chcr",
.cra_blocksize = SHA512_BLOCK_SIZE,
}
}
},
+ /* Add AEAD Algorithms */
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+ },
+ .ivsize = 12,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_gcm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_ccm_setkey,
+ .setauthsize = chcr_ccm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_rfc4309_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,cbc(aes))",
+ .cra_driver_name =
+ "authenc-digest_null-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
};
/*
@@ -1424,6 +2871,11 @@ static int chcr_unregister_alg(void)
crypto_unregister_alg(
&driver_algs[i].alg.crypto);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ if (driver_algs[i].is_registered)
+ crypto_unregister_aead(
+ &driver_algs[i].alg.aead);
+ break;
case CRYPTO_ALG_TYPE_AHASH:
if (driver_algs[i].is_registered)
crypto_unregister_ahash(
@@ -1458,6 +2910,19 @@ static int chcr_register_alg(void)
err = crypto_register_alg(&driver_algs[i].alg.crypto);
name = driver_algs[i].alg.crypto.cra_driver_name;
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ driver_algs[i].alg.aead.base.cra_priority =
+ CHCR_CRA_PRIORITY;
+ driver_algs[i].alg.aead.base.cra_flags =
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+ driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+ driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+ driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+ driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+ err = crypto_register_aead(&driver_algs[i].alg.aead);
+ name = driver_algs[i].alg.aead.base.cra_driver_name;
+ break;
case CRYPTO_ALG_TYPE_AHASH:
a_hash = &driver_algs[i].alg.hash;
a_hash->update = chcr_ahash_update;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 199b0bb69b89..3c7c51f7bedf 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -108,30 +108,24 @@
#define IPAD_DATA 0x36363636
#define OPAD_DATA 0x5c5c5c5c
-#define TRANSHDR_SIZE(alignedkctx_len)\
- (sizeof(struct ulptx_idata) +\
- sizeof(struct ulp_txpkt) +\
- sizeof(struct fw_crypto_lookaside_wr) +\
- sizeof(struct cpl_tx_sec_pdu) +\
- (alignedkctx_len))
-#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
- (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
+#define TRANSHDR_SIZE(kctx_len)\
+ (sizeof(struct chcr_wr) +\
+ kctx_len)
+#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
+ (TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
sizeof(struct cpl_rx_phys_dsgl))
-#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
- (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
+#define HASH_TRANSHDR_SIZE(kctx_len)\
+ (TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
-#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
- sizeof(struct ulp_txpkt) + \
- sizeof(struct ulptx_idata))
-#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \
+#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst) \
htonl( \
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
CPL_TX_SEC_PDU_RXCHID_V((id)) | \
CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
- CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
@@ -148,7 +142,7 @@
CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
-#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \
+#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size) \
htonl( \
SCMD_SEQ_NO_CTRL_V(0) | \
SCMD_STATUS_PRESENT_V(0) | \
@@ -159,7 +153,7 @@
SCMD_AUTH_MODE_V((amode)) | \
SCMD_HMAC_CTRL_V((opad)) | \
SCMD_IV_SIZE_V((size)) | \
- SCMD_NUM_IVS_V((nivs)))
+ SCMD_NUM_IVS_V(0))
#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
SCMD_ENB_DBGID_V(0) | \
@@ -264,13 +258,15 @@ enum {
* where they indicate the size of the integrity check value (ICV)
*/
enum {
- AES_CCM_ICV_4 = 4,
- AES_CCM_ICV_6 = 6,
- AES_CCM_ICV_8 = 8,
- AES_CCM_ICV_10 = 10,
- AES_CCM_ICV_12 = 12,
- AES_CCM_ICV_14 = 14,
- AES_CCM_ICV_16 = 16
+ ICV_4 = 4,
+ ICV_6 = 6,
+ ICV_8 = 8,
+ ICV_10 = 10,
+ ICV_12 = 12,
+ ICV_13 = 13,
+ ICV_14 = 14,
+ ICV_15 = 15,
+ ICV_16 = 16
};
struct hash_op_params {
@@ -394,7 +390,7 @@ static const u8 aes_sbox[256] = {
187, 22
};
-static u32 aes_ks_subword(const u32 w)
+static inline u32 aes_ks_subword(const u32 w)
{
u8 bytes[4];
@@ -412,61 +408,4 @@ static u32 round_constant[11] = {
0x1B000000, 0x36000000, 0x6C000000
};
-/* dec_key - OUTPUT - Reverse round key
- * key - INPUT - key
- * keylength - INPUT - length of the key in number of bits
- */
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
- const unsigned char *key,
- unsigned int keylength)
-{
- u32 temp;
- u32 w_ring[MAX_NK];
- int i, j, k;
- u8 nr, nk;
-
- switch (keylength) {
- case AES_KEYLENGTH_128BIT:
- nk = KEYLENGTH_4BYTES;
- nr = NUMBER_OF_ROUNDS_10;
- break;
-
- case AES_KEYLENGTH_192BIT:
- nk = KEYLENGTH_6BYTES;
- nr = NUMBER_OF_ROUNDS_12;
- break;
- case AES_KEYLENGTH_256BIT:
- nk = KEYLENGTH_8BYTES;
- nr = NUMBER_OF_ROUNDS_14;
- break;
- default:
- return;
- }
- for (i = 0; i < nk; i++ )
- w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
-
- i = 0;
- temp = w_ring[nk - 1];
- while(i + nk < (nr + 1) * 4) {
- if(!(i % nk)) {
- /* RotWord(temp) */
- temp = (temp << 8) | (temp >> 24);
- temp = aes_ks_subword(temp);
- temp ^= round_constant[i / nk];
- }
- else if (nk == 8 && (i % 4 == 0))
- temp = aes_ks_subword(temp);
- w_ring[i % nk] ^= temp;
- temp = w_ring[i % nk];
- i++;
- }
- i--;
- for (k = 0, j = i % nk; k < nk; k++) {
- *((u32 *)dec_key + k) = htonl(w_ring[j]);
- j--;
- if(j < 0)
- j += nk;
- }
-}
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 4d7f6700fd7e..918da8e6e2d8 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -110,14 +110,12 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
if (ack_err_status) {
if (CHK_MAC_ERR_BIT(ack_err_status) ||
CHK_PAD_ERR_BIT(ack_err_status))
- error_status = -EINVAL;
+ error_status = -EBADMSG;
}
/* call completion callback with failure status */
if (req) {
- if (!chcr_handle_resp(req, input, error_status))
- req->complete(req, error_status);
- else
- return -EINVAL;
+ error_status = chcr_handle_resp(req, input, error_status);
+ req->complete(req, error_status);
} else {
pr_err("Incorrect request address from the firmware\n");
return -EFAULT;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2a5c671a4232..c7088a4e0a49 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -52,13 +52,27 @@
#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
+#define MAX_SALT 4
struct uld_ctx;
+struct _key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[MAX_SALT];
+ __be64 reserverd;
+ unsigned char key[0];
+};
+
+struct chcr_wr {
+ struct fw_crypto_lookaside_wr wreq;
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
struct chcr_dev {
- /* Request submited to h/w and waiting for response. */
spinlock_t lock_chcr_dev;
- struct crypto_queue pending_queue;
struct uld_ctx *u_ctx;
unsigned char tx_channel_id;
};
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d7d75605da8b..d5af7d64a763 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -36,6 +36,14 @@
#ifndef __CHCR_CRYPTO_H__
#define __CHCR_CRYPTO_H__
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+#define CCM_B0_SIZE 16
+#define CCM_AAD_FIELD_SIZE 2
+#define T5_MAX_AAD_SIZE 512
+
+
/* Define following if h/w is not dropping the AAD and IV data before
* giving the processed data
*/
@@ -63,22 +71,36 @@
#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
-#define CHCR_SCMD_CIPHER_MODE_NOP 0
-#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
-#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
-#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_NOP 0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_AES_CCM 7
#define CHCR_SCMD_AUTH_MODE_NOP 0
#define CHCR_SCMD_AUTH_MODE_SHA1 1
#define CHCR_SCMD_AUTH_MODE_SHA224 2
#define CHCR_SCMD_AUTH_MODE_SHA256 3
+#define CHCR_SCMD_AUTH_MODE_GHASH 4
#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
+#define CHCR_SCMD_AUTH_MODE_CBCMAC 9
+#define CHCR_SCMD_AUTH_MODE_CMAC 10
#define CHCR_SCMD_HMAC_CTRL_NOP 0
#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
+#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366 2
+#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT 3
+#define CHCR_SCMD_HMAC_CTRL_PL1 4
+#define CHCR_SCMD_HMAC_CTRL_PL2 5
+#define CHCR_SCMD_HMAC_CTRL_PL3 6
+#define CHCR_SCMD_HMAC_CTRL_DIV2 7
+#define VERIFY_HW 0
+#define VERIFY_SW 1
#define CHCR_SCMD_IVGEN_CTRL_HW 0
#define CHCR_SCMD_IVGEN_CTRL_SW 1
@@ -106,39 +128,74 @@
#define IV_IMMEDIATE 1
#define IV_DSGL 2
+#define AEAD_H_SIZE 16
+
#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
-#define MAX_SALT 4
#define MAX_SCRATCH_PAD_SIZE 32
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
/* Aligned to 128 bit boundary */
-struct _key_ctx {
- __be32 ctx_hdr;
- u8 salt[MAX_SALT];
- __be64 reserverd;
- unsigned char key[0];
-};
struct ablk_ctx {
- u8 enc;
- unsigned int processed_len;
__be32 key_ctx_hdr;
unsigned int enckey_len;
- unsigned int dst_nents;
- struct scatterlist iv_sg;
u8 key[CHCR_AES_MAX_KEY_LEN];
- u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char ciph_mode;
+ u8 rrkey[AES_MAX_KEY_SIZE];
+};
+struct chcr_aead_reqctx {
+ struct sk_buff *skb;
+ short int dst_nents;
+ u16 verify;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
+};
+
+struct chcr_gcm_ctx {
+ u8 ghash_h[AEAD_H_SIZE];
};
+struct chcr_authenc_ctx {
+ u8 dec_rrkey[AES_MAX_KEY_SIZE];
+ u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
+ unsigned char auth_mode;
+};
+
+struct __aead_ctx {
+ struct chcr_gcm_ctx gcm[0];
+ struct chcr_authenc_ctx authenc[0];
+};
+
+
+
+struct chcr_aead_ctx {
+ __be32 key_ctx_hdr;
+ unsigned int enckey_len;
+ struct crypto_skcipher *null;
+ u8 salt[MAX_SALT];
+ u8 key[CHCR_AES_MAX_KEY_LEN];
+ u16 hmac_ctrl;
+ u16 mayverify;
+ struct __aead_ctx ctx[0];
+};
+
+
+
struct hmac_ctx {
- struct shash_desc *desc;
+ struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
@@ -146,6 +203,7 @@ struct hmac_ctx {
struct __crypto_ctx {
struct hmac_ctx hmacctx[0];
struct ablk_ctx ablkctx[0];
+ struct chcr_aead_ctx aeadctx[0];
};
struct chcr_context {
@@ -156,18 +214,22 @@ struct chcr_context {
struct chcr_ahash_req_ctx {
u32 result;
- char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
- u8 bfr_len;
+ u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 *reqbfr;
+ u8 *skbfr;
+ u8 reqlen;
/* DMA the partial hash in it */
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */
- void *dummy_payload_ptr;
/* SKB which is being sent to the hardware for processing */
struct sk_buff *skb;
};
struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
+ unsigned int dst_nents;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};
struct chcr_alg_template {
@@ -176,16 +238,19 @@ struct chcr_alg_template {
union {
struct crypto_alg crypto;
struct ahash_alg hash;
+ struct aead_alg aead;
} alg;
};
struct chcr_req_ctx {
union {
struct ahash_request *ahash_req;
+ struct aead_request *aead_req;
struct ablkcipher_request *ablk_req;
} req;
union {
struct chcr_ahash_req_ctx *ahash_ctx;
+ struct chcr_aead_reqctx *reqctx;
struct chcr_blkcipher_req_ctx *ablk_ctx;
} ctx;
};
@@ -195,9 +260,15 @@ struct sge_opaque_hdr {
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
-typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
- struct chcr_context *ctx,
+typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
+ int size,
unsigned short op_type);
+static int chcr_aead_op(struct aead_request *req_base,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn);
+static inline int get_aead_subtype(struct crypto_aead *aead);
+
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 37dadb2a4feb..6e7a5c77a00a 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -375,10 +375,6 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
if (!dma->padding_pool)
return -ENOMEM;
- dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
- if (!dma->iv_pool)
- return -ENOMEM;
-
cesa->dma = dma;
return 0;
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e423d33decd4..a768da7138a1 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -277,7 +277,7 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
-#define CESA_TDMA_IV 3
+#define CESA_TDMA_RESULT 3
/**
* struct mv_cesa_tdma_desc - TDMA descriptor
@@ -393,7 +393,6 @@ struct mv_cesa_dev_dma {
struct dma_pool *op_pool;
struct dma_pool *cache_pool;
struct dma_pool *padding_pool;
- struct dma_pool *iv_pool;
};
/**
@@ -839,7 +838,7 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
memset(chain, 0, sizeof(*chain));
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags);
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d19dc9614e6e..098871a22a54 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -212,7 +212,8 @@ mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+ memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+ ivsize);
} else {
memcpy_fromio(ablkreq->info,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
@@ -373,8 +374,9 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
/* Add output data for IV */
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
- ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
+ ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 77712b375b84..317cf029c0cf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -311,24 +311,40 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
- if (creq->last_req) {
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
+ (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+ __le32 *data = NULL;
+
/*
- * Hardware's MD5 digest is in little endian format, but
- * SHA in big endian format
+		 * Result is already in the correct endianness when the SA is
+ * used
*/
- if (creq->algo_le) {
- __le32 *result = (void *)ahashreq->result;
+ data = creq->base.chain.last->op->ctx.hash.hash;
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = cpu_to_le32(data[i]);
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_le32(creq->state[i]);
- } else {
- __be32 *result = (void *)ahashreq->result;
+ memcpy(ahashreq->result, data, digsize);
+ } else {
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = readl_relaxed(engine->regs +
+ CESA_IVDIG(i));
+ if (creq->last_req) {
+ /*
+ * Hardware's MD5 digest is in little endian format, but
+ * SHA in big endian format
+ */
+ if (creq->algo_le) {
+ __le32 *result = (void *)ahashreq->result;
+
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_le32(creq->state[i]);
+ } else {
+ __be32 *result = (void *)ahashreq->result;
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_be32(creq->state[i]);
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_be32(creq->state[i]);
+ }
}
}
@@ -503,6 +519,12 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
CESA_SA_DESC_CFG_LAST_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
+ ret = mv_cesa_dma_add_result_op(chain,
+ CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
return op;
}
@@ -563,6 +585,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
int ret;
+ u32 type;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
@@ -634,7 +657,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
goto err_free_tdma;
}
- if (op) {
+ /*
+ * If results are copied via DMA, this means that this
+ * request can be directly processed by the engine,
+ * without partial updates. So we can chain it at the
+ * DMA level with other requests.
+ */
+ type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
+
+ if (op && type != CESA_TDMA_RESULT) {
/* Add dummy desc to wait for crypto operation end */
ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
if (ret)
@@ -647,8 +678,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
else
creq->cache_ptr = 0;
- basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
- CESA_TDMA_BREAK_CHAIN);
+ basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+
+ if (type != CESA_TDMA_RESULT)
+ basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
return 0;
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 9fd7a5fbaa1b..4416b88eca70 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -69,9 +69,6 @@ void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
if (type == CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
- else if (type == CESA_TDMA_IV)
- dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
- le32_to_cpu(tdma->dst));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -209,29 +206,37 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
return new_tdma;
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags)
{
-
- struct mv_cesa_tdma_desc *tdma;
- u8 *iv;
- dma_addr_t dma_handle;
+ struct mv_cesa_tdma_desc *tdma, *op_desc;
tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
- iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
- if (!iv)
- return -ENOMEM;
+ /* We re-use an existing op_desc object to retrieve the context
+ * and result instead of allocating a new one.
+ * There is at least one object of this type in a CESA crypto
+ * req, just pick the first one in the chain.
+ */
+ for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
+ u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;
+
+ if (type == CESA_TDMA_OP)
+ break;
+ }
+
+ if (!op_desc)
+ return -EIO;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = src;
- tdma->dst = cpu_to_le32(dma_handle);
- tdma->data = iv;
+ tdma->dst = op_desc->src;
+ tdma->op = op_desc->op;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
- tdma->flags = flags | CESA_TDMA_IV;
+ tdma->flags = flags | CESA_TDMA_RESULT;
return 0;
}
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 104e9ce9400a..451fa18c1c7b 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1073,7 +1073,7 @@ static int mv_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -1163,7 +1163,6 @@ err_irq:
err_thread:
kthread_stop(cp->queue_th);
err:
- kfree(cp);
cpg = NULL;
return ret;
}
@@ -1187,7 +1186,6 @@ static int mv_remove(struct platform_device *pdev)
clk_put(cp->clk);
}
- kfree(cp);
cpg = NULL;
return 0;
}
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 42f0f229f7f7..036057abb257 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -32,7 +32,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/types.h>
#include <asm/hvcall.h>
#include <asm/vio.h>
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0c49956ee0ce..1d9ecd368b5b 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -390,7 +390,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
if (status & SAHARA_STATUS_MODE_BATCH)
dev_dbg(dev->device, " - Batch Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEDICATED)
- dev_dbg(dev->device, " - Decidated Mode.\n");
+ dev_dbg(dev->device, " - Dedicated Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEBUG)
dev_dbg(dev->device, " - Debug Mode.\n");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f41dc0..0bba6a19d36a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -590,7 +590,7 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
if (v_lo & TALITOS_CCPSR_LO_MDTE)
dev_err(dev, "master data transfer error\n");
if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
- dev_err(dev, is_sec1 ? "pointeur not complete error\n"
+ dev_err(dev, is_sec1 ? "pointer not complete error\n"
: "s/g data length zero error\n");
if (v_lo & TALITOS_CCPSR_LO_FPZ)
dev_err(dev, is_sec1 ? "parity error\n"
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index de6e241b0866..55f7c392582f 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -10,10 +10,12 @@ endif
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
-$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl
- $(call cmd,perl)
+targets += aesp8-ppc.S ghashp8-ppc.S
+
+$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE
+ $(call if_changed,perl)
-$(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl
- $(call cmd,perl)
+$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE
+ $(call if_changed,perl)
-.PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S
+clean-files := aesp8-ppc.S ghashp8-ppc.S