author	Jens Wiklander <jens.wiklander@linaro.org>	2017-11-16 12:44:47 +0100
committer	Jérôme Forissier <jerome.forissier@linaro.org>	2017-11-20 09:27:01 +0100
commit	1fca7e269b134c6a02d662cdbd6ee67fa3b9801c (patch)
tree	f754567c595bddcb68d58ffdcaee0cc293d0022a /core
parent	71cd3ee470bd9d794692f7440ae76dbd5877d8b7 (diff)
core: crypto: add new AES-GCM implementation
Adds a new AES-GCM implementation optimized for hardware acceleration. This implementation is enabled by default; to use the implementation in libTomCrypt instead, set CFG_CRYPTO_AES_GCM_FROM_CRYPTOLIB=y.

Tested-by: Jerome Forissier <jerome.forissier@linaro.org> (HiKey960)
Acked-by: Jerome Forissier <jerome.forissier@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
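The implementation is chosen at build time. For example, assuming the usual OP-TEE build flow where CFG_ options can be passed on the make command line (an illustrative invocation, not part of this commit), the libTomCrypt implementation would be selected with:

    make CFG_CRYPTO_AES_GCM_FROM_CRYPTOLIB=y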
Diffstat (limited to 'core')
-rw-r--r--  core/arch/arm/crypto/aes-gcm-ce.c  122
-rw-r--r--  core/arch/arm/crypto/sub.mk  1
-rw-r--r--  core/crypto.mk  3
-rw-r--r--  core/crypto/aes-gcm-ghash.c  79
-rw-r--r--  core/crypto/aes-gcm-sw.c  102
-rw-r--r--  core/crypto/aes-gcm.c  356
-rw-r--r--  core/crypto/sub.mk  3
-rw-r--r--  core/include/crypto/internal_aes-gcm.h  72
8 files changed, 737 insertions, 1 deletion
diff --git a/core/arch/arm/crypto/aes-gcm-ce.c b/core/arch/arm/crypto/aes-gcm-ce.c
new file mode 100644
index 00000000..a961e004
--- /dev/null
+++ b/core/arch/arm/crypto/aes-gcm-ce.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <crypto/internal_aes-gcm.h>
+#include <crypto/ghash-ce-core.h>
+#include <io.h>
+#include <kernel/panic.h>
+#include <kernel/thread.h>
+#include <tomcrypt.h>
+#include <types_ext.h>
+
+TEE_Result internal_aes_gcm_set_key(struct internal_aes_gcm_ctx *ctx,
+ const void *key, size_t key_len)
+{
+ uint64_t k[2];
+ uint64_t a;
+ uint64_t b;
+
+ if (aes_setup(key, key_len, 0, &ctx->skey))
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ internal_aes_gcm_encrypt_block(ctx, ctx->ctr, ctx->hash_subkey);
+
+ /* Store hash key in little endian and multiply by 'x' */
+ b = get_be64(ctx->hash_subkey);
+ a = get_be64(ctx->hash_subkey + 8);
+ k[0] = (a << 1) | (b >> 63);
+ k[1] = (b << 1) | (a >> 63);
+ if (b >> 63)
+ k[1] ^= 0xc200000000000000UL;
+
+ memcpy(ctx->hash_subkey, k, TEE_AES_BLOCK_SIZE);
+ return TEE_SUCCESS;
+}
+
+static void get_dg(uint64_t dg[2], struct internal_aes_gcm_ctx *ctx)
+{
+ dg[1] = get_be64(ctx->hash_state);
+ dg[0] = get_be64(ctx->hash_state + 8);
+}
+
+static void put_dg(struct internal_aes_gcm_ctx *ctx, uint64_t dg[2])
+{
+ put_be64(ctx->hash_state, dg[1]);
+ put_be64(ctx->hash_state + 8, dg[0]);
+}
+
+void internal_aes_gcm_ghash_update(struct internal_aes_gcm_ctx *ctx,
+ const void *head, const void *data,
+ size_t num_blocks)
+{
+ uint32_t vfp_state;
+ uint64_t dg[2];
+ uint64_t *k;
+
+ get_dg(dg, ctx);
+
+ k = (void *)ctx->hash_subkey;
+
+ vfp_state = thread_kernel_enable_vfp();
+
+#ifdef CFG_HWSUPP_PMULT_64
+ pmull_ghash_update_p64(num_blocks, dg, data, k, head);
+#else
+ pmull_ghash_update_p8(num_blocks, dg, data, k, head);
+#endif
+ thread_kernel_disable_vfp(vfp_state);
+
+ put_dg(ctx, dg);
+}
+
+#ifdef ARM64
+void internal_aes_gcm_encrypt_block(struct internal_aes_gcm_ctx *ctx,
+ const void *src, void *dst)
+{
+ uint32_t vfp_state;
+ void *enc_key = ctx->skey.rijndael.eK;
+ size_t rounds = ctx->skey.rijndael.Nr;
+
+ vfp_state = thread_kernel_enable_vfp();
+
+ pmull_gcm_load_round_keys(enc_key, rounds);
+ pmull_gcm_encrypt_block(dst, src, rounds);
+
+ thread_kernel_disable_vfp(vfp_state);
+}
+
+void
+internal_aes_gcm_update_payload_block_aligned(struct internal_aes_gcm_ctx *ctx,
+ TEE_OperationMode m,
+ const void *src,
+ size_t num_blocks, void *dst)
+{
+ uint32_t vfp_state;
+ uint64_t dg[2];
+ uint64_t *k;
+ void *ctr = ctx->ctr;
+ void *enc_key = ctx->skey.rijndael.eK;
+ size_t rounds = ctx->skey.rijndael.Nr;
+
+ get_dg(dg, ctx);
+ k = (void *)ctx->hash_subkey;
+
+ vfp_state = thread_kernel_enable_vfp();
+
+ pmull_gcm_load_round_keys(enc_key, rounds);
+
+ if (m == TEE_MODE_ENCRYPT)
+ pmull_gcm_encrypt(num_blocks, dg, dst, src, k, ctr, rounds,
+ ctx->buf_cryp);
+ else
+ pmull_gcm_decrypt(num_blocks, dg, dst, src, k, ctr, rounds);
+
+ thread_kernel_disable_vfp(vfp_state);
+
+ put_dg(ctx, dg);
+}
+#endif /*ARM64*/
diff --git a/core/arch/arm/crypto/sub.mk b/core/arch/arm/crypto/sub.mk
index 9a905a13..fabfd54e 100644
--- a/core/arch/arm/crypto/sub.mk
+++ b/core/arch/arm/crypto/sub.mk
@@ -1,4 +1,5 @@
ifeq ($(CFG_CRYPTO_WITH_CE),y)
srcs-$(CFG_ARM64_core) += ghash-ce-core_a64.S
srcs-$(CFG_ARM32_core) += ghash-ce-core_a32.S
+srcs-y += aes-gcm-ce.c
endif
diff --git a/core/crypto.mk b/core/crypto.mk
index dffa4840..2dc24d9e 100644
--- a/core/crypto.mk
+++ b/core/crypto.mk
@@ -36,7 +36,8 @@ CFG_CRYPTO_ECC ?= y
# Authenticated encryption
CFG_CRYPTO_CCM ?= y
CFG_CRYPTO_GCM ?= y
-CFG_CRYPTO_AES_GCM_FROM_CRYPTOLIB = $(CFG_CRYPTO_GCM)
+# Default uses the OP-TEE internal AES-GCM implementation
+CFG_CRYPTO_AES_GCM_FROM_CRYPTOLIB ?= n
endif
diff --git a/core/crypto/aes-gcm-ghash.c b/core/crypto/aes-gcm-ghash.c
new file mode 100644
index 00000000..f6dae768
--- /dev/null
+++ b/core/crypto/aes-gcm-ghash.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2010 Mike Belopuhov
+ * Copyright (c) 2017, Linaro Limited
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <crypto/internal_aes-gcm.h>
+#include <kernel/panic.h>
+#include <string.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+
+static void xor_block(void *dst, const void *src)
+{
+ uint64_t *d = dst;
+ const uint64_t *s = src;
+
+ d[0] ^= s[0];
+ d[1] ^= s[1];
+}
+
+/*
+ * gfmul() is based on ghash_gfmul() from
+ * https://github.com/openbsd/src/blob/master/sys/crypto/gmac.c
+ */
+static void gfmul(const uint64_t X[2], const uint64_t Y[2], uint64_t product[2])
+{
+ uint64_t y[2];
+ uint64_t z[2] = { 0 };
+ const uint8_t *x = (const uint8_t *)X;
+ uint32_t mul;
+ size_t n;
+
+ y[0] = TEE_U64_FROM_BIG_ENDIAN(Y[0]);
+ y[1] = TEE_U64_FROM_BIG_ENDIAN(Y[1]);
+
+ for (n = 0; n < TEE_AES_BLOCK_SIZE * 8; n++) {
+ /* update Z */
+ if (x[n >> 3] & (1 << (~n & 7)))
+ xor_block(z, y);
+
+ /* update Y */
+ mul = y[1] & 1;
+ y[1] = (y[0] << 63) | (y[1] >> 1);
+ y[0] = (y[0] >> 1) ^ (0xe100000000000000 * mul);
+ }
+
+ product[0] = TEE_U64_TO_BIG_ENDIAN(z[0]);
+ product[1] = TEE_U64_TO_BIG_ENDIAN(z[1]);
+}
+
+void __weak internal_aes_gcm_ghash_update(struct internal_aes_gcm_ctx *ctx,
+ const void *head, const void *data,
+ size_t num_blocks)
+{
+ const uint64_t *x = (const void *)data;
+ void *y = ctx->hash_state;
+ size_t n;
+
+ if (head) {
+ xor_block(y, head);
+ gfmul((void *)ctx->hash_subkey, y, y);
+ }
+ for (n = 0; n < num_blocks; n++) {
+ xor_block(y, x + n * 2);
+ gfmul((void *)ctx->hash_subkey, y, y);
+ }
+}
diff --git a/core/crypto/aes-gcm-sw.c b/core/crypto/aes-gcm-sw.c
new file mode 100644
index 00000000..5c05e53b
--- /dev/null
+++ b/core/crypto/aes-gcm-sw.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+/*
+ * gfmul() is based on ghash_gfmul from
+ * https://github.com/openbsd/src/blob/master/sys/crypto/gmac.c
+ * Which is:
+ * Copyright (c) 2010 Mike Belopuhov
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <crypto/internal_aes-gcm.h>
+#include <kernel/panic.h>
+#include <string.h>
+#include <tee_api_types.h>
+#include <tomcrypt.h>
+#include <types_ext.h>
+
+static bool __maybe_unused ptr_is_block_aligned(const void *p)
+{
+ return !((vaddr_t)p & (TEE_AES_BLOCK_SIZE - 1));
+}
+
+static void xor_block(void *dst, const void *src)
+{
+ uint64_t *d = dst;
+ const uint64_t *s = src;
+
+ d[0] ^= s[0];
+ d[1] ^= s[1];
+}
+
+TEE_Result __weak internal_aes_gcm_set_key(struct internal_aes_gcm_ctx *ctx,
+ const void *key, size_t key_len)
+{
+ if (aes_setup(key, key_len, 0, &ctx->skey))
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ if (aes_ecb_encrypt((void *)ctx->ctr, ctx->hash_subkey, &ctx->skey))
+ panic();
+
+ return TEE_SUCCESS;
+}
+
+void __weak
+internal_aes_gcm_update_payload_block_aligned(struct internal_aes_gcm_ctx *ctx,
+ TEE_OperationMode m,
+ const void *src,
+ size_t num_blocks, void *dst)
+{
+ size_t n;
+ const uint8_t *s = src;
+ uint8_t *d = dst;
+
+ assert(!ctx->buf_pos && num_blocks &&
+ ptr_is_block_aligned(s) && ptr_is_block_aligned(d));
+
+ for (n = 0; n < num_blocks; n++) {
+ if (m == TEE_MODE_ENCRYPT) {
+ xor_block(ctx->buf_cryp, s);
+ internal_aes_gcm_ghash_update(ctx, ctx->buf_cryp,
+ NULL, 0);
+ memcpy(d, ctx->buf_cryp, sizeof(ctx->buf_cryp));
+ internal_aes_gcm_encrypt_block(ctx, ctx->ctr,
+ ctx->buf_cryp);
+ internal_aes_gcm_inc_ctr(ctx);
+ } else {
+ internal_aes_gcm_encrypt_block(ctx, ctx->ctr,
+ ctx->buf_cryp);
+
+ xor_block(ctx->buf_cryp, s);
+ internal_aes_gcm_ghash_update(ctx, s, NULL, 0);
+ memcpy(d, ctx->buf_cryp, sizeof(ctx->buf_cryp));
+
+ internal_aes_gcm_inc_ctr(ctx);
+ }
+ s += TEE_AES_BLOCK_SIZE;
+ d += TEE_AES_BLOCK_SIZE;
+ }
+}
+
+void __weak internal_aes_gcm_encrypt_block(struct internal_aes_gcm_ctx *ctx,
+ const void *src, void *dst)
+{
+ if (aes_ecb_encrypt(src, dst, &ctx->skey))
+ panic();
+}
diff --git a/core/crypto/aes-gcm.c b/core/crypto/aes-gcm.c
new file mode 100644
index 00000000..4fb1ff3f
--- /dev/null
+++ b/core/crypto/aes-gcm.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <crypto/internal_aes-gcm.h>
+#include <io.h>
+#include <string_ext.h>
+#include <string.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+#include <utee_defines.h>
+#include <util.h>
+
+static void xor_buf(uint8_t *dst, const uint8_t *src, size_t len)
+{
+ size_t n;
+
+ for (n = 0; n < len; n++)
+ dst[n] ^= src[n];
+}
+
+static bool ptr_is_block_aligned(const void *p)
+{
+ return !((vaddr_t)p & (TEE_AES_BLOCK_SIZE - 1));
+}
+
+static void ghash_update_pad_zero(struct internal_aes_gcm_ctx *ctx,
+ const uint8_t *data, size_t len)
+{
+ size_t n = len / TEE_AES_BLOCK_SIZE;
+ uint64_t block[2];
+
+ if (n) {
+ if (ptr_is_block_aligned(data)) {
+ internal_aes_gcm_ghash_update(ctx, NULL, data, n);
+ } else {
+ size_t m;
+
+ for (m = 0; m < n; m++) {
+
+ memcpy(block, data + m * sizeof(block),
+ sizeof(block));
+ internal_aes_gcm_ghash_update(ctx, NULL,
+ (void *)block, 1);
+ }
+ }
+ }
+
+ if (len - n * TEE_AES_BLOCK_SIZE) {
+ memset(block, 0, sizeof(block));
+ memcpy(block, data + n * TEE_AES_BLOCK_SIZE,
+ len - n * TEE_AES_BLOCK_SIZE);
+ internal_aes_gcm_ghash_update(ctx, block, NULL, 0);
+ }
+}
+
+static void ghash_update_lengths(struct internal_aes_gcm_ctx *ctx, uint32_t l1,
+ uint32_t l2)
+{
+ uint64_t len_fields[2] = {
+ TEE_U64_TO_BIG_ENDIAN(l1 * 8),
+ TEE_U64_TO_BIG_ENDIAN(l2 * 8)
+ };
+
+ COMPILE_TIME_ASSERT(sizeof(len_fields) == TEE_AES_BLOCK_SIZE);
+ internal_aes_gcm_ghash_update(ctx, (uint8_t *)len_fields, NULL, 0);
+}
+
+TEE_Result internal_aes_gcm_init(struct internal_aes_gcm_ctx *ctx,
+ TEE_OperationMode mode, const void *key,
+ size_t key_len, const void *nonce,
+ size_t nonce_len, size_t tag_len)
+{
+ TEE_Result res;
+
+ COMPILE_TIME_ASSERT(sizeof(ctx->ctr) == TEE_AES_BLOCK_SIZE);
+
+ if (tag_len > sizeof(ctx->buf_tag))
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->tag_len = tag_len;
+ res = internal_aes_gcm_set_key(ctx, key, key_len);
+ if (res)
+ return res;
+
+ if (nonce_len == (96 / 8)) {
+ memcpy(ctx->ctr, nonce, nonce_len);
+ internal_aes_gcm_inc_ctr(ctx);
+ } else {
+ ghash_update_pad_zero(ctx, nonce, nonce_len);
+ ghash_update_lengths(ctx, 0, nonce_len);
+
+ memcpy(ctx->ctr, ctx->hash_state, sizeof(ctx->ctr));
+ memset(ctx->hash_state, 0, sizeof(ctx->hash_state));
+ }
+
+ internal_aes_gcm_encrypt_block(ctx, ctx->ctr, ctx->buf_tag);
+ internal_aes_gcm_inc_ctr(ctx);
+ if (mode == TEE_MODE_ENCRYPT) {
+ /*
+ * Encryption uses the pre-encrypted xor-buffer to encrypt
+ * while decryption encrypts the xor-buffer when needed
+ * instead.
+ *
+ * The reason for this is that the combined encryption and
+ * ghash implementation does both operations intertwined.
+ * In the decrypt case the xor-buffer is needed at the end
+ * of processing each block, while the encryption case
+ * needs xor-buffer before processing each block.
+ *
+ * In a pure software implementation we wouldn't have any
+ * use for this kind of optimization, but since this
+ * AES-GCM implementation is aimed at being combined with
+ * accelerated routines it's more convenient to always have
+ * this optimization activated.
+ */
+ internal_aes_gcm_encrypt_block(ctx, ctx->ctr, ctx->buf_cryp);
+ internal_aes_gcm_inc_ctr(ctx);
+ }
+
+ return TEE_SUCCESS;
+}
+
+TEE_Result internal_aes_gcm_update_aad(struct internal_aes_gcm_ctx *ctx,
+ const void *data, size_t len)
+{
+ const uint8_t *d = data;
+ size_t l = len;
+ const uint8_t *head = NULL;
+ size_t n;
+
+ if (ctx->payload_bytes)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ ctx->aad_bytes += len;
+
+ while (l) {
+ if (ctx->buf_pos || !ptr_is_block_aligned(d) ||
+ l < TEE_AES_BLOCK_SIZE) {
+ n = MIN(TEE_AES_BLOCK_SIZE - ctx->buf_pos, l);
+ memcpy(ctx->buf_hash + ctx->buf_pos, d, n);
+ ctx->buf_pos += n;
+
+ if (ctx->buf_pos != TEE_AES_BLOCK_SIZE)
+ return TEE_SUCCESS;
+
+ ctx->buf_pos = 0;
+ head = ctx->buf_hash;
+ d += n;
+ l -= n;
+ }
+
+ if (ptr_is_block_aligned(d))
+ n = l / TEE_AES_BLOCK_SIZE;
+ else
+ n = 0;
+
+ internal_aes_gcm_ghash_update(ctx, head, d, n);
+ l -= n * TEE_AES_BLOCK_SIZE;
+ d += n * TEE_AES_BLOCK_SIZE;
+ }
+
+ return TEE_SUCCESS;
+}
+
+TEE_Result internal_aes_gcm_update_payload(struct internal_aes_gcm_ctx *ctx,
+ TEE_OperationMode mode,
+ const void *src, size_t len,
+ void *dst)
+{
+ size_t n;
+ const uint8_t *s = src;
+ uint8_t *d = dst;
+ size_t l = len;
+
+ if (!ctx->payload_bytes && ctx->buf_pos) {
+ /* AAD part done, finish up the last bits. */
+ memset(ctx->buf_hash + ctx->buf_pos, 0,
+ TEE_AES_BLOCK_SIZE - ctx->buf_pos);
+ internal_aes_gcm_ghash_update(ctx, ctx->buf_hash, NULL, 0);
+ ctx->buf_pos = 0;
+ }
+
+ ctx->payload_bytes += len;
+
+ while (l) {
+ if (ctx->buf_pos || !ptr_is_block_aligned(s) ||
+ !ptr_is_block_aligned(d) || l < TEE_AES_BLOCK_SIZE) {
+ n = MIN(TEE_AES_BLOCK_SIZE - ctx->buf_pos, l);
+
+ if (!ctx->buf_pos && mode == TEE_MODE_DECRYPT) {
+ internal_aes_gcm_encrypt_block(ctx, ctx->ctr,
+ ctx->buf_cryp);
+ }
+
+ xor_buf(ctx->buf_cryp + ctx->buf_pos, s, n);
+ memcpy(d, ctx->buf_cryp + ctx->buf_pos, n);
+ if (mode == TEE_MODE_ENCRYPT)
+ memcpy(ctx->buf_hash + ctx->buf_pos,
+ ctx->buf_cryp + ctx->buf_pos, n);
+ else
+ memcpy(ctx->buf_hash + ctx->buf_pos, s, n);
+
+ ctx->buf_pos += n;
+
+ if (ctx->buf_pos != TEE_AES_BLOCK_SIZE)
+ return TEE_SUCCESS;
+
+ internal_aes_gcm_ghash_update(ctx, ctx->buf_hash,
+ NULL, 0);
+ ctx->buf_pos = 0;
+ d += n;
+ s += n;
+ l -= n;
+
+ if (mode == TEE_MODE_ENCRYPT)
+ internal_aes_gcm_encrypt_block(ctx, ctx->ctr,
+ ctx->buf_cryp);
+ internal_aes_gcm_inc_ctr(ctx);
+ } else {
+ n = l / TEE_AES_BLOCK_SIZE;
+ internal_aes_gcm_update_payload_block_aligned(ctx, mode,
+ s, n, d);
+ s += n * TEE_AES_BLOCK_SIZE;
+ d += n * TEE_AES_BLOCK_SIZE;
+ l -= n * TEE_AES_BLOCK_SIZE;
+ }
+ }
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result operation_final(struct internal_aes_gcm_ctx *ctx,
+ TEE_OperationMode m, const uint8_t *src,
+ size_t len, uint8_t *dst)
+{
+ TEE_Result res;
+
+ res = internal_aes_gcm_update_payload(ctx, m, src, len, dst);
+ if (res)
+ return res;
+
+ if (ctx->buf_pos) {
+ memset(ctx->buf_hash + ctx->buf_pos, 0,
+ sizeof(ctx->buf_hash) - ctx->buf_pos);
+ internal_aes_gcm_ghash_update(ctx, ctx->buf_hash, NULL, 0);
+ }
+
+ ghash_update_lengths(ctx, ctx->aad_bytes, ctx->payload_bytes);
+ /* buf_tag was filled in with the first counter block by internal_aes_gcm_init() */
+ xor_buf(ctx->buf_tag, ctx->hash_state, ctx->tag_len);
+
+ return TEE_SUCCESS;
+}
+
+TEE_Result internal_aes_gcm_enc_final(struct internal_aes_gcm_ctx *ctx,
+ const void *src, size_t len, void *dst,
+ void *tag, size_t *tag_len)
+{
+ TEE_Result res;
+
+ if (*tag_len < ctx->tag_len)
+ return TEE_ERROR_SHORT_BUFFER;
+
+ res = operation_final(ctx, TEE_MODE_ENCRYPT, src, len, dst);
+ if (res)
+ return res;
+
+ memcpy(tag, ctx->buf_tag, ctx->tag_len);
+ *tag_len = ctx->tag_len;
+
+ return TEE_SUCCESS;
+}
+
+TEE_Result internal_aes_gcm_dec_final(struct internal_aes_gcm_ctx *ctx,
+ const void *src, size_t len, void *dst,
+ const void *tag, size_t tag_len)
+{
+ TEE_Result res;
+
+ if (tag_len != ctx->tag_len)
+ return TEE_ERROR_MAC_INVALID;
+
+ res = operation_final(ctx, TEE_MODE_DECRYPT, src, len, dst);
+ if (res)
+ return res;
+
+ if (buf_compare_ct(ctx->buf_tag, tag, tag_len))
+ return TEE_ERROR_MAC_INVALID;
+
+ return TEE_SUCCESS;
+}
+
+void internal_aes_gcm_inc_ctr(struct internal_aes_gcm_ctx *ctx)
+{
+ uint64_t c;
+
+ c = TEE_U64_FROM_BIG_ENDIAN(ctx->ctr[1]) + 1;
+ ctx->ctr[1] = TEE_U64_TO_BIG_ENDIAN(c);
+ if (!c) {
+ c = TEE_U64_FROM_BIG_ENDIAN(ctx->ctr[0]) + 1;
+ ctx->ctr[0] = TEE_U64_TO_BIG_ENDIAN(c);
+ }
+}
+
+#ifndef CFG_CRYPTO_AES_GCM_FROM_CRYPTOLIB
+#include <crypto/aes-gcm.h>
+
+size_t crypto_aes_gcm_get_ctx_size(void)
+{
+ return sizeof(struct internal_aes_gcm_ctx);
+}
+
+TEE_Result crypto_aes_gcm_init(void *c, TEE_OperationMode mode,
+ const uint8_t *key, size_t key_len,
+ const uint8_t *nonce, size_t nonce_len,
+ size_t tag_len)
+{
+ return internal_aes_gcm_init(c, mode, key, key_len, nonce, nonce_len,
+ tag_len);
+}
+
+TEE_Result crypto_aes_gcm_update_aad(void *c, const uint8_t *data, size_t len)
+{
+ return internal_aes_gcm_update_aad(c, data, len);
+}
+
+TEE_Result crypto_aes_gcm_update_payload(void *c, TEE_OperationMode m,
+ const uint8_t *src, size_t len,
+ uint8_t *dst)
+{
+ return internal_aes_gcm_update_payload(c, m, src, len, dst);
+}
+
+TEE_Result crypto_aes_gcm_enc_final(void *c, const uint8_t *src, size_t len,
+ uint8_t *dst, uint8_t *tag, size_t *tag_len)
+{
+ return internal_aes_gcm_enc_final(c, src, len, dst, tag, tag_len);
+}
+
+TEE_Result crypto_aes_gcm_dec_final(void *c, const uint8_t *src, size_t len,
+ uint8_t *dst, const uint8_t *tag,
+ size_t tag_len)
+{
+ return internal_aes_gcm_dec_final(c, src, len, dst, tag, tag_len);
+}
+
+void crypto_aes_gcm_final(void *c __unused)
+{
+}
+#endif /*!CFG_CRYPTO_AES_GCM_FROM_CRYPTOLIB*/
diff --git a/core/crypto/sub.mk b/core/crypto/sub.mk
index 7f72ebd0..99df944d 100644
--- a/core/crypto/sub.mk
+++ b/core/crypto/sub.mk
@@ -1 +1,4 @@
srcs-y += crypto.c
+srcs-y += aes-gcm.c
+srcs-y += aes-gcm-sw.c
+srcs-y += aes-gcm-ghash.c
diff --git a/core/include/crypto/internal_aes-gcm.h b/core/include/crypto/internal_aes-gcm.h
new file mode 100644
index 00000000..5fe3a3f8
--- /dev/null
+++ b/core/include/crypto/internal_aes-gcm.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef __CRYPTO_INTERNAL_AES_GCM_H
+#define __CRYPTO_INTERNAL_AES_GCM_H
+
+#include <tee_api_types.h>
+#include <utee_defines.h>
+#include <tomcrypt.h>
+
+struct internal_aes_gcm_ctx {
+ uint64_t ctr[2];
+
+ uint8_t hash_subkey[TEE_AES_BLOCK_SIZE];
+ uint8_t hash_state[TEE_AES_BLOCK_SIZE];
+
+ uint8_t buf_tag[TEE_AES_BLOCK_SIZE];
+ uint8_t buf_hash[TEE_AES_BLOCK_SIZE];
+ uint8_t buf_cryp[TEE_AES_BLOCK_SIZE];
+
+ symmetric_key skey;
+
+ unsigned int tag_len;
+ unsigned int aad_bytes;
+ unsigned int payload_bytes;
+ unsigned int buf_pos;
+};
+
+TEE_Result internal_aes_gcm_init(struct internal_aes_gcm_ctx *ctx,
+ TEE_OperationMode mode, const void *key,
+ size_t key_len, const void *nonce,
+ size_t nonce_len, size_t tag_len);
+TEE_Result internal_aes_gcm_update_aad(struct internal_aes_gcm_ctx *ctx,
+ const void *data, size_t len);
+TEE_Result internal_aes_gcm_update_payload(struct internal_aes_gcm_ctx *ctx,
+ TEE_OperationMode mode,
+ const void *src, size_t len,
+ void *dst);
+TEE_Result internal_aes_gcm_enc_final(struct internal_aes_gcm_ctx *ctx,
+ const void *src, size_t len, void *dst,
+ void *tag, size_t *tag_len);
+TEE_Result internal_aes_gcm_dec_final(struct internal_aes_gcm_ctx *ctx,
+ const void *src, size_t len, void *dst,
+ const void *tag, size_t tag_len);
+
+void internal_aes_gcm_inc_ctr(struct internal_aes_gcm_ctx *ctx);
+
+/*
+ * Internal weak functions that can be overridden with hardware specific
+ * implementations.
+ */
+void internal_aes_gcm_encrypt_block(struct internal_aes_gcm_ctx *ctx,
+ const void *src, void *dst);
+
+TEE_Result internal_aes_gcm_set_key(struct internal_aes_gcm_ctx *ctx,
+ const void *key, size_t key_len);
+
+void internal_aes_gcm_ghash_update(struct internal_aes_gcm_ctx *ctx,
+ const void *head, const void *data,
+ size_t num_blocks);
+
+void
+internal_aes_gcm_update_payload_block_aligned(struct internal_aes_gcm_ctx *ctx,
+ TEE_OperationMode mode,
+ const void *src,
+ size_t num_blocks, void *dst);
+#endif /*__CRYPTO_INTERNAL_AES_GCM_H*/
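
For reference, a minimal usage sketch of the internal API declared in this header (illustrative only, not part of the commit; the function name encrypt_example and the buffer sizes are made up, and a 96-bit nonce with a 16-byte tag is assumed):

#include <crypto/internal_aes-gcm.h>

static TEE_Result encrypt_example(void)
{
	struct internal_aes_gcm_ctx ctx;
	const uint8_t key[16] = { 0 };   /* illustrative key material */
	const uint8_t nonce[12] = { 0 }; /* 96-bit nonce, fast IV path */
	const uint8_t aad[20] = { 0 };   /* additional authenticated data */
	uint8_t pt[40] = { 0 };          /* plaintext */
	uint8_t ct[40];                  /* ciphertext */
	uint8_t tag[16];
	size_t tag_len = sizeof(tag);
	TEE_Result res;

	res = internal_aes_gcm_init(&ctx, TEE_MODE_ENCRYPT, key, sizeof(key),
				    nonce, sizeof(nonce), sizeof(tag));
	if (res)
		return res;

	/* All AAD must be supplied before any payload */
	res = internal_aes_gcm_update_aad(&ctx, aad, sizeof(aad));
	if (res)
		return res;

	/* Payload may be split into arbitrary chunks; partial blocks are buffered */
	res = internal_aes_gcm_update_payload(&ctx, TEE_MODE_ENCRYPT,
					      pt, 16, ct);
	if (res)
		return res;

	/* Final call processes the remaining bytes and emits the tag */
	return internal_aes_gcm_enc_final(&ctx, pt + 16, sizeof(pt) - 16,
					  ct + 16, tag, &tag_len);
}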