Diffstat (limited to 'drivers/crypto')
 drivers/crypto/marvell/cesa.c   |  1
 drivers/crypto/marvell/cesa.h   |  3
 drivers/crypto/marvell/cipher.c | 28
 drivers/crypto/marvell/hash.c   | 22
 4 files changed, 39 insertions(+), 15 deletions(-)
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index f407bacdb021..40d4add19947 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -98,6 +98,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
 			engine->req = NULL;
 			mv_cesa_dequeue_req_unlocked(engine);
 			spin_unlock_bh(&engine->lock);
+			ctx->ops->complete(req);
 			ctx->ops->cleanup(req);
 			local_bh_disable();
 			req->complete(req, res);
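The hunk above makes the interrupt handler invoke the new ->complete() hook before ->cleanup(): results such as the output IV or digest may live in storage that cleanup() releases (DMA descriptors) or that the next request will overwrite (the SRAM window), so they must be copied back first, and only then is the crypto API caller notified. A minimal user-space sketch of that ordering; the names here (async_req, req_ops, finish_req, the demo_* helpers) are hypothetical stand-ins, not the driver's real types:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the kernel types; illustrative only. */
struct async_req { int id; };

struct req_ops {
	void (*cleanup)(struct async_req *req);
	void (*complete)(struct async_req *req); /* op added by this patch */
};

static void demo_complete(struct async_req *req)
{
	/* In the driver this copies the IV/digest out of SRAM or out of
	 * the DMA descriptor chain, back into the crypto request. */
	printf("req %d: copy results back\n", req->id);
}

static void demo_cleanup(struct async_req *req)
{
	printf("req %d: release DMA descriptors\n", req->id);
}

/* Mirrors the ordering mv_cesa_int() now uses: ->complete() runs
 * before ->cleanup() frees the resources the results live in. */
static void finish_req(struct async_req *req, const struct req_ops *ops)
{
	ops->complete(req);
	ops->cleanup(req);
}

int main(void)
{
	const struct req_ops ops = {
		.cleanup = demo_cleanup,
		.complete = demo_complete,
	};
	struct async_req req = { .id = 1 };

	finish_req(&req, &ops);
	return 0;
}
```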
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e67e3f17eb01..d7493351f14b 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -456,6 +456,8 @@ struct mv_cesa_engine {
  *		code)
  * @step:	launch the crypto operation on the next chunk
  * @cleanup:	cleanup the crypto request (release associated data)
+ * @complete:	complete the request, i.e. copy result or context from SRAM when
+ *		needed.
  */
 struct mv_cesa_req_ops {
 	void (*prepare)(struct crypto_async_request *req,
@@ -463,6 +465,7 @@ struct mv_cesa_req_ops {
 	int (*process)(struct crypto_async_request *req, u32 status);
 	void (*step)(struct crypto_async_request *req);
 	void (*cleanup)(struct crypto_async_request *req);
+	void (*complete)(struct crypto_async_request *req);
 };
 
 /**
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 7699528957f4..f994469feeb9 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -118,7 +118,6 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
 	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
 	struct mv_cesa_engine *engine = creq->base.engine;
 	size_t len;
-	unsigned int ivsize;
 
 	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
 				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
@@ -128,10 +127,6 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
 	if (sreq->offset < req->nbytes)
 		return -EINPROGRESS;
 
-	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
-	memcpy_fromio(req->info,
-		      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, ivsize);
-
 	return 0;
 }
 
@@ -211,11 +206,34 @@ mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
 	mv_cesa_ablkcipher_cleanup(ablkreq);
 }
 
+static void
+mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
+{
+	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+	struct mv_cesa_engine *engine = creq->base.engine;
+	unsigned int ivsize;
+
+	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
+		struct mv_cesa_req *basereq;
+
+		basereq = &creq->base;
+		memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+	} else {
+		memcpy_fromio(ablkreq->info,
+			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
+			      ivsize);
+	}
+}
+
 static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
 	.step = mv_cesa_ablkcipher_step,
 	.process = mv_cesa_ablkcipher_process,
 	.prepare = mv_cesa_ablkcipher_prepare,
 	.cleanup = mv_cesa_ablkcipher_req_cleanup,
+	.complete = mv_cesa_ablkcipher_complete,
 };
 
 static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
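mv_cesa_ablkcipher_complete() copies the output IV back into req->info from one of two places, depending on how the request was executed: a DMA-backed request finds it in the buffer attached to the last descriptor of the TDMA chain, while a standard (CPU-driven) request reads it from the engine's IV slot in SRAM, which is device memory and therefore goes through memcpy_fromio(). A rough user-space model of that two-path copy-back; the types and field names below are hypothetical:

```c
#include <string.h>
#include <stddef.h>

/* Hypothetical model of where the engine left the output IV. */
enum req_type { STD_REQ, DMA_REQ };

struct model_req {
	enum req_type type;
	const unsigned char *last_desc_data; /* ~ basereq->chain.last->data */
	const unsigned char *sram_iv; /* ~ sram + CESA_SA_CRYPT_IV_SRAM_OFFSET */
};

/* Select the source the way the driver does; the real SRAM path uses
 * memcpy_fromio() because the SRAM window is MMIO, not plain memory. */
static void copy_back_iv(const struct model_req *r, unsigned char *iv,
			 size_t ivsize)
{
	const unsigned char *src = (r->type == DMA_REQ) ? r->last_desc_data
							: r->sram_iv;

	memcpy(iv, src, ivsize);
}
```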
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 03aaa39d6fd6..faa677a3e881 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -287,17 +287,20 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
 {
 	struct ahash_request *ahashreq = ahash_request_cast(req);
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
-	struct mv_cesa_engine *engine = creq->base.engine;
-	unsigned int digsize;
-	int ret, i;
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
-		ret = mv_cesa_dma_process(&creq->base, status);
-	else
-		ret = mv_cesa_ahash_std_process(ahashreq, status);
+		return mv_cesa_dma_process(&creq->base, status);
 
-	if (ret == -EINPROGRESS)
-		return ret;
+	return mv_cesa_ahash_std_process(ahashreq, status);
+}
+
+static void mv_cesa_ahash_complete(struct crypto_async_request *req)
+{
+	struct ahash_request *ahashreq = ahash_request_cast(req);
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
+	struct mv_cesa_engine *engine = creq->base.engine;
+	unsigned int digsize;
+	int i;
 
 	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
 	for (i = 0; i < digsize / 4; i++)
@@ -326,8 +329,6 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
 			result[i] = cpu_to_be32(creq->state[i]);
 		}
 	}
-
-	return ret;
 }
 
 static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
@@ -366,6 +367,7 @@ static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
 	.process = mv_cesa_ahash_process,
 	.prepare = mv_cesa_ahash_prepare,
 	.cleanup = mv_cesa_ahash_req_cleanup,
+	.complete = mv_cesa_ahash_complete,
 };
 
 static int mv_cesa_ahash_init(struct ahash_request *req,
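The net effect in hash.c: mv_cesa_ahash_process() now only reports status (including -EINPROGRESS for requests with chunks still pending), while the digest copy-back that previously sat behind the -EINPROGRESS check moves into mv_cesa_ahash_complete(). That copy-back converts the engine's CPU-endian state words into the big-endian result the caller expects, as in this user-space sketch (to_be32() hand-models the kernel's cpu_to_be32() on a little-endian machine; names are illustrative):

```c
#include <stdint.h>
#include <stddef.h>

/* Model of cpu_to_be32() on a little-endian CPU: byte-swap each word. */
static uint32_t to_be32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8) | ((v & 0xff000000u) >> 24);
}

/* Mirrors the copy-back loop in mv_cesa_ahash_complete(): digsize bytes
 * of hash state, handled one 32-bit word at a time. */
static void copy_back_digest(const uint32_t *state, uint32_t *result,
			     size_t digsize)
{
	size_t i;

	for (i = 0; i < digsize / 4; i++)
		result[i] = to_be32(state[i]);
}
```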