ipq40xx: qce - switch to skcipher API

This backports a commit updating the API of the QCE crypto engine to the
one used in current kernels, easing future upstream backports.

Signed-off-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
[renumber patches, refreshed, added 5.4 patches]
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
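
For context before the patches: the skcipher interface keeps the ablkcipher
programming model (asynchronous requests over scatterlists) but renames the
types and accessors, so the conversion below is largely mechanical —
ablkcipher_request becomes skcipher_request, req->nbytes becomes
req->cryptlen, req->info becomes req->iv, and crypto_register_alg() becomes
crypto_register_skcipher(). As a hedged illustration of the consumer side of
the API these patches implement (this sketch is not part of the patch; the
demo_ names are invented, the crypto_/skcipher_ calls are the standard
kernel ones):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Encrypt one buffer in place with whatever cbc(aes) implementation the
 * crypto API picks, waiting for the (possibly asynchronous) engine. */
static int demo_encrypt_buf(u8 *buf, unsigned int len,
			    const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}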

@@ -0,0 +1,973 @@
From f441873642eebf20566c18d2966a8cd4b433ec1c Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Tue, 5 Nov 2019 14:28:17 +0100
Subject: [PATCH] crypto: qce - switch to skcipher API
Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface")
dated 20 august 2015 introduced the new skcipher API which is supposed to
replace both blkcipher and ablkcipher. While all consumers of the API have
been converted long ago, some producers of the ablkcipher remain, forcing
us to keep the ablkcipher support routines alive, along with the matching
code to expose [a]blkciphers via the skcipher API.
So switch this driver to the skcipher API, allowing us to finally drop the
blkcipher code in the near future.
Reviewed-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Backported-to-4.19-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -4,4 +4,4 @@ qcrypto-objs := core.o \
common.o \
dma.o \
sha.o \
- ablkcipher.o
+ skcipher.o
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -53,12 +53,12 @@ struct qce_cipher_reqctx {
unsigned int cryptlen;
};
-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- return container_of(alg, struct qce_alg_template, alg.crypto);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ return container_of(alg, struct qce_alg_template, alg.skcipher);
}
-extern const struct qce_algo_ops ablkcipher_ops;
+extern const struct qce_algo_ops skcipher_ops;
#endif /* _CIPHER_H_ */
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -312,13 +312,13 @@ go_proc:
return 0;
}
-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
@@ -397,8 +397,8 @@ int qce_start(struct crypto_async_reques
u32 offset)
{
switch (type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return qce_setup_regs_skcipher(async_req, totallen, offset);
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
default:
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
+#include <crypto/internal/skcipher.h>
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
@@ -87,7 +88,7 @@ struct qce_alg_template {
unsigned long alg_flags;
const u32 *std_iv;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg ahash;
} alg;
struct qce_device *qce;
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -30,7 +30,7 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
- &ablkcipher_ops,
+ &skcipher_ops,
&ahash_ops,
};
--- a/drivers/crypto/qce/ablkcipher.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <crypto/aes.h>
-#include <crypto/des.h>
-#include <crypto/internal/skcipher.h>
-
-#include "cipher.h"
-
-static LIST_HEAD(ablkcipher_algs);
-
-static void qce_ablkcipher_done(void *data)
-{
- struct crypto_async_request *async_req = data;
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
- struct qce_device *qce = tmpl->qce;
- enum dma_data_direction dir_src, dir_dst;
- u32 status;
- int error;
- bool diff_dst;
-
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
- dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
-
- error = qce_dma_terminate_all(&qce->dma);
- if (error)
- dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
- error);
-
- if (diff_dst)
- dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
- dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-
- sg_free_table(&rctx->dst_tbl);
-
- error = qce_check_status(qce, &status);
- if (error < 0)
- dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
-
- qce->async_req_done(tmpl->qce, error);
-}
-
-static int
-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
-{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
- struct qce_device *qce = tmpl->qce;
- enum dma_data_direction dir_src, dir_dst;
- struct scatterlist *sg;
- bool diff_dst;
- gfp_t gfp;
- int ret;
-
- rctx->iv = req->info;
- rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- rctx->cryptlen = req->nbytes;
-
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
- dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
-
- rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (diff_dst)
- rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- else
- rctx->dst_nents = rctx->src_nents;
- if (rctx->src_nents < 0) {
- dev_err(qce->dev, "Invalid numbers of src SG.\n");
- return rctx->src_nents;
- }
- if (rctx->dst_nents < 0) {
- dev_err(qce->dev, "Invalid numbers of dst SG.\n");
- return -rctx->dst_nents;
- }
-
- rctx->dst_nents += 1;
-
- gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
-
- ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
- if (ret)
- return ret;
-
- sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
-
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto error_free;
- }
-
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto error_free;
- }
-
- sg_mark_end(sg);
- rctx->dst_sg = rctx->dst_tbl.sgl;
-
- ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (ret < 0)
- goto error_free;
-
- if (diff_dst) {
- ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- if (ret < 0)
- goto error_unmap_dst;
- rctx->src_sg = req->src;
- } else {
- rctx->src_sg = rctx->dst_sg;
- }
-
- ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
- rctx->dst_sg, rctx->dst_nents,
- qce_ablkcipher_done, async_req);
- if (ret)
- goto error_unmap_src;
-
- qce_dma_issue_pending(&qce->dma);
-
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
- if (ret)
- goto error_terminate;
-
- return 0;
-
-error_terminate:
- qce_dma_terminate_all(&qce->dma);
-error_unmap_src:
- if (diff_dst)
- dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
-error_unmap_dst:
- dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-error_free:
- sg_free_table(&rctx->dst_tbl);
- return ret;
-}
-
-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
- int ret;
-
- if (!key || !keylen)
- return -EINVAL;
-
- if (IS_AES(flags)) {
- switch (keylen) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_256:
- break;
- default:
- goto fallback;
- }
- } else if (IS_DES(flags)) {
- u32 tmp[DES_EXPKEY_WORDS];
-
- ret = des_ekey(tmp, key);
- if (!ret && crypto_ablkcipher_get_flags(ablk) &
- CRYPTO_TFM_REQ_WEAK_KEY)
- goto weakkey;
- }
-
- ctx->enc_keylen = keylen;
- memcpy(ctx->enc_key, key, keylen);
- return 0;
-fallback:
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
- if (!ret)
- ctx->enc_keylen = keylen;
- return ret;
-weakkey:
- crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
- return -EINVAL;
-}
-
-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
-{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
- int ret;
-
- rctx->flags = tmpl->alg_flags;
- rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
-
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
- ret = encrypt ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return ret;
- }
-
- return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
-}
-
-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
-{
- return qce_ablkcipher_crypt(req, 1);
-}
-
-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
-{
- return qce_ablkcipher_crypt(req, 0);
-}
-
-static int qce_ablkcipher_init(struct crypto_tfm *tfm)
-{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
-
- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
- return PTR_ERR_OR_ZERO(ctx->fallback);
-}
-
-static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
-{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_skcipher(ctx->fallback);
-}
-
-struct qce_ablkcipher_def {
- unsigned long flags;
- const char *name;
- const char *drv_name;
- unsigned int blocksize;
- unsigned int ivsize;
- unsigned int min_keysize;
- unsigned int max_keysize;
-};
-
-static const struct qce_ablkcipher_def ablkcipher_def[] = {
- {
- .flags = QCE_ALG_AES | QCE_MODE_ECB,
- .name = "ecb(aes)",
- .drv_name = "ecb-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_AES | QCE_MODE_CBC,
- .name = "cbc(aes)",
- .drv_name = "cbc-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_AES | QCE_MODE_CTR,
- .name = "ctr(aes)",
- .drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_AES | QCE_MODE_XTS,
- .name = "xts(aes)",
- .drv_name = "xts-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_DES | QCE_MODE_ECB,
- .name = "ecb(des)",
- .drv_name = "ecb-des-qce",
- .blocksize = DES_BLOCK_SIZE,
- .ivsize = 0,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_DES | QCE_MODE_CBC,
- .name = "cbc(des)",
- .drv_name = "cbc-des-qce",
- .blocksize = DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_3DES | QCE_MODE_ECB,
- .name = "ecb(des3_ede)",
- .drv_name = "ecb-3des-qce",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .ivsize = 0,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_3DES | QCE_MODE_CBC,
- .name = "cbc(des3_ede)",
- .drv_name = "cbc-3des-qce",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
-};
-
-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
- struct qce_device *qce)
-{
- struct qce_alg_template *tmpl;
- struct crypto_alg *alg;
- int ret;
-
- tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
- if (!tmpl)
- return -ENOMEM;
-
- alg = &tmpl->alg.crypto;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
-
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
- alg->cra_ablkcipher.min_keysize = def->min_keysize;
- alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
-
- alg->cra_priority = 300;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY;
- alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
- alg->cra_alignmask = 0;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_module = THIS_MODULE;
- alg->cra_init = qce_ablkcipher_init;
- alg->cra_exit = qce_ablkcipher_exit;
- INIT_LIST_HEAD(&alg->cra_list);
-
- INIT_LIST_HEAD(&tmpl->entry);
- tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
- tmpl->alg_flags = def->flags;
- tmpl->qce = qce;
-
- ret = crypto_register_alg(alg);
- if (ret) {
- kfree(tmpl);
- dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
- return ret;
- }
-
- list_add_tail(&tmpl->entry, &ablkcipher_algs);
- dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
- return 0;
-}
-
-static void qce_ablkcipher_unregister(struct qce_device *qce)
-{
- struct qce_alg_template *tmpl, *n;
-
- list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
- crypto_unregister_alg(&tmpl->alg.crypto);
- list_del(&tmpl->entry);
- kfree(tmpl);
- }
-}
-
-static int qce_ablkcipher_register(struct qce_device *qce)
-{
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
- ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
- if (ret)
- goto err;
- }
-
- return 0;
-err:
- qce_ablkcipher_unregister(qce);
- return ret;
-}
-
-const struct qce_algo_ops ablkcipher_ops = {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .register_algs = qce_ablkcipher_register,
- .unregister_algs = qce_ablkcipher_unregister,
- .async_req_handle = qce_ablkcipher_async_req_handle,
-};
--- /dev/null
+++ b/drivers/crypto/qce/skcipher.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/internal/skcipher.h>
+
+#include "cipher.h"
+
+static LIST_HEAD(skcipher_algs);
+
+static void qce_skcipher_done(void *data)
+{
+ struct crypto_async_request *async_req = data;
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ u32 status;
+ int error;
+ bool diff_dst;
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ error = qce_dma_terminate_all(&qce->dma);
+ if (error)
+ dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
+ error);
+
+ if (diff_dst)
+ dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+
+ sg_free_table(&rctx->dst_tbl);
+
+ error = qce_check_status(qce, &status);
+ if (error < 0)
+ dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+
+ qce->async_req_done(tmpl->qce, error);
+}
+
+static int
+qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
+{
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ struct scatterlist *sg;
+ bool diff_dst;
+ gfp_t gfp;
+ int ret;
+
+ rctx->iv = req->iv;
+ rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+ rctx->cryptlen = req->cryptlen;
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (diff_dst)
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+ else
+ rctx->dst_nents = rctx->src_nents;
+ if (rctx->src_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of src SG.\n");
+ return rctx->src_nents;
+ }
+ if (rctx->dst_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of dst SG.\n");
+ return -rctx->dst_nents;
+ }
+
+ rctx->dst_nents += 1;
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+ if (ret)
+ return ret;
+
+ sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto error_free;
+ }
+
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto error_free;
+ }
+
+ sg_mark_end(sg);
+ rctx->dst_sg = rctx->dst_tbl.sgl;
+
+ ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+ if (ret < 0)
+ goto error_free;
+
+ if (diff_dst) {
+ ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+ if (ret < 0)
+ goto error_unmap_dst;
+ rctx->src_sg = req->src;
+ } else {
+ rctx->src_sg = rctx->dst_sg;
+ }
+
+ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+ rctx->dst_sg, rctx->dst_nents,
+ qce_skcipher_done, async_req);
+ if (ret)
+ goto error_unmap_src;
+
+ qce_dma_issue_pending(&qce->dma);
+
+ ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+ if (ret)
+ goto error_terminate;
+
+ return 0;
+
+error_terminate:
+ qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+ if (diff_dst)
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+error_unmap_dst:
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+error_free:
+ sg_free_table(&rctx->dst_tbl);
+ return ret;
+}
+
+static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
+ unsigned int keylen)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+ int ret;
+
+ if (!key || !keylen)
+ return -EINVAL;
+
+ if (IS_AES(flags)) {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ goto fallback;
+ }
+ } else if (IS_DES(flags)) {
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ ret = des_ekey(tmp, key);
+ if (!ret && crypto_skcipher_get_flags(ablk) &
+ CRYPTO_TFM_REQ_WEAK_KEY)
+ goto weakkey;
+ }
+
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ return 0;
+fallback:
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ if (!ret)
+ ctx->enc_keylen = keylen;
+ return ret;
+weakkey:
+ crypto_skcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+}
+
+static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int ret;
+
+ rctx->flags = tmpl->alg_flags;
+ rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
+ if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
+ ctx->enc_keylen != AES_KEYSIZE_256) {
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ return ret;
+ }
+
+ return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_skcipher_encrypt(struct skcipher_request *req)
+{
+ return qce_skcipher_crypt(req, 1);
+}
+
+static int qce_skcipher_decrypt(struct skcipher_request *req)
+{
+ return qce_skcipher_crypt(req, 0);
+}
+
+static int qce_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+
+ ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ return PTR_ERR_OR_ZERO(ctx->fallback);
+}
+
+static void qce_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->fallback);
+}
+
+struct qce_skcipher_def {
+ unsigned long flags;
+ const char *name;
+ const char *drv_name;
+ unsigned int blocksize;
+ unsigned int ivsize;
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+};
+
+static const struct qce_skcipher_def skcipher_def[] = {
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_ECB,
+ .name = "ecb(aes)",
+ .drv_name = "ecb-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CBC,
+ .name = "cbc(aes)",
+ .drv_name = "cbc-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CTR,
+ .name = "ctr(aes)",
+ .drv_name = "ctr-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_XTS,
+ .name = "xts(aes)",
+ .drv_name = "xts-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_ECB,
+ .name = "ecb(des)",
+ .drv_name = "ecb-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = 0,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_CBC,
+ .name = "cbc(des)",
+ .drv_name = "cbc-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_ECB,
+ .name = "ecb(des3_ede)",
+ .drv_name = "ecb-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = 0,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_CBC,
+ .name = "cbc(des3_ede)",
+ .drv_name = "cbc-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+};
+
+static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
+ struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl;
+ struct skcipher_alg *alg;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl)
+ return -ENOMEM;
+
+ alg = &tmpl->alg.skcipher;
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
+ alg->min_keysize = def->min_keysize;
+ alg->max_keysize = def->max_keysize;
+ alg->setkey = qce_skcipher_setkey;
+ alg->encrypt = qce_skcipher_encrypt;
+ alg->decrypt = qce_skcipher_decrypt;
+
+ alg->base.cra_priority = 300;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = qce_skcipher_init;
+ alg->exit = qce_skcipher_exit;
+
+ INIT_LIST_HEAD(&tmpl->entry);
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
+ tmpl->alg_flags = def->flags;
+ tmpl->qce = qce;
+
+ ret = crypto_register_skcipher(alg);
+ if (ret) {
+ kfree(tmpl);
+ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+ return ret;
+ }
+
+ list_add_tail(&tmpl->entry, &skcipher_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
+ return 0;
+}
+
+static void qce_skcipher_unregister(struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl, *n;
+
+ list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&tmpl->alg.skcipher);
+ list_del(&tmpl->entry);
+ kfree(tmpl);
+ }
+}
+
+static int qce_skcipher_register(struct qce_device *qce)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
+ ret = qce_skcipher_register_one(&skcipher_def[i], qce);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ qce_skcipher_unregister(qce);
+ return ret;
+}
+
+const struct qce_algo_ops skcipher_ops = {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .register_algs = qce_skcipher_register,
+ .unregister_algs = qce_skcipher_unregister,
+ .async_req_handle = qce_skcipher_async_req_handle,
+};

@@ -9,7 +9,7 @@ Signed-off-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
-@@ -370,7 +370,7 @@ static int qce_ablkcipher_register_one(c
+@@ -380,7 +380,7 @@ static int qce_ablkcipher_register_one(c
alg->cra_priority = 300;
alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
@@ -20,7 +20,7 @@ Signed-off-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
alg->cra_type = &crypto_ablkcipher_type;
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
-@@ -503,7 +503,7 @@ static int qce_ahash_register_one(const
+@@ -495,7 +495,7 @@ static int qce_ahash_register_one(const
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
base->cra_priority = 300;
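
The second copy of the backport below targets the 5.4 kernel and follows
the newer upstream baseline: the AES fallback is a sync skcipher rather
than an async one, and DES/3DES key checks use the
verify_skcipher_des_key()/verify_skcipher_des3_key() helpers from
<crypto/internal/des.h> instead of open-coded des_ekey() weak-key
handling. For quick comparison, the fallback allocation line as it appears
in each of the two patches:

/* 4.19 backport (the patch above): async skcipher fallback */
ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
				      0, CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_NEED_FALLBACK);

/* 5.4 backport (the patch below): sync skcipher fallback */
ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
					   0, CRYPTO_ALG_NEED_FALLBACK);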

@@ -0,0 +1,993 @@
From f441873642eebf20566c18d2966a8cd4b433ec1c Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Tue, 5 Nov 2019 14:28:17 +0100
Subject: [PATCH] crypto: qce - switch to skcipher API
Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface")
dated 20 august 2015 introduced the new skcipher API which is supposed to
replace both blkcipher and ablkcipher. While all consumers of the API have
been converted long ago, some producers of the ablkcipher remain, forcing
us to keep the ablkcipher support routines alive, along with the matching
code to expose [a]blkciphers via the skcipher API.
So switch this driver to the skcipher API, allowing us to finally drop the
blkcipher code in the near future.
Reviewed-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Backported-to-4.19-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -4,4 +4,4 @@ qcrypto-objs := core.o \
common.o \
dma.o \
sha.o \
- ablkcipher.o
+ skcipher.o
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -45,12 +45,12 @@ struct qce_cipher_reqctx {
unsigned int cryptlen;
};
-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- return container_of(alg, struct qce_alg_template, alg.crypto);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ return container_of(alg, struct qce_alg_template, alg.skcipher);
}
-extern const struct qce_algo_ops ablkcipher_ops;
+extern const struct qce_algo_ops skcipher_ops;
#endif /* _CIPHER_H_ */
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -304,13 +304,13 @@ go_proc:
return 0;
}
-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
@@ -389,8 +389,8 @@ int qce_start(struct crypto_async_reques
u32 offset)
{
switch (type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return qce_setup_regs_skcipher(async_req, totallen, offset);
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
default:
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
+#include <crypto/internal/skcipher.h>
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
@@ -79,7 +80,7 @@ struct qce_alg_template {
unsigned long alg_flags;
const u32 *std_iv;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg ahash;
} alg;
struct qce_device *qce;
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,7 +22,7 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
- &ablkcipher_ops,
+ &skcipher_ops,
&ahash_ops,
};
--- a/drivers/crypto/qce/ablkcipher.c
+++ /dev/null
@@ -1,440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/skcipher.h>
-
-#include "cipher.h"
-
-static LIST_HEAD(ablkcipher_algs);
-
-static void qce_ablkcipher_done(void *data)
-{
- struct crypto_async_request *async_req = data;
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
- struct qce_device *qce = tmpl->qce;
- enum dma_data_direction dir_src, dir_dst;
- u32 status;
- int error;
- bool diff_dst;
-
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
- dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
-
- error = qce_dma_terminate_all(&qce->dma);
- if (error)
- dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
- error);
-
- if (diff_dst)
- dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
- dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-
- sg_free_table(&rctx->dst_tbl);
-
- error = qce_check_status(qce, &status);
- if (error < 0)
- dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
-
- qce->async_req_done(tmpl->qce, error);
-}
-
-static int
-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
-{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
- struct qce_device *qce = tmpl->qce;
- enum dma_data_direction dir_src, dir_dst;
- struct scatterlist *sg;
- bool diff_dst;
- gfp_t gfp;
- int ret;
-
- rctx->iv = req->info;
- rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- rctx->cryptlen = req->nbytes;
-
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
- dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
-
- rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (diff_dst)
- rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- else
- rctx->dst_nents = rctx->src_nents;
- if (rctx->src_nents < 0) {
- dev_err(qce->dev, "Invalid numbers of src SG.\n");
- return rctx->src_nents;
- }
- if (rctx->dst_nents < 0) {
- dev_err(qce->dev, "Invalid numbers of dst SG.\n");
- return -rctx->dst_nents;
- }
-
- rctx->dst_nents += 1;
-
- gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
-
- ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
- if (ret)
- return ret;
-
- sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
-
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto error_free;
- }
-
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto error_free;
- }
-
- sg_mark_end(sg);
- rctx->dst_sg = rctx->dst_tbl.sgl;
-
- ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (ret < 0)
- goto error_free;
-
- if (diff_dst) {
- ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- if (ret < 0)
- goto error_unmap_dst;
- rctx->src_sg = req->src;
- } else {
- rctx->src_sg = rctx->dst_sg;
- }
-
- ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
- rctx->dst_sg, rctx->dst_nents,
- qce_ablkcipher_done, async_req);
- if (ret)
- goto error_unmap_src;
-
- qce_dma_issue_pending(&qce->dma);
-
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
- if (ret)
- goto error_terminate;
-
- return 0;
-
-error_terminate:
- qce_dma_terminate_all(&qce->dma);
-error_unmap_src:
- if (diff_dst)
- dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
-error_unmap_dst:
- dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-error_free:
- sg_free_table(&rctx->dst_tbl);
- return ret;
-}
-
-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret;
-
- if (!key || !keylen)
- return -EINVAL;
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_256:
- break;
- default:
- goto fallback;
- }
-
- ctx->enc_keylen = keylen;
- memcpy(ctx->enc_key, key, keylen);
- return 0;
-fallback:
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- if (!ret)
- ctx->enc_keylen = keylen;
- return ret;
-}
-
-static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
- unsigned int keylen)
-{
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
- int err;
-
- err = verify_ablkcipher_des_key(ablk, key);
- if (err)
- return err;
-
- ctx->enc_keylen = keylen;
- memcpy(ctx->enc_key, key, keylen);
- return 0;
-}
-
-static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
- unsigned int keylen)
-{
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
- int err;
-
- err = verify_ablkcipher_des3_key(ablk, key);
- if (err)
- return err;
-
- ctx->enc_keylen = keylen;
- memcpy(ctx->enc_key, key, keylen);
- return 0;
-}
-
-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
-{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
- int ret;
-
- rctx->flags = tmpl->alg_flags;
- rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
-
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
- ret = encrypt ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return ret;
- }
-
- return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
-}
-
-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
-{
- return qce_ablkcipher_crypt(req, 1);
-}
-
-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
-{
- return qce_ablkcipher_crypt(req, 0);
-}
-
-static int qce_ablkcipher_init(struct crypto_tfm *tfm)
-{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
-
- ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
- 0, CRYPTO_ALG_NEED_FALLBACK);
- return PTR_ERR_OR_ZERO(ctx->fallback);
-}
-
-static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
-{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_sync_skcipher(ctx->fallback);
-}
-
-struct qce_ablkcipher_def {
- unsigned long flags;
- const char *name;
- const char *drv_name;
- unsigned int blocksize;
- unsigned int ivsize;
- unsigned int min_keysize;
- unsigned int max_keysize;
-};
-
-static const struct qce_ablkcipher_def ablkcipher_def[] = {
- {
- .flags = QCE_ALG_AES | QCE_MODE_ECB,
- .name = "ecb(aes)",
- .drv_name = "ecb-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_AES | QCE_MODE_CBC,
- .name = "cbc(aes)",
- .drv_name = "cbc-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_AES | QCE_MODE_CTR,
- .name = "ctr(aes)",
- .drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_AES | QCE_MODE_XTS,
- .name = "xts(aes)",
- .drv_name = "xts-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_DES | QCE_MODE_ECB,
- .name = "ecb(des)",
- .drv_name = "ecb-des-qce",
- .blocksize = DES_BLOCK_SIZE,
- .ivsize = 0,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_DES | QCE_MODE_CBC,
- .name = "cbc(des)",
- .drv_name = "cbc-des-qce",
- .blocksize = DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_3DES | QCE_MODE_ECB,
- .name = "ecb(des3_ede)",
- .drv_name = "ecb-3des-qce",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .ivsize = 0,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
- {
- .flags = QCE_ALG_3DES | QCE_MODE_CBC,
- .name = "cbc(des3_ede)",
- .drv_name = "cbc-3des-qce",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
-};
-
-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
- struct qce_device *qce)
-{
- struct qce_alg_template *tmpl;
- struct crypto_alg *alg;
- int ret;
-
- tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
- if (!tmpl)
- return -ENOMEM;
-
- alg = &tmpl->alg.crypto;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
-
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
- alg->cra_ablkcipher.min_keysize = def->min_keysize;
- alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey :
- IS_DES(def->flags) ? qce_des_setkey :
- qce_ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
-
- alg->cra_priority = 300;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY;
- alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
- alg->cra_alignmask = 0;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_module = THIS_MODULE;
- alg->cra_init = qce_ablkcipher_init;
- alg->cra_exit = qce_ablkcipher_exit;
-
- INIT_LIST_HEAD(&tmpl->entry);
- tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
- tmpl->alg_flags = def->flags;
- tmpl->qce = qce;
-
- ret = crypto_register_alg(alg);
- if (ret) {
- kfree(tmpl);
- dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
- return ret;
- }
-
- list_add_tail(&tmpl->entry, &ablkcipher_algs);
- dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
- return 0;
-}
-
-static void qce_ablkcipher_unregister(struct qce_device *qce)
-{
- struct qce_alg_template *tmpl, *n;
-
- list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
- crypto_unregister_alg(&tmpl->alg.crypto);
- list_del(&tmpl->entry);
- kfree(tmpl);
- }
-}
-
-static int qce_ablkcipher_register(struct qce_device *qce)
-{
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
- ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
- if (ret)
- goto err;
- }
-
- return 0;
-err:
- qce_ablkcipher_unregister(qce);
- return ret;
-}
-
-const struct qce_algo_ops ablkcipher_ops = {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .register_algs = qce_ablkcipher_register,
- .unregister_algs = qce_ablkcipher_unregister,
- .async_req_handle = qce_ablkcipher_async_req_handle,
-};
--- /dev/null
+++ b/drivers/crypto/qce/skcipher.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+
+#include "cipher.h"
+
+static LIST_HEAD(skcipher_algs);
+
+static void qce_skcipher_done(void *data)
+{
+ struct crypto_async_request *async_req = data;
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ u32 status;
+ int error;
+ bool diff_dst;
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ error = qce_dma_terminate_all(&qce->dma);
+ if (error)
+ dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
+ error);
+
+ if (diff_dst)
+ dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+
+ sg_free_table(&rctx->dst_tbl);
+
+ error = qce_check_status(qce, &status);
+ if (error < 0)
+ dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+
+ qce->async_req_done(tmpl->qce, error);
+}
+
+static int
+qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
+{
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ struct scatterlist *sg;
+ bool diff_dst;
+ gfp_t gfp;
+ int ret;
+
+ rctx->iv = req->iv;
+ rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+ rctx->cryptlen = req->cryptlen;
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (diff_dst)
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+ else
+ rctx->dst_nents = rctx->src_nents;
+ if (rctx->src_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of src SG.\n");
+ return rctx->src_nents;
+ }
+ if (rctx->dst_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of dst SG.\n");
+ return -rctx->dst_nents;
+ }
+
+ rctx->dst_nents += 1;
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+ if (ret)
+ return ret;
+
+ sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto error_free;
+ }
+
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto error_free;
+ }
+
+ sg_mark_end(sg);
+ rctx->dst_sg = rctx->dst_tbl.sgl;
+
+ ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+ if (ret < 0)
+ goto error_free;
+
+ if (diff_dst) {
+ ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+ if (ret < 0)
+ goto error_unmap_dst;
+ rctx->src_sg = req->src;
+ } else {
+ rctx->src_sg = rctx->dst_sg;
+ }
+
+ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+ rctx->dst_sg, rctx->dst_nents,
+ qce_skcipher_done, async_req);
+ if (ret)
+ goto error_unmap_src;
+
+ qce_dma_issue_pending(&qce->dma);
+
+ ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+ if (ret)
+ goto error_terminate;
+
+ return 0;
+
+error_terminate:
+ qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+ if (diff_dst)
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+error_unmap_dst:
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+error_free:
+ sg_free_table(&rctx->dst_tbl);
+ return ret;
+}
+
+static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
+ struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ if (!key || !keylen)
+ return -EINVAL;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ goto fallback;
+ }
+
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ return 0;
+fallback:
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ if (!ret)
+ ctx->enc_keylen = keylen;
+ return ret;
+}
+
+static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
+ unsigned int keylen)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ int err;
+
+ err = verify_skcipher_des_key(ablk, key);
+ if (err)
+ return err;
+
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ return 0;
+}
+
+static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
+ unsigned int keylen)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ int err;
+
+ err = verify_skcipher_des3_key(ablk, key);
+ if (err)
+ return err;
+
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ return 0;
+}
+
+static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int ret;
+
+ rctx->flags = tmpl->alg_flags;
+ rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
+ if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
+ ctx->enc_keylen != AES_KEYSIZE_256) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ return ret;
+ }
+
+ return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_skcipher_encrypt(struct skcipher_request *req)
+{
+ return qce_skcipher_crypt(req, 1);
+}
+
+static int qce_skcipher_decrypt(struct skcipher_request *req)
+{
+ return qce_skcipher_crypt(req, 0);
+}
+
+static int qce_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
+ return PTR_ERR_OR_ZERO(ctx->fallback);
+}
+
+static void qce_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_sync_skcipher(ctx->fallback);
+}
+
+struct qce_skcipher_def {
+ unsigned long flags;
+ const char *name;
+ const char *drv_name;
+ unsigned int blocksize;
+ unsigned int ivsize;
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+};
+
+static const struct qce_skcipher_def skcipher_def[] = {
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_ECB,
+ .name = "ecb(aes)",
+ .drv_name = "ecb-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CBC,
+ .name = "cbc(aes)",
+ .drv_name = "cbc-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CTR,
+ .name = "ctr(aes)",
+ .drv_name = "ctr-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_XTS,
+ .name = "xts(aes)",
+ .drv_name = "xts-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_ECB,
+ .name = "ecb(des)",
+ .drv_name = "ecb-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = 0,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_CBC,
+ .name = "cbc(des)",
+ .drv_name = "cbc-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_ECB,
+ .name = "ecb(des3_ede)",
+ .drv_name = "ecb-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = 0,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_CBC,
+ .name = "cbc(des3_ede)",
+ .drv_name = "cbc-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+};
+
+static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
+ struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl;
+ struct skcipher_alg *alg;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl)
+ return -ENOMEM;
+
+ alg = &tmpl->alg.skcipher;
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
+ alg->min_keysize = def->min_keysize;
+ alg->max_keysize = def->max_keysize;
+ alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
+ IS_DES(def->flags) ? qce_des_setkey :
+ qce_skcipher_setkey;
+ alg->encrypt = qce_skcipher_encrypt;
+ alg->decrypt = qce_skcipher_decrypt;
+
+ alg->base.cra_priority = 300;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = qce_skcipher_init;
+ alg->exit = qce_skcipher_exit;
+
+ INIT_LIST_HEAD(&tmpl->entry);
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
+ tmpl->alg_flags = def->flags;
+ tmpl->qce = qce;
+
+ ret = crypto_register_skcipher(alg);
+ if (ret) {
+ kfree(tmpl);
+ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+ return ret;
+ }
+
+ list_add_tail(&tmpl->entry, &skcipher_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
+ return 0;
+}
+
+static void qce_skcipher_unregister(struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl, *n;
+
+ list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&tmpl->alg.skcipher);
+ list_del(&tmpl->entry);
+ kfree(tmpl);
+ }
+}
+
+static int qce_skcipher_register(struct qce_device *qce)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
+ ret = qce_skcipher_register_one(&skcipher_def[i], qce);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ qce_skcipher_unregister(qce);
+ return ret;
+}
+
+const struct qce_algo_ops skcipher_ops = {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .register_algs = qce_skcipher_register,
+ .unregister_algs = qce_skcipher_unregister,
+ .async_req_handle = qce_skcipher_async_req_handle,
+};
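
Once registered, the qce algorithms are selected through the usual crypto
API name resolution. A hedged usage sketch (not from the patch): because
the driver registers with cra_priority 300, a plain "cbc(aes)" allocation
typically resolves to the hardware implementation rather than the generic
software cipher (priority 100), which can be verified via the driver name:

struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);

if (!IS_ERR(tfm)) {
	/* On ipq40xx this typically prints "cbc-aes-qce". */
	pr_info("cbc(aes) backed by %s\n",
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));
	crypto_free_skcipher(tfm);
}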