#include <crypto/aes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <crypto/sha.h>

#define MV_CESA "MV-CESA:"
#define MAX_HW_HASH_SIZE 0xFFFF
#define MV_CESA_EXPIRE 500 /* msec */
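/*
 * MAX_HW_HASH_SIZE reflects the engine's 16-bit byte counter: requests
 * hashing more than 0xFFFF bytes cannot be finalized in hardware and are
 * finished by the software fallback (see mv_hash_final_fallback() below).
 * MV_CESA_EXPIRE is the watchdog timeout for a stuck operation.
 */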
static void mv_completion_timer_callback(unsigned long unused)
        "completion timer expired (CESA %sactive), cleaning up.\n",
                printk(KERN_INFO MV_CESA "%s: waiting for engine to finish\n",
                       __func__);
static void mv_setup_timer(void)
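/*
 * Watchdog pair: mv_setup_timer() presumably arms the completion timer for
 * MV_CESA_EXPIRE msecs before a hardware operation is kicked off, and the
 * callback above fires only if the completion interrupt never arrives,
 * logging the engine state and waking the queue thread so the stuck
 * request can be cleaned up.
 */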
static void compute_aes_dec_key(struct mv_ctx *ctx)
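/*
 * compute_aes_dec_key() derives the decryption key material the engine
 * expects from the encryption key stored by mv_setkey_aes(); the decrypt
 * entry points (mv_dec_aes_ecb()/mv_dec_aes_cbc() below) call it before
 * handing the request to the hardware.
 */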
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
                memcpy(dbuf, sbuf, copy_len);
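/*
 * copy_src_to_buf() drains up to @len bytes from the current position of
 * the source scatterlist iterator in @p into the flat buffer @dbuf;
 * @sbuf/@copy_len are per-sg-entry chunks, so a single call may span
 * several scatterlist entries.
 */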
static void setup_data_in(void)
static void mv_process_current_q(int first_block)
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        switch (req_ctx->op) {
        op.enc_len = cpg->p.crypt_len;
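/*
 * mv_process_current_q() programs one chunk of the cipher request into the
 * engine: the security-accelerator config is selected by req_ctx->op (ECB
 * vs CBC, encrypt vs decrypt), and op.enc_len is set to cpg->p.crypt_len,
 * the amount of data staged in SRAM for this round.
 */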
static void mv_crypto_algo_completion(void)
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
static void mv_process_hash_current(int first_block)
        switch (req_ctx->op) {
                                tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
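/*
 * For hash requests the operation is again selected by req_ctx->op; in the
 * HMAC case the precomputed inner/outer states in tfm_ctx->ivs (filled in
 * via mv_hash_init_ivs() from setkey) are copied into the engine's IV area
 * in SRAM before the data is processed.
 */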
static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
                                          struct shash_desc *desc)
        for (i = 0; i < 5; i++)
                shash_state.state[i] = ctx->state[i];
        memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
        return crypto_shash_import(desc, &shash_state);
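/*
 * Helper for the fallback path: rebuilds a struct sha1_state from the five
 * 32-bit words and partial block saved in the request context and imports
 * it into the software shash, so software can resume exactly where the
 * hardware stopped. Only SHA-1 state import is handled here.
 */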
                char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
        crypto_shash_init(&desc.shash);
        rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
static void mv_hash_algo_completion(void)
                               crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
                        mv_save_digest_state(ctx);
                        mv_hash_final_fallback(req);
                mv_save_digest_state(ctx);
static void dequeue_complete_req(void)
        cpg->p.hw_processed_bytes += cpg->p.crypt_len;
        if (cpg->p.copy_back) {
                int need_copy_len = cpg->p.crypt_len;
                        if (!cpg->p.sg_dst_left) {
                                cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
                                cpg->p.dst_start = 0;
                        buf = cpg->p.dst_sg_it.addr;
                        buf += cpg->p.dst_start;
                        dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
                        sram_offset += dst_copy;
                        cpg->p.sg_dst_left -= dst_copy;
                        need_copy_len -= dst_copy;
                        cpg->p.dst_start += dst_copy;
                } while (need_copy_len > 0);
        cpg->p.crypt_len = 0;
        if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
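/*
 * dequeue_complete_req() runs after each completion: the do/while above
 * copies the produced output from the accelerator SRAM back into the
 * destination scatterlist entry by entry, then the function either feeds
 * the next chunk to p.process() or signals completion via p.complete()
 * once hw_processed_bytes has reached hw_nbytes.
 */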
                if (total_bytes > cur_len)
                        total_bytes -= cur_len;
        p->complete = mv_crypto_algo_completion;
        p->process = mv_process_current_q;
        num_sgs = count_sgs(req->src, req->nbytes);
        num_sgs = count_sgs(req->dst, req->nbytes);
        mv_process_current_q(1);
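/*
 * Starting a cipher request: hook up the chunk processor and completion
 * handler, size the src/dst scatterlist iterators via count_sgs(), and
 * process the first chunk immediately with first_block set.
 */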
        int num_sgs, hw_bytes, old_extra_bytes, rc;
        num_sgs = count_sgs(req->src, req->nbytes);
                p->complete = mv_hash_algo_completion;
                p->process = mv_process_hash_current;
                mv_process_hash_current(1);
                copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
        rc = mv_hash_final_fallback(req);
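/*
 * Hash requests only feed whole blocks to the hardware: leftover bytes are
 * parked in ctx->buffer (via copy_src_to_buf() above) until more data
 * arrives. If nothing is left for the engine and this is the last chunk,
 * mv_hash_final_fallback() finishes the digest in software.
 */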
static int queue_manag(void *data)
                dequeue_complete_req();
                spin_lock_irq(&cpg->lock);
                backlog = crypto_get_backlog(&cpg->queue);
                spin_unlock_irq(&cpg->lock);
                        if (async_req->tfm->__crt_alg->cra_type !=
                                    ablkcipher_request_cast(async_req);
                                mv_start_new_crypt_req(req);
                                    ahash_request_cast(async_req);
                                mv_start_new_hash_req(req);
        spin_unlock_irqrestore(&cpg->lock, flags);
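/*
 * queue_manag() is the driver's kthread: under cpg->lock it pops the next
 * request off the crypto queue, then dispatches by cra_type, ablkcipher
 * requests to mv_start_new_crypt_req() and ahash requests to
 * mv_start_new_hash_req(). mv_handle_req(), which unlocks with
 * spin_unlock_irqrestore() above, is the enqueue side of this queue.
 */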
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        return mv_handle_req(&req->base);
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        return mv_handle_req(&req->base);
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
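/*
 * The four ablkcipher entry points differ only in the op code (ECB vs CBC)
 * and the decrypt flag stashed in the request context; the decrypt
 * variants additionally derive the decryption key first.
 */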
static int mv_cra_init(struct crypto_tfm *tfm)
        tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
                                 int is_last, unsigned int req_len,
        memset(ctx, 0, sizeof(*ctx));
static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
        mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
        mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
        return mv_handle_req(&req->base);
        ahash_request_set_crypt(req, NULL, req->result, 0);
        mv_update_hash_req_ctx(ctx, 1, 0);
        return mv_handle_req(&req->base);
        mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
        return mv_handle_req(&req->base);
        mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
        return mv_handle_req(&req->base);
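/*
 * The ahash entry points map init/update/final/finup/digest onto the same
 * request context: "is_last" distinguishes final/finup/digest from update,
 * and req->nbytes is zero for .final because its data was already consumed
 * by earlier updates.
 */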
        for (i = 0; i < 5; i++) {
static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
        bs = crypto_shash_blocksize(ctx->base_hash);
        ds = crypto_shash_digestsize(ctx->base_hash);
        ss = crypto_shash_statesize(ctx->base_hash);
                        char ctx[crypto_shash_descsize(ctx->base_hash)];
                        memcpy(ipad, key, keylen);
                memset(ipad + keylen, 0, bs - keylen);
                for (i = 0; i < bs; i++) {
                rc = crypto_shash_init(&desc.shash) ? :
                    crypto_shash_export(&desc.shash, ipad) ? :
                    crypto_shash_init(&desc.shash) ? :
                    crypto_shash_export(&desc.shash, opad);
                        mv_hash_init_ivs(ctx, ipad, opad);
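/*
 * Standard HMAC key processing: XOR the (padded or pre-hashed) key with
 * 0x36/0x5c to build ipad and opad, run each through one block of the base
 * hash, and export the partial states; mv_hash_init_ivs() then installs
 * them as the engine's inner/outer IVs.
 */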
static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
                            enum hash_op op, int count_add)
        const char *fallback_driver_name = tfm->__crt_alg->cra_name;
        if (IS_ERR(fallback_tfm)) {
                       "Fallback driver '%s' could not be loaded!\n",
                       fallback_driver_name);
                err = PTR_ERR(fallback_tfm);
        if (base_hash_name) {
                if (IS_ERR(base_hash)) {
                               "Base driver '%s' could not be loaded!\n",
                        err = PTR_ERR(base_hash);
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 crypto_shash_descsize(ctx->fallback));
        crypto_free_shash(fallback_tfm);
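/*
 * Common hash cra_init: allocate a software fallback shash with the same
 * cra_name as this algorithm and, for HMAC, an additional base hash used
 * only to precompute the ipad/opad state in setkey. The request size must
 * cover the fallback's descriptor so the fallback can run out of the
 * ahash request context.
 */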
static void mv_cra_hash_exit(struct crypto_tfm *tfm)
static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
                       "got an interrupt but no pending timer?\n");
        val &= ~SEC_INT_ACCEL0_DONE;
        .cra_name = "ecb(aes)",
        .cra_driver_name = "mv-ecb-aes",
        .cra_ctxsize = sizeof(struct mv_ctx),
        .cra_init = mv_cra_init,
                        .setkey = mv_setkey_aes,
                        .encrypt = mv_enc_aes_ecb,
                        .decrypt = mv_dec_aes_ecb,
        .cra_name = "cbc(aes)",
        .cra_driver_name = "mv-cbc-aes",
        .cra_ctxsize = sizeof(struct mv_ctx),
        .cra_init = mv_cra_init,
                        .setkey = mv_setkey_aes,
                        .encrypt = mv_enc_aes_cbc,
                        .decrypt = mv_dec_aes_cbc,
        .init = mv_hash_init,
        .update = mv_hash_update,
        .final = mv_hash_final,
        .finup = mv_hash_finup,
        .digest = mv_hash_digest,
                  .cra_driver_name = "mv-sha1",
                  .cra_init = mv_cra_hash_sha1_init,
                  .cra_exit = mv_cra_hash_exit,
        .init = mv_hash_init,
        .update = mv_hash_update,
        .final = mv_hash_final,
        .finup = mv_hash_finup,
        .digest = mv_hash_digest,
        .setkey = mv_hash_setkey,
                  .cra_name = "hmac(sha1)",
                  .cra_driver_name = "mv-hmac-sha1",
                  .cra_priority = 300,
                  .cra_init = mv_cra_hash_hmac_sha1_init,
                  .cra_exit = mv_cra_hash_exit,
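/*
 * The driver exposes four algorithms: "ecb(aes)" ("mv-ecb-aes"),
 * "cbc(aes)" ("mv-cbc-aes"), "sha1" ("mv-sha1") and "hmac(sha1)"
 * ("mv-hmac-sha1"); registration happens in mv_probe() below, with a
 * per-algorithm warning if any registration fails.
 */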
        if (pdev->dev.of_node)
        if (irq < 0 || irq == NO_IRQ) {
                goto err_unmap_sram;
        platform_set_drvdata(pdev, cp);
                goto err_unmap_sram;
        if (!IS_ERR(cp->clk))
                clk_prepare_enable(cp->clk);
                       "Could not register aes-ecb driver\n");
                       "Could not register aes-cbc driver\n");
                       "Could not register hmac-sha1 driver\n");
        if (!IS_ERR(cp->clk)) {
                clk_disable_unprepare(cp->clk);
        platform_set_drvdata(pdev, NULL);
        struct crypto_priv *cp = platform_get_drvdata(pdev);
        if (!IS_ERR(cp->clk)) {
                clk_disable_unprepare(cp->clk);
static const struct of_device_id mv_cesa_of_match_table[] = {
        { .compatible = "marvell,orion-crypto", },
                .name = "mv_crypto",
                .of_match_table = of_match_ptr(mv_cesa_of_match_table),