#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <crypto/sha.h>
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_SHA1			BIT(18)
#define SHA_FLAGS_SHA256		BIT(19)
#define SHA_FLAGS_ERROR			BIT(20)
#define SHA_FLAGS_PAD			BIT(21)

#define SHA_FLAGS_DUALBUFF		BIT(24)

#define SHA_OP_UPDATE			1
#define SHA_OP_FINAL			2

#define SHA_BUFFER_LEN			PAGE_SIZE

#define ATMEL_SHA_DMA_THRESHOLD		56

#define ATMEL_SHA_QUEUE_LENGTH		1
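/*
 * SHA_BUFFER_LEN sizes the per-request bounce buffer and
 * ATMEL_SHA_QUEUE_LENGTH the depth of the software request queue;
 * ATMEL_SHA_DMA_THRESHOLD is read here as the size below which an update
 * is fed to the engine by CPU writes rather than by DMA (inference, the
 * deciding code is not part of this excerpt).
 */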
	writel_relaxed(value, dd->io_base + offset);

	if (ctx->offset == ctx->sg->length) {
	unsigned int index, padlen;

	index = ctx->bufcnt & 0x3f;
	padlen = (index < 56) ? (56 - index) : ((64+56) - index);

	*(ctx->buffer + ctx->bufcnt) = 0x80;
	ctx->bufcnt += padlen + 8;
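/*
 * For reference, a minimal standalone sketch of the same MD-style padding
 * rule used above: append 0x80, zero-fill so the length is congruent to
 * 56 mod 64, then append the message length in bits as a 64-bit big-endian
 * value, making the padded length a multiple of 64 bytes. The helper name
 * and the assumption that buf has room for the padding bytes are
 * illustrative, not part of the driver.
 */
static size_t sha_pad_sketch(unsigned char *buf, size_t msglen)
{
	size_t index = msglen & 0x3f;
	size_t padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
	unsigned long long bits = (unsigned long long)msglen << 3;
	int i;

	buf[msglen] = 0x80;				/* first padding byte */
	memset(buf + msglen + 1, 0, padlen - 1);	/* zero fill */
	for (i = 0; i < 8; i++)				/* 64-bit big-endian bit count */
		buf[msglen + padlen + i] = (unsigned char)(bits >> (56 - 8 * i));

	return msglen + padlen + 8;			/* multiple of 64 */
}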
	spin_lock_bh(&atmel_sha.lock);

	spin_unlock_bh(&atmel_sha.lock);

		crypto_ahash_digestsize(tfm));
	atmel_sha_write(dd, SHA_CR, valcr);
	atmel_sha_write(dd, SHA_MR, valmr);
		size_t length, int final)

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
		ctx->digcnt, length, final);

	atmel_sha_write_ctrl(dd, 0);	/* 0: feed data by CPU, not DMA */
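	/*
	 * The block is then handed to the engine as 32-bit words (len32)
	 * written to the SHA_REG_DIN(x) input data registers.
	 */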
	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)

	dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n",
		ctx->digcnt, length1, final);
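	/*
	 * PDC transmit descriptors: TPR/TCR take the current buffer address
	 * and transfer count, TNPR/TNCR take an optional "next" buffer so a
	 * second buffer can be chained behind the first.
	 */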
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);	/* 1: PDC/DMA transfer */
	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
		size_t length, int final)

	return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)

	atmel_sha_append_sg(ctx);

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",

	atmel_sha_fill_padding(ctx, 0);

	return atmel_sha_xmit_dma_map(dd, ctx, count, final);
static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)

		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",

		return atmel_sha_update_dma_slow(dd);

		return atmel_sha_update_dma_slow(dd);

		return atmel_sha_update_dma_slow(dd);
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,

			length, ctx->dma_addr, count, final);
static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)

	if (ctx->sg->length == ctx->offset) {
	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",

		err = atmel_sha_update_cpu(dd);

		err = atmel_sha_update_dma_start(dd);

	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n",
		atmel_sha_fill_padding(ctx, 0);

		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);

		atmel_sha_fill_padding(ctx, 0);

		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);

	dev_dbg(dd->dev, "final_req: err: %d\n", err);
static void atmel_sha_copy_ready_hash(struct ahash_request *req)

		atmel_sha_copy_ready_hash(req);
static void atmel_sha_finish_req(struct ahash_request *req, int err)

		atmel_sha_copy_hash(req);

			err = atmel_sha_finish(req);

	clk_disable_unprepare(dd->iclk);

	/* invoke the caller's completion callback, if one was supplied */
	if (req->base.complete)
		req->base.complete(&req->base, err);
	clk_prepare_enable(dd->iclk);

	atmel_sha_dualbuff_test(dd);
	int err = 0, ret = 0;

		ret = ahash_enqueue_request(&dd->queue, req);

		spin_unlock_irqrestore(&dd->lock, flags);

	backlog = crypto_get_backlog(&dd->queue);

	spin_unlock_irqrestore(&dd->lock, flags);
	req = ahash_request_cast(async_req);
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",

	err = atmel_sha_hw_init(dd);

		err = atmel_sha_update_req(dd);

			err = atmel_sha_final_req(dd);

		err = atmel_sha_final_req(dd);

		atmel_sha_finish_req(req, err);
static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)

	return atmel_sha_handle_queue(dd, req);
	atmel_sha_append_sg(ctx);

	err = atmel_sha_hw_init(dd);

	err = atmel_sha_final_req(dd);

		return atmel_sha_finish(req);

	atmel_sha_finish_req(req, err);

	err1 = atmel_sha_update(req);

	err2 = atmel_sha_final(req);
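	/*
	 * digest = init + finup; the GNU "?:" below returns the init error
	 * if it is non-zero and otherwise chains straight into finup.
	 */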
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)

	const char *alg_name = crypto_tfm_alg_name(tfm);

		pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
static int atmel_sha_cra_init(struct crypto_tfm *tfm)

	return atmel_sha_cra_init_alg(tfm, NULL);

static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,

		.cra_driver_name	= "atmel-sha1",

		.cra_init		= atmel_sha_cra_init,
		.cra_exit		= atmel_sha_cra_exit,
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,

		.cra_name		= "sha256",
		.cra_driver_name	= "atmel-sha256",

		.cra_init		= atmel_sha_cra_init,
		.cra_exit		= atmel_sha_cra_exit,
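/*
 * Both ahash templates above are registered with the crypto API through
 * crypto_register_ahash() at probe time and removed again with
 * crypto_unregister_ahash() on error or driver removal; the registration
 * helpers themselves are not part of this excerpt.
 */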
static void atmel_sha_done_task(unsigned long data)

		atmel_sha_handle_queue(dd, NULL);	/* no new request: just kick the queue */

		atmel_sha_update_dma_stop(dd);

			err = atmel_sha_update_dma_start(dd);

	atmel_sha_finish_req(dd->req, err);
	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);

			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)

	for (j = 0; j < i; j++)
	unsigned long sha_phys_size;

	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);

			(unsigned long)sha_dd);
		dev_err(dev, "no MEM resource info\n");

	sha_phys_size = resource_size(sha_res);

	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");

		dev_err(dev, "unable to request sha irq.\n");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);

		dev_err(dev, "can't ioremap\n");
	spin_lock(&atmel_sha.lock);

	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);

	dev_info(dev, "Atmel SHA1/SHA256\n");
	spin_lock(&atmel_sha.lock);

	spin_unlock(&atmel_sha.lock);

	dev_err(dev, "initialization failed.\n");
	sha_dd = platform_get_drvdata(pdev);

	spin_lock(&atmel_sha.lock);

	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	if (sha_dd->irq >= 0)
	.probe		= atmel_sha_probe,

		.name	= "atmel_sha",