16 #define pr_fmt(fmt) "%s: " fmt, __func__
19 #include <linux/device.h>
20 #include <linux/module.h>
22 #include <linux/errno.h>
24 #include <linux/kernel.h>
36 #include <crypto/sha.h>
42 #include <mach/irqs.h>
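/* register offsets and bit definitions of the SHA1/MD5 accelerator */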
44 #define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
45 #define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
47 #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
48 #define MD5_DIGEST_SIZE 16
50 #define SHA_REG_DIGCNT 0x14
52 #define SHA_REG_CTRL 0x18
53 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
54 #define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
55 #define SHA_REG_CTRL_ALGO_CONST (1 << 3)
56 #define SHA_REG_CTRL_ALGO (1 << 2)
57 #define SHA_REG_CTRL_INPUT_READY (1 << 1)
58 #define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
60 #define SHA_REG_REV 0x5C
61 #define SHA_REG_REV_MAJOR 0xF0
62 #define SHA_REG_REV_MINOR 0x0F
64 #define SHA_REG_MASK 0x60
65 #define SHA_REG_MASK_DMA_EN (1 << 3)
66 #define SHA_REG_MASK_IT_EN (1 << 2)
67 #define SHA_REG_MASK_SOFTRESET (1 << 1)
68 #define SHA_REG_AUTOIDLE (1 << 0)
70 #define SHA_REG_SYSSTATUS 0x64
71 #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
73 #define DEFAULT_TIMEOUT_INTERVAL HZ
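/* device/request state flags; values are bit numbers used with set_bit()/test_bit() */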
78 #define FLAGS_DMA_ACTIVE 2
79 #define FLAGS_OUTPUT_READY 3
82 #define FLAGS_DMA_READY 6
84 #define FLAGS_FINUP 16
88 #define FLAGS_ERROR 20
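/* buffers handed to the accelerator must be 32-bit aligned */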
93 #define OMAP_ALIGN_MASK (sizeof(u32)-1)
94 #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
96 #define BUFLEN PAGE_SIZE
136 #define OMAP_SHAM_QUEUE_LENGTH 1
183 val = omap_sham_read(dd, address);
186 omap_sham_write(dd, address, val);
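/* poll a status bit, giving up after DEFAULT_TIMEOUT_INTERVAL */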
193 while (!(omap_sham_read(dd, offset) & bit)) {
210 hash[i] = omap_sham_read(ctx->dd,
213 omap_sham_write(ctx->dd,
218 static void omap_sham_copy_ready_hash(struct ahash_request *req)
288 size_t length, int final)
294 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
295 ctx->digcnt, length, final);
297 omap_sham_write_ctrl(dd, length, final, 0);
312 for (count = 0; count < len32; count++)
313 omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
319 size_t length, int final)
324 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
325 ctx->digcnt, length, final);
336 omap_sham_write_ctrl(dd, length, final, 1);
351 const u8 *data, size_t length)
369 count = omap_sham_append_buffer(ctx,
376 if (ctx->offset == ctx->sg->length) {
390 size_t length, int final)
402 return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
405 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
411 omap_sham_append_sg(ctx);
415 dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
421 return omap_sham_xmit_dma_map(dd, ctx, count, final);
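/* scatterlist start/length alignment checks used to choose the DMA fast path */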
428 #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32)))
430 #define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
432 static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
442 return omap_sham_update_dma_slow(dd);
444 dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
450 return omap_sham_update_dma_slow(dd);
454 return omap_sham_update_dma_slow(dd);
490 omap_sham_append_sg(ctx);
494 return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
497 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
504 if (ctx->sg->length == ctx->offset) {
524 spin_lock_bh(&sham.lock);
534 spin_unlock_bh(&sham.lock);
541 crypto_ahash_digestsize(tfm));
568 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
572 err = omap_sham_update_cpu(dd);
574 err = omap_sham_update_dma_start(dd);
593 err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
595 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
599 dev_dbg(dd->dev, "final_req: err: %d\n", err);
608 int bs = crypto_shash_blocksize(bctx->shash);
609 int ds = crypto_shash_digestsize(bctx->shash);
612 char ctx[crypto_shash_descsize(bctx->shash)];
618 return crypto_shash_init(&desc.shash) ?:
630 omap_sham_copy_ready_hash(req);
632 err = omap_sham_finish_hmac(req);
640 static void omap_sham_finish_req(struct ahash_request *req, int err)
646 omap_sham_copy_hash(req, 1);
648 err = omap_sham_finish(req);
658 if (req->base.complete)
659 req->base.complete(&req->base, err);
671 int err = 0, ret = 0;
675 ret = ahash_enqueue_request(&dd->queue, req);
677 spin_unlock_irqrestore(&dd->lock, flags);
680 backlog = crypto_get_backlog(&dd->queue);
684 spin_unlock_irqrestore(&dd->lock, flags);
692 req = ahash_request_cast(async_req);
694 ctx = ahash_request_ctx(req);
696 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
699 err = omap_sham_hw_init(dd);
715 omap_sham_copy_hash(req, 0);
718 err = omap_sham_update_req(dd);
721 err = omap_sham_final_req(dd);
723 err = omap_sham_final_req(dd);
728 omap_sham_finish_req(req, err);
735 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
743 return omap_sham_handle_queue(dd, req);
764 omap_sham_append_sg(ctx);
773 omap_sham_append_sg(ctx);
777 return omap_sham_enqueue(req, OP_UPDATE);
780 static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
781 const u8 *data, unsigned int len, u8 *out)
785 char ctx[crypto_shash_descsize(shash)];
788 desc.shash.tfm = shash;
799 return omap_sham_shash_digest(tctx->fallback, req->base.flags,
815 return omap_sham_final_shash(req);
817 return omap_sham_enqueue(req, OP_FINAL);
820 return omap_sham_finish(req);
830 err1 = omap_sham_update(req);
837 err2 = omap_sham_final(req);
844 return omap_sham_init(req) ?: omap_sham_finup(req);
852 int bs = crypto_shash_blocksize(bctx->shash);
853 int ds = crypto_shash_digestsize(bctx->shash);
860 err = omap_sham_shash_digest(bctx->shash,
861 crypto_shash_get_flags(bctx->shash),
862 key, keylen, bctx->ipad);
873 for (i = 0; i < bs; i++) {
874 bctx->ipad[i] ^= 0x36;
875 bctx->opad[i] ^= 0x5c;
881 static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
884 const char *alg_name = crypto_tfm_alg_name(tfm);
890 pr_err("omap-sham: fallback driver '%s' "
891 "could not be loaded.\n", alg_name);
895 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
903 if (IS_ERR(bctx->shash)) {
904 pr_err("omap-sham: base driver '%s' "
905 "could not be loaded.\n", alg_base);
907 return PTR_ERR(bctx->shash);
915 static int omap_sham_cra_init(struct crypto_tfm *tfm)
917 return omap_sham_cra_init_alg(tfm, NULL);
920 static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
922 return omap_sham_cra_init_alg(tfm, "sha1");
925 static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
927 return omap_sham_cra_init_alg(tfm, "md5");
930 static void omap_sham_cra_exit(struct crypto_tfm *tfm)
939 crypto_free_shash(bctx->shash);
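/* ahash algorithm descriptors exported by this driver: SHA1, MD5, HMAC(SHA1) and HMAC(MD5) */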
945 .init = omap_sham_init,
946 .update = omap_sham_update,
947 .final = omap_sham_final,
948 .finup = omap_sham_finup,
949 .digest = omap_sham_digest,
953 .cra_driver_name = "omap-sha1",
963 .cra_init = omap_sham_cra_init,
964 .cra_exit = omap_sham_cra_exit,
968 .init = omap_sham_init,
969 .update = omap_sham_update,
970 .final = omap_sham_final,
971 .finup = omap_sham_finup,
972 .digest = omap_sham_digest,
976 .cra_driver_name = "omap-md5",
986 .cra_init = omap_sham_cra_init,
987 .cra_exit = omap_sham_cra_exit,
991 .init = omap_sham_init,
992 .update = omap_sham_update,
993 .final = omap_sham_final,
994 .finup = omap_sham_finup,
995 .digest = omap_sham_digest,
996 .setkey = omap_sham_setkey,
999 .cra_name = "hmac(sha1)",
1000 .cra_driver_name = "omap-hmac-sha1",
1001 .cra_priority = 100,
1011 .cra_init = omap_sham_cra_sha1_init,
1012 .cra_exit = omap_sham_cra_exit,
1016 .init = omap_sham_init,
1017 .update = omap_sham_update,
1018 .final = omap_sham_final,
1019 .finup = omap_sham_finup,
1020 .digest = omap_sham_digest,
1021 .setkey = omap_sham_setkey,
1024 .cra_name = "hmac(md5)",
1025 .cra_driver_name = "omap-hmac-md5",
1026 .cra_priority = 100,
1036 .cra_init = omap_sham_cra_md5_init,
1037 .cra_exit = omap_sham_cra_exit,
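/* tasklet: finishes the current transfer and request, then restarts the queue */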
1042 static void omap_sham_done_task(unsigned long data)
1048 omap_sham_handle_queue(dd, NULL);
1057 omap_sham_update_dma_stop(dd);
1066 err = omap_sham_update_dma_start(dd);
1075 dev_dbg(dd->dev, "update done: err: %d\n", err);
1077 omap_sham_finish_req(dd->req, err);
1093 dev_warn(dd->dev, "Interrupt when no active requests.\n");
1103 static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
1108 pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
1124 omap_sham_dma_callback, dd, &dd->dma_lch);
1126 dev_err(dd->dev, "Unable to request DMA channel\n");
1150 dev_err(dev, "unable to alloc data struct.\n");
1155 platform_set_drvdata(pdev, dd);
1157 INIT_LIST_HEAD(&dd->list);
1167 dev_err(dev, "no MEM resource info\n");
1176 dev_err(dev, "no DMA resource info\n");
1185 dev_err(dev, "no IRQ resource info\n");
1193 dev_err(dev, "unable to request irq.\n");
1197 err = omap_sham_dma_init(dd);
1203 if (IS_ERR(dd->iclk)) {
1204 dev_err(dev, "clock initialization failed.\n");
1205 err = PTR_ERR(dd->iclk);
1211 dev_err(dev, "can't ioremap\n");
1217 dev_info(dev, "hw accel on OMAP rev %u.%u\n",
1222 spin_lock(&sham.lock);
1224 spin_unlock(&sham.lock);
1235 for (j = 0; j < i; j++)
1241 omap_sham_dma_cleanup(dd);
1249 dev_err(dev, "initialization failed.\n");
1259 dd = platform_get_drvdata(pdev);
1262 spin_lock(&sham.lock);
1264 spin_unlock(&sham.lock);
1270 omap_sham_dma_cleanup(dd);
1280 .probe = omap_sham_probe,
1281 .remove = omap_sham_remove,
1283 .name = "omap-sham",
1288 static int __init omap_sham_mod_init(void)
1290 pr_info("loading %s driver\n", "omap-sham");
1295 pr_err("Unsupported cpu\n");
1302 static void __exit omap_sham_mod_exit(void)