28 #include <linux/kernel.h>
29 #include <linux/module.h>
31 #include <linux/device.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/slab.h>
41 #include <linux/string.h>
44 #include <crypto/aes.h>
46 #include <crypto/sha.h>
66 static void map_single_talitos_ptr(
struct device *
dev,
67 struct talitos_ptr *talitos_ptr,
68 unsigned short len,
void *
data,
75 to_talitos_ptr(talitos_ptr, dma_addr);
82 static void unmap_single_talitos_ptr(
struct device *
dev,
83 struct talitos_ptr *talitos_ptr,
90 static int reset_channel(
struct device *dev,
int ch)
102 dev_err(dev,
"failed to reset channel %d\n", ch);
118 static int reset_device(
struct device *dev)
136 dev_err(dev,
"failed to reset device\n");
146 static int init_device(
struct device *dev)
157 err = reset_device(dev);
161 err = reset_device(dev);
167 err = reset_channel(dev, ch);
211 spin_unlock_irqrestore(&priv->
chan[ch].head_lock, flags);
215 head = priv->
chan[ch].head;
216 request = &priv->
chan[ch].fifo[
head];
237 spin_unlock_irqrestore(&priv->
chan[ch].head_lock, flags);
246 static void flush_channel(
struct device *dev,
int ch,
int error,
int reset_ch)
255 tail = priv->
chan[ch].tail;
256 while (priv->
chan[ch].fifo[tail].desc) {
257 request = &priv->
chan[ch].fifo[
tail];
283 priv->
chan[ch].tail = (tail + 1) & (priv->
fifo_len - 1);
285 spin_unlock_irqrestore(&priv->
chan[ch].tail_lock, flags);
292 if (error && !reset_ch && status == error)
295 tail = priv->
chan[ch].tail;
298 spin_unlock_irqrestore(&priv->
chan[ch].tail_lock, flags);
304 #define DEF_TALITOS_DONE(name, ch_done_mask) \
305 static void talitos_done_##name(unsigned long data) \
307 struct device *dev = (struct device *)data; \
308 struct talitos_private *priv = dev_get_drvdata(dev); \
309 unsigned long flags; \
311 if (ch_done_mask & 1) \
312 flush_channel(dev, 0, 0, 0); \
313 if (priv->num_channels == 1) \
315 if (ch_done_mask & (1 << 2)) \
316 flush_channel(dev, 1, 0, 0); \
317 if (ch_done_mask & (1 << 4)) \
318 flush_channel(dev, 2, 0, 0); \
319 if (ch_done_mask & (1 << 6)) \
320 flush_channel(dev, 3, 0, 0); \
325 spin_lock_irqsave(&priv->reg_lock, flags); \
326 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
327 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
328 spin_unlock_irqrestore(&priv->reg_lock, flags); \
340 int tail = priv->
chan[ch].tail;
345 while (priv->
chan[ch].fifo[tail].dma_desc != cur_desc) {
346 tail = (tail + 1) & (priv->
fifo_len - 1);
347 if (tail == priv->
chan[ch].tail) {
348 dev_err(dev,
"couldn't locate current descriptor\n");
353 return priv->
chan[ch].fifo[
tail].desc->hdr;
359 static void report_eu_error(
struct device *dev,
int ch,
u32 desc_hdr)
369 dev_err(dev,
"AFEUISR 0x%08x_%08x\n",
374 dev_err(dev,
"DEUISR 0x%08x_%08x\n",
380 dev_err(dev,
"MDEUISR 0x%08x_%08x\n",
385 dev_err(dev,
"RNGUISR 0x%08x_%08x\n",
390 dev_err(dev,
"PKEUISR 0x%08x_%08x\n",
395 dev_err(dev,
"AESUISR 0x%08x_%08x\n",
400 dev_err(dev,
"CRCUISR 0x%08x_%08x\n",
405 dev_err(dev,
"KEUISR 0x%08x_%08x\n",
414 dev_err(dev,
"MDEUISR 0x%08x_%08x\n",
419 dev_err(dev,
"CRCUISR 0x%08x_%08x\n",
425 for (i = 0; i < 8; i++)
426 dev_err(dev,
"DESCBUF 0x%08x_%08x\n",
438 int ch,
error, reset_dev = 0, reset_ch = 0;
443 if (!(isr & (1 << (ch * 2 + 1))))
452 dev_err(dev,
"double fetch fifo overflow error\n");
458 dev_err(dev,
"single fetch fifo overflow error\n");
462 dev_err(dev,
"master data transfer error\n");
464 dev_err(dev,
"s/g data length zero error\n");
466 dev_err(dev,
"fetch pointer zero error\n");
468 dev_err(dev,
"illegal descriptor header error\n");
470 dev_err(dev,
"invalid execution unit error\n");
472 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
474 dev_err(dev,
"gather boundary error\n");
476 dev_err(dev,
"gather return/length error\n");
478 dev_err(dev,
"scatter boundary error\n");
480 dev_err(dev,
"scatter return/length error\n");
482 flush_channel(dev, ch, error, reset_ch);
485 reset_channel(dev, ch);
494 dev_err(dev,
"failed to restart channel %d\n",
501 dev_err(dev,
"done overflow, internal time out, or rngu error: "
502 "ISR 0x%08x_%08x\n", isr, isr_lo);
506 flush_channel(dev, ch, -
EIO, 1);
513 #define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
514 static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
516 struct device *dev = data; \
517 struct talitos_private *priv = dev_get_drvdata(dev); \
519 unsigned long flags; \
521 spin_lock_irqsave(&priv->reg_lock, flags); \
522 isr = in_be32(priv->reg + TALITOS_ISR); \
523 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
525 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
526 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
528 if (unlikely(isr & ch_err_mask || isr_lo)) { \
529 spin_unlock_irqrestore(&priv->reg_lock, flags); \
530 talitos_error(dev, isr & ch_err_mask, isr_lo); \
533 if (likely(isr & ch_done_mask)) { \
535 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
537 tasklet_schedule(&priv->done_task[tlet]); \
539 spin_unlock_irqrestore(&priv->reg_lock, flags); \
542 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
559 for (i = 0; i < 20; i++) {
570 static int talitos_rng_data_read(
struct hwrng *rng,
u32 *
data)
582 static int talitos_rng_init(
struct hwrng *rng)
593 dev_err(dev,
"failed to reset rng hw\n");
603 static int talitos_register_rng(
struct device *dev)
608 priv->
rng.init = talitos_rng_init,
609 priv->
rng.data_present = talitos_rng_data_present,
610 priv->
rng.data_read = talitos_rng_data_read,
611 priv->
rng.priv = (
unsigned long)dev;
616 static void talitos_unregister_rng(
struct device *dev)
/* Priority used when registering this driver's crypto algorithm
 * implementations with the kernel crypto API (higher wins over
 * generic software implementations). */
626 #define TALITOS_CRA_PRIORITY 3000
/* Upper bound on key material, in bytes — presumably the combined
 * authentication + encryption key for AEAD setkey; TODO confirm at
 * the setkey call sites (not fully visible here). */
627 #define TALITOS_MAX_KEY_SIZE 96
/* Maximum IV length handled, in bytes (16 == AES block size). */
628 #define TALITOS_MAX_IV_LENGTH 16
/* MD5 processes its input in 64-byte blocks. */
630 #define MD5_BLOCK_SIZE 64
/* Largest hash block size this driver must buffer for (SHA-512). */
644 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
/* Largest MDEU (message digest execution unit) context size:
 * the SHA-384/SHA-512 context. */
645 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
661 static int aead_setauthsize(
struct crypto_aead *authenc,
662 unsigned int authsize)
671 static int aead_setkey(
struct crypto_aead *authenc,
675 struct rtattr *rta = (
void *)key;
677 unsigned int authkeylen;
695 if (keylen < enckeylen)
755 sg = scatterwalk_sg_next(sg);
767 sg = scatterwalk_sg_next(sg);
771 static void talitos_sg_unmap(
struct device *dev,
776 unsigned int src_nents = edesc->
src_nents ? : 1;
777 unsigned int dst_nents = edesc->
dst_nents ? : 1;
787 talitos_unmap_sg_chain(dev, dst,
800 static void ipsec_esp_unmap(
struct device *dev,
817 talitos_sg_unmap(dev, edesc, areq->
src, areq->
dst);
827 static void ipsec_esp_encrypt_done(
struct device *dev,
832 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
833 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
840 ipsec_esp_unmap(dev, edesc, areq);
854 aead_request_complete(areq, err);
857 static void ipsec_esp_decrypt_swauth_done(
struct device *dev,
859 void *context,
int err)
862 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
863 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
870 ipsec_esp_unmap(dev, edesc, req);
882 err =
memcmp(icvdata, (
char *)sg_virt(sg) + sg->
length -
888 aead_request_complete(req, err);
891 static void ipsec_esp_decrypt_hwauth_done(
struct device *dev,
893 void *context,
int err)
900 ipsec_esp_unmap(dev, edesc, req);
909 aead_request_complete(req, err);
917 int cryptlen,
struct talitos_ptr *link_tbl_ptr)
927 sg = scatterwalk_sg_next(sg);
935 link_tbl_ptr->
len = 0;
954 void *context,
int error))
956 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
960 unsigned int cryptlen = areq->
cryptlen;
961 unsigned int authsize = ctx->
authsize;
962 unsigned int ivsize = crypto_aead_ivsize(aead);
974 struct talitos_ptr *tbl_ptr = &edesc->
link_tbl[tbl_off];
977 sizeof(
struct talitos_ptr));
985 tbl_ptr += sg_count - 1;
988 to_talitos_ptr(tbl_ptr, edesc->
iv_dma);
996 desc->
ptr[1].j_extent = 0;
1000 to_talitos_ptr(&desc->
ptr[2], edesc->
iv_dma);
1002 desc->
ptr[2].j_extent = 0;
1007 map_single_talitos_ptr(dev, &desc->
ptr[3], ctx->
enckeylen,
1018 desc->
ptr[4].j_extent = authsize;
1020 sg_count = talitos_map_sg(dev, areq->
src, edesc->
src_nents ? : 1,
1025 if (sg_count == 1) {
1028 sg_link_tbl_len = cryptlen;
1031 sg_link_tbl_len = cryptlen + authsize;
1033 sg_count = sg_to_link_tbl(areq->
src, sg_count, sg_link_tbl_len,
1043 to_talitos_ptr(&desc->
ptr[4],
1050 desc->
ptr[5].j_extent = authsize;
1052 if (areq->
src != areq->
dst)
1053 sg_count = talitos_map_sg(dev, areq->
dst,
1057 if (sg_count == 1) {
1061 struct talitos_ptr *tbl_ptr = &edesc->
link_tbl[tbl_off];
1064 tbl_off *
sizeof(
struct talitos_ptr));
1065 sg_count = sg_to_link_tbl(areq->
dst, sg_count, cryptlen,
1069 tbl_ptr += sg_count - 1;
1079 sizeof(
struct talitos_ptr));
1086 map_single_talitos_ptr(dev, &desc->
ptr[6], ivsize, ctx->
iv, 0,
1091 ipsec_esp_unmap(dev, edesc, areq);
1106 while (nbytes > 0) {
1111 sg = scatterwalk_sg_next(sg);
1129 static size_t sg_copy_end_to_buffer(
struct scatterlist *
sgl,
unsigned int nents,
1133 unsigned int boffset = 0;
1135 unsigned long flags;
1137 size_t total_buffer = buflen +
skip;
1147 unsigned int ignore;
1149 if ((offset + miter.length) > skip) {
1150 if (offset < skip) {
1153 len = miter.length - ignore;
1154 if (boffset + len > buflen)
1155 len = buflen - boffset;
1156 memcpy(buf + boffset, miter.addr + ignore, len);
1160 if (boffset + len > buflen)
1161 len = buflen - boffset;
1162 memcpy(buf + boffset, miter.addr, len);
1166 offset += miter.length;
1183 unsigned int assoclen,
1184 unsigned int cryptlen,
1185 unsigned int authsize,
1186 unsigned int ivsize,
1198 dev_err(dev,
"length exceeds h/w max limit\n");
1212 assoc_nents =
sg_count(assoc, assoclen, &assoc_chained);
1215 assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
1218 assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1222 src_nents = (src_nents == 1) ? 0 : src_nents;
1230 dst_nents =
sg_count(dst, cryptlen + authsize,
1232 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1242 if (assoc_nents || src_nents || dst_nents) {
1243 dma_len = (src_nents + dst_nents + 2 +
assoc_nents) *
1244 sizeof(
struct talitos_ptr) + authsize;
1248 alloc_len += icv_stashing ? authsize : 0;
1256 dev_err(dev,
"could not allocate edescriptor\n");
1279 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1280 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1281 unsigned int ivsize = crypto_aead_ivsize(authenc);
1283 return talitos_edesc_alloc(ctx->
dev, areq->
assoc, areq->
src, areq->
dst,
1285 ctx->
authsize, ivsize, icv_stashing,
1291 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1292 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1296 edesc = aead_edesc_alloc(req, req->
iv, 0);
1298 return PTR_ERR(edesc);
1303 return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
1308 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1309 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1310 unsigned int authsize = ctx->
authsize;
1319 edesc = aead_edesc_alloc(req, req->
iv, 1);
1321 return PTR_ERR(edesc);
1333 edesc->
desc.hdr_lo = 0;
1335 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
1354 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
1360 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1361 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1365 edesc = aead_edesc_alloc(areq, req->
giv, 0);
1367 return PTR_ERR(edesc);
1372 memcpy(req->
giv, ctx->
iv, crypto_aead_ivsize(authenc));
1376 return ipsec_esp(edesc, areq, req->
seq, ipsec_esp_encrypt_done);
1380 const u8 *key,
unsigned int keylen)
1382 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1390 static void common_nonsnoop_unmap(
struct device *dev,
1398 talitos_sg_unmap(dev, edesc, areq->
src, areq->
dst);
1405 static void ablkcipher_done(
struct device *dev,
1414 common_nonsnoop_unmap(dev, edesc, areq);
1418 areq->
base.complete(&areq->
base, err);
1425 void *context,
int error))
1428 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1431 unsigned int cryptlen = areq->
nbytes;
1432 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1436 desc->
ptr[0].len = 0;
1437 to_talitos_ptr(&desc->
ptr[0], 0);
1438 desc->
ptr[0].j_extent = 0;
1441 to_talitos_ptr(&desc->
ptr[1], edesc->
iv_dma);
1443 desc->
ptr[1].j_extent = 0;
1446 map_single_talitos_ptr(dev, &desc->
ptr[2], ctx->
keylen,
1453 desc->
ptr[3].j_extent = 0;
1455 sg_count = talitos_map_sg(dev, areq->
src, edesc->
src_nents ? : 1,
1460 if (sg_count == 1) {
1463 sg_count = sg_to_link_tbl(areq->
src, sg_count, cryptlen,
1473 to_talitos_ptr(&desc->
ptr[3],
1480 desc->
ptr[4].j_extent = 0;
1482 if (areq->
src != areq->
dst)
1483 sg_count = talitos_map_sg(dev, areq->
dst,
1487 if (sg_count == 1) {
1490 struct talitos_ptr *link_tbl_ptr =
1495 sizeof(
struct talitos_ptr));
1497 sg_count = sg_to_link_tbl(areq->
dst, sg_count, cryptlen,
1504 map_single_talitos_ptr(dev, &desc->
ptr[5], ivsize, ctx->
iv, 0,
1508 desc->
ptr[6].len = 0;
1509 to_talitos_ptr(&desc->
ptr[6], 0);
1510 desc->
ptr[6].j_extent = 0;
1514 common_nonsnoop_unmap(dev, edesc, areq);
1524 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1525 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1527 return talitos_edesc_alloc(ctx->
dev,
NULL, areq->
src, areq->
dst,
1535 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1539 edesc = ablkcipher_edesc_alloc(areq);
1541 return PTR_ERR(edesc);
1546 return common_nonsnoop(edesc, areq, ablkcipher_done);
1552 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1556 edesc = ablkcipher_edesc_alloc(areq);
1558 return PTR_ERR(edesc);
1562 return common_nonsnoop(edesc, areq, ablkcipher_done);
1565 static void common_nonsnoop_hash_unmap(
struct device *dev,
1574 if (edesc->
desc.ptr[1].len)
1575 unmap_single_talitos_ptr(dev, &edesc->
desc.ptr[1],
1578 if (edesc->
desc.ptr[2].len)
1579 unmap_single_talitos_ptr(dev, &edesc->
desc.ptr[2],
1582 talitos_sg_unmap(dev, edesc, req_ctx->
psrc,
NULL);
1590 static void ahash_done(
struct device *dev,
1604 common_nonsnoop_hash_unmap(dev, edesc, areq);
1608 areq->
base.complete(&areq->
base, err);
1611 static int common_nonsnoop_hash(
struct talitos_edesc *edesc,
1615 void *context,
int error))
1625 desc->
ptr[0] = zero_entry;
1629 map_single_talitos_ptr(dev, &desc->
ptr[1],
1635 desc->
ptr[1] = zero_entry;
1642 map_single_talitos_ptr(dev, &desc->
ptr[2], ctx->
keylen,
1645 desc->
ptr[2] = zero_entry;
1651 desc->
ptr[3].j_extent = 0;
1653 sg_count = talitos_map_sg(dev, req_ctx->
psrc,
1657 if (sg_count == 1) {
1660 sg_count = sg_to_link_tbl(req_ctx->
psrc, sg_count, length,
1671 to_talitos_ptr(&desc->
ptr[3],
1677 desc->
ptr[4] = zero_entry;
1681 map_single_talitos_ptr(dev, &desc->
ptr[5],
1682 crypto_ahash_digestsize(tfm),
1685 map_single_talitos_ptr(dev, &desc->
ptr[5],
1690 desc->
ptr[6] = zero_entry;
1694 common_nonsnoop_hash_unmap(dev, edesc, areq);
1701 unsigned int nbytes)
1708 nbytes, 0, 0, 0, areq->
base.flags);
1732 static int ahash_init_sha224_swinit(
struct ahash_request *areq)
1755 static int ahash_process_req(
struct ahash_request *areq,
unsigned int nbytes)
1761 unsigned int blocksize =
1762 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1763 unsigned int nbytes_to_hash;
1764 unsigned int to_hash_later;
1768 if (!req_ctx->
last && (nbytes + req_ctx->
nbuf <= blocksize)) {
1771 sg_count(areq->
src, nbytes, &chained),
1772 req_ctx->
buf + req_ctx->
nbuf, nbytes);
1778 nbytes_to_hash = nbytes + req_ctx->
nbuf;
1779 to_hash_later = nbytes_to_hash & (blocksize - 1);
1783 else if (to_hash_later)
1785 nbytes_to_hash -= to_hash_later;
1788 nbytes_to_hash -= blocksize;
1789 to_hash_later = blocksize;
1793 if (req_ctx->
nbuf) {
1794 nsg = (req_ctx->
nbuf < nbytes_to_hash) ? 2 : 1;
1796 sg_set_buf(req_ctx->
bufsl, req_ctx->
buf, req_ctx->
nbuf);
1798 scatterwalk_sg_chain(req_ctx->
bufsl, 2, areq->
src);
1803 if (to_hash_later) {
1804 int nents =
sg_count(areq->
src, nbytes, &chained);
1805 sg_copy_end_to_buffer(areq->
src, nents,
1808 nbytes - to_hash_later);
1813 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1815 return PTR_ERR(edesc);
1835 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1845 return ahash_process_req(areq, areq->
nbytes);
1854 return ahash_process_req(areq, 0);
1863 return ahash_process_req(areq, areq->
nbytes);
1869 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1874 return ahash_process_req(areq, areq->
nbytes);
1893 static int keyhash(
struct crypto_ahash *tfm,
const u8 *key,
unsigned int keylen,
1896 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1903 init_completion(&hresult.completion);
1912 keyhash_complete, &hresult);
1916 ahash_request_set_crypt(req, sg, hash, keylen);
1924 &hresult.completion);
1931 ahash_request_free(req);
1937 unsigned int keylen)
1939 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1940 unsigned int blocksize =
1941 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1942 unsigned int digestsize = crypto_ahash_digestsize(tfm);
1947 if (keylen <= blocksize)
1951 ret = keyhash(tfm, key, keylen, hash);
1958 keysize = digestsize;
1985 .cra_name =
"authenc(hmac(sha1),cbc(aes))",
1986 .cra_driver_name =
"authenc-hmac-sha1-cbc-aes-talitos",
2004 .cra_name =
"authenc(hmac(sha1),cbc(des3_ede))",
2005 .cra_driver_name =
"authenc-hmac-sha1-cbc-3des-talitos",
2024 .cra_name =
"authenc(hmac(sha224),cbc(aes))",
2025 .cra_driver_name =
"authenc-hmac-sha224-cbc-aes-talitos",
2043 .cra_name =
"authenc(hmac(sha224),cbc(des3_ede))",
2044 .cra_driver_name =
"authenc-hmac-sha224-cbc-3des-talitos",
2063 .cra_name =
"authenc(hmac(sha256),cbc(aes))",
2064 .cra_driver_name =
"authenc-hmac-sha256-cbc-aes-talitos",
2082 .cra_name =
"authenc(hmac(sha256),cbc(des3_ede))",
2083 .cra_driver_name =
"authenc-hmac-sha256-cbc-3des-talitos",
2102 .cra_name =
"authenc(hmac(sha384),cbc(aes))",
2103 .cra_driver_name =
"authenc-hmac-sha384-cbc-aes-talitos",
2121 .cra_name =
"authenc(hmac(sha384),cbc(des3_ede))",
2122 .cra_driver_name =
"authenc-hmac-sha384-cbc-3des-talitos",
2141 .cra_name =
"authenc(hmac(sha512),cbc(aes))",
2142 .cra_driver_name =
"authenc-hmac-sha512-cbc-aes-talitos",
2160 .cra_name =
"authenc(hmac(sha512),cbc(des3_ede))",
2161 .cra_driver_name =
"authenc-hmac-sha512-cbc-3des-talitos",
2180 .cra_name =
"authenc(hmac(md5),cbc(aes))",
2181 .cra_driver_name =
"authenc-hmac-md5-cbc-aes-talitos",
2199 .cra_name =
"authenc(hmac(md5),cbc(des3_ede))",
2200 .cra_driver_name =
"authenc-hmac-md5-cbc-3des-talitos",
2220 .cra_name =
"cbc(aes)",
2221 .cra_driver_name =
"cbc-aes-talitos",
2237 .cra_name =
"cbc(des3_ede)",
2238 .cra_driver_name =
"cbc-3des-talitos",
2259 .cra_driver_name =
"md5-talitos",
2274 .cra_driver_name =
"sha1-talitos",
2288 .cra_name =
"sha224",
2289 .cra_driver_name =
"sha224-talitos",
2303 .cra_name =
"sha256",
2304 .cra_driver_name =
"sha256-talitos",
2318 .cra_name =
"sha384",
2319 .cra_driver_name =
"sha384-talitos",
2333 .cra_name =
"sha512",
2334 .cra_driver_name =
"sha512-talitos",
2348 .cra_name =
"hmac(md5)",
2349 .cra_driver_name =
"hmac-md5-talitos",
2363 .cra_name =
"hmac(sha1)",
2364 .cra_driver_name =
"hmac-sha1-talitos",
2378 .cra_name =
"hmac(sha224)",
2379 .cra_driver_name =
"hmac-sha224-talitos",
2393 .cra_name =
"hmac(sha256)",
2394 .cra_driver_name =
"hmac-sha256-talitos",
2408 .cra_name =
"hmac(sha384)",
2409 .cra_driver_name =
"hmac-sha384-talitos",
2423 .cra_name =
"hmac(sha512)",
2424 .cra_driver_name =
"hmac-sha512-talitos",
2442 static int talitos_cra_init(
struct crypto_tfm *tfm)
2458 ctx->
dev = talitos_alg->
dev;
2474 static int talitos_cra_init_aead(
struct crypto_tfm *tfm)
2478 talitos_cra_init(tfm);
2486 static int talitos_cra_init_ahash(
struct crypto_tfm *tfm)
2490 talitos_cra_init(tfm);
2493 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2527 switch (t_alg->
algt.type) {
2541 talitos_unregister_rng(dev);
2548 for (i = 0; i < 2; i++)
2579 t_alg->
algt = *
template;
2581 switch (t_alg->
algt.type) {
2583 alg = &t_alg->
algt.alg.crypto;
2586 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2587 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2588 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2589 alg->cra_ablkcipher.geniv =
"eseqiv";
2592 alg = &t_alg->
algt.alg.crypto;
2593 alg->
cra_init = talitos_cra_init_aead;
2595 alg->cra_aead.setkey = aead_setkey;
2596 alg->cra_aead.setauthsize = aead_setauthsize;
2597 alg->cra_aead.encrypt = aead_encrypt;
2598 alg->cra_aead.decrypt = aead_decrypt;
2599 alg->cra_aead.givencrypt = aead_givencrypt;
2600 alg->cra_aead.geniv =
"<built-in>";
2603 alg = &t_alg->
algt.alg.hash.halg.base;
2604 alg->
cra_init = talitos_cra_init_ahash;
2606 t_alg->
algt.alg.hash.init = ahash_init;
2607 t_alg->
algt.alg.hash.update = ahash_update;
2608 t_alg->
algt.alg.hash.final = ahash_final;
2609 t_alg->
algt.alg.hash.finup = ahash_finup;
2610 t_alg->
algt.alg.hash.digest = ahash_digest;
2611 t_alg->
algt.alg.hash.setkey = ahash_setkey;
2621 t_alg->
algt.alg.hash.init = ahash_init_sha224_swinit;
2622 t_alg->
algt.desc_hdr_template =
2629 dev_err(dev,
"unknown algorithm type %d\n", t_alg->
algt.type);
2652 if (!priv->
irq[0]) {
2653 dev_err(dev,
"failed to map irq\n");
2660 if (!priv->
irq[1]) {
2675 dev_err(dev,
"failed to request secondary irq\n");
2684 dev_err(dev,
"failed to request primary irq\n");
2697 const unsigned int *prop;
2706 priv->
ofdev = ofdev;
2710 err = talitos_probe_irq(ofdev);
2714 if (!priv->
irq[1]) {
2716 (
unsigned long)dev);
2719 (
unsigned long)dev);
2721 (
unsigned long)dev);
2728 dev_err(dev,
"failed to of_iomap\n");
2752 dev_err(dev,
"invalid property data in device tree node\n");
2768 dev_err(dev,
"failed to allocate channel management space\n");
2775 if (!priv->
irq[1] || !(i & 1))
2789 if (!priv->
chan[i].fifo) {
2790 dev_err(dev,
"failed to allocate request fifo %d\n", i);
2803 err = init_device(dev);
2805 dev_err(dev,
"failed to initialize device\n");
2811 err = talitos_register_rng(dev);
2813 dev_err(dev,
"failed to register hwrng: %d\n", err);
2820 for (i = 0; i <
ARRAY_SIZE(driver_algs); i++) {
2821 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2824 bool authenc =
false;
2827 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2828 if (IS_ERR(t_alg)) {
2829 err = PTR_ERR(t_alg);
2835 switch (t_alg->
algt.type) {
2839 &t_alg->
algt.alg.crypto);
2840 name = t_alg->
algt.alg.crypto.cra_driver_name;
2841 authenc = authenc ? !authenc :
2846 &t_alg->
algt.alg.hash);
2848 t_alg->
algt.alg.hash.halg.base.cra_driver_name;
2852 dev_err(dev,
"%s alg registration failed\n",
2864 memcpy(name + 7,
"esn", 3);
2869 memcpy(name + 7,
"esn", 3);
2877 dev_info(dev,
"%s algorithms registered in /proc/crypto\n",
2883 talitos_remove(ofdev);
2890 .compatible =
"fsl,sec2.0",
2900 .of_match_table = talitos_match,
2902 .probe = talitos_probe,
2903 .remove = talitos_remove,