#define CAAM_CRA_PRIORITY 3000

#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
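
/* length of descriptors text */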
#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)

#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
				  CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
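
/*
 * Worked size, as a rough check (assuming CAAM_CMD_SZ is sizeof(u32) == 4,
 * per desc_constr.h): DESC_AHASH_FINAL_LEN = (4 + 5) * 4 = 36 bytes, plus
 * CAAM_MAX_HASH_KEY_SIZE = 2 * 64 = 128 bytes, gives
 * DESC_HASH_MAX_USED_BYTES = 164 and DESC_HASH_MAX_USED_LEN = 41 words.
 */

/* caam context sizes for hashes: running digest + 8 */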
#define HASH_MSG_LEN 8
#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
#ifdef DEBUG
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
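
/* Map state->caam_ctx, and append a seq_out_ptr command that points to it */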
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
                                       struct caam_hash_state *state,
                                       int ctx_len)
{
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
                                        ctx_len, DMA_FROM_DEVICE);
        append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}
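
/* From map_seq_out_ptr_result(): map req->result and point the SEQ OUT PTR at it */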
        append_seq_out_ptr(desc, dst_dma, digestsize, 0);
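
/* From buf_map_to_sec4_sg(): add the DMA-mapped buffer to the sec4 link table */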
        dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
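
/* Map req->src and put it in the sec4 link table */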
static inline void src_map_to_sec4_sg(struct device *jrdev,
                                      struct scatterlist *src, int src_nents,
                                      struct sec4_sg_entry *sec4_sg,
                                      bool chained)
{
        dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
        sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
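
/* From try_buf_map_to_sec4_sg(): (re)map the pending buffer and add it to the link table */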
        buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
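
/* Map state->caam_ctx, and add it to the sec4 link table */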
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
                                      struct caam_hash_state *state,
                                      int ctx_len,
                                      struct sec4_sg_entry *sec4_sg,
                                      u32 flag)
{
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
        dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}
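
/* From init_sh_desc_key_ahash(): load the split key, jumped over when the descriptor is already shared */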
        append_key_ahash(desc, ctx);

        set_jump_tgt_here(desc, key_jump_cmd);
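
/*
 * For ahash, read data from seqin following state->caam_ctx and write the
 * resulting class 2 context to seqout, which may be state->caam_ctx or
 * req->result
 */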
static inline void ahash_append_load_str(u32 *desc, int digestsize)
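
/* From ahash_ctx_data_to_out(): descriptor body for final/finup, which imports the running context first */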
        init_sh_desc_key_ahash(desc, ctx);

        ahash_append_load_str(desc, digestsize);
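
/*
 * Presumably from the companion helper (ahash_data_to_out()) used for the
 * first-update and digest shared descriptors, which needs no imported context
 */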
        init_sh_desc_key_ahash(desc, ctx);

        ahash_append_load_str(desc, digestsize);
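
/*
 * Build the shared descriptors (update, update_first, final, finup, digest)
 * and DMA-map each one; called at cra_init time and again on setkey
 */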
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
        int digestsize = crypto_ahash_digestsize(ahash);

        /* ahash_update shared descriptor: load data and write to result or context */
        ahash_append_load_str(desc, ctx->ctx_len);
        dev_err(jrdev, "unable to map shared descriptor\n");

        print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

        /* ahash_update_first shared descriptor */
        dev_err(jrdev, "unable to map shared descriptor\n");

        print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

        /* ahash_final shared descriptor */
        ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
                              OP_ALG_AS_FINALIZE, digestsize, ctx);

        dev_err(jrdev, "unable to map shared descriptor\n");

        print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);

        /* ahash_finup shared descriptor */
        ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
                              OP_ALG_AS_FINALIZE, digestsize, ctx);

        dev_err(jrdev, "unable to map shared descriptor\n");

        print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);

        dev_err(jrdev, "unable to map shared descriptor\n");

        print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
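
/*
 * hash_digest_key(): run a one-shot hash job over a key that exceeds the
 * block size, replacing it with its digest before HMAC key generation
 */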
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                           u32 *keylen, u8 *key_out, u32 digestsize)
{
        dev_err(jrdev, "unable to allocate key input memory\n");

        init_job_desc(desc, 0);

        dev_err(jrdev, "unable to map key input memory\n");

        dev_err(jrdev, "unable to map key output memory\n");

        append_seq_in_ptr(desc, src_dma, *keylen, 0);

        append_seq_out_ptr(desc, dst_dma, digestsize, 0);

        init_completion(&result.completion);

        print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize, 1);

        *keylen = digestsize;
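
/*
 * ahash_setkey(): hash down over-long keys, generate the split key with
 * gen_split_hash_key(), and rebuild the shared descriptors for the new key
 */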
static int ahash_setkey(struct crypto_ahash *ahash,
                        const u8 *key, unsigned int keylen)
{
        /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
        static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
        int digestsize = crypto_ahash_digestsize(ahash);

        if (keylen > blocksize) {
                ret = hash_digest_key(ctx, key, &keylen, hashed_key,
                                      digestsize);
        }

        ret = gen_split_hash_key(ctx, key, keylen);

        dev_err(jrdev, "unable to map key i/o memory\n");

        ret = ahash_set_sh_desc(ahash);
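
/* DMA-unmap helpers for an extended descriptor; the _ctx variant additionally handles the running context mapping */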
static inline void ahash_unmap(struct device *dev,
                               struct ahash_edesc *edesc,
                               struct ahash_request *req, int dst_len)

static inline void ahash_unmap_ctx(struct device *dev,
                                   struct ahash_edesc *edesc,
                                   struct ahash_request *req, int dst_len,
                                   u32 flag)

        ahash_unmap(dev, edesc, req, dst_len);
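
/*
 * Job-ring completion callbacks: each unmaps the request's DMA resources,
 * frees the extended descriptor, and completes the ahash request
 */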
        int digestsize = crypto_ahash_digestsize(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        ahash_unmap(jrdev, edesc, req, digestsize);

        req->base.complete(&req->base, err);
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
                          void *context)
{
        int digestsize = crypto_ahash_digestsize(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        req->base.complete(&req->base, err);
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        int digestsize = crypto_ahash_digestsize(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        req->base.complete(&req->base, err);
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        int digestsize = crypto_ahash_digestsize(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        req->base.complete(&req->base, err);
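
/*
 * ahash_update_ctx(): hash as many full blocks as possible through the
 * update shared descriptor, carrying the running digest in state->caam_ctx
 * and buffering the sub-blocksize remainder for the next call
 */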
        int src_nents, sec4_sg_bytes, sec4_sg_src_index;

        last_buflen = *next_buflen;
        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
        to_hash = in_len - *next_buflen;

        if (to_hash) {
                src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
                                       &chained);
                sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
                sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                                 sizeof(struct sec4_sg_entry);

                edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
                                sec4_sg_bytes, GFP_DMA | flags);
                if (!edesc) {
                        dev_err(jrdev,
                                "could not allocate extended descriptor\n");
                        return -ENOMEM;
                }

                ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
                                   edesc->sec4_sg, DMA_BIDIRECTIONAL);

                state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
                                                        edesc->sec4_sg + 1,
                                                        buf, state->buf_dma,
                                                        *buflen, last_buflen);

                src_map_to_sec4_sg(jrdev, req->src, src_nents,
                                   edesc->sec4_sg + sec4_sg_src_index,
                                   chained);

                sg_copy_part(next_buf, req->src, to_hash - *buflen,
                             req->nbytes);

                (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
                        SEC4_SG_LEN_FIN;

                sh_len = desc_len(sh_desc);

                print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);

                ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
                                DMA_BIDIRECTIONAL);
        } else if (*next_buflen) {
                sg_copy(buf + *buflen, req->src, req->nbytes);
                *buflen = *next_buflen;
                *next_buflen = last_buflen;
        }
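
/*
 * ahash_final_ctx(): wrap up a hash that has gone through update - feed the
 * saved context plus any buffered bytes through the final shared descriptor
 * and write the digest to req->result
 */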
        int digestsize = crypto_ahash_digestsize(ahash);

        sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);

        edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
                        sec4_sg_bytes, GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
        }

        sh_len = desc_len(sh_desc);

        ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
                           DMA_TO_DEVICE);

        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
                                                last_buflen);

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
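
/*
 * ahash_finup_ctx(): like final, but also hashes the data supplied with this
 * last request (buffered bytes + req->src) before producing the digest
 */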
        int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
        bool chained = false;

        src_nents = __sg_count(req->src, req->nbytes, &chained);
        sec4_sg_src_index = 1 + (buflen ? 1 : 0);
        sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                         sizeof(struct sec4_sg_entry);

        edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
                        sec4_sg_bytes, GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
        }

        sh_len = desc_len(sh_desc);

        ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
                           DMA_TO_DEVICE);

        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
                                                last_buflen);

        src_map_to_sec4_sg(jrdev, req->src, src_nents,
                           edesc->sec4_sg + sec4_sg_src_index, chained);

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
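
/*
 * ahash_digest(): one-shot hash of req->src through the digest shared
 * descriptor; no intermediate context is kept
 */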
        int digestsize = crypto_ahash_digestsize(ahash);
        int src_nents, sec4_sg_bytes;
        bool chained = false;

        dev_err(jrdev, "could not allocate extended descriptor\n");

        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;
        edesc->src_nents = src_nents;
        edesc->chained = chained;

        sh_len = desc_len(sh_desc);
        desc = edesc->hw_desc;

        sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
        src_dma = edesc->sec4_sg_dma;

        append_seq_in_ptr(desc, src_dma, req->nbytes, options);

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);

        ahash_unmap(jrdev, edesc, req, digestsize);
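
/*
 * ahash_final_no_ctx(): final when everything so far still fits in the
 * buffer - hash the buffered bytes in one job and write the digest to
 * req->result
 */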
        int digestsize = crypto_ahash_digestsize(ahash);

        dev_err(jrdev, "could not allocate extended descriptor\n");

        sh_len = desc_len(sh_desc);

        append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);

        ahash_unmap(jrdev, edesc, req, digestsize);
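
/*
 * ahash_update_no_ctx(): first data-bearing update - hash full blocks
 * through the update_first shared descriptor and switch the state over to
 * the context-carrying handlers for subsequent calls
 */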
        u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
        int in_len = *buflen + req->nbytes, to_hash;
        int sec4_sg_bytes, src_nents;
        bool chained = false;

        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
        to_hash = in_len - *next_buflen;

        if (to_hash) {
                src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
                                       &chained);

                edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
                                sec4_sg_bytes, GFP_DMA | flags);
                if (!edesc) {
                        dev_err(jrdev,
                                "could not allocate extended descriptor\n");
                        return -ENOMEM;
                }

                src_map_to_sec4_sg(jrdev, req->src, src_nents,
                                   edesc->sec4_sg + 1, chained);

                sg_copy_part(next_buf, req->src, to_hash - *buflen,
                             req->nbytes);

                sh_len = desc_len(sh_desc);

                map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

                print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);

                state->update = ahash_update_ctx;
                state->finup = ahash_finup_ctx;
                state->final = ahash_final_ctx;

                ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
        } else if (*next_buflen) {
                sg_copy(buf + *buflen, req->src, req->nbytes);
                *buflen = *next_buflen;
        }

        print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                       *next_buflen, 1);
/*
 * ahash_finup_no_ctx(): finup before any context-carrying job has run - hash
 * the buffered bytes plus req->src in a single job and emit the digest
 */
        int sec4_sg_bytes, sec4_sg_src_index, src_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        bool chained = false;

        src_nents = __sg_count(req->src, req->nbytes, &chained);
        sec4_sg_src_index = 2;
        sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                         sizeof(struct sec4_sg_entry);

        edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
                        sec4_sg_bytes, GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
        }

        sh_len = desc_len(sh_desc);

        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
                                                state->buf_dma, buflen,
                                                last_buflen);

        src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
                           chained);

        append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
                          req->nbytes, LDST_SGF);

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);

        ahash_unmap(jrdev, edesc, req, digestsize);
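
/*
 * ahash_update_first(): very first update on a request - hash full blocks
 * through the update_first shared descriptor, stash the remainder, and
 * install the follow-on handlers
 */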
        int sec4_sg_bytes, src_nents;
        bool chained = false;

        *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
                                      1);
        to_hash = req->nbytes - *next_buflen;

        if (to_hash) {
                dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                   DMA_TO_DEVICE, chained);

                edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
                                sec4_sg_bytes, GFP_DMA | flags);
                if (!edesc) {
                        dev_err(jrdev,
                                "could not allocate extended descriptor\n");
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                edesc->chained = chained;
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;

                sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
                src_dma = edesc->sec4_sg_dma;

                sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

                sh_len = desc_len(sh_desc);
                desc = edesc->hw_desc;

                append_seq_in_ptr(desc, src_dma, to_hash, options);

                map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

                print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);

                state->update = ahash_update_ctx;
                state->finup = ahash_finup_ctx;
                state->final = ahash_final_ctx;

                ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
        } else if (*next_buflen) {
                state->update = ahash_update_no_ctx;
                state->finup = ahash_finup_no_ctx;
                state->final = ahash_final_no_ctx;
                sg_copy(next_buf, req->src, req->nbytes);
        }

        print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                       *next_buflen, 1);
/*
 * ahash_finup_first(): finup before any update has run is just a one-shot
 * digest. ahash_init() installs the *_first handlers on a fresh request
 * state, and the generic update/finup/final entry points dispatch through
 * those state pointers.
 */
        return ahash_digest(req);

        state->update = ahash_update_first;
        state->finup = ahash_finup_first;
        state->final = ahash_final_no_ctx;

        return state->update(req);

        return state->finup(req);

        return state->final(req);
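
/*
 * caam_hash_template entries, one per supported algorithm; caam_hash_alloc()
 * turns each into both an unkeyed ahash and an hmac() variant
 */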
        .driver_name = "sha1-caam",
        .hmac_name = "hmac(sha1)",
        .hmac_driver_name = "hmac-sha1-caam",
        .update = ahash_update,
        .final = ahash_final,
        .finup = ahash_finup,
        .digest = ahash_digest,
        .export = ahash_export,
        .import = ahash_import,
        .setkey = ahash_setkey,

        .driver_name = "sha224-caam",
        .hmac_name = "hmac(sha224)",
        .hmac_driver_name = "hmac-sha224-caam",
        .update = ahash_update,
        .final = ahash_final,
        .finup = ahash_finup,
        .digest = ahash_digest,
        .export = ahash_export,
        .import = ahash_import,
        .setkey = ahash_setkey,

        .driver_name = "sha256-caam",
        .hmac_name = "hmac(sha256)",
        .hmac_driver_name = "hmac-sha256-caam",
        .update = ahash_update,
        .final = ahash_final,
        .finup = ahash_finup,
        .digest = ahash_digest,
        .export = ahash_export,
        .import = ahash_import,
        .setkey = ahash_setkey,

        .driver_name = "sha384-caam",
        .hmac_name = "hmac(sha384)",
        .hmac_driver_name = "hmac-sha384-caam",
        .update = ahash_update,
        .final = ahash_final,
        .finup = ahash_finup,
        .digest = ahash_digest,
        .export = ahash_export,
        .import = ahash_import,
        .setkey = ahash_setkey,

        .driver_name = "sha512-caam",
        .hmac_name = "hmac(sha512)",
        .hmac_driver_name = "hmac-sha512-caam",
        .update = ahash_update,
        .final = ahash_final,
        .finup = ahash_finup,
        .digest = ahash_digest,
        .export = ahash_export,
        .import = ahash_import,
        .setkey = ahash_setkey,

        .driver_name = "md5-caam",
        .hmac_name = "hmac(md5)",
        .hmac_driver_name = "hmac-md5-caam",
        .update = ahash_update,
        .final = ahash_final,
        .finup = ahash_finup,
        .digest = ahash_digest,
        .export = ahash_export,
        .import = ahash_import,
        .setkey = ahash_setkey,
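
/* Per-tfm init: set the request state size and build this tfm's shared descriptors */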
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct caam_hash_state));

        ret = ahash_set_sh_desc(ahash);
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
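
/* Module exit: presumably unregisters the algorithms registered by caam_algapi_hash_init() */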
static void __exit caam_algapi_hash_exit(void)

        ctrldev = &pdev->dev;
        of_node_put(dev_node);
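
/*
 * caam_hash_alloc(): build a caam_hash_alg from a template, using the hmac
 * names when a keyed variant is requested
 */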
        dev_err(ctrldev, "failed to allocate t_alg\n");

        t_alg->ahash_alg = template->template_ahash;
        alg = &halg->halg.base;

        alg->cra_init = caam_hash_cra_init;
        alg->cra_exit = caam_hash_cra_exit;

        t_alg->alg_type = template->alg_type;
        t_alg->alg_op = template->alg_op;
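
/*
 * Module init: locate the CAAM controller via the device tree, then register
 * an hmac and an unkeyed ahash for every entry in driver_hash[]
 */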
static int __init caam_algapi_hash_init(void)

        ctrldev = &pdev->dev;
        of_node_put(dev_node);

        /* register crypto algorithms the device supports */
        for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
                /* register hmac version */
                t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
                        dev_warn(ctrldev, "%s alg allocation failed\n",
                                 driver_hash[i].driver_name);
                }

                dev_warn(ctrldev, "%s alg registration failed\n",
                         t_alg->ahash_alg.halg.base.cra_driver_name);

                /* register unkeyed version */
                t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
                        dev_warn(ctrldev, "%s alg allocation failed\n",
                                 driver_hash[i].driver_name);
                }

                dev_warn(ctrldev, "%s alg registration failed\n",
                         t_alg->ahash_alg.halg.base.cra_driver_name);