@@ -28,9 +28,24 @@
 
 #define DCP_MAX_CHANS	4
 #define DCP_BUF_SZ	PAGE_SIZE
+#define DCP_SHA_PAY_SZ	64
 
 #define DCP_ALIGNMENT	64
 
+/*
+ * Null hashes to align with hw behavior on imx6sl and ull;
+ * these are flipped for consistency with hw output.
+ */
+static const uint8_t sha1_null_hash[] =
+	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
+	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
+
+static const uint8_t sha256_null_hash[] =
+	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
+	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
+	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
+	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
+
 /* DCP DMA descriptor. */
 struct dcp_dma_desc {
 	uint32_t next_cmd_addr;
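
Note: the two tables are simply the standard null digests, SHA1("") = da39a3ee... and SHA256("") = e3b0c442..., with their bytes reversed to match the order the DCP engine emits. A quick userspace sanity check (illustrative, not part of the patch; assumes OpenSSL's libcrypto is available):

#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

int main(void)
{
	/* Same table as the driver: byte-reversed SHA1(""). */
	static const unsigned char sha1_null_hash[] =
		"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
		"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
	unsigned char d[SHA_DIGEST_LENGTH], rev[SHA_DIGEST_LENGTH];
	int i;

	SHA1((const unsigned char *)"", 0, d);	/* da39a3ee... */
	for (i = 0; i < SHA_DIGEST_LENGTH; i++)
		rev[i] = d[SHA_DIGEST_LENGTH - 1 - i];

	puts(memcmp(rev, sha1_null_hash, SHA_DIGEST_LENGTH) ?
	     "mismatch" : "match");
	return 0;
}

Build with "cc check.c -lcrypto"; it prints "match", and the same relationship holds between sha256_null_hash and SHA256("").
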
@@ -48,6 +63,7 @@ struct dcp_coherent_block {
 	uint8_t			aes_in_buf[DCP_BUF_SZ];
 	uint8_t			aes_out_buf[DCP_BUF_SZ];
 	uint8_t			sha_in_buf[DCP_BUF_SZ];
+	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];
 
 	uint8_t			aes_key[2 * AES_KEYSIZE_128];
 
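
Note: the new sha_out_buf gives the engine a fixed 64-byte landing zone inside the already DMA-coherent block, rather than DMAing straight into the caller's req->result, which is only digestsize bytes (20 for SHA-1) and is not necessarily DMA-mappable memory. A condensed view of the flow this field enables (names from this patch; a sketch, not compilable on its own):

/* The digest is bounced through the coherent block, so a device that
 * writes the full 64-byte payload cannot overrun the caller's buffer. */
digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
			     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
desc->payload = digest_phys;		/* engine writes here ... */
ret = mxs_dcp_start_dma(actx);
dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
/* ... and dcp_sha_req_to_buf() later copies (byte-flipped) only
 * halg->digestsize bytes from sha_out_buf into req->result. */
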
@@ -513,8 +529,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
-	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
-
 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 
 	dma_addr_t digest_phys = 0;
@@ -536,20 +550,34 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
 	desc->payload = 0;
 	desc->status = 0;
 
+	/*
+	 * Align driver with hw behavior when generating null hashes.
+	 */
+	if (rctx->init && rctx->fini && desc->size == 0) {
+		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+		const uint8_t *sha_buf =
+			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
+			sha1_null_hash : sha256_null_hash;
+		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
+		ret = 0;
+		goto done_run;
+	}
+
 	/* Set HASH_TERM bit for last transfer block. */
 	if (rctx->fini) {
-		digest_phys = dma_map_single(sdcp->dev, req->result,
-					     halg->digestsize, DMA_FROM_DEVICE);
+		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
+					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
 		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
 		desc->payload = digest_phys;
 	}
 
 	ret = mxs_dcp_start_dma(actx);
 
 	if (rctx->fini)
-		dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
+		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
 				 DMA_FROM_DEVICE);
 
+done_run:
 	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 
 	return ret;
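
Note: with the early exit above, an init+final round that carries no data never touches the engine; the precomputed table is copied into sha_out_buf and the function unwinds through done_run. One way to exercise exactly this path from userspace is the kernel's AF_ALG hash interface. A minimal sketch (error checking omitted; assumes the DCP driver is loaded and is the registered "sha256" provider):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
	};
	unsigned char digest[32];
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);

	send(opfd, "", 0, 0);		/* zero-length: hits the new path */
	read(opfd, digest, sizeof(digest));

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);	/* expect e3b0c442... */
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}
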
@@ -567,6 +595,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 	const int nents = sg_nents(req->src);
 
 	uint8_t *in_buf = sdcp->coh->sha_in_buf;
+	uint8_t *out_buf = sdcp->coh->sha_out_buf;
 
 	uint8_t *src_buf;
 
@@ -621,11 +650,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 
 		actx->fill = 0;
 
-		/* For some reason, the result is flipped. */
-		for (i = 0; i < halg->digestsize / 2; i++) {
-			swap(req->result[i],
-			     req->result[halg->digestsize - i - 1]);
-		}
+		/* The hw emits the digest flipped; reverse it on copy-out. */
+		for (i = 0; i < halg->digestsize; i++)
+			req->result[i] = out_buf[halg->digestsize - i - 1];
 	}
 
 	return 0;
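
Note: the copy-out now doubles as the byte-order fix. The old code reversed req->result in place (a swap over half the digest), which only made sense when the engine had written the digest there directly; with the bounce buffer, the raw flipped digest sits in out_buf and a single back-to-front copy produces the final result. That is also why the loop bound changed: a reversing copy must visit every byte once (i < digestsize), while an in-place swap visits pairs (i < digestsize / 2). As a standalone helper, the new loop is equivalent to (illustrative, not in the patch):

/* Copy n bytes from src to dst in reverse order. */
static void dcp_copy_reversed(uint8_t *dst, const uint8_t *src, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dst[i] = src[n - 1 - i];
}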