 #include <crypto/hash.h>
 #include <crypto/md5.h>
 #include <crypto/algapi.h>
+#include <crypto/skcipher.h>
 
 #include <linux/device-mapper.h>
 
@@ -44,7 +45,7 @@ struct convert_context {
         struct bvec_iter iter_out;
         sector_t cc_sector;
         atomic_t cc_pending;
-        struct ablkcipher_request *req;
+        struct skcipher_request *req;
 };
 
 /*
@@ -86,7 +87,7 @@ struct crypt_iv_operations {
 };
 
 struct iv_essiv_private {
-        struct crypto_hash *hash_tfm;
+        struct crypto_ahash *hash_tfm;
         u8 *salt;
 };
 
@@ -153,13 +154,13 @@ struct crypt_config {
 
         /* ESSIV: struct crypto_cipher *essiv_tfm */
         void *iv_private;
-        struct crypto_ablkcipher **tfms;
+        struct crypto_skcipher **tfms;
         unsigned tfms_count;
 
         /*
          * Layout of each crypto request:
          *
-         *   struct ablkcipher_request
+         *   struct skcipher_request
          *      context
          *      padding
          *   struct dm_crypt_request
@@ -189,7 +190,7 @@ static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
-static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
 {
         return cc->tfms[0];
 }
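
The renamed any_tfm() helper is what the rest of the patch queries for per-transform attributes. Below is a minimal, illustrative sketch (not from the patch; the function name and message are hypothetical) of the crypto_skcipher_* accessors that replace their crypto_ablkcipher_* counterparts:

#include <crypto/skcipher.h>
#include <linux/printk.h>

/* Illustrative only: the crypto_skcipher_* attribute helpers used throughout
 * this patch in place of their crypto_ablkcipher_* counterparts. */
static void example_skcipher_attrs(struct crypto_skcipher *tfm)
{
        unsigned int ivsize  = crypto_skcipher_ivsize(tfm);    /* IV length in bytes */
        unsigned int blksize = crypto_skcipher_blocksize(tfm); /* cipher block size */
        unsigned int reqsize = crypto_skcipher_reqsize(tfm);   /* driver request ctx size */
        unsigned int amask   = crypto_skcipher_alignmask(tfm); /* required alignment - 1 */

        pr_info("iv=%u block=%u reqctx=%u align=%u\n",
                ivsize, blksize, reqsize, amask + 1);
}
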
@@ -263,23 +264,25 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
 static int crypt_iv_essiv_init(struct crypt_config *cc)
 {
         struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-        struct hash_desc desc;
+        AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
         struct scatterlist sg;
         struct crypto_cipher *essiv_tfm;
         int err;
 
         sg_init_one(&sg, cc->key, cc->key_size);
-        desc.tfm = essiv->hash_tfm;
-        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+        ahash_request_set_tfm(req, essiv->hash_tfm);
+        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+        ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
 
-        err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+        err = crypto_ahash_digest(req);
+        ahash_request_zero(req);
         if (err)
                 return err;
 
         essiv_tfm = cc->iv_private;
 
         err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
-                                   crypto_hash_digestsize(essiv->hash_tfm));
+                                   crypto_ahash_digestsize(essiv->hash_tfm));
         if (err)
                 return err;
 
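
The hunk above swaps the removed synchronous crypto_hash interface for crypto_ahash, computing the ESSIV salt with a one-shot on-stack request. A self-contained sketch of the same ahash pattern follows, assuming a caller-supplied hash name and buffers; the helper name and error handling are illustrative, not taken from the patch:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Hypothetical helper: digest @len bytes of @data into @out using @hash_name.
 * @out must hold at least crypto_ahash_digestsize() bytes; @data must not be
 * stack memory, since it is mapped through a scatterlist. */
static int example_ahash_digest(const char *hash_name, const u8 *data,
                                unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct scatterlist sg;
        int err;

        /* CRYPTO_ALG_ASYNC in the mask asks for a synchronous implementation,
         * so no completion callback is needed. */
        tfm = crypto_alloc_ahash(hash_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                AHASH_REQUEST_ON_STACK(req, tfm);

                sg_init_one(&sg, data, len);
                ahash_request_set_tfm(req, tfm);
                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                           NULL, NULL);
                ahash_request_set_crypt(req, &sg, out, len);

                err = crypto_ahash_digest(req);
                ahash_request_zero(req);        /* wipe request state, as the patch does */
        }

        crypto_free_ahash(tfm);
        return err;
}
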
@@ -290,7 +293,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 static int crypt_iv_essiv_wipe(struct crypt_config *cc)
 {
         struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-        unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+        unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
         struct crypto_cipher *essiv_tfm;
         int r, err = 0;
 
@@ -320,7 +323,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
         }
 
         if (crypto_cipher_blocksize(essiv_tfm) !=
-            crypto_ablkcipher_ivsize(any_tfm(cc))) {
+            crypto_skcipher_ivsize(any_tfm(cc))) {
                 ti->error = "Block size of ESSIV cipher does "
                             "not match IV size of block cipher";
                 crypto_free_cipher(essiv_tfm);
@@ -342,7 +345,7 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
         struct crypto_cipher *essiv_tfm;
         struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
-        crypto_free_hash(essiv->hash_tfm);
+        crypto_free_ahash(essiv->hash_tfm);
         essiv->hash_tfm = NULL;
 
         kzfree(essiv->salt);
@@ -360,7 +363,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                               const char *opts)
 {
         struct crypto_cipher *essiv_tfm = NULL;
-        struct crypto_hash *hash_tfm = NULL;
+        struct crypto_ahash *hash_tfm = NULL;
         u8 *salt = NULL;
         int err;
 
@@ -370,14 +373,14 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
         }
 
         /* Allocate hash algorithm */
-        hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
+        hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
         if (IS_ERR(hash_tfm)) {
                 ti->error = "Error initializing ESSIV hash";
                 err = PTR_ERR(hash_tfm);
                 goto bad;
         }
 
-        salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
+        salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
         if (!salt) {
                 ti->error = "Error kmallocing salt storage in ESSIV";
                 err = -ENOMEM;
@@ -388,7 +391,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
         cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
         essiv_tfm = setup_essiv_cpu(cc, ti, salt,
-                                    crypto_hash_digestsize(hash_tfm));
+                                    crypto_ahash_digestsize(hash_tfm));
         if (IS_ERR(essiv_tfm)) {
                 crypt_iv_essiv_dtr(cc);
                 return PTR_ERR(essiv_tfm);
@@ -399,7 +402,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 
 bad:
         if (hash_tfm && !IS_ERR(hash_tfm))
-                crypto_free_hash(hash_tfm);
+                crypto_free_ahash(hash_tfm);
         kfree(salt);
         return err;
 }
@@ -419,7 +422,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                               const char *opts)
 {
-        unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
+        unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
         int log = ilog2(bs);
 
         /* we need to calculate how far we must shift the sector count
@@ -816,27 +819,27 @@ static void crypt_convert_init(struct crypt_config *cc,
 }
 
 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
-                                             struct ablkcipher_request *req)
+                                             struct skcipher_request *req)
 {
         return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
 }
 
-static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+static struct skcipher_request *req_of_dmreq(struct crypt_config *cc,
                                                struct dm_crypt_request *dmreq)
 {
-        return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+        return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start);
 }
 
 static u8 *iv_of_dmreq(struct crypt_config *cc,
                        struct dm_crypt_request *dmreq)
 {
         return (u8 *)ALIGN((unsigned long)(dmreq + 1),
-                crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+                crypto_skcipher_alignmask(any_tfm(cc)) + 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
                                struct convert_context *ctx,
-                               struct ablkcipher_request *req)
+                               struct skcipher_request *req)
 {
         struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
         struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
@@ -866,13 +869,13 @@ static int crypt_convert_block(struct crypt_config *cc,
                         return r;
         }
 
-        ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
-                                     1 << SECTOR_SHIFT, iv);
+        skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
+                                   1 << SECTOR_SHIFT, iv);
 
         if (bio_data_dir(ctx->bio_in) == WRITE)
-                r = crypto_ablkcipher_encrypt(req);
+                r = crypto_skcipher_encrypt(req);
         else
-                r = crypto_ablkcipher_decrypt(req);
+                r = crypto_skcipher_decrypt(req);
 
         if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
                 r = cc->iv_gen_ops->post(cc, iv, dmreq);
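
Stripped of dm-crypt's per-bio scatterlist plumbing, the skcipher calls above follow the usual allocate/setkey/set_crypt/encrypt sequence. A minimal sketch of a synchronous in-place encryption, assuming cbc(aes) and caller-managed key and IV buffers; the helper name and choices are illustrative:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper: encrypt @len bytes of @buf in place with cbc(aes).
 * @len must be a multiple of the cipher block size, @iv must be
 * crypto_skcipher_ivsize() bytes, and @buf must not be stack memory. */
static int example_skcipher_encrypt(u8 *buf, unsigned int len,
                                    const u8 *key, unsigned int keylen, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        int err;

        /* CRYPTO_ALG_ASYNC in the mask requests a synchronous implementation. */
        tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, len);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);

        err = crypto_skcipher_encrypt(req);     /* synchronous: 0 or -errno */

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}
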
@@ -891,23 +894,23 @@ static void crypt_alloc_req(struct crypt_config *cc,
         if (!ctx->req)
                 ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-        ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+        skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
 
         /*
          * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
          * requests if driver request queue is full.
          */
-        ablkcipher_request_set_callback(ctx->req,
+        skcipher_request_set_callback(ctx->req,
             CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
             kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 static void crypt_free_req(struct crypt_config *cc,
-                           struct ablkcipher_request *req, struct bio *base_bio)
+                           struct skcipher_request *req, struct bio *base_bio)
 {
         struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
 
-        if ((struct ablkcipher_request *)(io + 1) != req)
+        if ((struct skcipher_request *)(io + 1) != req)
                 mempool_free(req, cc->req_pool);
 }
 
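
crypt_alloc_req() keeps dm-crypt's asynchronous model: the request carries kcryptd_async_done as its callback and CRYPTO_TFM_REQ_MAY_BACKLOG so a full driver queue backlogs requests instead of failing them. A generic sketch of that callback pattern, assuming a simple completion-based wait rather than dm-crypt's per-bio bookkeeping; all names here are hypothetical:

#include <crypto/skcipher.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Result carrier for the hypothetical example below; dm-crypt instead
 * completes per-bio state in kcryptd_async_done(). */
struct example_result {
        struct completion done;
        int err;
};

static void example_skcipher_done(struct crypto_async_request *base, int err)
{
        struct example_result *res = base->data;

        /* A backlogged request reports -EINPROGRESS once it is actually
         * queued; the real completion arrives later. */
        if (err == -EINPROGRESS)
                return;
        res->err = err;
        complete(&res->done);
}

static int example_async_encrypt(struct crypto_skcipher *tfm,
                                 struct scatterlist *src,
                                 struct scatterlist *dst,
                                 unsigned int len, u8 *iv)
{
        struct skcipher_request *req;
        struct example_result res;
        int ret;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        init_completion(&res.done);
        skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        example_skcipher_done, &res);
        skcipher_request_set_crypt(req, src, dst, len, iv);

        ret = crypto_skcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                /* -EBUSY with MAY_BACKLOG means "queued on the backlog". */
                wait_for_completion(&res.done);
                ret = res.err;
        }

        skcipher_request_free(req);
        return ret;
}
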
@@ -1437,7 +1440,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
 
         for (i = 0; i < cc->tfms_count; i++)
                 if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
-                        crypto_free_ablkcipher(cc->tfms[i]);
+                        crypto_free_skcipher(cc->tfms[i]);
                         cc->tfms[i] = NULL;
                 }
 
@@ -1450,13 +1453,13 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
         unsigned i;
         int err;
 
-        cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+        cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
                            GFP_KERNEL);
         if (!cc->tfms)
                 return -ENOMEM;
 
         for (i = 0; i < cc->tfms_count; i++) {
-                cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+                cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
                 if (IS_ERR(cc->tfms[i])) {
                         err = PTR_ERR(cc->tfms[i]);
                         crypt_free_tfms(cc);
@@ -1476,9 +1479,9 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
         subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
 
         for (i = 0; i < cc->tfms_count; i++) {
-                r = crypto_ablkcipher_setkey(cc->tfms[i],
-                                             cc->key + (i * subkey_size),
-                                             subkey_size);
+                r = crypto_skcipher_setkey(cc->tfms[i],
+                                           cc->key + (i * subkey_size),
+                                           subkey_size);
                 if (r)
                         err = r;
         }
@@ -1645,7 +1648,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
         }
 
         /* Initialize IV */
-        cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
+        cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
         if (cc->iv_size)
                 /* at least a 64 bit sector number should fit in our buffer */
                 cc->iv_size = max(cc->iv_size,
@@ -1763,21 +1766,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         if (ret < 0)
                 goto bad;
 
-        cc->dmreq_start = sizeof(struct ablkcipher_request);
-        cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
+        cc->dmreq_start = sizeof(struct skcipher_request);
+        cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
         cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
 
-        if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+        if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
                 /* Allocate the padding exactly */
                 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
-                                & crypto_ablkcipher_alignmask(any_tfm(cc));
+                                & crypto_skcipher_alignmask(any_tfm(cc));
         } else {
                 /*
                  * If the cipher requires greater alignment than kmalloc
                  * alignment, we don't know the exact position of the
                  * initialization vector. We must assume worst case.
                  */
-                iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+                iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
         }
 
         ret = -ENOMEM;
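
The dmreq_start arithmetic above is unchanged apart from the renamed helpers: each pooled buffer holds the skcipher_request plus the transform's private context (crypto_skcipher_reqsize()), followed by dm-crypt's own request data and an alignment-padded IV. A reduced sketch of the same sizing, with a hypothetical private struct standing in for struct dm_crypt_request:

#include <crypto/skcipher.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical per-request private data, standing in for struct dm_crypt_request. */
struct example_priv {
        u64 sector;
};

/* Allocate one buffer holding the skcipher request, the driver's request
 * context, the caller's private struct and an IV aligned for the transform. */
static void *example_alloc_req_buffer(struct crypto_skcipher *tfm, gfp_t gfp,
                                      struct skcipher_request **reqp,
                                      struct example_priv **privp, u8 **ivp)
{
        unsigned int start = sizeof(struct skcipher_request) +
                             crypto_skcipher_reqsize(tfm);
        unsigned int amask = crypto_skcipher_alignmask(tfm);
        void *buf;

        start = ALIGN(start, __alignof__(struct example_priv));
        buf = kzalloc(start + sizeof(struct example_priv) + amask +
                      crypto_skcipher_ivsize(tfm), gfp);
        if (!buf)
                return NULL;

        *reqp = buf;
        skcipher_request_set_tfm(*reqp, tfm);
        *privp = buf + start;
        *ivp = PTR_ALIGN((u8 *)(*privp + 1), amask + 1);
        return buf;
}
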
@@ -1922,7 +1925,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 
         io = dm_per_bio_data(bio, cc->per_bio_data_size);
         crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
-        io->ctx.req = (struct ablkcipher_request *)(io + 1);
+        io->ctx.req = (struct skcipher_request *)(io + 1);
 
         if (bio_data_dir(io->base_bio) == READ) {
                 if (kcryptd_io_read(io, GFP_NOWAIT))