Commit bbdb23b

dm crypt: Use skcipher and ahash
This patch replaces uses of ablkcipher with skcipher, and the long obsolete hash interface with ahash.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent a1d3839 commit bbdb23b
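
The hash-side change is easiest to see in crypt_iv_essiv_init() below, where the removed hash_desc/crypto_hash_digest pair becomes an on-stack ahash request. As a rough, self-contained sketch of that new pattern, with a hypothetical caller and "sha256" standing in for the algorithm (neither appears in the patch):

/* Sketch only: one-shot digest via the ahash interface. The function
 * name, "sha256", and the buffers are illustrative, not from the patch. */
#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_ahash_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	int err;

	/* Passing CRYPTO_ALG_ASYNC as the mask selects a synchronous
	 * implementation, so a NULL completion callback and an on-stack
	 * request are safe. */
	tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		AHASH_REQUEST_ON_STACK(req, tfm);

		/* data must live in the linear mapping (e.g. kmalloc'd),
		 * since it is referenced through a scatterlist. */
		sg_init_one(&sg, data, len);
		ahash_request_set_tfm(req, tfm);
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					   NULL, NULL);
		ahash_request_set_crypt(req, &sg, out, len);

		err = crypto_ahash_digest(req);
		ahash_request_zero(req);	/* wipe request state */
	}

	crypto_free_ahash(tfm);
	return err;
}

Here out must hold at least crypto_ahash_digestsize(tfm) bytes; the patch uses exactly this flow to derive the ESSIV salt from the volume key.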

1 file changed

drivers/md/dm-crypt.c

Lines changed: 48 additions & 45 deletions
@@ -28,6 +28,7 @@
 #include <crypto/hash.h>
 #include <crypto/md5.h>
 #include <crypto/algapi.h>
+#include <crypto/skcipher.h>
 
 #include <linux/device-mapper.h>
 
@@ -44,7 +45,7 @@ struct convert_context {
 	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
-	struct ablkcipher_request *req;
+	struct skcipher_request *req;
 };
 
 /*
@@ -86,7 +87,7 @@ struct crypt_iv_operations {
 };
 
 struct iv_essiv_private {
-	struct crypto_hash *hash_tfm;
+	struct crypto_ahash *hash_tfm;
 	u8 *salt;
 };
 
@@ -153,13 +154,13 @@ struct crypt_config {
 
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
-	struct crypto_ablkcipher **tfms;
+	struct crypto_skcipher **tfms;
 	unsigned tfms_count;
 
 	/*
	 * Layout of each crypto request:
	 *
-	 *   struct ablkcipher_request
+	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
@@ -189,7 +190,7 @@ static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
-static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
 {
 	return cc->tfms[0];
 }
@@ -263,23 +264,25 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
 static int crypt_iv_essiv_init(struct crypt_config *cc)
 {
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-	struct hash_desc desc;
+	AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
 	struct scatterlist sg;
 	struct crypto_cipher *essiv_tfm;
 	int err;
 
 	sg_init_one(&sg, cc->key, cc->key_size);
-	desc.tfm = essiv->hash_tfm;
-	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_tfm(req, essiv->hash_tfm);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+	ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
 
-	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+	err = crypto_ahash_digest(req);
+	ahash_request_zero(req);
 	if (err)
 		return err;
 
 	essiv_tfm = cc->iv_private;
 
 	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
-				   crypto_hash_digestsize(essiv->hash_tfm));
+				   crypto_ahash_digestsize(essiv->hash_tfm));
 	if (err)
 		return err;
 
@@ -290,7 +293,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 static int crypt_iv_essiv_wipe(struct crypt_config *cc)
 {
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+	unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
 	struct crypto_cipher *essiv_tfm;
 	int r, err = 0;
 
@@ -320,7 +323,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
 	}
 
 	if (crypto_cipher_blocksize(essiv_tfm) !=
-	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
+	    crypto_skcipher_ivsize(any_tfm(cc))) {
 		ti->error = "Block size of ESSIV cipher does "
 			    "not match IV size of block cipher";
 		crypto_free_cipher(essiv_tfm);
@@ -342,7 +345,7 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 	struct crypto_cipher *essiv_tfm;
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
-	crypto_free_hash(essiv->hash_tfm);
+	crypto_free_ahash(essiv->hash_tfm);
 	essiv->hash_tfm = NULL;
 
 	kzfree(essiv->salt);
@@ -360,7 +363,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
 	struct crypto_cipher *essiv_tfm = NULL;
-	struct crypto_hash *hash_tfm = NULL;
+	struct crypto_ahash *hash_tfm = NULL;
 	u8 *salt = NULL;
 	int err;
 
@@ -370,14 +373,14 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	}
 
 	/* Allocate hash algorithm */
-	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
+	hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(hash_tfm)) {
 		ti->error = "Error initializing ESSIV hash";
 		err = PTR_ERR(hash_tfm);
 		goto bad;
 	}
 
-	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
+	salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
 	if (!salt) {
 		ti->error = "Error kmallocing salt storage in ESSIV";
 		err = -ENOMEM;
@@ -388,7 +391,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
 	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
-				    crypto_hash_digestsize(hash_tfm));
+				    crypto_ahash_digestsize(hash_tfm));
 	if (IS_ERR(essiv_tfm)) {
 		crypt_iv_essiv_dtr(cc);
 		return PTR_ERR(essiv_tfm);
@@ -399,7 +402,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 
 bad:
 	if (hash_tfm && !IS_ERR(hash_tfm))
-		crypto_free_hash(hash_tfm);
+		crypto_free_ahash(hash_tfm);
 	kfree(salt);
 	return err;
 }
@@ -419,7 +422,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
-	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
+	unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
 	int log = ilog2(bs);
 
 	/* we need to calculate how far we must shift the sector count
@@ -816,27 +819,27 @@ static void crypt_convert_init(struct crypt_config *cc,
 }
 
 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
-					     struct ablkcipher_request *req)
+					     struct skcipher_request *req)
 {
 	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
 }
 
-static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+static struct skcipher_request *req_of_dmreq(struct crypt_config *cc,
 					       struct dm_crypt_request *dmreq)
 {
-	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+	return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start);
}
 
 static u8 *iv_of_dmreq(struct crypt_config *cc,
 		       struct dm_crypt_request *dmreq)
 {
 	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
-		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+		crypto_skcipher_alignmask(any_tfm(cc)) + 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
-			       struct ablkcipher_request *req)
+			       struct skcipher_request *req)
 {
 	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
 	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
@@ -866,13 +869,13 @@ static int crypt_convert_block(struct crypt_config *cc,
 		return r;
 	}
 
-	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
-				     1 << SECTOR_SHIFT, iv);
+	skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
+				   1 << SECTOR_SHIFT, iv);
 
 	if (bio_data_dir(ctx->bio_in) == WRITE)
-		r = crypto_ablkcipher_encrypt(req);
+		r = crypto_skcipher_encrypt(req);
 	else
-		r = crypto_ablkcipher_decrypt(req);
+		r = crypto_skcipher_decrypt(req);
 
 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
 		r = cc->iv_gen_ops->post(cc, iv, dmreq);
@@ -891,23 +894,23 @@ static void crypt_alloc_req(struct crypt_config *cc,
 	if (!ctx->req)
 		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+	skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
 
 	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
-	ablkcipher_request_set_callback(ctx->req,
+	skcipher_request_set_callback(ctx->req,
 	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 static void crypt_free_req(struct crypt_config *cc,
-			   struct ablkcipher_request *req, struct bio *base_bio)
+			   struct skcipher_request *req, struct bio *base_bio)
 {
 	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
 
-	if ((struct ablkcipher_request *)(io + 1) != req)
+	if ((struct skcipher_request *)(io + 1) != req)
 		mempool_free(req, cc->req_pool);
 }
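
The hunks above convert the data path. dm-crypt drives skcipher asynchronously, with kcryptd_async_done as the completion callback; as a simpler illustration, here is a minimal synchronous sketch of the same request flow, assuming a forced-synchronous tfm. "cbc(aes)" and all function and buffer names are invented for the example:

/* Sketch only: in-place, synchronous skcipher encryption. Everything
 * here except the API calls is hypothetical. */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_skcipher_encrypt(u8 *buf, unsigned int len,
				    const u8 *key, unsigned int keylen,
				    u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int err;

	/* Mask out async implementations so no callback is needed. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, len);	/* src == dst: in-place */
		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe request state */
	}

	crypto_free_skcipher(tfm);
	return err;
}

For cbc(aes), len must be a multiple of the cipher block size and iv must be crypto_skcipher_ivsize(tfm) bytes. dm-crypt instead allocates requests from a mempool and sets CRYPTO_TFM_REQ_MAY_BACKLOG, as crypt_alloc_req() above shows.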

@@ -1437,7 +1440,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
 
 	for (i = 0; i < cc->tfms_count; i++)
 		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
-			crypto_free_ablkcipher(cc->tfms[i]);
+			crypto_free_skcipher(cc->tfms[i]);
 			cc->tfms[i] = NULL;
 		}
 
@@ -1450,13 +1453,13 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
 	unsigned i;
 	int err;
 
-	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
 			   GFP_KERNEL);
 	if (!cc->tfms)
 		return -ENOMEM;
 
 	for (i = 0; i < cc->tfms_count; i++) {
-		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+		cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
 		if (IS_ERR(cc->tfms[i])) {
 			err = PTR_ERR(cc->tfms[i]);
 			crypt_free_tfms(cc);
@@ -1476,9 +1479,9 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
 	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
 
 	for (i = 0; i < cc->tfms_count; i++) {
-		r = crypto_ablkcipher_setkey(cc->tfms[i],
-					     cc->key + (i * subkey_size),
-					     subkey_size);
+		r = crypto_skcipher_setkey(cc->tfms[i],
+					   cc->key + (i * subkey_size),
+					   subkey_size);
 		if (r)
 			err = r;
 	}
@@ -1645,7 +1648,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	}
 
 	/* Initialize IV */
-	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
+	cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
 	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
 		cc->iv_size = max(cc->iv_size,
@@ -1763,21 +1766,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	if (ret < 0)
 		goto bad;
 
-	cc->dmreq_start = sizeof(struct ablkcipher_request);
-	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
+	cc->dmreq_start = sizeof(struct skcipher_request);
+	cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
 	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
 
-	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+	if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
 		/* Allocate the padding exactly */
 		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
-				& crypto_ablkcipher_alignmask(any_tfm(cc));
+				& crypto_skcipher_alignmask(any_tfm(cc));
 	} else {
 		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
-		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+		iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
 	}
 
 	ret = -ENOMEM;
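
A worked instance of the padding computation above, with invented numbers: if crypto_skcipher_alignmask() returns 15 (16-byte alignment) and cc->dmreq_start + sizeof(struct dm_crypt_request) comes to 100, then -(100) & 15 = 12, so 12 bytes of padding place the IV at offset 112, the next multiple of 16. Negating in two's complement and masking yields exactly the distance to the next alignment boundary, which is why the first branch can "allocate the padding exactly".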
@@ -1922,7 +1925,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 
 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
-	io->ctx.req = (struct ablkcipher_request *)(io + 1);
+	io->ctx.req = (struct skcipher_request *)(io + 1);
 
 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))
