Skip to content

Commit c459bd7

Browse files
pdxChen authored and herbertx committed
crypto: sha512-mb - Protect sha512 mb ctx mgr access
The flusher and regular multi-buffer computation via mcryptd may race with one another. Add a lock here and turn off interrupts to access the multi-buffer computation state cstate->mgr before a round of computation. This should prevent the flusher code from jumping in. Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent 5d3d9c8 commit c459bd7

File tree

1 file changed

+42
-22
lines changed

1 file changed

+42
-22
lines changed

arch/x86/crypto/sha512-mb/sha512_mb.c

Lines changed: 42 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -221,7 +221,7 @@ static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
221221
}
222222

223223
static struct sha512_hash_ctx
224-
*sha512_ctx_mgr_get_comp_ctx(struct sha512_ctx_mgr *mgr)
224+
*sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
225225
{
226226
/*
227227
* If get_comp_job returns NULL, there are no jobs complete.
@@ -233,11 +233,17 @@ static struct sha512_hash_ctx
233233
* Otherwise, all jobs currently being managed by the hash_ctx_mgr
234234
* still need processing.
235235
*/
236+
struct sha512_ctx_mgr *mgr;
236237
struct sha512_hash_ctx *ctx;
238+
unsigned long flags;
237239

240+
mgr = cstate->mgr;
241+
spin_lock_irqsave(&cstate->work_lock, flags);
238242
ctx = (struct sha512_hash_ctx *)
239243
sha512_job_mgr_get_comp_job(&mgr->mgr);
240-
return sha512_ctx_mgr_resubmit(mgr, ctx);
244+
ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
245+
spin_unlock_irqrestore(&cstate->work_lock, flags);
246+
return ctx;
241247
}
242248

243249
static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
@@ -246,12 +252,17 @@ static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
246252
}
247253

248254
static struct sha512_hash_ctx
249-
*sha512_ctx_mgr_submit(struct sha512_ctx_mgr *mgr,
255+
*sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
250256
struct sha512_hash_ctx *ctx,
251257
const void *buffer,
252258
uint32_t len,
253259
int flags)
254260
{
261+
struct sha512_ctx_mgr *mgr;
262+
unsigned long irqflags;
263+
264+
mgr = cstate->mgr;
265+
spin_lock_irqsave(&cstate->work_lock, irqflags);
255266
if (flags & (~HASH_ENTIRE)) {
256267
/*
257268
* User should not pass anything other than FIRST, UPDATE, or
@@ -351,20 +362,26 @@ static struct sha512_hash_ctx
351362
}
352363
}
353364

354-
return sha512_ctx_mgr_resubmit(mgr, ctx);
365+
ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
366+
spin_unlock_irqrestore(&cstate->work_lock, irqflags);
367+
return ctx;
355368
}
356369

357-
static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
370+
static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
358371
{
372+
struct sha512_ctx_mgr *mgr;
359373
struct sha512_hash_ctx *ctx;
374+
unsigned long flags;
360375

376+
mgr = cstate->mgr;
377+
spin_lock_irqsave(&cstate->work_lock, flags);
361378
while (1) {
362379
ctx = (struct sha512_hash_ctx *)
363380
sha512_job_mgr_flush(&mgr->mgr);
364381

365382
/* If flush returned 0, there are no more jobs in flight. */
366383
if (!ctx)
367-
return NULL;
384+
break;
368385

369386
/*
370387
* If flush returned a job, resubmit the job to finish
@@ -378,8 +395,10 @@ static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
378395
* the sha512_ctx_mgr still need processing. Loop.
379396
*/
380397
if (ctx)
381-
return ctx;
398+
break;
382399
}
400+
spin_unlock_irqrestore(&cstate->work_lock, flags);
401+
return ctx;
383402
}
384403

385404
static int sha512_mb_init(struct ahash_request *areq)
@@ -439,11 +458,11 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
439458
sha_ctx = (struct sha512_hash_ctx *)
440459
ahash_request_ctx(&rctx->areq);
441460
kernel_fpu_begin();
442-
sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx,
461+
sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
443462
rctx->walk.data, nbytes, flag);
444463
if (!sha_ctx) {
445464
if (flush)
446-
sha_ctx = sha512_ctx_mgr_flush(cstate->mgr);
465+
sha_ctx = sha512_ctx_mgr_flush(cstate);
447466
}
448467
kernel_fpu_end();
449468
if (sha_ctx)
@@ -471,11 +490,12 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
471490
struct sha512_hash_ctx *sha_ctx;
472491
struct mcryptd_hash_request_ctx *req_ctx;
473492
int ret;
493+
unsigned long flags;
474494

475495
/* remove from work list */
476-
spin_lock(&cstate->work_lock);
496+
spin_lock_irqsave(&cstate->work_lock, flags);
477497
list_del(&rctx->waiter);
478-
spin_unlock(&cstate->work_lock);
498+
spin_unlock_irqrestore(&cstate->work_lock, flags);
479499

480500
if (irqs_disabled())
481501
rctx->complete(&req->base, err);
@@ -486,14 +506,14 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
486506
}
487507

488508
/* check to see if there are other jobs that are done */
489-
sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
509+
sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
490510
while (sha_ctx) {
491511
req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
492512
ret = sha_finish_walk(&req_ctx, cstate, false);
493513
if (req_ctx) {
494-
spin_lock(&cstate->work_lock);
514+
spin_lock_irqsave(&cstate->work_lock, flags);
495515
list_del(&req_ctx->waiter);
496-
spin_unlock(&cstate->work_lock);
516+
spin_unlock_irqrestore(&cstate->work_lock, flags);
497517

498518
req = cast_mcryptd_ctx_to_req(req_ctx);
499519
if (irqs_disabled())
@@ -504,7 +524,7 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
504524
local_bh_enable();
505525
}
506526
}
507-
sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
527+
sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
508528
}
509529

510530
return 0;
@@ -515,16 +535,17 @@ static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
515535
{
516536
unsigned long next_flush;
517537
unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
538+
unsigned long flags;
518539

519540
/* initialize tag */
520541
rctx->tag.arrival = jiffies; /* tag the arrival time */
521542
rctx->tag.seq_num = cstate->next_seq_num++;
522543
next_flush = rctx->tag.arrival + delay;
523544
rctx->tag.expire = next_flush;
524545

525-
spin_lock(&cstate->work_lock);
546+
spin_lock_irqsave(&cstate->work_lock, flags);
526547
list_add_tail(&rctx->waiter, &cstate->work_list);
527-
spin_unlock(&cstate->work_lock);
548+
spin_unlock_irqrestore(&cstate->work_lock, flags);
528549

529550
mcryptd_arm_flusher(cstate, delay);
530551
}
@@ -565,7 +586,7 @@ static int sha512_mb_update(struct ahash_request *areq)
565586
sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
566587
sha512_mb_add_list(rctx, cstate);
567588
kernel_fpu_begin();
568-
sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
589+
sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
569590
nbytes, HASH_UPDATE);
570591
kernel_fpu_end();
571592

@@ -628,7 +649,7 @@ static int sha512_mb_finup(struct ahash_request *areq)
628649
sha512_mb_add_list(rctx, cstate);
629650

630651
kernel_fpu_begin();
631-
sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
652+
sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
632653
nbytes, flag);
633654
kernel_fpu_end();
634655

@@ -677,8 +698,7 @@ static int sha512_mb_final(struct ahash_request *areq)
677698
/* flag HASH_FINAL and 0 data size */
678699
sha512_mb_add_list(rctx, cstate);
679700
kernel_fpu_begin();
680-
sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
681-
HASH_LAST);
701+
sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
682702
kernel_fpu_end();
683703

684704
/* check if anything is returned */
@@ -940,7 +960,7 @@ static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
940960
break;
941961
kernel_fpu_begin();
942962
sha_ctx = (struct sha512_hash_ctx *)
943-
sha512_ctx_mgr_flush(cstate->mgr);
963+
sha512_ctx_mgr_flush(cstate);
944964
kernel_fpu_end();
945965
if (!sha_ctx) {
946966
pr_err("sha512_mb error: nothing got flushed for"

0 commit comments

Comments
 (0)