@@ -221,7 +221,7 @@ static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
 }
 
 static struct sha512_hash_ctx
-		*sha512_ctx_mgr_get_comp_ctx(struct sha512_ctx_mgr *mgr)
+		*sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
 {
 	/*
 	 * If get_comp_job returns NULL, there are no jobs complete.
@@ -233,11 +233,17 @@ static struct sha512_hash_ctx
 	 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
 	 * still need processing.
 	 */
+	struct sha512_ctx_mgr *mgr;
 	struct sha512_hash_ctx *ctx;
+	unsigned long flags;
 
+	mgr = cstate->mgr;
+	spin_lock_irqsave(&cstate->work_lock, flags);
 	ctx = (struct sha512_hash_ctx *)
 				sha512_job_mgr_get_comp_job(&mgr->mgr);
-	return sha512_ctx_mgr_resubmit(mgr, ctx);
+	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
+	spin_unlock_irqrestore(&cstate->work_lock, flags);
+	return ctx;
 }
 
 static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
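The hunk above establishes the pattern the rest of the patch repeats: callers hand in the per-CPU mcryptd_alg_cstate instead of the bare sha512_ctx_mgr, the function derives the manager via cstate->mgr, and the job-manager calls are bracketed by spin_lock_irqsave()/spin_unlock_irqrestore() on cstate->work_lock. The _irqsave variant is the safe choice here because the lock is taken from contexts whose interrupt state differs (the request paths below check irqs_disabled(), and the periodic flusher contends for the same manager), so local interrupts are disabled while the lock is held and the previous state is restored on release. A minimal sketch of the shape, with my_job, my_mgr and my_cstate as hypothetical stand-ins for the real types:

	#include <linux/spinlock.h>

	struct my_job;				/* hypothetical job handle */
	struct my_mgr;				/* hypothetical SIMD job manager */
	extern struct my_job *job_mgr_get_comp_job(struct my_mgr *mgr);

	struct my_cstate {
		struct my_mgr *mgr;		/* shared computation manager */
		spinlock_t work_lock;		/* protects *mgr and the work list */
	};

	static struct my_job *my_get_comp_job(struct my_cstate *cstate)
	{
		struct my_mgr *mgr = cstate->mgr;
		struct my_job *job;
		unsigned long flags;

		spin_lock_irqsave(&cstate->work_lock, flags);
		job = job_mgr_get_comp_job(mgr);	/* touches shared mgr state */
		spin_unlock_irqrestore(&cstate->work_lock, flags);
		return job;
	}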
@@ -246,12 +252,17 @@ static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
 }
 
 static struct sha512_hash_ctx
-		*sha512_ctx_mgr_submit(struct sha512_ctx_mgr *mgr,
+		*sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
					  struct sha512_hash_ctx *ctx,
					  const void *buffer,
					  uint32_t len,
					  int flags)
 {
+	struct sha512_ctx_mgr *mgr;
+	unsigned long irqflags;
+
+	mgr = cstate->mgr;
+	spin_lock_irqsave(&cstate->work_lock, irqflags);
 	if (flags & (~HASH_ENTIRE)) {
 		/*
 		 * User should not pass anything other than FIRST, UPDATE, or
@@ -351,20 +362,26 @@ static struct sha512_hash_ctx
 		}
 	}
 
-	return sha512_ctx_mgr_resubmit(mgr, ctx);
+	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
+	spin_unlock_irqrestore(&cstate->work_lock, irqflags);
+	return ctx;
 }
 
-static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
+static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
 {
+	struct sha512_ctx_mgr *mgr;
 	struct sha512_hash_ctx *ctx;
+	unsigned long flags;
 
+	mgr = cstate->mgr;
+	spin_lock_irqsave(&cstate->work_lock, flags);
 	while (1) {
 		ctx = (struct sha512_hash_ctx *)
 					sha512_job_mgr_flush(&mgr->mgr);
 
 		/* If flush returned 0, there are no more jobs in flight. */
 		if (!ctx)
-			return NULL;
+			break;
 
 		/*
 		 * If flush returned a job, resubmit the job to finish
@@ -378,8 +395,10 @@ static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
 		 * the sha512_ctx_mgr still need processing. Loop.
 		 */
 		if (ctx)
-			return ctx;
+			break;
 	}
+	spin_unlock_irqrestore(&cstate->work_lock, flags);
+	return ctx;
 }
 
 static int sha512_mb_init(struct ahash_request *areq)
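Note that the flush conversion is more than a signature change: both early returns inside the while (1) loop become break so that every path funnels through the single spin_unlock_irqrestore() before the function returns. An early return while holding work_lock would leave the lock held forever. A sketch of the single-exit shape, using hypothetical names:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct item;				/* hypothetical in-flight job */
	struct state { spinlock_t lock; };	/* hypothetical shared state */
	extern struct item *pop_next(struct state *s);
	extern bool item_ready(struct item *it);

	static struct item *drain_one(struct state *s)
	{
		struct item *it;
		unsigned long flags;

		spin_lock_irqsave(&s->lock, flags);
		while (1) {
			it = pop_next(s);
			if (!it)
				break;		/* nothing left in flight */
			if (item_ready(it))
				break;		/* hand this one back */
		}
		spin_unlock_irqrestore(&s->lock, flags);	/* single exit */
		return it;			/* NULL or a ready item */
	}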
@@ -439,11 +458,11 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
 		sha_ctx = (struct sha512_hash_ctx *)
 						ahash_request_ctx(&rctx->areq);
 		kernel_fpu_begin();
-		sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx,
+		sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
 						rctx->walk.data, nbytes, flag);
 		if (!sha_ctx) {
 			if (flush)
-				sha_ctx = sha512_ctx_mgr_flush(cstate->mgr);
+				sha_ctx = sha512_ctx_mgr_flush(cstate);
 		}
 		kernel_fpu_end();
 		if (sha_ctx)
@@ -471,11 +490,12 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 	struct sha512_hash_ctx *sha_ctx;
 	struct mcryptd_hash_request_ctx *req_ctx;
 	int ret;
+	unsigned long flags;
 
 	/* remove from work list */
-	spin_lock(&cstate->work_lock);
+	spin_lock_irqsave(&cstate->work_lock, flags);
 	list_del(&rctx->waiter);
-	spin_unlock(&cstate->work_lock);
+	spin_unlock_irqrestore(&cstate->work_lock, flags);
 
 	if (irqs_disabled())
 		rctx->complete(&req->base, err);
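In sha_complete_job() the plain spin_lock()/spin_unlock() pair around the list manipulation becomes the _irqsave variant, which needs the new unsigned long flags local: spin_lock_irqsave() is a macro that stores the saved interrupt state into flags, and spin_unlock_irqrestore() consumes it. Once any acquirer of a lock needs interrupts disabled, all acquirers should use the matching variant, which is why every work_lock site in this file is converted. The conversion in miniature, with a hypothetical demo_lock:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_del(struct list_head *entry)
	{
		unsigned long flags;	/* must be unsigned long, not int */

		spin_lock_irqsave(&demo_lock, flags);	/* IRQs off, state saved */
		list_del(entry);
		spin_unlock_irqrestore(&demo_lock, flags); /* prior state restored */
	}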
@@ -486,14 +506,14 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 	}
 
 	/* check to see if there are other jobs that are done */
-	sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
+	sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
 	while (sha_ctx) {
 		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
 		ret = sha_finish_walk(&req_ctx, cstate, false);
 		if (req_ctx) {
-			spin_lock(&cstate->work_lock);
+			spin_lock_irqsave(&cstate->work_lock, flags);
 			list_del(&req_ctx->waiter);
-			spin_unlock(&cstate->work_lock);
+			spin_unlock_irqrestore(&cstate->work_lock, flags);
 
 			req = cast_mcryptd_ctx_to_req(req_ctx);
 			if (irqs_disabled())
@@ -504,7 +524,7 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 				local_bh_enable();
 			}
 		}
-		sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
+		sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
 	}
 
 	return 0;
@@ -515,16 +535,17 @@ static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
 {
 	unsigned long next_flush;
 	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
+	unsigned long flags;
 
 	/* initialize tag */
 	rctx->tag.arrival = jiffies;	/* tag the arrival time */
 	rctx->tag.seq_num = cstate->next_seq_num++;
 	next_flush = rctx->tag.arrival + delay;
 	rctx->tag.expire = next_flush;
 
-	spin_lock(&cstate->work_lock);
+	spin_lock_irqsave(&cstate->work_lock, flags);
 	list_add_tail(&rctx->waiter, &cstate->work_list);
-	spin_unlock(&cstate->work_lock);
+	spin_unlock_irqrestore(&cstate->work_lock, flags);
 
 	mcryptd_arm_flusher(cstate, delay);
 }
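sha512_mb_add_list() runs on the request submission path while the flusher it arms via mcryptd_arm_flusher() can later contend for the same work_lock, so this taker is converted to _irqsave like the rest. The classic interleaving the irqsave variants rule out, sketched as comments with a hypothetical lock l:

	/*
	 *   CPU n, process context          interrupt on CPU n
	 *   ----------------------          ------------------
	 *   spin_lock(&l);
	 *       <interrupt arrives>  ---->  spin_lock(&l);  spins forever:
	 *                                   the interrupted owner can never
	 *                                   run again to release the lock.
	 *
	 * spin_lock_irqsave() closes the window by keeping local interrupts
	 * off for as long as the lock is held.
	 */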
@@ -565,7 +586,7 @@ static int sha512_mb_update(struct ahash_request *areq)
 	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
 	sha512_mb_add_list(rctx, cstate);
 	kernel_fpu_begin();
-	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
 							nbytes, HASH_UPDATE);
 	kernel_fpu_end();
 
@@ -628,7 +649,7 @@ static int sha512_mb_finup(struct ahash_request *areq)
 	sha512_mb_add_list(rctx, cstate);
 
 	kernel_fpu_begin();
-	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
 								nbytes, flag);
 	kernel_fpu_end();
 
@@ -677,8 +698,7 @@ static int sha512_mb_final(struct ahash_request *areq)
 	/* flag HASH_FINAL and 0 data size */
 	sha512_mb_add_list(rctx, cstate);
 	kernel_fpu_begin();
-	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
-							HASH_LAST);
+	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
 	kernel_fpu_end();
 
 	/* check if anything is returned */
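Each submit call sits between kernel_fpu_begin() and kernel_fpu_end() because the multi-buffer SHA-512 routines use SIMD registers, which in-kernel code may only touch inside such a bracketed region; the two-line call in sha512_mb_final() collapses to one simply because the shorter cstate argument lets it fit. The bracketing pattern, with simd_hash_submit() as a hypothetical stand-in for the real submit:

	#include <linux/types.h>
	#include <asm/fpu/api.h>

	struct my_hash_ctx;			/* hypothetical */
	struct my_cstate;			/* hypothetical */
	extern struct my_hash_ctx *simd_hash_submit(struct my_cstate *cstate,
						    struct my_hash_ctx *ctx,
						    const void *data,
						    u32 len, int flags);

	static struct my_hash_ctx *submit_fpu(struct my_cstate *cstate,
					      struct my_hash_ctx *ctx,
					      const void *data, u32 len,
					      int flags)
	{
		struct my_hash_ctx *ret;

		kernel_fpu_begin();		/* SIMD state now safe to use */
		ret = simd_hash_submit(cstate, ctx, data, len, flags);
		kernel_fpu_end();		/* restore user FPU context */
		return ret;
	}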
@@ -940,7 +960,7 @@ static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
 			break;
 		kernel_fpu_begin();
 		sha_ctx = (struct sha512_hash_ctx *)
-					sha512_ctx_mgr_flush(cstate->mgr);
+					sha512_ctx_mgr_flush(cstate);
 		kernel_fpu_end();
 		if (!sha_ctx) {
 			pr_err("sha512_mb error: nothing got flushed for"