@@ -544,28 +544,30 @@ void ehca_tasklet_eq(unsigned long data)
 
 static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
 {
-	unsigned long flags_last_cpu;
+	int cpu;
+	unsigned long flags;
 
+	WARN_ON_ONCE(!in_interrupt());
 	if (ehca_debug_level)
 		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
 
-	spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu);
-	pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map);
-	if (pool->last_cpu == NR_CPUS)
-		pool->last_cpu = first_cpu(cpu_online_map);
-	spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu);
+	spin_lock_irqsave(&pool->last_cpu_lock, flags);
+	cpu = next_cpu(pool->last_cpu, cpu_online_map);
+	if (cpu == NR_CPUS)
+		cpu = first_cpu(cpu_online_map);
+	pool->last_cpu = cpu;
+	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
 
-	return pool->last_cpu;
+	return cpu;
 }
 
 static void __queue_comp_task(struct ehca_cq *__cq,
 			      struct ehca_cpu_comp_task *cct)
 {
-	unsigned long flags_cct;
-	unsigned long flags_cq;
+	unsigned long flags;
 
-	spin_lock_irqsave(&cct->task_lock, flags_cct);
-	spin_lock_irqsave(&__cq->task_lock, flags_cq);
+	spin_lock_irqsave(&cct->task_lock, flags);
+	spin_lock(&__cq->task_lock);
 
 	if (__cq->nr_callbacks == 0) {
 		__cq->nr_callbacks++;
@@ -576,8 +578,8 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 	else
 		__cq->nr_callbacks++;
 
-	spin_unlock_irqrestore(&__cq->task_lock, flags_cq);
-	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+	spin_unlock(&__cq->task_lock);
+	spin_unlock_irqrestore(&cct->task_lock, flags);
 }
 
 static void queue_comp_task(struct ehca_cq *__cq)
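Note on the locking change above: the inner CQ lock in __queue_comp_task() is switched from spin_lock_irqsave() to a plain spin_lock(), because the outer spin_lock_irqsave() on cct->task_lock has already disabled local interrupts, so saving and restoring the flags a second time for the nested lock is redundant. A minimal, self-contained sketch of that nesting pattern follows; the lock names are illustrative stand-ins, not identifiers from the driver.

/* Illustrative sketch (not part of the patch): nesting a plain spin_lock()
 * inside spin_lock_irqsave(). The outer lock already disables local IRQs,
 * so the inner lock does not need to save/restore flags again.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* hypothetical, stands in for cct->task_lock */
static DEFINE_SPINLOCK(inner_lock);	/* hypothetical, stands in for __cq->task_lock */

static void nested_lock_example(void)
{
	unsigned long flags;

	spin_lock_irqsave(&outer_lock, flags);	/* disables local interrupts */
	spin_lock(&inner_lock);			/* IRQs already off: plain lock suffices */

	/* ... critical section touching both objects ... */

	spin_unlock(&inner_lock);
	spin_unlock_irqrestore(&outer_lock, flags);	/* restores saved IRQ state */
}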
@@ -588,69 +590,69 @@ static void queue_comp_task(struct ehca_cq *__cq)
 
 	cpu = get_cpu();
 	cpu_id = find_next_online_cpu(pool);
-
 	BUG_ON(!cpu_online(cpu_id));
 
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+	BUG_ON(!cct);
 
 	if (cct->cq_jobs > 0) {
 		cpu_id = find_next_online_cpu(pool);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+		BUG_ON(!cct);
 	}
 
 	__queue_comp_task(__cq, cct);
-
-	put_cpu();
-
-	return;
 }
 
 static void run_comp_task(struct ehca_cpu_comp_task *cct)
 {
 	struct ehca_cq *cq;
-	unsigned long flags_cct;
-	unsigned long flags_cq;
+	unsigned long flags;
 
-	spin_lock_irqsave(&cct->task_lock, flags_cct);
+	spin_lock_irqsave(&cct->task_lock, flags);
 
 	while (!list_empty(&cct->cq_list)) {
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
-		spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+		spin_unlock_irqrestore(&cct->task_lock, flags);
 		comp_event_callback(cq);
-		spin_lock_irqsave(&cct->task_lock, flags_cct);
+		spin_lock_irqsave(&cct->task_lock, flags);
 
-		spin_lock_irqsave(&cq->task_lock, flags_cq);
+		spin_lock(&cq->task_lock);
 		cq->nr_callbacks--;
 		if (cq->nr_callbacks == 0) {
 			list_del_init(cct->cq_list.next);
 			cct->cq_jobs--;
 		}
-		spin_unlock_irqrestore(&cq->task_lock, flags_cq);
-
+		spin_unlock(&cq->task_lock);
 	}
 
-	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
-
-	return;
+	spin_unlock_irqrestore(&cct->task_lock, flags);
 }
 
 static int comp_task(void *__cct)
 {
 	struct ehca_cpu_comp_task *cct = __cct;
+	int cql_empty;
 	DECLARE_WAITQUEUE(wait, current);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		add_wait_queue(&cct->wait_queue, &wait);
 
-		if (list_empty(&cct->cq_list))
+		spin_lock_irq(&cct->task_lock);
+		cql_empty = list_empty(&cct->cq_list);
+		spin_unlock_irq(&cct->task_lock);
+		if (cql_empty)
 			schedule();
 		else
 			__set_current_state(TASK_RUNNING);
 
 		remove_wait_queue(&cct->wait_queue, &wait);
 
-		if (!list_empty(&cct->cq_list))
+		spin_lock_irq(&cct->task_lock);
+		cql_empty = list_empty(&cct->cq_list);
+		spin_unlock_irq(&cct->task_lock);
+		if (!cql_empty)
 			run_comp_task(__cct);
 
 		set_current_state(TASK_INTERRUPTIBLE);
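Note on the comp_task() change above: the emptiness check on cct->cq_list is now sampled under cct->task_lock (using spin_lock_irq(), since the kthread runs in process context) and cached in cql_empty, so the decision to sleep or to run work is based on a value read while holding the lock instead of an unlocked list_empty(). A rough, self-contained sketch of that wait-loop pattern follows; the structure and function names are hypothetical, not taken from the driver.

/* Illustrative sketch (not part of the patch): sample the work-list state
 * under the lock before deciding whether to sleep, mirroring the cql_empty
 * handling in comp_task().
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>

struct worker_ctx {			/* hypothetical stand-in for ehca_cpu_comp_task */
	spinlock_t lock;
	struct list_head work_list;
	wait_queue_head_t wq;
};

static int worker_thread(void *data)
{
	struct worker_ctx *ctx = data;
	int empty;
	DECLARE_WAITQUEUE(wait, current);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&ctx->wq, &wait);

		spin_lock_irq(&ctx->lock);	/* process context: irq variant is fine */
		empty = list_empty(&ctx->work_list);
		spin_unlock_irq(&ctx->lock);

		if (empty)
			schedule();		/* nothing queued: sleep until woken */
		else
			__set_current_state(TASK_RUNNING);

		remove_wait_queue(&ctx->wq, &wait);
		/* ... drain and process queued work here ... */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}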
@@ -693,8 +695,6 @@ static void destroy_comp_task(struct ehca_comp_pool *pool,
 
 	if (task)
 		kthread_stop(task);
-
-	return;
 }
 
 static void take_over_work(struct ehca_comp_pool *pool,
@@ -815,6 +815,4 @@ void ehca_destroy_comp_pool(void)
 	free_percpu(pool->cpu_comp_tasks);
 	kfree(pool);
 #endif
-
-	return;
 }