@@ -206,7 +206,7 @@ static void qp_event_callback(struct ehca_shca *shca,
 }
 
 static void cq_event_callback(struct ehca_shca *shca,
-                             u64 eqe)
+			      u64 eqe)
 {
 	struct ehca_cq *cq;
 	unsigned long flags;
@@ -318,15 +318,15 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 			 "disruptive port %x configuration change", port);
 
 		ehca_info(&shca->ib_device,
-			 "port %x is inactive.", port);
+			  "port %x is inactive.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ERR;
 		event.element.port_num = port;
 		shca->sport[port - 1].port_state = IB_PORT_DOWN;
 		ib_dispatch_event(&event);
 
 		ehca_info(&shca->ib_device,
-			 "port %x is active.", port);
+			  "port %x is active.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ACTIVE;
 		event.element.port_num = port;
@@ -401,87 +401,143 @@ irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-void ehca_tasklet_eq(unsigned long data)
-{
-	struct ehca_shca *shca = (struct ehca_shca *)data;
-	struct ehca_eqe *eqe;
-	int int_state;
-	int query_cnt = 0;
-
-	do {
-		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
-
-		if ((shca->hw_level >= 2) && eqe)
-			int_state = 1;
-		else
-			int_state = 0;
-
-		while ((int_state == 1) || eqe) {
-			while (eqe) {
-				u64 eqe_value = eqe->entry;
-
-				ehca_dbg(&shca->ib_device,
-					 "eqe_value=%lx", eqe_value);
-
-				/* TODO: better structure */
-				if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
-						   eqe_value)) {
-					unsigned long flags;
-					u32 token;
-					struct ehca_cq *cq;
-
-					ehca_dbg(&shca->ib_device,
-						 "... completion event");
-					token =
-						EHCA_BMASK_GET(EQE_CQ_TOKEN,
-							       eqe_value);
-					spin_lock_irqsave(&ehca_cq_idr_lock,
-							  flags);
-					cq = idr_find(&ehca_cq_idr, token);
-
-					if (cq == NULL) {
-						spin_unlock_irqrestore(&ehca_cq_idr_lock,
-								       flags);
-						break;
-					}
-
-					reset_eq_pending(cq);
+static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
+{
+	u64 eqe_value;
+	u32 token;
+	unsigned long flags;
+	struct ehca_cq *cq;
+	eqe_value = eqe->entry;
+	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
+	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
+		ehca_dbg(&shca->ib_device, "... completion event");
+		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		cq = idr_find(&ehca_cq_idr, token);
+		if (cq == NULL) {
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+			ehca_err(&shca->ib_device,
+				 "Invalid eqe for non-existing cq token=%x",
+				 token);
+			return;
+		}
+		reset_eq_pending(cq);
 #ifdef CONFIG_INFINIBAND_EHCA_SCALING
-					queue_comp_task(cq);
-					spin_unlock_irqrestore(&ehca_cq_idr_lock,
-							       flags);
+		queue_comp_task(cq);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 #else
-					spin_unlock_irqrestore(&ehca_cq_idr_lock,
-							       flags);
-					comp_event_callback(cq);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		comp_event_callback(cq);
 #endif
-				} else {
-					ehca_dbg(&shca->ib_device,
-						 "... non completion event");
-					parse_identifier(shca, eqe_value);
-				}
-				eqe =
-					(struct ehca_eqe *)ehca_poll_eq(shca,
-									&shca->eq);
-			}
+	} else {
+		ehca_dbg(&shca->ib_device,
+			 "Got non completion event");
+		parse_identifier(shca, eqe_value);
+	}
+}
 
-			if (shca->hw_level >= 2) {
-				int_state =
-					hipz_h_query_int_state(shca->ipz_hca_handle,
-							       shca->eq.ist);
-				query_cnt++;
-				iosync();
-				if (query_cnt >= 100) {
-					query_cnt = 0;
-					int_state = 0;
-				}
-			}
-			eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+void ehca_process_eq(struct ehca_shca *shca, int is_irq)
+{
+	struct ehca_eq *eq = &shca->eq;
+	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
+	u64 eqe_value;
+	unsigned long flags;
+	int eqe_cnt, i;
+	int eq_empty = 0;
+
+	spin_lock_irqsave(&eq->irq_spinlock, flags);
+	if (is_irq) {
+		const int max_query_cnt = 100;
+		int query_cnt = 0;
+		int int_state = 1;
+		do {
+			int_state = hipz_h_query_int_state(
+				shca->ipz_hca_handle, eq->ist);
+			query_cnt++;
+			iosync();
+		} while (int_state && query_cnt < max_query_cnt);
+		if (unlikely((query_cnt == max_query_cnt)))
+			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
+				 int_state, query_cnt);
+	}
 
+	/* read out all eqes */
+	eqe_cnt = 0;
+	do {
+		u32 token;
+		eqe_cache[eqe_cnt].eqe =
+			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
+		if (!eqe_cache[eqe_cnt].eqe)
+			break;
+		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
+		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
+			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
+			spin_lock(&ehca_cq_idr_lock);
+			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
+			if (!eqe_cache[eqe_cnt].cq) {
+				spin_unlock(&ehca_cq_idr_lock);
+				ehca_err(&shca->ib_device,
+					 "Invalid eqe for non-existing cq "
+					 "token=%x", token);
+				continue;
+			}
+			spin_unlock(&ehca_cq_idr_lock);
+		} else
+			eqe_cache[eqe_cnt].cq = NULL;
+		eqe_cnt++;
+	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
+	if (!eqe_cnt) {
+		if (is_irq)
+			ehca_dbg(&shca->ib_device,
+				 "No eqe found for irq event");
+		goto unlock_irq_spinlock;
+	} else if (!is_irq)
+		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
+	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
+		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
+	/* enable irq for new packets */
+	for (i = 0; i < eqe_cnt; i++) {
+		if (eq->eqe_cache[i].cq)
+			reset_eq_pending(eq->eqe_cache[i].cq);
+	}
+	/* check eq */
+	spin_lock(&eq->spinlock);
+	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
+	spin_unlock(&eq->spinlock);
+	/* call completion handler for cached eqes */
+	for (i = 0; i < eqe_cnt; i++)
+		if (eq->eqe_cache[i].cq) {
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+			spin_lock(&ehca_cq_idr_lock);
+			queue_comp_task(eq->eqe_cache[i].cq);
+			spin_unlock(&ehca_cq_idr_lock);
+#else
+			comp_event_callback(eq->eqe_cache[i].cq);
+#endif
+		} else {
+			ehca_dbg(&shca->ib_device, "Got non completion event");
+			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
 		}
-	} while (int_state != 0);
+	/* poll eq if not empty */
+	if (eq_empty)
+		goto unlock_irq_spinlock;
+	do {
+		struct ehca_eqe *eqe;
+		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+		if (!eqe)
+			break;
+		process_eqe(shca, eqe);
+		eqe_cnt++;
+	} while (1);
+
+unlock_irq_spinlock:
+	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
+}
 
-	return;
+void ehca_tasklet_eq(unsigned long data)
+{
+	ehca_process_eq((struct ehca_shca *)data, 1);
 }
 
 #ifdef CONFIG_INFINIBAND_EHCA_SCALING
@@ -654,11 +710,11 @@ static void take_over_work(struct ehca_comp_pool *pool,
 	list_splice_init(&cct->cq_list, &list);
 
 	while (!list_empty(&list)) {
-	       cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
+		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 
-	       list_del(&cq->entry);
-	       __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
-						 smp_processor_id()));
+		list_del(&cq->entry);
+		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
+						  smp_processor_id()));
 	}
 
 	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
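For readers skimming the large hunk above: the new ehca_process_eq() replaces the old nested polling loops with a two-phase scheme. Phase 1 drains the event queue into the fixed-size eqe_cache and re-arms completion interrupts (reset_eq_pending()); phase 2 walks the cached batch and calls the handlers. Below is a minimal stand-alone sketch of that pattern, not driver code: every name in it (demo_eqe, fake_poll_eq, dispatch_event, CACHE_SIZE) is a hypothetical stand-in, and only the control flow mirrors the hunk.

/*
 * Minimal sketch of the cache-then-dispatch pattern used by
 * ehca_process_eq() above. All identifiers here are hypothetical
 * stand-ins; only the two-phase control flow mirrors the driver.
 */
#include <stdio.h>
#include <stddef.h>

#define CACHE_SIZE 4	/* stands in for EHCA_EQE_CACHE_SIZE */

struct demo_eqe { unsigned long entry; };

/* Hypothetical event source; returns NULL once the queue is empty. */
static struct demo_eqe *fake_poll_eq(void)
{
	static struct demo_eqe ring[] = { { 0x1 }, { 0x2 }, { 0x3 } };
	static size_t next;

	return next < sizeof(ring) / sizeof(ring[0]) ? &ring[next++] : NULL;
}

static void dispatch_event(const struct demo_eqe *eqe)
{
	printf("dispatching eqe_value=%lx\n", eqe->entry);
}

int main(void)
{
	struct demo_eqe *cache[CACHE_SIZE];
	int cnt = 0, i;

	/* Phase 1: read out all eqes, bounded by the cache size. */
	while (cnt < CACHE_SIZE && (cache[cnt] = fake_poll_eq()))
		cnt++;

	/*
	 * The driver re-arms the interrupt at this point
	 * (reset_eq_pending()) before any handler runs, so events
	 * arriving during phase 2 raise a fresh interrupt instead
	 * of being lost.
	 */

	/* Phase 2: call the handlers for the cached entries. */
	for (i = 0; i < cnt; i++)
		dispatch_event(cache[i]);
	return 0;
}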