Skip to content

Commit 78d8d5f

Browse files
Hoang-Nam Nguyen authored and Roland Dreier committed
IB/ehca: Rework irq handler
Rework ehca interrupt handling to avoid/reduce missed irq events.

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
1 parent 551fd61 commit 78d8d5f

File tree

6 files changed

+183
-92
lines changed

6 files changed

+183
-92
lines changed

drivers/infiniband/hw/ehca/ehca_classes.h

Lines changed: 13 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -42,8 +42,6 @@
4242
#ifndef __EHCA_CLASSES_H__
4343
#define __EHCA_CLASSES_H__
4444

45-
#include "ehca_classes.h"
46-
#include "ipz_pt_fn.h"
4745

4846
struct ehca_module;
4947
struct ehca_qp;
@@ -54,14 +52,22 @@ struct ehca_mw;
5452
struct ehca_pd;
5553
struct ehca_av;
5654

55+
#include <rdma/ib_verbs.h>
56+
#include <rdma/ib_user_verbs.h>
57+
5758
#ifdef CONFIG_PPC64
5859
#include "ehca_classes_pSeries.h"
5960
#endif
61+
#include "ipz_pt_fn.h"
62+
#include "ehca_qes.h"
63+
#include "ehca_irq.h"
6064

61-
#include <rdma/ib_verbs.h>
62-
#include <rdma/ib_user_verbs.h>
65+
#define EHCA_EQE_CACHE_SIZE 20
6366

64-
#include "ehca_irq.h"
67+
struct ehca_eqe_cache_entry {
68+
struct ehca_eqe *eqe;
69+
struct ehca_cq *cq;
70+
};
6571

6672
struct ehca_eq {
6773
u32 length;
@@ -74,6 +80,8 @@ struct ehca_eq {
7480
spinlock_t spinlock;
7581
struct tasklet_struct interrupt_task;
7682
u32 ist;
83+
spinlock_t irq_spinlock;
84+
struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
7785
};
7886

7987
struct ehca_sport {

drivers/infiniband/hw/ehca/ehca_eq.c

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -61,6 +61,7 @@ int ehca_create_eq(struct ehca_shca *shca,
6161
struct ib_device *ib_dev = &shca->ib_device;
6262

6363
spin_lock_init(&eq->spinlock);
64+
spin_lock_init(&eq->irq_spinlock);
6465
eq->is_initialized = 0;
6566

6667
if (type != EHCA_EQ && type != EHCA_NEQ) {

drivers/infiniband/hw/ehca/ehca_irq.c

Lines changed: 136 additions & 80 deletions
Original file line number | Diff line number | Diff line change
@@ -206,7 +206,7 @@ static void qp_event_callback(struct ehca_shca *shca,
206206
}
207207

208208
static void cq_event_callback(struct ehca_shca *shca,
209-
u64 eqe)
209+
u64 eqe)
210210
{
211211
struct ehca_cq *cq;
212212
unsigned long flags;
@@ -318,15 +318,15 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
318318
"disruptive port %x configuration change", port);
319319

320320
ehca_info(&shca->ib_device,
321-
"port %x is inactive.", port);
321+
"port %x is inactive.", port);
322322
event.device = &shca->ib_device;
323323
event.event = IB_EVENT_PORT_ERR;
324324
event.element.port_num = port;
325325
shca->sport[port - 1].port_state = IB_PORT_DOWN;
326326
ib_dispatch_event(&event);
327327

328328
ehca_info(&shca->ib_device,
329-
"port %x is active.", port);
329+
"port %x is active.", port);
330330
event.device = &shca->ib_device;
331331
event.event = IB_EVENT_PORT_ACTIVE;
332332
event.element.port_num = port;
@@ -401,87 +401,143 @@ irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
401401
return IRQ_HANDLED;
402402
}
403403

404-
void ehca_tasklet_eq(unsigned long data)
405-
{
406-
struct ehca_shca *shca = (struct ehca_shca*)data;
407-
struct ehca_eqe *eqe;
408-
int int_state;
409-
int query_cnt = 0;
410404

411-
do {
412-
eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
413-
414-
if ((shca->hw_level >= 2) && eqe)
415-
int_state = 1;
416-
else
417-
int_state = 0;
418-
419-
while ((int_state == 1) || eqe) {
420-
while (eqe) {
421-
u64 eqe_value = eqe->entry;
422-
423-
ehca_dbg(&shca->ib_device,
424-
"eqe_value=%lx", eqe_value);
425-
426-
/* TODO: better structure */
427-
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
428-
eqe_value)) {
429-
unsigned long flags;
430-
u32 token;
431-
struct ehca_cq *cq;
432-
433-
ehca_dbg(&shca->ib_device,
434-
"... completion event");
435-
token =
436-
EHCA_BMASK_GET(EQE_CQ_TOKEN,
437-
eqe_value);
438-
spin_lock_irqsave(&ehca_cq_idr_lock,
439-
flags);
440-
cq = idr_find(&ehca_cq_idr, token);
441-
442-
if (cq == NULL) {
443-
spin_unlock_irqrestore(&ehca_cq_idr_lock,
444-
flags);
445-
break;
446-
}
447-
448-
reset_eq_pending(cq);
405+
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
406+
{
407+
u64 eqe_value;
408+
u32 token;
409+
unsigned long flags;
410+
struct ehca_cq *cq;
411+
eqe_value = eqe->entry;
412+
ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
413+
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
414+
ehca_dbg(&shca->ib_device, "... completion event");
415+
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
416+
spin_lock_irqsave(&ehca_cq_idr_lock, flags);
417+
cq = idr_find(&ehca_cq_idr, token);
418+
if (cq == NULL) {
419+
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
420+
ehca_err(&shca->ib_device,
421+
"Invalid eqe for non-existing cq token=%x",
422+
token);
423+
return;
424+
}
425+
reset_eq_pending(cq);
449426
#ifdef CONFIG_INFINIBAND_EHCA_SCALING
450-
queue_comp_task(cq);
451-
spin_unlock_irqrestore(&ehca_cq_idr_lock,
452-
flags);
427+
queue_comp_task(cq);
428+
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
453429
#else
454-
spin_unlock_irqrestore(&ehca_cq_idr_lock,
455-
flags);
456-
comp_event_callback(cq);
430+
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
431+
comp_event_callback(cq);
457432
#endif
458-
} else {
459-
ehca_dbg(&shca->ib_device,
460-
"... non completion event");
461-
parse_identifier(shca, eqe_value);
462-
}
463-
eqe =
464-
(struct ehca_eqe *)ehca_poll_eq(shca,
465-
&shca->eq);
466-
}
433+
} else {
434+
ehca_dbg(&shca->ib_device,
435+
"Got non completion event");
436+
parse_identifier(shca, eqe_value);
437+
}
438+
}
467439

468-
if (shca->hw_level >= 2) {
469-
int_state =
470-
hipz_h_query_int_state(shca->ipz_hca_handle,
471-
shca->eq.ist);
472-
query_cnt++;
473-
iosync();
474-
if (query_cnt >= 100) {
475-
query_cnt = 0;
476-
int_state = 0;
477-
}
478-
}
479-
eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
440+
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
441+
{
442+
struct ehca_eq *eq = &shca->eq;
443+
struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
444+
u64 eqe_value;
445+
unsigned long flags;
446+
int eqe_cnt, i;
447+
int eq_empty = 0;
448+
449+
spin_lock_irqsave(&eq->irq_spinlock, flags);
450+
if (is_irq) {
451+
const int max_query_cnt = 100;
452+
int query_cnt = 0;
453+
int int_state = 1;
454+
do {
455+
int_state = hipz_h_query_int_state(
456+
shca->ipz_hca_handle, eq->ist);
457+
query_cnt++;
458+
iosync();
459+
} while (int_state && query_cnt < max_query_cnt);
460+
if (unlikely((query_cnt == max_query_cnt)))
461+
ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
462+
int_state, query_cnt);
463+
}
480464

465+
/* read out all eqes */
466+
eqe_cnt = 0;
467+
do {
468+
u32 token;
469+
eqe_cache[eqe_cnt].eqe =
470+
(struct ehca_eqe *)ehca_poll_eq(shca, eq);
471+
if (!eqe_cache[eqe_cnt].eqe)
472+
break;
473+
eqe_value = eqe_cache[eqe_cnt].eqe->entry;
474+
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
475+
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
476+
spin_lock(&ehca_cq_idr_lock);
477+
eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
478+
if (!eqe_cache[eqe_cnt].cq) {
479+
spin_unlock(&ehca_cq_idr_lock);
480+
ehca_err(&shca->ib_device,
481+
"Invalid eqe for non-existing cq "
482+
"token=%x", token);
483+
continue;
484+
}
485+
spin_unlock(&ehca_cq_idr_lock);
486+
} else
487+
eqe_cache[eqe_cnt].cq = NULL;
488+
eqe_cnt++;
489+
} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
490+
if (!eqe_cnt) {
491+
if (is_irq)
492+
ehca_dbg(&shca->ib_device,
493+
"No eqe found for irq event");
494+
goto unlock_irq_spinlock;
495+
} else if (!is_irq)
496+
ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
497+
if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
498+
ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
499+
/* enable irq for new packets */
500+
for (i = 0; i < eqe_cnt; i++) {
501+
if (eq->eqe_cache[i].cq)
502+
reset_eq_pending(eq->eqe_cache[i].cq);
503+
}
504+
/* check eq */
505+
spin_lock(&eq->spinlock);
506+
eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
507+
spin_unlock(&eq->spinlock);
508+
/* call completion handler for cached eqes */
509+
for (i = 0; i < eqe_cnt; i++)
510+
if (eq->eqe_cache[i].cq) {
511+
#ifdef CONFIG_INFINIBAND_EHCA_SCALING
512+
spin_lock(&ehca_cq_idr_lock);
513+
queue_comp_task(eq->eqe_cache[i].cq);
514+
spin_unlock(&ehca_cq_idr_lock);
515+
#else
516+
comp_event_callback(eq->eqe_cache[i].cq);
517+
#endif
518+
} else {
519+
ehca_dbg(&shca->ib_device, "Got non completion event");
520+
parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
481521
}
482-
} while (int_state != 0);
522+
/* poll eq if not empty */
523+
if (eq_empty)
524+
goto unlock_irq_spinlock;
525+
do {
526+
struct ehca_eqe *eqe;
527+
eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
528+
if (!eqe)
529+
break;
530+
process_eqe(shca, eqe);
531+
eqe_cnt++;
532+
} while (1);
533+
534+
unlock_irq_spinlock:
535+
spin_unlock_irqrestore(&eq->irq_spinlock, flags);
536+
}
483537

484-
return;
538+
void ehca_tasklet_eq(unsigned long data)
539+
{
540+
ehca_process_eq((struct ehca_shca*)data, 1);
485541
}
486542

487543
#ifdef CONFIG_INFINIBAND_EHCA_SCALING
@@ -654,11 +710,11 @@ static void take_over_work(struct ehca_comp_pool *pool,
654710
list_splice_init(&cct->cq_list, &list);
655711

656712
while(!list_empty(&list)) {
657-
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
713+
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
658714

659-
list_del(&cq->entry);
660-
__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
661-
smp_processor_id()));
715+
list_del(&cq->entry);
716+
__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
717+
smp_processor_id()));
662718
}
663719

664720
spin_unlock_irqrestore(&cct->task_lock, flags_cct);

drivers/infiniband/hw/ehca/ehca_irq.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -56,6 +56,7 @@ void ehca_tasklet_neq(unsigned long data);
5656

5757
irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
5858
void ehca_tasklet_eq(unsigned long data);
59+
void ehca_process_eq(struct ehca_shca *shca, int is_irq);
5960

6061
struct ehca_cpu_comp_task {
6162
wait_queue_head_t wait_queue;

drivers/infiniband/hw/ehca/ehca_main.c

Lines changed: 22 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -52,7 +52,7 @@
5252
MODULE_LICENSE("Dual BSD/GPL");
5353
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
5454
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
55-
MODULE_VERSION("SVNEHCA_0020");
55+
MODULE_VERSION("SVNEHCA_0021");
5656

5757
int ehca_open_aqp1 = 0;
5858
int ehca_debug_level = 0;
@@ -432,8 +432,8 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
432432

433433
static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
434434
{
435-
return snprintf(buf, PAGE_SIZE, "%d\n",
436-
ehca_debug_level);
435+
return snprintf(buf, PAGE_SIZE, "%d\n",
436+
ehca_debug_level);
437437
}
438438

439439
static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -778,8 +778,24 @@ void ehca_poll_eqs(unsigned long data)
778778

779779
spin_lock(&shca_list_lock);
780780
list_for_each_entry(shca, &shca_list, shca_list) {
781-
if (shca->eq.is_initialized)
782-
ehca_tasklet_eq((unsigned long)(void*)shca);
781+
if (shca->eq.is_initialized) {
782+
/* call deadman proc only if eq ptr does not change */
783+
struct ehca_eq *eq = &shca->eq;
784+
int max = 3;
785+
volatile u64 q_ofs, q_ofs2;
786+
u64 flags;
787+
spin_lock_irqsave(&eq->spinlock, flags);
788+
q_ofs = eq->ipz_queue.current_q_offset;
789+
spin_unlock_irqrestore(&eq->spinlock, flags);
790+
do {
791+
spin_lock_irqsave(&eq->spinlock, flags);
792+
q_ofs2 = eq->ipz_queue.current_q_offset;
793+
spin_unlock_irqrestore(&eq->spinlock, flags);
794+
max--;
795+
} while (q_ofs == q_ofs2 && max > 0);
796+
if (q_ofs == q_ofs2)
797+
ehca_process_eq(shca, 0);
798+
}
783799
}
784800
mod_timer(&poll_eqs_timer, jiffies + HZ);
785801
spin_unlock(&shca_list_lock);
@@ -790,7 +806,7 @@ int __init ehca_module_init(void)
790806
int ret;
791807

792808
printk(KERN_INFO "eHCA Infiniband Device Driver "
793-
"(Rel.: SVNEHCA_0020)\n");
809+
"(Rel.: SVNEHCA_0021)\n");
794810
idr_init(&ehca_qp_idr);
795811
idr_init(&ehca_cq_idr);
796812
spin_lock_init(&ehca_qp_idr_lock);

0 commit comments

Comments
 (0)