@@ -2362,6 +2362,46 @@ static void hclge_service_complete(struct hclge_dev *hdev)
2362
2362
clear_bit (HCLGE_STATE_SERVICE_SCHED , & hdev -> state );
2363
2363
}
2364
2364
2365
+ static u32 hclge_check_event_cause (struct hclge_dev * hdev , u32 * clearval )
2366
+ {
2367
+ u32 rst_src_reg ;
2368
+
2369
+ /* fetch the events from their corresponding regs */
2370
+ rst_src_reg = hclge_read_dev (& hdev -> hw , HCLGE_MISC_RESET_STS_REG );
2371
+
2372
+ /* check for vector0 reset event sources */
2373
+ if (BIT (HCLGE_VECTOR0_GLOBALRESET_INT_B ) & rst_src_reg ) {
2374
+ set_bit (HNAE3_GLOBAL_RESET , & hdev -> reset_pending );
2375
+ * clearval = BIT (HCLGE_VECTOR0_GLOBALRESET_INT_B );
2376
+ return HCLGE_VECTOR0_EVENT_RST ;
2377
+ }
2378
+
2379
+ if (BIT (HCLGE_VECTOR0_CORERESET_INT_B ) & rst_src_reg ) {
2380
+ set_bit (HNAE3_CORE_RESET , & hdev -> reset_pending );
2381
+ * clearval = BIT (HCLGE_VECTOR0_CORERESET_INT_B );
2382
+ return HCLGE_VECTOR0_EVENT_RST ;
2383
+ }
2384
+
2385
+ if (BIT (HCLGE_VECTOR0_IMPRESET_INT_B ) & rst_src_reg ) {
2386
+ set_bit (HNAE3_IMP_RESET , & hdev -> reset_pending );
2387
+ * clearval = BIT (HCLGE_VECTOR0_IMPRESET_INT_B );
2388
+ return HCLGE_VECTOR0_EVENT_RST ;
2389
+ }
2390
+
2391
+ /* mailbox event sharing vector 0 interrupt would be placed here */
2392
+
2393
+ return HCLGE_VECTOR0_EVENT_OTHER ;
2394
+ }
2395
+
2396
+ static void hclge_clear_event_cause (struct hclge_dev * hdev , u32 event_type ,
2397
+ u32 regclr )
2398
+ {
2399
+ if (event_type == HCLGE_VECTOR0_EVENT_RST )
2400
+ hclge_write_dev (& hdev -> hw , HCLGE_MISC_RESET_STS_REG , regclr );
2401
+
2402
+ /* mailbox event sharing vector 0 interrupt would be placed here */
2403
+ }
2404
+
2365
2405
static void hclge_enable_vector (struct hclge_misc_vector * vector , bool enable )
2366
2406
{
2367
2407
writel (enable ? 1 : 0 , vector -> addr );
@@ -2370,10 +2410,28 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2370
2410
static irqreturn_t hclge_misc_irq_handle (int irq , void * data )
2371
2411
{
2372
2412
struct hclge_dev * hdev = data ;
2413
+ u32 event_cause ;
2414
+ u32 clearval ;
2373
2415
2374
2416
hclge_enable_vector (& hdev -> misc_vector , false);
2375
- if (!test_and_set_bit (HCLGE_STATE_SERVICE_SCHED , & hdev -> state ))
2376
- schedule_work (& hdev -> service_task );
2417
+ event_cause = hclge_check_event_cause (hdev , & clearval );
2418
+
2419
+ /* vector 0 interrupt is shared with reset and mailbox source events.
2420
+ * For now, we are not handling mailbox events.
2421
+ */
2422
+ switch (event_cause ) {
2423
+ case HCLGE_VECTOR0_EVENT_RST :
2424
+ /* reset task to be scheduled here */
2425
+ break ;
2426
+ default :
2427
+ dev_dbg (& hdev -> pdev -> dev ,
2428
+ "received unknown or unhandled event of vector0\n" );
2429
+ break ;
2430
+ }
2431
+
2432
+ /* we should clear the source of interrupt */
2433
+ hclge_clear_event_cause (hdev , event_cause , clearval );
2434
+ hclge_enable_vector (& hdev -> misc_vector , true);
2377
2435
2378
2436
return IRQ_HANDLED ;
2379
2437
}
@@ -2404,9 +2462,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
2404
2462
2405
2463
hclge_get_misc_vector (hdev );
2406
2464
2407
- ret = devm_request_irq ( & hdev -> pdev -> dev ,
2408
- hdev -> misc_vector .vector_irq ,
2409
- hclge_misc_irq_handle , 0 , "hclge_misc" , hdev );
2465
+ /* this would be explicitly freed in the end */
2466
+ ret = request_irq ( hdev -> misc_vector .vector_irq , hclge_misc_irq_handle ,
2467
+ 0 , "hclge_misc" , hdev );
2410
2468
if (ret ) {
2411
2469
hclge_free_vector (hdev , 0 );
2412
2470
dev_err (& hdev -> pdev -> dev , "request misc irq(%d) fail\n" ,
@@ -2416,6 +2474,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
2416
2474
return ret ;
2417
2475
}
2418
2476
2477
+ static void hclge_misc_irq_uninit (struct hclge_dev * hdev )
2478
+ {
2479
+ free_irq (hdev -> misc_vector .vector_irq , hdev );
2480
+ hclge_free_vector (hdev , 0 );
2481
+ }
2482
+
2419
2483
static int hclge_notify_client (struct hclge_dev * hdev ,
2420
2484
enum hnae3_reset_notify_type type )
2421
2485
{
@@ -2471,12 +2535,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
2471
2535
cnt ++ ;
2472
2536
}
2473
2537
2474
- /* must clear reset status register to
2475
- * prevent driver detect reset interrupt again
2476
- */
2477
- reg = hclge_read_dev (& hdev -> hw , HCLGE_MISC_RESET_STS_REG );
2478
- hclge_write_dev (& hdev -> hw , HCLGE_MISC_RESET_STS_REG , reg );
2479
-
2480
2538
if (cnt >= HCLGE_RESET_WAIT_CNT ) {
2481
2539
dev_warn (& hdev -> pdev -> dev ,
2482
2540
"Wait for reset timeout: %d\n" , hdev -> reset_type );
@@ -2534,22 +2592,6 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
2534
2592
}
2535
2593
}
2536
2594
2537
- static enum hnae3_reset_type hclge_detected_reset_event (struct hclge_dev * hdev )
2538
- {
2539
- enum hnae3_reset_type rst_level = HNAE3_NONE_RESET ;
2540
- u32 rst_reg_val ;
2541
-
2542
- rst_reg_val = hclge_read_dev (& hdev -> hw , HCLGE_MISC_RESET_STS_REG );
2543
- if (BIT (HCLGE_VECTOR0_GLOBALRESET_INT_B ) & rst_reg_val )
2544
- rst_level = HNAE3_GLOBAL_RESET ;
2545
- else if (BIT (HCLGE_VECTOR0_CORERESET_INT_B ) & rst_reg_val )
2546
- rst_level = HNAE3_CORE_RESET ;
2547
- else if (BIT (HCLGE_VECTOR0_IMPRESET_INT_B ) & rst_reg_val )
2548
- rst_level = HNAE3_IMP_RESET ;
2549
-
2550
- return rst_level ;
2551
- }
2552
-
2553
2595
static void hclge_reset_event (struct hnae3_handle * handle ,
2554
2596
enum hnae3_reset_type reset )
2555
2597
{
@@ -2584,9 +2626,6 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
2584
2626
2585
2627
do_reset = hdev -> reset_type != HNAE3_NONE_RESET ;
2586
2628
2587
- /* Reset is detected by interrupt */
2588
- if (hdev -> reset_type == HNAE3_NONE_RESET )
2589
- hdev -> reset_type = hclge_detected_reset_event (hdev );
2590
2629
2591
2630
if (hdev -> reset_type == HNAE3_NONE_RESET )
2592
2631
return ;
@@ -2622,7 +2661,6 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
2622
2661
static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
{
	/* The misc service task currently only drives the reset subtask;
	 * vector 0 re-enabling is handled directly in the IRQ handler.
	 */
	hclge_reset_subtask(hdev);
}
2627
2665
2628
2666
static void hclge_service_task (struct work_struct * work )
@@ -4661,6 +4699,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
4661
4699
hdev -> pdev = pdev ;
4662
4700
hdev -> ae_dev = ae_dev ;
4663
4701
hdev -> reset_type = HNAE3_NONE_RESET ;
4702
+ hdev -> reset_pending = 0 ;
4664
4703
ae_dev -> priv = hdev ;
4665
4704
4666
4705
ret = hclge_pci_init (hdev );
@@ -4895,8 +4934,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
4895
4934
4896
4935
/* Disable MISC vector(vector0) */
4897
4936
hclge_enable_vector (& hdev -> misc_vector , false);
4898
- hclge_free_vector (hdev , 0 );
4899
4937
hclge_destroy_cmd_queue (& hdev -> hw );
4938
+ hclge_misc_irq_uninit (hdev );
4900
4939
hclge_pci_uninit (hdev );
4901
4940
ae_dev -> priv = NULL ;
4902
4941
}
0 commit comments