@@ -17,7 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
-
+#include <net/rtnetlink.h>
 #include "hclge_cmd.h"
 #include "hclge_dcb.h"
 #include "hclge_main.h"
@@ -2226,6 +2226,12 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 	return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
 }

+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->rst_service_task);
+}
+
 static void hclge_task_schedule(struct hclge_dev *hdev)
 {
 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
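
Note on the scheduling helper added above: it relies on the kernel's atomic test_and_set_bit() to collapse concurrent scheduling attempts into a single queued work item. A minimal sketch of the idiom follows; my_dev, MY_STATE_SCHED and my_task_schedule() are hypothetical names, not part of this driver:

#include <linux/bitops.h>
#include <linux/workqueue.h>

struct my_dev {
	unsigned long state;
	struct work_struct task;
};

#define MY_STATE_SCHED	0

static void my_task_schedule(struct my_dev *mdev)
{
	/* test_and_set_bit() atomically returns the previous bit value,
	 * so only the first caller since the last clear queues the work
	 */
	if (!test_and_set_bit(MY_STATE_SCHED, &mdev->state))
		schedule_work(&mdev->task);
}
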
@@ -2362,6 +2368,46 @@ static void hclge_service_complete(struct hclge_dev *hdev)
 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
 }

+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+	u32 rst_src_reg;
+
+	/* fetch the events from their corresponding regs */
+	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+
+	/* check for vector0 reset event sources */
+	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
+	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
+	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
+	/* mailbox event sharing vector 0 interrupt would be placed here */
+
+	return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+				    u32 regclr)
+{
+	if (event_type == HCLGE_VECTOR0_EVENT_RST)
+		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+
+	/* mailbox event sharing vector 0 interrupt would be placed here */
+}
+
 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 {
 	writel(enable ? 1 : 0, vector->addr);
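
The split between hclge_check_event_cause() and hclge_clear_event_cause() lets the handler ack only the event it actually serviced. The status register appears to use write-back-to-clear semantics (the removed hclge_reset_wait() code wrote the read value back for the same reason). A hedged sketch of that pattern; SOME_RESET_INT_B is an illustrative bit name, not a real define:

u32 status = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);

if (status & BIT(SOME_RESET_INT_B)) {
	/* ... record the pending event ... */

	/* writing back only this bit acks it without discarding
	 * other, still-unhandled event sources
	 */
	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
			BIT(SOME_RESET_INT_B));
}
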
@@ -2370,10 +2416,28 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
 {
 	struct hclge_dev *hdev = data;
+	u32 event_cause;
+	u32 clearval;

 	hclge_enable_vector(&hdev->misc_vector, false);
-	if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
-		schedule_work(&hdev->service_task);
+	event_cause = hclge_check_event_cause(hdev, &clearval);
+
+	/* vector 0 interrupt is shared with reset and mailbox source events.
+	 * For now, we are not handling mailbox events.
+	 */
+	switch (event_cause) {
+	case HCLGE_VECTOR0_EVENT_RST:
+		hclge_reset_task_schedule(hdev);
+		break;
+	default:
+		dev_dbg(&hdev->pdev->dev,
+			"received unknown or unhandled event of vector0\n");
+		break;
+	}
+
+	/* we should clear the source of interrupt */
+	hclge_clear_event_cause(hdev, event_cause, clearval);
+	hclge_enable_vector(&hdev->misc_vector, true);

 	return IRQ_HANDLED;
 }
@@ -2404,9 +2468,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)

 	hclge_get_misc_vector(hdev);

-	ret = devm_request_irq(&hdev->pdev->dev,
-			       hdev->misc_vector.vector_irq,
-			       hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+	/* this would be explicitly freed in the end */
+	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+			  0, "hclge_misc", hdev);
 	if (ret) {
 		hclge_free_vector(hdev, 0);
 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
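
Replacing devm_request_irq() with a bare request_irq() trades automatic cleanup for explicit control: a devm-managed IRQ is released only when the PF device is unbound, presumably too late once reset handling needs to tear the misc vector down in a defined order. The explicit pairing, as a sketch using the names from this patch:

/* init path */
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
		  0, "hclge_misc", hdev);

/* uninit path -- must mirror init; see hclge_misc_irq_uninit() below */
free_irq(hdev->misc_vector.vector_irq, hdev);
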
@@ -2416,6 +2480,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
 	return ret;
 }

+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+	free_irq(hdev->misc_vector.vector_irq, hdev);
+	hclge_free_vector(hdev, 0);
+}
+
 static int hclge_notify_client(struct hclge_dev *hdev,
 			       enum hnae3_reset_notify_type type)
 {
@@ -2471,12 +2541,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 		cnt++;
 	}

-	/* must clear reset status register to
-	 * prevent driver detect reset interrupt again
-	 */
-	reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
-	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
-
 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
 		dev_warn(&hdev->pdev->dev,
 			 "Wait for reset timeout: %d\n", hdev->reset_type);
@@ -2505,12 +2569,12 @@ static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 	return ret;
 }

-static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+static void hclge_do_reset(struct hclge_dev *hdev)
 {
 	struct pci_dev *pdev = hdev->pdev;
 	u32 val;

-	switch (type) {
+	switch (hdev->reset_type) {
 	case HNAE3_GLOBAL_RESET:
 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
 		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
@@ -2526,30 +2590,62 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
 	case HNAE3_FUNC_RESET:
 		dev_info(&pdev->dev, "PF Reset requested\n");
 		hclge_func_reset_cmd(hdev, 0);
+		/* schedule again to check later */
+		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+		hclge_reset_task_schedule(hdev);
 		break;
 	default:
 		dev_warn(&pdev->dev,
-			 "Unsupported reset type: %d\n", type);
+			 "Unsupported reset type: %d\n", hdev->reset_type);
 		break;
 	}
 }

-static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+						   unsigned long *addr)
 {
 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
-	u32 rst_reg_val;

-	rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
-	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+	/* return the highest priority reset level amongst all */
+	if (test_bit(HNAE3_GLOBAL_RESET, addr))
 		rst_level = HNAE3_GLOBAL_RESET;
-	else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+	else if (test_bit(HNAE3_CORE_RESET, addr))
 		rst_level = HNAE3_CORE_RESET;
-	else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+	else if (test_bit(HNAE3_IMP_RESET, addr))
 		rst_level = HNAE3_IMP_RESET;
+	else if (test_bit(HNAE3_FUNC_RESET, addr))
+		rst_level = HNAE3_FUNC_RESET;
+
+	/* now, clear all other resets */
+	clear_bit(HNAE3_GLOBAL_RESET, addr);
+	clear_bit(HNAE3_CORE_RESET, addr);
+	clear_bit(HNAE3_IMP_RESET, addr);
+	clear_bit(HNAE3_FUNC_RESET, addr);

 	return rst_level;
 }

+static void hclge_reset(struct hclge_dev *hdev)
+{
+	/* perform reset of the stack & ae device for a client */
+
+	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+	if (!hclge_reset_wait(hdev)) {
+		rtnl_lock();
+		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+		hclge_reset_ae_dev(hdev->ae_dev);
+		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+		rtnl_unlock();
+	} else {
+		/* schedule again to check pending resets later */
+		set_bit(hdev->reset_type, &hdev->reset_pending);
+		hclge_reset_task_schedule(hdev);
+	}
+
+	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
 static void hclge_reset_event(struct hnae3_handle *handle,
 			      enum hnae3_reset_type reset)
 {
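
hclge_get_reset_level() treats the unsigned long as a bitmap keyed by enum hnae3_reset_type values, which is why hclge_reset() can do set_bit(hdev->reset_type, &hdev->reset_pending) directly. Picking the most severe level and then clearing the whole map encodes "a bigger reset subsumes the smaller ones". A condensed sketch of the pattern with hypothetical level names:

#include <linux/bitops.h>

enum level { LEVEL_NONE, LEVEL_FUNC, LEVEL_CORE, LEVEL_GLOBAL };

static enum level get_level(unsigned long *addr)
{
	enum level lvl = LEVEL_NONE;

	if (test_bit(LEVEL_GLOBAL, addr))	/* most severe first */
		lvl = LEVEL_GLOBAL;
	else if (test_bit(LEVEL_CORE, addr))
		lvl = LEVEL_CORE;
	else if (test_bit(LEVEL_FUNC, addr))
		lvl = LEVEL_FUNC;

	/* the chosen reset covers anything less severe */
	clear_bit(LEVEL_GLOBAL, addr);
	clear_bit(LEVEL_CORE, addr);
	clear_bit(LEVEL_FUNC, addr);

	return lvl;
}
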
@@ -2563,14 +2659,9 @@ static void hclge_reset_event(struct hnae3_handle *handle,
 	case HNAE3_FUNC_RESET:
 	case HNAE3_CORE_RESET:
 	case HNAE3_GLOBAL_RESET:
-		if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
-			dev_err(&hdev->pdev->dev, "Already in reset state");
-			return;
-		}
-		hdev->reset_type = reset;
-		set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
-		set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-		schedule_work(&hdev->service_task);
+		/* request reset & schedule reset task */
+		set_bit(reset, &hdev->reset_request);
+		hclge_reset_task_schedule(hdev);
 		break;
 	default:
 		dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
@@ -2580,57 +2671,47 @@ static void hclge_reset_event(struct hnae3_handle *handle,

 static void hclge_reset_subtask(struct hclge_dev *hdev)
 {
-	bool do_reset;
-
-	do_reset = hdev->reset_type != HNAE3_NONE_RESET;
-
-	/* Reset is detected by interrupt */
-	if (hdev->reset_type == HNAE3_NONE_RESET)
-		hdev->reset_type = hclge_detected_reset_event(hdev);
-
-	if (hdev->reset_type == HNAE3_NONE_RESET)
-		return;
-
-	switch (hdev->reset_type) {
-	case HNAE3_FUNC_RESET:
-	case HNAE3_CORE_RESET:
-	case HNAE3_GLOBAL_RESET:
-	case HNAE3_IMP_RESET:
-		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	/* check if there is any ongoing reset in the hardware. This status can
+	 * be checked from reset_pending. If there is then, we need to wait for
+	 * hardware to complete reset.
+	 *    a. If we are able to figure out in reasonable time that hardware
+	 *       has fully resetted then, we can proceed with driver, client
+	 *       reset.
+	 *    b. else, we can come back later to check this status so re-sched
+	 *       now.
+	 */
+	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+	if (hdev->reset_type != HNAE3_NONE_RESET)
+		hclge_reset(hdev);

-		if (do_reset)
-			hclge_do_reset(hdev, hdev->reset_type);
-		else
-			set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+	/* check if we got any *new* reset requests to be honored */
+	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+	if (hdev->reset_type != HNAE3_NONE_RESET)
+		hclge_do_reset(hdev);

-		if (!hclge_reset_wait(hdev)) {
-			hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-			hclge_reset_ae_dev(hdev->ae_dev);
-			hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
-			clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
-		}
-		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
-		break;
-	default:
-		dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
-			hdev->reset_type);
-		break;
-	}
 	hdev->reset_type = HNAE3_NONE_RESET;
 }

-static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
+static void hclge_reset_service_task(struct work_struct *work)
 {
+	struct hclge_dev *hdev =
+		container_of(work, struct hclge_dev, rst_service_task);
+
+	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+		return;
+
+	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+
 	hclge_reset_subtask(hdev);
-	hclge_enable_vector(&hdev->misc_vector, true);
+
+	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
 }

 static void hclge_service_task(struct work_struct *work)
 {
 	struct hclge_dev *hdev =
 		container_of(work, struct hclge_dev, service_task);

-	hclge_misc_irq_service_task(hdev);
 	hclge_update_speed_duplex(hdev);
 	hclge_update_link_status(hdev);
 	hclge_update_stats_for_all(hdev);
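
The reset service task uses two state bits: RST_SERVICE_SCHED ("work is queued") and RST_HANDLING ("work is running"). Clearing the SCHED bit before the subtask runs is what lets the subtask re-schedule itself (both hclge_reset() and hclge_do_reset() do so while a hardware reset is still pending) without the request being lost. A sketch of the pattern, extending the hypothetical my_dev example from earlier:

#define MY_STATE_HANDLING	1

static void my_service_task(struct work_struct *work)
{
	struct my_dev *mdev = container_of(work, struct my_dev, task);

	if (test_and_set_bit(MY_STATE_HANDLING, &mdev->state))
		return;		/* another instance is already running */

	/* allow my_task_schedule() to queue us again from here on */
	clear_bit(MY_STATE_SCHED, &mdev->state);

	my_subtask(mdev);	/* may call my_task_schedule() itself */

	clear_bit(MY_STATE_HANDLING, &mdev->state);
}
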
@@ -4661,6 +4742,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->pdev = pdev;
 	hdev->ae_dev = ae_dev;
 	hdev->reset_type = HNAE3_NONE_RESET;
+	hdev->reset_request = 0;
+	hdev->reset_pending = 0;
 	ae_dev->priv = hdev;

 	ret = hclge_pci_init(hdev);
@@ -4772,12 +4855,15 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)

 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
 	INIT_WORK(&hdev->service_task, hclge_service_task);
+	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);

 	/* Enable MISC vector(vector0) */
 	hclge_enable_vector(&hdev->misc_vector, true);

 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
+	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);

 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
 	return 0;
@@ -4889,14 +4975,16 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	del_timer_sync(&hdev->service_timer);
 	if (hdev->service_task.func)
 		cancel_work_sync(&hdev->service_task);
+	if (hdev->rst_service_task.func)
+		cancel_work_sync(&hdev->rst_service_task);

 	if (mac->phydev)
 		mdiobus_unregister(mac->mdio_bus);

 	/* Disable MISC vector(vector0) */
 	hclge_enable_vector(&hdev->misc_vector, false);
-	hclge_free_vector(hdev, 0);
 	hclge_destroy_cmd_queue(&hdev->hw);
+	hclge_misc_irq_uninit(hdev);
 	hclge_pci_uninit(hdev);
 	ae_dev->priv = NULL;
 }