@@ -133,7 +133,7 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
 	if (bar < 0)
 		return bar;
 
-	mw_size = pci_resource_len(ndev->ntb.pdev, bar);
+	mw_size = pci_resource_len(ntb->pdev, bar);
 
 	/* make sure the range fits in the usable mw size */
 	if (size > mw_size)
@@ -142,7 +142,7 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
 	mmio = ndev->self_mmio;
 	peer_mmio = ndev->peer_mmio;
 
-	base_addr = pci_resource_start(ndev->ntb.pdev, bar);
+	base_addr = pci_resource_start(ntb->pdev, bar);
 
 	if (bar != 1) {
 		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
@@ -232,7 +232,7 @@ static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
 		if (width)
 			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
 
-		dev_dbg(ndev_dev(ndev), "link is up.\n");
+		dev_dbg(&ntb->pdev->dev, "link is up.\n");
 
 		ret = 1;
 	} else {
@@ -241,7 +241,7 @@ static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
 		if (width)
 			*width = NTB_WIDTH_NONE;
 
-		dev_dbg(ndev_dev(ndev), "link is down.\n");
+		dev_dbg(&ntb->pdev->dev, "link is down.\n");
 	}
 
 	return ret;
@@ -261,7 +261,7 @@ static int amd_ntb_link_enable(struct ntb_dev *ntb,
 
 	if (ndev->ntb.topo == NTB_TOPO_SEC)
 		return -EINVAL;
-	dev_dbg(ndev_dev(ndev), "Enabling Link.\n");
+	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
 
 	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
 	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
@@ -282,7 +282,7 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb)
 
 	if (ndev->ntb.topo == NTB_TOPO_SEC)
 		return -EINVAL;
-	dev_dbg(ndev_dev(ndev), "Enabling Link.\n");
+	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
 
 	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
 	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
@@ -500,18 +500,19 @@ static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
 static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
 {
 	void __iomem *mmio = ndev->self_mmio;
+	struct device *dev = &ndev->ntb.pdev->dev;
 	u32 status;
 
 	status = readl(mmio + AMD_INTSTAT_OFFSET);
 	if (!(status & AMD_EVENT_INTMASK))
 		return;
 
-	dev_dbg(ndev_dev(ndev), "status = 0x%x and vec = %d\n", status, vec);
+	dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);
 
 	status &= AMD_EVENT_INTMASK;
 	switch (status) {
 	case AMD_PEER_FLUSH_EVENT:
-		dev_info(ndev_dev(ndev), "Flush is done.\n");
+		dev_info(dev, "Flush is done.\n");
 		break;
 	case AMD_PEER_RESET_EVENT:
 		amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);
@@ -537,7 +538,7 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
 		status = readl(mmio + AMD_PMESTAT_OFFSET);
 		/* check if this is WAKEUP event */
 		if (status & 0x1)
-			dev_info(ndev_dev(ndev), "Wakeup is done.\n");
+			dev_info(dev, "Wakeup is done.\n");
 
 		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);
 
@@ -546,14 +547,14 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
 				      AMD_LINK_HB_TIMEOUT);
 		break;
 	default:
-		dev_info(ndev_dev(ndev), "event status = 0x%x.\n", status);
+		dev_info(dev, "event status = 0x%x.\n", status);
 		break;
 	}
 }
 
 static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
 {
-	dev_dbg(ndev_dev(ndev), "vec %d\n", vec);
+	dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec);
 
 	if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
 		amd_handle_event(ndev, vec);
@@ -575,7 +576,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev)
 {
 	struct amd_ntb_dev *ndev = dev;
 
-	return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
+	return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
 }
 
 static int ndev_init_isr(struct amd_ntb_dev *ndev,
static int ndev_init_isr (struct amd_ntb_dev * ndev ,
@@ -584,7 +585,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
 	struct pci_dev *pdev;
 	int rc, i, msix_count, node;
 
-	pdev = ndev_pdev(ndev);
+	pdev = ndev->ntb.pdev;
 
 	node = dev_to_node(&pdev->dev);
 
@@ -626,7 +627,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
 			goto err_msix_request;
 	}
 
-	dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
+	dev_dbg(&pdev->dev, "Using msix interrupts\n");
 	ndev->db_count = msix_min;
 	ndev->msix_vec_count = msix_max;
 	return 0;
@@ -653,7 +654,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
 	if (rc)
 		goto err_msi_request;
 
-	dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
+	dev_dbg(&pdev->dev, "Using msi interrupts\n");
 	ndev->db_count = 1;
 	ndev->msix_vec_count = 1;
 	return 0;
@@ -670,7 +671,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
 	if (rc)
 		goto err_intx_request;
 
-	dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
+	dev_dbg(&pdev->dev, "Using intx interrupts\n");
 	ndev->db_count = 1;
 	ndev->msix_vec_count = 1;
 	return 0;
@@ -685,7 +686,7 @@ static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
 	void __iomem *mmio = ndev->self_mmio;
 	int i;
 
-	pdev = ndev_pdev(ndev);
+	pdev = ndev->ntb.pdev;
 
 	/* Mask all doorbell interrupts */
 	ndev->db_mask = ndev->db_valid_mask;
@@ -811,7 +812,8 @@ static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
 		ndev->debugfs_info = NULL;
 	} else {
 		ndev->debugfs_dir =
-			debugfs_create_dir(ndev_name(ndev), debugfs_dir);
+			debugfs_create_dir(pci_name(ndev->ntb.pdev),
+					   debugfs_dir);
 		if (!ndev->debugfs_dir)
 			ndev->debugfs_info = NULL;
 		else
@@ -846,7 +848,7 @@ static int amd_poll_link(struct amd_ntb_dev *ndev)
 	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
 	reg &= NTB_LIN_STA_ACTIVE_BIT;
 
-	dev_dbg(ndev_dev(ndev), "%s: reg_val = 0x%x.\n", __func__, reg);
+	dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);
 
 	if (reg == ndev->cntl_sta)
 		return 0;
@@ -928,7 +930,8 @@ static int amd_init_ntb(struct amd_ntb_dev *ndev)
 
 		break;
 	default:
-		dev_err(ndev_dev(ndev), "AMD NTB does not support B2B mode.\n");
+		dev_err(&ndev->ntb.pdev->dev,
+			"AMD NTB does not support B2B mode.\n");
 		return -EINVAL;
 	}
 
@@ -957,10 +960,10 @@ static int amd_init_dev(struct amd_ntb_dev *ndev)
 	struct pci_dev *pdev;
 	int rc = 0;
 
-	pdev = ndev_pdev(ndev);
+	pdev = ndev->ntb.pdev;
 
 	ndev->ntb.topo = amd_get_topo(ndev);
-	dev_dbg(ndev_dev(ndev), "AMD NTB topo is %s\n",
+	dev_dbg(&pdev->dev, "AMD NTB topo is %s\n",
 		ntb_topo_string(ndev->ntb.topo));
 
 	rc = amd_init_ntb(ndev);
@@ -969,7 +972,7 @@ static int amd_init_dev(struct amd_ntb_dev *ndev)
 
 	rc = amd_init_isr(ndev);
 	if (rc) {
-		dev_err(ndev_dev(ndev), "fail to init isr.\n");
+		dev_err(&pdev->dev, "fail to init isr.\n");
 		return rc;
 	}
 
@@ -1007,15 +1010,15 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (rc)
 			goto err_dma_mask;
-		dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
+		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
 	}
 
 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (rc) {
 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (rc)
 			goto err_dma_mask;
-		dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
+		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
 	}
 
 	ndev->self_mmio = pci_iomap(pdev, 0, 0);
@@ -1038,7 +1041,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
 
 static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
 {
-	struct pci_dev *pdev = ndev_pdev(ndev);
+	struct pci_dev *pdev = ndev->ntb.pdev;
 
 	pci_iounmap(pdev, ndev->self_mmio);
 
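Every hunk above follows the same pattern: the driver's private ndev_pdev()/ndev_dev()/ndev_name() wrapper macros are dropped in favor of reaching the struct pci_dev directly through the embedded struct ntb_dev. As a sketch of the translation, the retired helpers presumably expand along these lines (the macro bodies below are reconstructed from how the call sites are rewritten, not quoted from ntb_hw_amd.h):

/* Presumed helper macros being retired (reconstruction, not a quote): */
#define ndev_pdev(ndev)	((ndev)->ntb.pdev)
#define ndev_name(ndev)	pci_name(ndev_pdev(ndev))
#define ndev_dev(ndev)	(&ndev_pdev(ndev)->dev)

/* Direct equivalents used after this patch: */
struct pci_dev *pdev = ndev->ntb.pdev;		/* was ndev_pdev(ndev) */
struct device *dev = &ndev->ntb.pdev->dev;	/* was ndev_dev(ndev), as cached locally in amd_handle_event() */
const char *name = pci_name(ndev->ntb.pdev);	/* was ndev_name(ndev) */

Where a callback already receives a struct ntb_dev *ntb parameter (amd_ntb_mw_set_trans(), amd_ntb_link_is_up(), amd_ntb_link_enable(), amd_ntb_link_disable()), the patch goes through ntb->pdev directly rather than round-tripping through ndev->ntb, which is why those hunks read &ntb->pdev->dev instead of &ndev->ntb.pdev->dev.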