@@ -22,13 +22,11 @@ struct aq_pci_func_s {
 	void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
 	resource_size_t mmio_pa;
 	unsigned int msix_entry_mask;
-	unsigned int irq_type;
 	unsigned int ports;
 	bool is_pci_enabled;
 	bool is_regions;
 	bool is_pci_using_dac;
 	struct aq_hw_caps_s aq_hw_caps;
-	struct msix_entry msix_entry[AQ_CFG_PCI_FUNC_MSIX_IRQS];
 };
 
 struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
@@ -87,7 +85,6 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
 	int err = 0;
 	unsigned int bar = 0U;
 	unsigned int port = 0U;
-	unsigned int i = 0U;
 
 	err = pci_enable_device(self->pdev);
 	if (err < 0)
@@ -145,27 +142,16 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
 		}
 	}
 
-	for (i = 0; i < self->aq_hw_caps.msix_irqs; i++)
-		self->msix_entry[i].entry = i;
-
 	/*enable interrupts */
-#if AQ_CFG_FORCE_LEGACY_INT
-	self->irq_type = AQ_HW_IRQ_LEGACY;
-#else
-	err = pci_enable_msix(self->pdev, self->msix_entry,
-			      self->aq_hw_caps.msix_irqs);
+#if !AQ_CFG_FORCE_LEGACY_INT
+	err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
+				    self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
 
-	if (err >= 0) {
-		self->irq_type = AQ_HW_IRQ_MSIX;
-	} else {
-		err = pci_enable_msi(self->pdev);
-
-		if (err >= 0) {
-			self->irq_type = AQ_HW_IRQ_MSI;
-		} else {
-			self->irq_type = AQ_HW_IRQ_LEGACY;
-			err = 0;
-		}
+	if (err < 0) {
+		err = pci_alloc_irq_vectors(self->pdev, 1, 1,
+					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+		if (err < 0)
+			goto err_exit;
 	}
 #endif
 
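For reference, a minimal sketch (not part of the patch) of the allocation pattern the hunk above switches to: request the full MSI-X range with an exact min/max, then fall back to a single MSI or legacy vector. The helper name my_alloc_vectors and the nvec_wanted parameter are illustrative only.

#include <linux/pci.h>

/* Hypothetical helper mirroring the fallback order used in the hunk above. */
static int my_alloc_vectors(struct pci_dev *pdev, unsigned int nvec_wanted)
{
	int err;

	/* min == max, so a partial MSI-X allocation is treated as failure. */
	err = pci_alloc_irq_vectors(pdev, nvec_wanted, nvec_wanted,
				    PCI_IRQ_MSIX);
	if (err >= 0)
		return err;	/* number of vectors actually allocated */

	/* Fall back to a single MSI vector, or the shared legacy INTx line. */
	return pci_alloc_irq_vectors(pdev, 1, 1,
				     PCI_IRQ_MSI | PCI_IRQ_LEGACY);
}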
@@ -196,34 +182,22 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
 int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
 			  char *name, void *aq_vec, cpumask_t *affinity_mask)
 {
+	struct pci_dev *pdev = self->pdev;
 	int err = 0;
 
-	switch (self->irq_type) {
-	case AQ_HW_IRQ_MSIX:
-		err = request_irq(self->msix_entry[i].vector, aq_vec_isr, 0,
+	if (pdev->msix_enabled || pdev->msi_enabled)
+		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
 				  name, aq_vec);
-		break;
-
-	case AQ_HW_IRQ_MSI:
-		err = request_irq(self->pdev->irq, aq_vec_isr, 0, name, aq_vec);
-		break;
-
-	case AQ_HW_IRQ_LEGACY:
-		err = request_irq(self->pdev->irq, aq_vec_isr_legacy,
+	else
+		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
 				  IRQF_SHARED, name, aq_vec);
-		break;
-
-	default:
-		err = -EFAULT;
-		break;
-	}
 
 	if (err >= 0) {
 		self->msix_entry_mask |= (1 << i);
 		self->aq_vec[i] = aq_vec;
 
-		if (self->irq_type == AQ_HW_IRQ_MSIX)
-			irq_set_affinity_hint(self->msix_entry[i].vector,
+		if (pdev->msix_enabled)
+			irq_set_affinity_hint(pci_irq_vector(pdev, i),
 					      affinity_mask);
 	}
 
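As a usage sketch under the same assumptions: pci_irq_vector() maps a vector index to a Linux IRQ number for MSI-X, MSI and legacy interrupts alike, which is what lets the switch statement above collapse into a single request_irq() call. The names my_request_vector, my_isr and my_ctx are made up for illustration.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical helper: request the i-th vector of an already-allocated set. */
static int my_request_vector(struct pci_dev *pdev, unsigned int i,
			     irq_handler_t my_isr, void *my_ctx)
{
	unsigned long flags = 0;

	/* Only the legacy INTx line may be shared with other devices. */
	if (!pdev->msix_enabled && !pdev->msi_enabled)
		flags = IRQF_SHARED;

	return request_irq(pci_irq_vector(pdev, i), my_isr, flags,
			   "my_driver", my_ctx);
}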
@@ -232,30 +206,16 @@ int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
 
 void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
 {
+	struct pci_dev *pdev = self->pdev;
 	unsigned int i = 0U;
 
 	for (i = 32U; i--;) {
 		if (!((1U << i) & self->msix_entry_mask))
 			continue;
 
-		switch (self->irq_type) {
-		case AQ_HW_IRQ_MSIX:
-			irq_set_affinity_hint(self->msix_entry[i].vector, NULL);
-			free_irq(self->msix_entry[i].vector, self->aq_vec[i]);
-			break;
-
-		case AQ_HW_IRQ_MSI:
-			free_irq(self->pdev->irq, self->aq_vec[i]);
-			break;
-
-		case AQ_HW_IRQ_LEGACY:
-			free_irq(self->pdev->irq, self->aq_vec[i]);
-			break;
-
-		default:
-			break;
-		}
-
+		free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
+		if (pdev->msix_enabled)
+			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
 		self->msix_entry_mask &= ~(1U << i);
 	}
 }
@@ -267,7 +227,11 @@ void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
 
 unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
 {
-	return self->irq_type;
+	if (self->pdev->msix_enabled)
+		return AQ_HW_IRQ_MSIX;
+	if (self->pdev->msi_enabled)
+		return AQ_HW_IRQ_MSI;
+	return AQ_HW_IRQ_LEGACY;
 }
 
 void aq_pci_func_deinit(struct aq_pci_func_s *self)
@@ -276,22 +240,7 @@ void aq_pci_func_deinit(struct aq_pci_func_s *self)
 		goto err_exit;
 
 	aq_pci_func_free_irqs(self);
-
-	switch (self->irq_type) {
-	case AQ_HW_IRQ_MSI:
-		pci_disable_msi(self->pdev);
-		break;
-
-	case AQ_HW_IRQ_MSIX:
-		pci_disable_msix(self->pdev);
-		break;
-
-	case AQ_HW_IRQ_LEGACY:
-		break;
-
-	default:
-		break;
-	}
+	pci_free_irq_vectors(self->pdev);
 
 	if (self->is_regions)
 		pci_release_regions(self->pdev);
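Finally, a teardown sketch under the same assumptions: each requested vector is freed (clearing any affinity hint first), then pci_free_irq_vectors() releases the vectors themselves, replacing the explicit pci_disable_msi()/pci_disable_msix() calls removed above. my_free_vectors, nvecs and my_ctx are illustrative names, not driver code.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical teardown for nvecs vectors previously set up via request_irq(). */
static void my_free_vectors(struct pci_dev *pdev, unsigned int nvecs,
			    void **my_ctx)
{
	unsigned int i;

	for (i = 0; i < nvecs; i++) {
		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), my_ctx[i]);
	}

	/* Undoes pci_alloc_irq_vectors(); covers MSI-X, MSI and legacy alike. */
	pci_free_irq_vectors(pdev);
}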