@@ -285,6 +285,181 @@ static int efx_ef10_free_vis(struct efx_nic *efx)
 	return rc;
 }
 
+#ifdef EFX_USE_PIO
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+	unsigned int i;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
+
+	for (i = 0; i < nic_data->n_piobufs; i++) {
+		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
+			       nic_data->piobuf_handle[i]);
+		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+				  NULL, 0, NULL);
+		WARN_ON(rc);
+	}
+
+	nic_data->n_piobufs = 0;
+}
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+	unsigned int i;
+	size_t outlen;
+	int rc = 0;
+
+	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
+
+	for (i = 0; i < n; i++) {
+		rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+				  outbuf, sizeof(outbuf), &outlen);
+		if (rc)
+			break;
+		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+			rc = -EIO;
+			break;
+		}
+		nic_data->piobuf_handle[i] =
+			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+		netif_dbg(efx, probe, efx->net_dev,
+			  "allocated PIO buffer %u handle %x\n", i,
+			  nic_data->piobuf_handle[i]);
+	}
+
+	nic_data->n_piobufs = i;
+	if (rc)
+		efx_ef10_free_piobufs(efx);
+	return rc;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	MCDI_DECLARE_BUF(inbuf,
+			 max(MC_CMD_LINK_PIOBUF_IN_LEN,
+			     MC_CMD_UNLINK_PIOBUF_IN_LEN));
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned int offset, index;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
+	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+
+	/* Link a buffer to each VI in the write-combining mapping */
+	for (index = 0; index < nic_data->n_piobufs; ++index) {
+		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
+			       nic_data->piobuf_handle[index]);
+		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
+			       nic_data->pio_write_vi_base + index);
+		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+				  NULL, 0, NULL);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to link VI %u to PIO buffer %u (%d)\n",
+				  nic_data->pio_write_vi_base + index, index,
+				  rc);
+			goto fail;
+		}
+		netif_dbg(efx, probe, efx->net_dev,
+			  "linked VI %u to PIO buffer %u\n",
+			  nic_data->pio_write_vi_base + index, index);
+	}
+
+	/* Link a buffer to each TX queue */
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			/* We assign the PIO buffers to queues in
+			 * reverse order to allow for the following
+			 * special case.
+			 */
+			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
+				   tx_queue->channel->channel - 1) *
+				  efx_piobuf_size);
+			index = offset / ER_DZ_TX_PIOBUF_SIZE;
+			offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+
+			/* When the host page size is 4K, the first
+			 * host page in the WC mapping may be within
+			 * the same VI page as the last TX queue. We
+			 * can only link one buffer to each VI.
+			 */
+			if (tx_queue->queue == nic_data->pio_write_vi_base) {
+				BUG_ON(index != 0);
+				rc = 0;
+			} else {
+				MCDI_SET_DWORD(inbuf,
+					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
+					       nic_data->piobuf_handle[index]);
+				MCDI_SET_DWORD(inbuf,
+					       LINK_PIOBUF_IN_TXQ_INSTANCE,
+					       tx_queue->queue);
+				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+						  NULL, 0, NULL);
+			}
+
+			if (rc) {
+				/* This is non-fatal; the TX path just
+				 * won't use PIO for this queue
+				 */
+				netif_err(efx, drv, efx->net_dev,
+					  "failed to link VI %u to PIO buffer %u (%d)\n",
+					  tx_queue->queue, index, rc);
+				tx_queue->piobuf = NULL;
+			} else {
+				tx_queue->piobuf =
+					nic_data->pio_write_base +
+					index * EFX_VI_PAGE_SIZE + offset;
+				tx_queue->piobuf_offset = offset;
+				netif_dbg(efx, probe, efx->net_dev,
+					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
+					  tx_queue->queue, index,
+					  tx_queue->piobuf_offset,
+					  tx_queue->piobuf);
+			}
+		}
+	}
+
+	return 0;
+
+fail:
+	while (index--) {
+		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
+			       nic_data->pio_write_vi_base + index);
+		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
+			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+			     NULL, 0, NULL);
+	}
+	return rc;
+}
+
+#else /* !EFX_USE_PIO */
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+	return n == 0 ? 0 : -ENOBUFS;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+	return 0;
+}
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+}
+
+#endif /* EFX_USE_PIO */
+
 static void efx_ef10_remove(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
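The arithmetic in efx_ef10_link_piobufs() above carves one copy-buffer per TX channel out of the shared PIO buffers, assigning them in reverse channel order so that a queue which may share a VI page with the WC mapping always ends up on buffer 0. The following is a minimal standalone sketch of that calculation only, not driver code; the buffer and copy-buffer sizes are illustrative assumptions standing in for ER_DZ_TX_PIOBUF_SIZE and efx_piobuf_size.

#include <stdio.h>

#define PIOBUF_SIZE   2048U	/* assumed stand-in for ER_DZ_TX_PIOBUF_SIZE */
#define COPY_BUF_SIZE  256U	/* assumed stand-in for efx_piobuf_size */

int main(void)
{
	unsigned int tx_channel_offset = 0, n_tx_channels = 4;
	unsigned int channel, offset, index;

	for (channel = 0; channel < n_tx_channels; channel++) {
		/* Reverse order, as in the driver: the highest-numbered
		 * channel lands at offset 0 of PIO buffer 0, so a queue
		 * sharing a VI page with the WC mapping always uses
		 * buffer 0.
		 */
		offset = (tx_channel_offset + n_tx_channels - channel - 1) *
			 COPY_BUF_SIZE;
		index = offset / PIOBUF_SIZE;	/* which PIO buffer */
		offset = offset % PIOBUF_SIZE;	/* position inside it */
		printf("channel %u -> piobuf %u offset %#x\n",
		       channel, index, offset);
	}
	return 0;
}

With these assumed sizes and four channels, channels 3, 2, 1, 0 map to offsets 0, 0x100, 0x200, 0x300 of PIO buffer 0.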
@@ -295,9 +470,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
 	/* This needs to be after efx_ptp_remove_channel() with no filters */
 	efx_ef10_rx_free_indir_table(efx);
 
+	if (nic_data->wc_membase)
+		iounmap(nic_data->wc_membase);
+
 	rc = efx_ef10_free_vis(efx);
 	WARN_ON(rc != 0);
 
+	if (!nic_data->must_restore_piobufs)
+		efx_ef10_free_piobufs(efx);
+
 	efx_mcdi_fini(efx);
 	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
 	kfree(nic_data);
@@ -330,12 +511,126 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
 	return 0;
 }
 
+/* Note that the failure path of this function does not free
+ * resources, as this will be done by efx_ef10_remove().
+ */
 static int efx_ef10_dimension_resources(struct efx_nic *efx)
 {
-	unsigned int n_vis =
-		max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	unsigned int uc_mem_map_size, wc_mem_map_size;
+	unsigned int min_vis, pio_write_vi_base, max_vis;
+	void __iomem *membase;
+	int rc;
+
+	min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef EFX_USE_PIO
+	/* Try to allocate PIO buffers if wanted and if the full
+	 * number of PIO buffers would be sufficient to allocate one
+	 * copy-buffer per TX channel. Failure is non-fatal, as there
+	 * are only a small number of PIO buffers shared between all
+	 * functions of the controller.
+	 */
+	if (efx_piobuf_size != 0 &&
+	    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+	    efx->n_tx_channels) {
+		unsigned int n_piobufs =
+			DIV_ROUND_UP(efx->n_tx_channels,
+				     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+
+		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
+		if (rc)
+			netif_err(efx, probe, efx->net_dev,
+				  "failed to allocate PIO buffers (%d)\n", rc);
+		else
+			netif_dbg(efx, probe, efx->net_dev,
+				  "allocated %u PIO buffers\n", n_piobufs);
+	}
+#else
+	nic_data->n_piobufs = 0;
+#endif
 
-	return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+	/* PIO buffers should be mapped with write-combining enabled,
+	 * and we want to make single UC and WC mappings rather than
+	 * several of each (in fact that's the only option if host
+	 * page size is >4K). So we may allocate some extra VIs just
+	 * for writing PIO buffers through.
+	 */
+	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+				     ER_DZ_TX_PIOBUF);
+	if (nic_data->n_piobufs) {
+		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+					       nic_data->n_piobufs) *
+					      EFX_VI_PAGE_SIZE) -
+				   uc_mem_map_size);
+		max_vis = pio_write_vi_base + nic_data->n_piobufs;
+	} else {
+		pio_write_vi_base = 0;
+		wc_mem_map_size = 0;
+		max_vis = min_vis;
+	}
+
+	/* In case the last attached driver failed to free VIs, do it now */
+	rc = efx_ef10_free_vis(efx);
+	if (rc != 0)
+		return rc;
+
+	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
+	if (rc != 0)
+		return rc;
+
+	/* If we didn't get enough VIs to map all the PIO buffers, free the
+	 * PIO buffers
+	 */
+	if (nic_data->n_piobufs &&
+	    nic_data->n_allocated_vis <
+	    pio_write_vi_base + nic_data->n_piobufs) {
+		netif_dbg(efx, probe, efx->net_dev,
+			  "%u VIs are not sufficient to map %u PIO buffers\n",
+			  nic_data->n_allocated_vis, nic_data->n_piobufs);
+		efx_ef10_free_piobufs(efx);
+	}
+
+	/* Shrink the original UC mapping of the memory BAR */
+	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+	if (!membase) {
+		netif_err(efx, probe, efx->net_dev,
+			  "could not shrink memory BAR to %x\n",
+			  uc_mem_map_size);
+		return -ENOMEM;
+	}
+	iounmap(efx->membase);
+	efx->membase = membase;
+
+	/* Set up the WC mapping if needed */
+	if (wc_mem_map_size) {
+		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
+						  uc_mem_map_size,
+						  wc_mem_map_size);
+		if (!nic_data->wc_membase) {
+			netif_err(efx, probe, efx->net_dev,
+				  "could not allocate WC mapping of size %x\n",
+				  wc_mem_map_size);
+			return -ENOMEM;
+		}
+		nic_data->pio_write_vi_base = pio_write_vi_base;
+		nic_data->pio_write_base =
+			nic_data->wc_membase +
+			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+			 uc_mem_map_size);
+
+		rc = efx_ef10_link_piobufs(efx);
+		if (rc)
+			efx_ef10_free_piobufs(efx);
+	}
+
+	netif_dbg(efx, probe, efx->net_dev,
+		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
+		  &efx->membase_phys, efx->membase, uc_mem_map_size,
+		  nic_data->wc_membase, wc_mem_map_size);
+
+	return 0;
 }
 
 static int efx_ef10_init_nic(struct efx_nic *efx)
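The code added to efx_ef10_dimension_resources() above splits the memory BAR into a UC mapping for the ordinary VIs and a WC mapping for the extra VIs used only to write PIO buffers. The following is a rough standalone sketch of that sizing arithmetic only, not driver code; PAGE_SIZE, EFX_VI_PAGE_SIZE and ER_DZ_TX_PIOBUF are replaced by assumed illustrative constants, and the real values come from the hardware headers.

#include <stdio.h>

#define HOST_PAGE_SIZE	4096U	/* assumed host PAGE_SIZE */
#define VI_PAGE_SIZE	8192U	/* assumed stand-in for EFX_VI_PAGE_SIZE */
#define TX_PIOBUF_OFF	4096U	/* assumed stand-in for ER_DZ_TX_PIOBUF */

static unsigned int page_align(unsigned int len)
{
	return (len + HOST_PAGE_SIZE - 1) & ~(HOST_PAGE_SIZE - 1);
}

int main(void)
{
	unsigned int min_vis = 10, n_piobufs = 2;
	unsigned int uc_size, wc_size, pio_write_vi_base, max_vis;

	/* UC mapping covers the ordinary VIs, up to a host-page boundary
	 * at or just past the PIO write aperture of the last one.
	 */
	uc_size = page_align((min_vis - 1) * VI_PAGE_SIZE + TX_PIOBUF_OFF);

	/* WC mapping starts where the UC mapping ends and covers the VIs
	 * used only for PIO writes; with 4K host pages it can begin inside
	 * the same VI page as the last TX queue.
	 */
	pio_write_vi_base = uc_size / VI_PAGE_SIZE;
	wc_size = page_align((pio_write_vi_base + n_piobufs) *
			     VI_PAGE_SIZE) - uc_size;
	max_vis = pio_write_vi_base + n_piobufs;

	printf("UC map %#x bytes, WC map %#x bytes, PIO-write VIs %u..%u\n",
	       uc_size, wc_size, pio_write_vi_base, max_vis - 1);
	return 0;
}

With these assumed numbers, 10 VIs and 2 PIO buffers give a 0x13000-byte UC map, a 0x3000-byte WC map and PIO-write VIs 9..10; VI 9 coinciding with the last ordinary VI is the situation the special case in efx_ef10_link_piobufs() allows for.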
@@ -359,6 +654,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
 		nic_data->must_realloc_vis = false;
 	}
 
+	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
+		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
+		if (rc == 0) {
+			rc = efx_ef10_link_piobufs(efx);
+			if (rc)
+				efx_ef10_free_piobufs(efx);
+		}
+
+		/* Log an error on failure, but this is non-fatal */
+		if (rc)
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to restore PIO buffers (%d)\n", rc);
+		nic_data->must_restore_piobufs = false;
+	}
+
 	efx_ef10_rx_push_indir_table(efx);
 	return 0;
 }
@@ -716,6 +1026,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
 	/* All our allocations have been reset */
 	nic_data->must_realloc_vis = true;
 	nic_data->must_restore_filters = true;
+	nic_data->must_restore_piobufs = true;
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
 	/* The datapath firmware might have been changed */