@@ -96,6 +96,13 @@ struct w5100_priv {
 	struct net_device *ndev;
 	bool promisc;
 	u32 msg_enable;
+
+	struct workqueue_struct *xfer_wq;
+	struct work_struct rx_work;
+	struct sk_buff *tx_skb;
+	struct work_struct tx_work;
+	struct work_struct setrx_work;
+	struct work_struct restart_work;
 };
 
 /************************************************************************
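
Note: the dispatch between direct calls and these new work items hinges on an ops->may_sleep flag that this hunk does not show. A minimal sketch of the ops abstraction it implies; the field names besides may_sleep are assumptions inferred from how the flag is used later in this diff, not the commit's actual definition:

struct w5100_ops {
	bool may_sleep;	/* accessors may block (e.g. SPI transfers) */
	int (*read)(struct net_device *ndev, u16 addr);
	int (*write)(struct net_device *ndev, u16 addr, u8 data);
	/* bulk and 16-bit accessors elided */
};
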
@@ -502,10 +509,12 @@ static int w5100_reset(struct w5100_priv *priv)
 
 static int w5100_command(struct w5100_priv *priv, u16 cmd)
 {
-	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+	unsigned long timeout;
 
 	w5100_write(priv, W5100_S0_CR, cmd);
 
+	timeout = jiffies + msecs_to_jiffies(100);
+
 	while (w5100_read(priv, W5100_S0_CR) != 0) {
 		if (time_after(jiffies, timeout))
 			return -EIO;
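
Note: the reordering matters once register access can sleep. On a slow bus, w5100_write() itself may consume much of a 100 ms budget started before the command is issued, so the deadline is now taken only after the write completes. The loop is the standard wraparound-safe jiffies idiom; a generic sketch with a hypothetical helper name:

/* Poll a register until it reads zero or a deadline passes.
 * time_after() compares jiffies values safely across wraparound.
 */
static int poll_reg_clear(struct w5100_priv *priv, u16 reg,
			  unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (w5100_read(priv, reg) != 0) {
		if (time_after(jiffies, deadline))
			return -EIO;
	}
	return 0;
}
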
@@ -605,7 +614,7 @@ static void w5100_get_regs(struct net_device *ndev,
 	w5100_readbulk(priv, W5100_S0_REGS, buf, W5100_S0_REGS_LEN);
 }
 
-static void w5100_tx_timeout(struct net_device *ndev)
+static void w5100_restart(struct net_device *ndev)
 {
 	struct w5100_priv *priv = netdev_priv(ndev);
 
@@ -617,12 +626,28 @@ static void w5100_tx_timeout(struct net_device *ndev)
 	netif_wake_queue(ndev);
 }
 
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static void w5100_restart_work(struct work_struct *work)
+{
+	struct w5100_priv *priv = container_of(work, struct w5100_priv,
+					       restart_work);
+
+	w5100_restart(priv->ndev);
+}
+
+static void w5100_tx_timeout(struct net_device *ndev)
 {
 	struct w5100_priv *priv = netdev_priv(ndev);
-	u16 offset;
 
-	netif_stop_queue(ndev);
+	if (priv->ops->may_sleep)
+		schedule_work(&priv->restart_work);
+	else
+		w5100_restart(ndev);
+}
+
+static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	u16 offset;
 
 	offset = w5100_read16(priv, W5100_S0_TX_WR);
 	w5100_writebuf(priv, offset, skb->data, skb->len);
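
Note: every work handler in this commit recovers its private data from the embedded work_struct with container_of(). A self-contained illustration of the pattern (hypothetical names, not driver code):

#include <linux/printk.h>
#include <linux/workqueue.h>

struct demo_priv {
	int value;
	struct work_struct my_work;	/* embedded by value, not a pointer */
};

static void demo_work_fn(struct work_struct *work)
{
	/* Map the work_struct pointer back to its enclosing structure
	 * using the field's compile-time offset.
	 */
	struct demo_priv *priv = container_of(work, struct demo_priv,
					      my_work);

	pr_info("value = %d\n", priv->value);
}
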
@@ -632,47 +657,98 @@ static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
 	dev_kfree_skb(skb);
 
 	w5100_command(priv, S0_CR_SEND);
+}
+
+static void w5100_tx_work(struct work_struct *work)
+{
+	struct w5100_priv *priv = container_of(work, struct w5100_priv,
+					       tx_work);
+	struct sk_buff *skb = priv->tx_skb;
+
+	priv->tx_skb = NULL;
+
+	if (WARN_ON(!skb))
+		return;
+	w5100_tx_skb(priv->ndev, skb);
+}
+
+static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+
+	if (priv->ops->may_sleep) {
+		WARN_ON(priv->tx_skb);
+		priv->tx_skb = skb;
+		queue_work(priv->xfer_wq, &priv->tx_work);
+	} else {
+		w5100_tx_skb(ndev, skb);
+	}
 
 	return NETDEV_TX_OK;
 }
 
-static int w5100_napi_poll(struct napi_struct *napi, int budget)
+static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
 {
-	struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
-	struct net_device *ndev = priv->ndev;
+	struct w5100_priv *priv = netdev_priv(ndev);
 	struct sk_buff *skb;
-	int rx_count;
 	u16 rx_len;
 	u16 offset;
 	u8 header[2];
+	u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
 
-	for (rx_count = 0; rx_count < budget; rx_count++) {
-		u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
-		if (rx_buf_len == 0)
-			break;
+	if (rx_buf_len == 0)
+		return NULL;
 
-		offset = w5100_read16(priv, W5100_S0_RX_RD);
-		w5100_readbuf(priv, offset, header, 2);
-		rx_len = get_unaligned_be16(header) - 2;
-
-		skb = netdev_alloc_skb_ip_align(ndev, rx_len);
-		if (unlikely(!skb)) {
-			w5100_write16(priv, W5100_S0_RX_RD,
-				      offset + rx_buf_len);
-			w5100_command(priv, S0_CR_RECV);
-			ndev->stats.rx_dropped++;
-			return -ENOMEM;
-		}
+	offset = w5100_read16(priv, W5100_S0_RX_RD);
+	w5100_readbuf(priv, offset, header, 2);
+	rx_len = get_unaligned_be16(header) - 2;
 
-		skb_put(skb, rx_len);
-		w5100_readbuf(priv, offset + 2, skb->data, rx_len);
-		w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
+	skb = netdev_alloc_skb_ip_align(ndev, rx_len);
+	if (unlikely(!skb)) {
+		w5100_write16(priv, W5100_S0_RX_RD, offset + rx_buf_len);
 		w5100_command(priv, S0_CR_RECV);
-		skb->protocol = eth_type_trans(skb, ndev);
+		ndev->stats.rx_dropped++;
+		return NULL;
+	}
+
+	skb_put(skb, rx_len);
+	w5100_readbuf(priv, offset + 2, skb->data, rx_len);
+	w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
+	w5100_command(priv, S0_CR_RECV);
+	skb->protocol = eth_type_trans(skb, ndev);
+
+	ndev->stats.rx_packets++;
+	ndev->stats.rx_bytes += rx_len;
+
+	return skb;
+}
+
+static void w5100_rx_work(struct work_struct *work)
+{
+	struct w5100_priv *priv = container_of(work, struct w5100_priv,
+					       rx_work);
+	struct sk_buff *skb;
+
+	while ((skb = w5100_rx_skb(priv->ndev)))
+		netif_rx_ni(skb);
+
+	w5100_write(priv, W5100_IMR, IR_S0);
+}
+
+static int w5100_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
+	int rx_count;
+
+	for (rx_count = 0; rx_count < budget; rx_count++) {
+		struct sk_buff *skb = w5100_rx_skb(priv->ndev);
 
-		netif_receive_skb(skb);
-		ndev->stats.rx_packets++;
-		ndev->stats.rx_bytes += rx_len;
+		if (skb)
+			netif_receive_skb(skb);
+		else
+			break;
 	}
 
 	if (rx_count < budget) {
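
Note: the tx path relies on a single-slot invariant instead of a queue: netif_stop_queue() runs before the skb is stashed, and the queue is only woken again from the send-complete interrupt path (not shown in this diff), so at most one skb is ever outstanding and a bare tx_skb pointer plus WARN_ON() suffices. A condensed, self-contained sketch of that handoff (hypothetical names):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct sk_buff *tx_skb;		/* single in-flight packet */
	struct workqueue_struct *wq;
	struct work_struct tx_work;
};

static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct demo_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);		/* no further xmit calls until woken */
	WARN_ON(priv->tx_skb);		/* the slot must be empty */
	priv->tx_skb = skb;
	queue_work(priv->wq, &priv->tx_work);

	return NETDEV_TX_OK;
}
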
@@ -699,10 +775,12 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
 	}
 
 	if (ir & S0_IR_RECV) {
-		if (napi_schedule_prep(&priv->napi)) {
-			w5100_write(priv, W5100_IMR, 0);
+		w5100_write(priv, W5100_IMR, 0);
+
+		if (priv->ops->may_sleep)
+			queue_work(priv->xfer_wq, &priv->rx_work);
+		else if (napi_schedule_prep(&priv->napi))
 			__napi_schedule(&priv->napi);
-		}
 	}
 
 	return IRQ_HANDLED;
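
Note: both branches mask the chip's interrupts (IMR = 0) before deferring rx; the unmask is the receiver's job, either at the end of w5100_rx_work() above (IMR = IR_S0) or in the NAPI completion path. netif_rx_ni() is used in the workqueue path because it is the process-context variant of netif_rx(). The shape of the deferred path, condensed; demo_mask_irqs(), demo_unmask_irqs(), and demo_rx_skb() are assumed helpers, not driver functions:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	struct demo_priv *priv = dev_id;

	demo_mask_irqs(priv);			/* quiet the level-triggered line */
	queue_work(priv->wq, &priv->rx_work);	/* drain rx in process context */
	return IRQ_HANDLED;
}

static void demo_rx_work(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv,
					      rx_work);
	struct sk_buff *skb;

	while ((skb = demo_rx_skb(priv)))
		netif_rx_ni(skb);	/* process-context packet input */

	demo_unmask_irqs(priv);		/* rx drained; re-enable */
}
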
@@ -726,14 +804,26 @@ static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
 	return IRQ_HANDLED;
 }
 
+static void w5100_setrx_work(struct work_struct *work)
+{
+	struct w5100_priv *priv = container_of(work, struct w5100_priv,
+					       setrx_work);
+
+	w5100_hw_start(priv);
+}
+
 static void w5100_set_rx_mode(struct net_device *ndev)
 {
 	struct w5100_priv *priv = netdev_priv(ndev);
 	bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
 
 	if (priv->promisc != set_promisc) {
 		priv->promisc = set_promisc;
-		w5100_hw_start(priv);
+
+		if (priv->ops->may_sleep)
+			schedule_work(&priv->setrx_work);
+		else
+			w5100_hw_start(priv);
 	}
 }
 
@@ -872,6 +962,17 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
 	if (err < 0)
 		goto err_register;
 
+	priv->xfer_wq = create_workqueue(netdev_name(ndev));
+	if (!priv->xfer_wq) {
+		err = -ENOMEM;
+		goto err_wq;
+	}
+
+	INIT_WORK(&priv->rx_work, w5100_rx_work);
+	INIT_WORK(&priv->tx_work, w5100_tx_work);
+	INIT_WORK(&priv->setrx_work, w5100_setrx_work);
+	INIT_WORK(&priv->restart_work, w5100_restart_work);
+
 	if (mac_addr)
 		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
 	else
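
Note: create_workqueue() allocates a dedicated queue so the rx and tx transfers do not contend with unrelated work on the system workqueue. It is the legacy spelling; on kernels of this vintage it expands to alloc_workqueue() with WQ_MEM_RECLAIM and a max_active of 1. A sketch of the roughly equivalent modern call, not what the commit uses:

priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1,
				netdev_name(ndev));
if (!priv->xfer_wq) {
	err = -ENOMEM;
	goto err_wq;
}
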
@@ -889,8 +990,14 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
 		goto err_hw;
 	}
 
-	err = request_irq(priv->irq, w5100_interrupt, IRQF_TRIGGER_LOW,
-			  netdev_name(ndev), ndev);
+	if (ops->may_sleep) {
+		err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
+					   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+					   netdev_name(ndev), ndev);
+	} else {
+		err = request_irq(priv->irq, w5100_interrupt,
+				  IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
+	}
 	if (err)
 		goto err_hw;
 
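
Note: with a NULL primary handler, request_threaded_irq() requires IRQF_ONESHOT: the line stays masked until the threaded handler returns, which is what lets w5100_interrupt() perform sleeping bus transfers safely on this level-triggered interrupt. Minimal usage shape (hypothetical names):

err = request_threaded_irq(irq, NULL, demo_thread_fn,
			   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
			   "demo", dev);
if (err)
	return err;
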
@@ -915,6 +1022,8 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
 err_gpio:
 	free_irq(priv->irq, ndev);
 err_hw:
+	destroy_workqueue(priv->xfer_wq);
+err_wq:
 	unregister_netdev(ndev);
 err_register:
 	free_netdev(ndev);
@@ -932,6 +1041,11 @@ int w5100_remove(struct device *dev)
 	if (gpio_is_valid(priv->link_gpio))
 		free_irq(priv->link_irq, ndev);
 
+	flush_work(&priv->setrx_work);
+	flush_work(&priv->restart_work);
+	flush_workqueue(priv->xfer_wq);
+	destroy_workqueue(priv->xfer_wq);
+
 	unregister_netdev(ndev);
 	free_netdev(ndev);
 	return 0;
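
Note: the two flush_work() calls cover setrx_work and restart_work, which are queued with schedule_work() on the system workqueue; rx_work and tx_work live on the private xfer_wq, so the flush_workqueue() before destroy_workqueue() covers them.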