 void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
 {
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += skb->len;
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.rx_pkts++;
+	pcpu_ptr->stats.rx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
 }
 
 void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
 {
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.tx_pkts++;
+	pcpu_ptr->stats.tx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
 }
 
 /* Network Device Operations */
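Note: the hunks in this patch rely on per-CPU stats types that are defined elsewhere in the driver (rmnet_config.h upstream) and are not part of this diff. As a rough sketch inferred from the accessors used above and below, they look approximately like the following; struct and field names follow the usage in the patch, so treat this as an approximation rather than the exact upstream definitions.

/* Approximate shape of the per-CPU stats types assumed by this patch;
 * the authoritative definitions live in rmnet_config.h, not in this diff.
 */
struct rmnet_vnd_stats {
	u64 rx_pkts;
	u64 rx_bytes;
	u64 tx_pkts;
	u64 tx_bytes;
	u32 tx_drops;	/* bumped with this_cpu_inc(), summed outside the syncp loop */
};

struct rmnet_pcpu_stats {
	struct rmnet_vnd_stats stats;
	struct u64_stats_sync syncp;	/* guards the 64-bit counters on 32-bit kernels */
};

struct rmnet_priv {
	struct net_device *real_dev;
	struct rmnet_pcpu_stats __percpu *pcpu_stats;	/* allocated in rmnet_vnd_init() */
	/* ... remaining members unchanged ... */
};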
@@ -48,7 +62,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
 	if (priv->real_dev) {
 		rmnet_egress_handler(skb);
 	} else {
-		dev->stats.tx_dropped++;
+		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
 		kfree_skb(skb);
 	}
 	return NETDEV_TX_OK;
@@ -70,12 +84,64 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev)
 	return priv->real_dev->ifindex;
 }
 
+static int rmnet_vnd_init(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
+	if (!priv->pcpu_stats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void rmnet_vnd_uninit(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	free_percpu(priv->pcpu_stats);
+}
+
+static void rmnet_get_stats64(struct net_device *dev,
+			      struct rtnl_link_stats64 *s)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_vnd_stats total_stats;
+	struct rmnet_pcpu_stats *pcpu_ptr;
+	unsigned int cpu, start;
+
+	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+
+	for_each_possible_cpu(cpu) {
+		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
+			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
+			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
+			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+
+		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+	}
+
+	s->rx_packets = total_stats.rx_pkts;
+	s->rx_bytes = total_stats.rx_bytes;
+	s->tx_packets = total_stats.tx_pkts;
+	s->tx_bytes = total_stats.tx_bytes;
+	s->tx_dropped = total_stats.tx_drops;
+}
+
 static const struct net_device_ops rmnet_vnd_ops = {
 	.ndo_start_xmit = rmnet_vnd_start_xmit,
 	.ndo_change_mtu = rmnet_vnd_change_mtu,
 	.ndo_get_iflink = rmnet_vnd_get_iflink,
 	.ndo_add_slave = rmnet_add_bridge,
 	.ndo_del_slave = rmnet_del_bridge,
+	.ndo_init = rmnet_vnd_init,
+	.ndo_uninit = rmnet_vnd_uninit,
+	.ndo_get_stats64 = rmnet_get_stats64,
 };
 
 /* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
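Taken together, the patch applies the standard per-CPU u64_stats counter pattern. The minimal sketch below restates it outside the rmnet context so the moving parts are easier to see; the foo_* names are purely illustrative and not part of the driver. Each CPU updates its own counters inside u64_stats_update_begin()/end(), and the aggregation path walks every possible CPU with per_cpu_ptr(), retrying via the fetch/retry pair so 64-bit counters read consistently even on 32-bit kernels (where the markers expand to a seqcount; on 64-bit they compile away).

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Illustrative only: a generic per-CPU counter block, same shape as rmnet's. */
struct foo_stats {
	u64 pkts;
	u64 bytes;
};

struct foo_pcpu_stats {
	struct foo_stats stats;
	struct u64_stats_sync syncp;
};

/* Writer: runs on the local CPU in a non-migrating context (e.g. softirq). */
static void foo_count(struct foo_pcpu_stats __percpu *pcpu, unsigned int len)
{
	struct foo_pcpu_stats *p = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&p->syncp);
	p->stats.pkts++;
	p->stats.bytes += len;
	u64_stats_update_end(&p->syncp);
}

/* Reader: folds every CPU's counters into one total, retrying if a writer
 * was mid-update while this CPU's block was being read.
 */
static void foo_fold_stats(struct foo_pcpu_stats __percpu *pcpu,
			   struct foo_stats *total)
{
	unsigned int cpu, start;
	struct foo_stats snap;

	memset(total, 0, sizeof(*total));

	for_each_possible_cpu(cpu) {
		struct foo_pcpu_stats *p = per_cpu_ptr(pcpu, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			snap = p->stats;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		total->pkts += snap.pkts;
		total->bytes += snap.bytes;
	}
}

The drop counter in the patch sidesteps the syncp entirely: this_cpu_inc() is a preemption- and interrupt-safe single per-CPU increment, which is enough for a 32-bit counter that only needs to be summed rather than read atomically together with the packet/byte pairs.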