 #include <net/net_namespace.h>
 
 struct pcpu_lstats {
-	unsigned long packets;
-	unsigned long bytes;
+	u64 packets;
+	u64 bytes;
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+	seqcount_t seq;
+#endif
 	unsigned long drops;
 };
 
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+static void inline lstats_update_begin(struct pcpu_lstats *lstats)
+{
+	write_seqcount_begin(&lstats->seq);
+}
+static void inline lstats_update_end(struct pcpu_lstats *lstats)
+{
+	write_seqcount_end(&lstats->seq);
+}
+static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
+{
+	u64 tpackets, tbytes;
+	unsigned int seq;
+
+	do {
+		seq = read_seqcount_begin(&lstats->seq);
+		tpackets = lstats->packets;
+		tbytes = lstats->bytes;
+	} while (read_seqcount_retry(&lstats->seq, seq));
+
+	*packets += tpackets;
+	*bytes += tbytes;
+}
+#else
+static void inline lstats_update_begin(struct pcpu_lstats *lstats)
+{
+}
+static void inline lstats_update_end(struct pcpu_lstats *lstats)
+{
+}
+static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
+{
+	*packets += lstats->packets;
+	*bytes += lstats->bytes;
+}
+#endif
+
 /*
  * The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
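The lstats_* helpers added above are an instance of the usual seqcount idiom for 64-bit statistics on 32-bit SMP kernels: the writer brackets the update so the sequence count is odd while the two 32-bit halves are being stored, and the reader retries its snapshot whenever the count changed underneath it. As a rough, hypothetical sketch of the same idea applied to any per-CPU 64-bit counter (the foo_* names are invented for illustration, and the per-CPU area is assumed to be set up elsewhere with alloc_percpu()):

#include <linux/percpu.h>
#include <linux/seqlock.h>
#include <linux/types.h>

struct foo_stats {
	u64 bytes;
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t seq;		/* guards the 64bit counter on 32bit SMP */
#endif
};

/* Assumed set up at init time: foo_stats = alloc_percpu(struct foo_stats); */
static struct foo_stats __percpu *foo_stats;

/* Writer: runs on the local CPU with preemption/bh disabled (as in
 * loopback_xmit()), so only a reader can observe the update in flight. */
static void foo_count(unsigned int len)
{
	struct foo_stats *s = this_cpu_ptr(foo_stats);

#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&s->seq);
	s->bytes += len;
	write_seqcount_end(&s->seq);
#else
	s->bytes += len;
#endif
}

/* Reader: retry the snapshot if a writer raced with the two 32bit loads. */
static u64 foo_total(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct foo_stats *s = per_cpu_ptr(foo_stats, cpu);
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
		unsigned int start;
		u64 bytes;

		do {
			start = read_seqcount_begin(&s->seq);
			bytes = s->bytes;
		} while (read_seqcount_retry(&s->seq, start));

		total += bytes;
#else
		total += s->bytes;
#endif
	}
	return total;
}

On 64-bit or UP kernels the preprocessor removes the seqcount entirely, matching the no-op lstats_update_begin()/lstats_update_end() fallbacks above.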
@@ -86,30 +126,31 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 
 	len = skb->len;
 	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
+		lstats_update_begin(lb_stats);
 		lb_stats->bytes += len;
 		lb_stats->packets++;
+		lstats_update_end(lb_stats);
 	} else
 		lb_stats->drops++;
 
 	return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *loopback_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
 {
 	const struct pcpu_lstats __percpu *pcpu_lstats;
-	struct net_device_stats *stats = &dev->stats;
-	unsigned long bytes = 0;
-	unsigned long packets = 0;
-	unsigned long drops = 0;
+	struct rtnl_link_stats64 *stats = &dev->stats64;
+	u64 bytes = 0;
+	u64 packets = 0;
+	u64 drops = 0;
 	int i;
 
 	pcpu_lstats = (void __percpu __force *)dev->ml_priv;
 	for_each_possible_cpu(i) {
 		const struct pcpu_lstats *lb_stats;
 
 		lb_stats = per_cpu_ptr(pcpu_lstats, i);
-		bytes += lb_stats->bytes;
-		packets += lb_stats->packets;
+		lstats_fetch_and_add(&packets, &bytes, lb_stats);
 		drops += lb_stats->drops;
 	}
 	stats->rx_packets = packets;
@@ -158,7 +199,7 @@ static void loopback_dev_free(struct net_device *dev)
 static const struct net_device_ops loopback_ops = {
 	.ndo_init = loopback_dev_init,
 	.ndo_start_xmit = loopback_xmit,
-	.ndo_get_stats = loopback_get_stats,
+	.ndo_get_stats64 = loopback_get_stats64,
 };
 
 /*
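Not shown in this diff is where the per-CPU pcpu_lstats area comes from: it is allocated in loopback_dev_init(), which the commit leaves untouched. A sketch of that function, reconstructed from the (void __percpu __force *) casts visible above and therefore possibly not byte-for-byte identical to the source:

static int loopback_dev_init(struct net_device *dev)
{
	struct pcpu_lstats __percpu *lstats;

	lstats = alloc_percpu(struct pcpu_lstats);
	if (!lstats)
		return -ENOMEM;

	dev->ml_priv = (void __percpu __force *)lstats;
	return 0;
}

Because alloc_percpu() hands back zeroed memory and, in kernels of this vintage, seqcount_t is a bare sequence counter, the new seq field starts out at its valid initial value of zero without an explicit seqcount_init().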