Skip to content

Commit 6b10de3

Browse files
Eric Dumazet authored and davem330 committed
loopback: Implement 64bit stats on 32bit arches
Uses a seqcount_t to synchronize stat producer and consumer, for packets and bytes counter, now u64 types. (dropped counter being rarely used, stay a native "unsigned long" type) No noticeable performance impact on x86, as it only adds two increments per frame. It might be more expensive on arches where smp_wmb() is not free. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent c68f24c commit 6b10de3

File tree

1 file changed

+51
-10
lines changed

1 file changed

+51
-10
lines changed

drivers/net/loopback.c

Lines changed: 51 additions & 10 deletions
Original file line number · Diff line number · Diff line change
@@ -60,11 +60,51 @@
6060
#include <net/net_namespace.h>
6161

6262
/*
 * Per-cpu loopback statistics.
 *
 * packets/bytes are u64 so 32bit arches can report full 64bit stats.
 * On 32bit SMP a seqcount_t guards the non-atomic 64bit updates so a
 * reader never observes a torn value; on 64bit (or !SMP) no guard is
 * needed.  drops is rarely used and stays a native unsigned long.
 */
struct pcpu_lstats {
	u64 packets;		/* frames successfully looped back */
	u64 bytes;		/* bytes successfully looped back */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	seqcount_t seq;		/* protects packets/bytes on 32bit SMP */
#endif
	unsigned long drops;	/* netif_rx() failures */
};
6770

71+
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
72+
static void inline lstats_update_begin(struct pcpu_lstats *lstats)
73+
{
74+
write_seqcount_begin(&lstats->seq);
75+
}
76+
static void inline lstats_update_end(struct pcpu_lstats *lstats)
77+
{
78+
write_seqcount_end(&lstats->seq);
79+
}
80+
static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
81+
{
82+
u64 tpackets, tbytes;
83+
unsigned int seq;
84+
85+
do {
86+
seq = read_seqcount_begin(&lstats->seq);
87+
tpackets = lstats->packets;
88+
tbytes = lstats->bytes;
89+
} while (read_seqcount_retry(&lstats->seq, seq));
90+
91+
*packets += tpackets;
92+
*bytes += tbytes;
93+
}
94+
#else
95+
static void inline lstats_update_begin(struct pcpu_lstats *lstats)
96+
{
97+
}
98+
static void inline lstats_update_end(struct pcpu_lstats *lstats)
99+
{
100+
}
101+
static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
102+
{
103+
*packets += lstats->packets;
104+
*bytes += lstats->bytes;
105+
}
106+
#endif
107+
68108
/*
69109
* The higher levels take care of making this non-reentrant (it's
70110
* called with bh's disabled).
@@ -86,30 +126,31 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
86126

87127
len = skb->len;
88128
if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
129+
lstats_update_begin(lb_stats);
89130
lb_stats->bytes += len;
90131
lb_stats->packets++;
132+
lstats_update_end(lb_stats);
91133
} else
92134
lb_stats->drops++;
93135

94136
return NETDEV_TX_OK;
95137
}
96138

97-
static struct net_device_stats *loopback_get_stats(struct net_device *dev)
139+
static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
98140
{
99141
const struct pcpu_lstats __percpu *pcpu_lstats;
100-
struct net_device_stats *stats = &dev->stats;
101-
unsigned long bytes = 0;
102-
unsigned long packets = 0;
103-
unsigned long drops = 0;
142+
struct rtnl_link_stats64 *stats = &dev->stats64;
143+
u64 bytes = 0;
144+
u64 packets = 0;
145+
u64 drops = 0;
104146
int i;
105147

106148
pcpu_lstats = (void __percpu __force *)dev->ml_priv;
107149
for_each_possible_cpu(i) {
108150
const struct pcpu_lstats *lb_stats;
109151

110152
lb_stats = per_cpu_ptr(pcpu_lstats, i);
111-
bytes += lb_stats->bytes;
112-
packets += lb_stats->packets;
153+
lstats_fetch_and_add(&packets, &bytes, lb_stats);
113154
drops += lb_stats->drops;
114155
}
115156
stats->rx_packets = packets;
@@ -158,7 +199,7 @@ static void loopback_dev_free(struct net_device *dev)
158199
/*
 * Loopback netdev ops.  Stats are exported via ndo_get_stats64 (the
 * 64bit-wide getter) instead of the old ndo_get_stats.
 */
static const struct net_device_ops loopback_ops = {
	.ndo_init      = loopback_dev_init,
	.ndo_start_xmit= loopback_xmit,
	.ndo_get_stats64 = loopback_get_stats64,
};
163204

164205
/*

0 commit comments

Comments (0)