Skip to content

Commit a351a1b

Browse files
amirv authored and davem330 committed
net/mlx5: Introduce bulk reading of flow counters
This commit utilizes the ability of ConnectX-4 to bulk read flow counters. A few bulk counter queries can be done instead of issuing thousands of firmware commands per second to get statistics of all flows set to HW, such as those programmed when we offload tc filters. Counters are stored sorted by hardware id, and queried in blocks (id + number of counters). Due to a hardware requirement, the start of a block and the number of counters in a block must be four-aligned. Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com> Signed-off-by: Amir Vadai <amir@vadai.me> Signed-off-by: Saeed Mahameed <saeedm@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 29cc667 commit a351a1b

File tree

4 files changed

+146
-24
lines changed

4 files changed

+146
-24
lines changed

drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -413,3 +413,70 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
413413

414414
return 0;
415415
}
416+
417+
struct mlx5_cmd_fc_bulk {
418+
u16 id;
419+
int num;
420+
int outlen;
421+
u32 out[0];
422+
};
423+
424+
struct mlx5_cmd_fc_bulk *
425+
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
426+
{
427+
struct mlx5_cmd_fc_bulk *b;
428+
int outlen = sizeof(*b) +
429+
MLX5_ST_SZ_BYTES(query_flow_counter_out) +
430+
MLX5_ST_SZ_BYTES(traffic_counter) * num;
431+
432+
b = kzalloc(outlen, GFP_KERNEL);
433+
if (!b)
434+
return NULL;
435+
436+
b->id = id;
437+
b->num = num;
438+
b->outlen = outlen;
439+
440+
return b;
441+
}
442+
443+
/* Release a bulk-query context obtained from mlx5_cmd_fc_bulk_alloc().
 * Safe to call with NULL.
 */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
447+
448+
int
449+
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
450+
{
451+
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
452+
453+
memset(in, 0, sizeof(in));
454+
455+
MLX5_SET(query_flow_counter_in, in, opcode,
456+
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
457+
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
458+
MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
459+
MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
460+
461+
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
462+
b->out, b->outlen);
463+
}
464+
465+
/* Extract the packet and byte counts for counter @id from the bulk
 * response @b into @packets/@bytes.
 *
 * If @id falls outside the range covered by @b, a warning is logged and
 * the output parameters are left untouched.
 */
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
			  struct mlx5_cmd_fc_bulk *b, u16 id,
			  u64 *packets, u64 *bytes)
{
	int idx = id - b->id;
	void *counter_stats;

	if (idx < 0 || idx >= b->num) {
		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
			       id, b->id, b->id + b->num - 1);
		return;
	}

	counter_stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
				     flow_statistics[idx]);
	*packets = MLX5_GET64(traffic_counter, counter_stats, packets);
	*bytes = MLX5_GET64(traffic_counter, counter_stats, octets);
}

drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,4 +76,16 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
7676
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
7777
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
7878
u64 *packets, u64 *bytes);
79+
80+
struct mlx5_cmd_fc_bulk;
81+
82+
struct mlx5_cmd_fc_bulk *
83+
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num);
84+
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
85+
int
86+
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
87+
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
88+
struct mlx5_cmd_fc_bulk *b, u16 id,
89+
u64 *packets, u64 *bytes);
90+
7991
#endif

drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c

Lines changed: 61 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -90,16 +90,66 @@ static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
9090
rb_insert_color(&counter->node, root);
9191
}
9292

93+
static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
94+
struct mlx5_fc *first,
95+
u16 last_id)
96+
{
97+
struct mlx5_cmd_fc_bulk *b;
98+
struct rb_node *node = NULL;
99+
u16 afirst_id;
100+
int num;
101+
int err;
102+
int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);
103+
104+
/* first id must be aligned to 4 when using bulk query */
105+
afirst_id = first->id & ~0x3;
106+
107+
/* number of counters to query inc. the last counter */
108+
num = ALIGN(last_id - afirst_id + 1, 4);
109+
if (num > max_bulk) {
110+
num = max_bulk;
111+
last_id = afirst_id + num - 1;
112+
}
113+
114+
b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
115+
if (!b) {
116+
mlx5_core_err(dev, "Error allocating resources for bulk query\n");
117+
return NULL;
118+
}
119+
120+
err = mlx5_cmd_fc_bulk_query(dev, b);
121+
if (err) {
122+
mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
123+
goto out;
124+
}
125+
126+
for (node = &first->node; node; node = rb_next(node)) {
127+
struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
128+
struct mlx5_fc_cache *c = &counter->cache;
129+
130+
if (counter->id > last_id)
131+
break;
132+
133+
mlx5_cmd_fc_bulk_get(dev, b,
134+
counter->id, &c->packets, &c->bytes);
135+
}
136+
137+
out:
138+
mlx5_cmd_fc_bulk_free(b);
139+
140+
return node;
141+
}
142+
93143
static void mlx5_fc_stats_work(struct work_struct *work)
94144
{
95145
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
96146
priv.fc_stats.work.work);
97147
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
98148
unsigned long now = jiffies;
99-
struct mlx5_fc *counter;
149+
struct mlx5_fc *counter = NULL;
150+
struct mlx5_fc *last = NULL;
100151
struct rb_node *node;
101152
LIST_HEAD(tmplist);
102-
int err = 0;
103153

104154
spin_lock(&fc_stats->addlist_lock);
105155

@@ -115,12 +165,7 @@ static void mlx5_fc_stats_work(struct work_struct *work)
115165

116166
node = rb_first(&fc_stats->counters);
117167
while (node) {
118-
struct mlx5_fc_cache *c;
119-
u64 packets;
120-
u64 bytes;
121-
122168
counter = rb_entry(node, struct mlx5_fc, node);
123-
c = &counter->cache;
124169

125170
node = rb_next(node);
126171

@@ -133,26 +178,20 @@ static void mlx5_fc_stats_work(struct work_struct *work)
133178
continue;
134179
}
135180

136-
if (time_before(now, fc_stats->next_query))
137-
continue;
181+
last = counter;
182+
}
138183

139-
err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes);
140-
if (err) {
141-
pr_err("Error querying stats for counter id %d\n",
142-
counter->id);
143-
continue;
144-
}
184+
if (time_before(now, fc_stats->next_query) || !last)
185+
return;
145186

146-
if (packets == c->packets)
147-
continue;
187+
node = rb_first(&fc_stats->counters);
188+
while (node) {
189+
counter = rb_entry(node, struct mlx5_fc, node);
148190

149-
c->lastuse = jiffies;
150-
c->packets = packets;
151-
c->bytes = bytes;
191+
node = mlx5_fc_stats_query(dev, counter, last->id);
152192
}
153193

154-
if (time_after_eq(now, fc_stats->next_query))
155-
fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
194+
fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
156195
}
157196

158197
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)

include/linux/mlx5/mlx5_ifc.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -893,7 +893,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
893893
u8 reserved_at_330[0xb];
894894
u8 log_max_xrcd[0x5];
895895

896-
u8 reserved_at_340[0x20];
896+
u8 reserved_at_340[0x8];
897+
u8 log_max_flow_counter_bulk[0x8];
898+
u8 max_flow_counter[0x10];
899+
897900

898901
u8 reserved_at_360[0x3];
899902
u8 log_max_rq[0x5];
@@ -980,7 +983,8 @@ struct mlx5_ifc_dest_format_struct_bits {
980983
};
981984

982985
struct mlx5_ifc_flow_counter_list_bits {
983-
u8 reserved_at_0[0x10];
986+
u8 clear[0x1];
987+
u8 num_of_counters[0xf];
984988
u8 flow_counter_id[0x10];
985989

986990
u8 reserved_at_20[0x20];

0 commit comments

Comments
 (0)