Skip to content

Commit 84e095d

Browse files
Salil Mehta authored and davem330 committed
net: hns3: Change PF to add ring-vect binding & resetQ to mailbox
This patch is required to support ring-vector binding and reset of TQPs requested by the VF driver to the PF driver. A mailbox handler is added with corresponding VF commands/messages to handle the request.

Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: lipeng <lipeng321@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent dde1a86 commit 84e095d

File tree

3 files changed

+159
-92
lines changed

3 files changed

+159
-92
lines changed

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c

Lines changed: 48 additions & 90 deletions
Original file line numberDiff line numberDiff line change
@@ -3256,71 +3256,70 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
32563256
return ret;
32573257
}
32583258

3259-
int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
3260-
struct hnae3_ring_chain_node *ring_chain)
3259+
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3260+
int vector_id, bool en,
3261+
struct hnae3_ring_chain_node *ring_chain)
32613262
{
32623263
struct hclge_dev *hdev = vport->back;
3263-
struct hclge_ctrl_vector_chain_cmd *req;
32643264
struct hnae3_ring_chain_node *node;
32653265
struct hclge_desc desc;
3266-
int ret;
3266+
struct hclge_ctrl_vector_chain_cmd *req
3267+
= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3268+
enum hclge_cmd_status status;
3269+
enum hclge_opcode_type op;
3270+
u16 tqp_type_and_id;
32673271
int i;
32683272

3269-
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
3270-
3271-
req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3273+
op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3274+
hclge_cmd_setup_basic_desc(&desc, op, false);
32723275
req->int_vector_id = vector_id;
32733276

32743277
i = 0;
32753278
for (node = ring_chain; node; node = node->next) {
3276-
u16 type_and_id = 0;
3277-
3278-
hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
3279+
tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3280+
hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3281+
HCLGE_INT_TYPE_S,
32793282
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3280-
hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
3281-
node->tqp_index);
3282-
hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
3283-
HCLGE_INT_GL_IDX_S,
3284-
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3285-
req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
3286-
req->vfid = vport->vport_id;
3287-
3283+
hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3284+
HCLGE_TQP_ID_S, node->tqp_index);
3285+
req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
32883286
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
32893287
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3288+
req->vfid = vport->vport_id;
32903289

3291-
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3292-
if (ret) {
3290+
status = hclge_cmd_send(&hdev->hw, &desc, 1);
3291+
if (status) {
32933292
dev_err(&hdev->pdev->dev,
32943293
"Map TQP fail, status is %d.\n",
3295-
ret);
3296-
return ret;
3294+
status);
3295+
return -EIO;
32973296
}
32983297
i = 0;
32993298

33003299
hclge_cmd_setup_basic_desc(&desc,
3301-
HCLGE_OPC_ADD_RING_TO_VECTOR,
3300+
op,
33023301
false);
33033302
req->int_vector_id = vector_id;
33043303
}
33053304
}
33063305

33073306
if (i > 0) {
33083307
req->int_cause_num = i;
3309-
3310-
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3311-
if (ret) {
3308+
req->vfid = vport->vport_id;
3309+
status = hclge_cmd_send(&hdev->hw, &desc, 1);
3310+
if (status) {
33123311
dev_err(&hdev->pdev->dev,
3313-
"Map TQP fail, status is %d.\n", ret);
3314-
return ret;
3312+
"Map TQP fail, status is %d.\n", status);
3313+
return -EIO;
33153314
}
33163315
}
33173316

33183317
return 0;
33193318
}
33203319

3321-
static int hclge_map_handle_ring_to_vector(
3322-
struct hnae3_handle *handle, int vector,
3323-
struct hnae3_ring_chain_node *ring_chain)
3320+
static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3321+
int vector,
3322+
struct hnae3_ring_chain_node *ring_chain)
33243323
{
33253324
struct hclge_vport *vport = hclge_get_vport(handle);
33263325
struct hclge_dev *hdev = vport->back;
@@ -3329,24 +3328,20 @@ static int hclge_map_handle_ring_to_vector(
33293328
vector_id = hclge_get_vector_index(hdev, vector);
33303329
if (vector_id < 0) {
33313330
dev_err(&hdev->pdev->dev,
3332-
"Get vector index fail. ret =%d\n", vector_id);
3331+
"Get vector index fail. vector_id =%d\n", vector_id);
33333332
return vector_id;
33343333
}
33353334

3336-
return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
3335+
return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
33373336
}
33383337

3339-
static int hclge_unmap_ring_from_vector(
3340-
struct hnae3_handle *handle, int vector,
3341-
struct hnae3_ring_chain_node *ring_chain)
3338+
static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3339+
int vector,
3340+
struct hnae3_ring_chain_node *ring_chain)
33423341
{
33433342
struct hclge_vport *vport = hclge_get_vport(handle);
33443343
struct hclge_dev *hdev = vport->back;
3345-
struct hclge_ctrl_vector_chain_cmd *req;
3346-
struct hnae3_ring_chain_node *node;
3347-
struct hclge_desc desc;
3348-
int i, vector_id;
3349-
int ret;
3344+
int vector_id, ret;
33503345

33513346
vector_id = hclge_get_vector_index(hdev, vector);
33523347
if (vector_id < 0) {
@@ -3355,54 +3350,17 @@ static int hclge_unmap_ring_from_vector(
33553350
return vector_id;
33563351
}
33573352

3358-
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
3359-
3360-
req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3361-
req->int_vector_id = vector_id;
3362-
3363-
i = 0;
3364-
for (node = ring_chain; node; node = node->next) {
3365-
u16 type_and_id = 0;
3366-
3367-
hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
3368-
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3369-
hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
3370-
node->tqp_index);
3371-
hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
3372-
HCLGE_INT_GL_IDX_S,
3373-
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3374-
3375-
req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
3376-
req->vfid = vport->vport_id;
3377-
3378-
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3379-
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3380-
3381-
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3382-
if (ret) {
3383-
dev_err(&hdev->pdev->dev,
3384-
"Unmap TQP fail, status is %d.\n",
3385-
ret);
3386-
return ret;
3387-
}
3388-
i = 0;
3389-
hclge_cmd_setup_basic_desc(&desc,
3390-
HCLGE_OPC_DEL_RING_TO_VECTOR,
3391-
false);
3392-
req->int_vector_id = vector_id;
3393-
}
3353+
ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3354+
if (ret) {
3355+
dev_err(&handle->pdev->dev,
3356+
"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3357+
vector_id,
3358+
ret);
3359+
return ret;
33943360
}
33953361

3396-
if (i > 0) {
3397-
req->int_cause_num = i;
3398-
3399-
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3400-
if (ret) {
3401-
dev_err(&hdev->pdev->dev,
3402-
"Unmap TQP fail, status is %d.\n", ret);
3403-
return ret;
3404-
}
3405-
}
3362+
/* Free this MSIX or MSI vector */
3363+
hclge_free_vector(hdev, vector_id);
34063364

34073365
return 0;
34083366
}
@@ -4423,7 +4381,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
44234381
return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
44244382
}
44254383

4426-
static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
4384+
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
44274385
{
44284386
struct hclge_vport *vport = hclge_get_vport(handle);
44294387
struct hclge_dev *hdev = vport->back;
@@ -4995,8 +4953,8 @@ static const struct hnae3_ae_ops hclge_ops = {
49954953
.uninit_ae_dev = hclge_uninit_ae_dev,
49964954
.init_client_instance = hclge_init_client_instance,
49974955
.uninit_client_instance = hclge_uninit_client_instance,
4998-
.map_ring_to_vector = hclge_map_handle_ring_to_vector,
4999-
.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
4956+
.map_ring_to_vector = hclge_map_ring_to_vector,
4957+
.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
50004958
.get_vector = hclge_get_vector,
50014959
.set_promisc_mode = hclge_set_promisc_mode,
50024960
.set_loopback = hclge_set_loopback,

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -539,8 +539,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
539539
u8 func_id,
540540
bool enable);
541541
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
542-
int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
543-
struct hnae3_ring_chain_node *ring_chain);
542+
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
543+
int vector_id, bool en,
544+
struct hnae3_ring_chain_node *ring_chain);
545+
544546
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
545547
{
546548
struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
@@ -556,4 +558,5 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
556558
int hclge_rss_init_hw(struct hclge_dev *hdev);
557559

558560
void hclge_mbx_handler(struct hclge_dev *hdev);
561+
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
559562
#endif

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c

Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,91 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
7979
return status;
8080
}
8181

82+
static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
83+
{
84+
struct hnae3_ring_chain_node *chain_tmp, *chain;
85+
86+
chain = head->next;
87+
88+
while (chain) {
89+
chain_tmp = chain->next;
90+
kzfree(chain);
91+
chain = chain_tmp;
92+
}
93+
}
94+
95+
/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
96+
* msg[0]: opcode
97+
* msg[1]: <not relevant to this function>
98+
* msg[2]: ring_num
99+
* msg[3]: first ring type (TX|RX)
100+
* msg[4]: first tqp id
101+
* msg[5] ~ msg[14]: other ring type and tqp id
102+
*/
103+
static int hclge_get_ring_chain_from_mbx(
104+
struct hclge_mbx_vf_to_pf_cmd *req,
105+
struct hnae3_ring_chain_node *ring_chain,
106+
struct hclge_vport *vport)
107+
{
108+
#define HCLGE_RING_NODE_VARIABLE_NUM 3
109+
#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM 3
110+
struct hnae3_ring_chain_node *cur_chain, *new_chain;
111+
int ring_num;
112+
int i;
113+
114+
ring_num = req->msg[2];
115+
116+
hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
117+
ring_chain->tqp_index =
118+
hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
119+
120+
cur_chain = ring_chain;
121+
122+
for (i = 1; i < ring_num; i++) {
123+
new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
124+
if (!new_chain)
125+
goto err;
126+
127+
hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
128+
req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
129+
HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]);
130+
131+
new_chain->tqp_index =
132+
hclge_get_queue_id(vport->nic.kinfo.tqp
133+
[req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
134+
HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]);
135+
136+
cur_chain->next = new_chain;
137+
cur_chain = new_chain;
138+
}
139+
140+
return 0;
141+
err:
142+
hclge_free_vector_ring_chain(ring_chain);
143+
return -ENOMEM;
144+
}
145+
146+
static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
147+
struct hclge_mbx_vf_to_pf_cmd *req)
148+
{
149+
struct hnae3_ring_chain_node ring_chain;
150+
int vector_id = req->msg[1];
151+
int ret;
152+
153+
memset(&ring_chain, 0, sizeof(ring_chain));
154+
ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
155+
if (ret)
156+
return ret;
157+
158+
ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
159+
if (ret)
160+
return ret;
161+
162+
hclge_free_vector_ring_chain(&ring_chain);
163+
164+
return 0;
165+
}
166+
82167
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
83168
struct hclge_mbx_vf_to_pf_cmd *req)
84169
{
@@ -224,6 +309,16 @@ static int hclge_get_link_info(struct hclge_vport *vport,
224309
HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
225310
}
226311

312+
static void hclge_reset_vf_queue(struct hclge_vport *vport,
313+
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
314+
{
315+
u16 queue_id;
316+
317+
memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
318+
319+
hclge_reset_tqp(&vport->nic, queue_id);
320+
}
321+
227322
void hclge_mbx_handler(struct hclge_dev *hdev)
228323
{
229324
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -241,6 +336,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
241336
vport = &hdev->vport[req->mbx_src_vfid];
242337

243338
switch (req->msg[0]) {
339+
case HCLGE_MBX_MAP_RING_TO_VECTOR:
340+
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
341+
req);
342+
break;
343+
case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
344+
ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
345+
req);
346+
break;
244347
case HCLGE_MBX_SET_PROMISC_MODE:
245348
ret = hclge_set_vf_promisc_mode(vport, req);
246349
if (ret)
@@ -290,6 +393,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
290393
"PF fail(%d) to get link stat for VF\n",
291394
ret);
292395
break;
396+
case HCLGE_MBX_QUEUE_RESET:
397+
hclge_reset_vf_queue(vport, req);
398+
break;
293399
default:
294400
dev_err(&hdev->pdev->dev,
295401
"un-supported mailbox message, code = %d\n",

0 commit comments

Comments (0)