Skip to content

Commit 064c94f

Browse files
author
Linus Torvalds
committed
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mad: RMPP support for additional classes
  IB/mad: include GID/class when matching receives
  IB/mthca: Fix section mismatch problems
  IPoIB: Fix oops with raw sockets
  IB/mthca: Fix check of size in SRQ creation
  IB/srp: Fix unmapping of fake scatterlist
2 parents 256414d + 618a3c0 commit 064c94f

File tree

16 files changed

+168
-90
lines changed

16 files changed

+168
-90
lines changed

drivers/infiniband/core/mad.c

Lines changed: 101 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -227,6 +227,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
227227
if (!is_vendor_oui(mad_reg_req->oui))
228228
goto error1;
229229
}
230+
/* Make sure class supplied is consistent with RMPP */
231+
if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
232+
if (!rmpp_version)
233+
goto error1;
234+
} else {
235+
if (rmpp_version)
236+
goto error1;
237+
}
230238
/* Make sure class supplied is consistent with QP type */
231239
if (qp_type == IB_QPT_SMI) {
232240
if ((mad_reg_req->mgmt_class !=
@@ -890,6 +898,35 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
890898
}
891899
EXPORT_SYMBOL(ib_create_send_mad);
892900

901+
int ib_get_mad_data_offset(u8 mgmt_class)
902+
{
903+
if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
904+
return IB_MGMT_SA_HDR;
905+
else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
906+
(mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
907+
(mgmt_class == IB_MGMT_CLASS_BIS))
908+
return IB_MGMT_DEVICE_HDR;
909+
else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
910+
(mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
911+
return IB_MGMT_VENDOR_HDR;
912+
else
913+
return IB_MGMT_MAD_HDR;
914+
}
915+
EXPORT_SYMBOL(ib_get_mad_data_offset);
916+
917+
int ib_is_mad_class_rmpp(u8 mgmt_class)
918+
{
919+
if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
920+
(mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
921+
(mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
922+
(mgmt_class == IB_MGMT_CLASS_BIS) ||
923+
((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
924+
(mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
925+
return 1;
926+
return 0;
927+
}
928+
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
929+
893930
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
894931
{
895932
struct ib_mad_send_wr_private *mad_send_wr;
@@ -1022,6 +1059,13 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
10221059
goto error;
10231060
}
10241061

1062+
if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1063+
if (mad_agent_priv->agent.rmpp_version) {
1064+
ret = -EINVAL;
1065+
goto error;
1066+
}
1067+
}
1068+
10251069
/*
10261070
* Save pointer to next work request to post in case the
10271071
* current one completes, and the user modifies the work
@@ -1618,14 +1662,59 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
16181662
(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
16191663
}
16201664

1665+
static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1666+
struct ib_mad_recv_wc *rwc)
1667+
{
1668+
return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1669+
rwc->recv_buf.mad->mad_hdr.mgmt_class;
1670+
}
1671+
1672+
static inline int rcv_has_same_gid(struct ib_mad_send_wr_private *wr,
1673+
struct ib_mad_recv_wc *rwc )
1674+
{
1675+
struct ib_ah_attr attr;
1676+
u8 send_resp, rcv_resp;
1677+
1678+
send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
1679+
mad_hdr.method & IB_MGMT_METHOD_RESP;
1680+
rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
1681+
1682+
if (!send_resp && rcv_resp)
1683+
/* is request/response. GID/LIDs are both local (same). */
1684+
return 1;
1685+
1686+
if (send_resp == rcv_resp)
1687+
/* both requests, or both responses. GIDs different */
1688+
return 0;
1689+
1690+
if (ib_query_ah(wr->send_buf.ah, &attr))
1691+
/* Assume not equal, to avoid false positives. */
1692+
return 0;
1693+
1694+
if (!(attr.ah_flags & IB_AH_GRH) && !(rwc->wc->wc_flags & IB_WC_GRH))
1695+
return attr.dlid == rwc->wc->slid;
1696+
else if ((attr.ah_flags & IB_AH_GRH) &&
1697+
(rwc->wc->wc_flags & IB_WC_GRH))
1698+
return memcmp(attr.grh.dgid.raw,
1699+
rwc->recv_buf.grh->sgid.raw, 16) == 0;
1700+
else
1701+
/* one has GID, other does not. Assume different */
1702+
return 0;
1703+
}
16211704
struct ib_mad_send_wr_private*
1622-
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
1705+
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1706+
struct ib_mad_recv_wc *mad_recv_wc)
16231707
{
16241708
struct ib_mad_send_wr_private *mad_send_wr;
1709+
struct ib_mad *mad;
1710+
1711+
mad = (struct ib_mad *)mad_recv_wc->recv_buf.mad;
16251712

16261713
list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
16271714
agent_list) {
1628-
if (mad_send_wr->tid == tid)
1715+
if ((mad_send_wr->tid == mad->mad_hdr.tid) &&
1716+
rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
1717+
rcv_has_same_gid(mad_send_wr, mad_recv_wc))
16291718
return mad_send_wr;
16301719
}
16311720

@@ -1636,7 +1725,10 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
16361725
list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
16371726
agent_list) {
16381727
if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
1639-
mad_send_wr->tid == tid && mad_send_wr->timeout) {
1728+
mad_send_wr->tid == mad->mad_hdr.tid &&
1729+
mad_send_wr->timeout &&
1730+
rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
1731+
rcv_has_same_gid(mad_send_wr, mad_recv_wc)) {
16401732
/* Verify request has not been canceled */
16411733
return (mad_send_wr->status == IB_WC_SUCCESS) ?
16421734
mad_send_wr : NULL;
@@ -1661,7 +1753,6 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
16611753
struct ib_mad_send_wr_private *mad_send_wr;
16621754
struct ib_mad_send_wc mad_send_wc;
16631755
unsigned long flags;
1664-
__be64 tid;
16651756

16661757
INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
16671758
list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -1677,9 +1768,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
16771768

16781769
/* Complete corresponding request */
16791770
if (response_mad(mad_recv_wc->recv_buf.mad)) {
1680-
tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
16811771
spin_lock_irqsave(&mad_agent_priv->lock, flags);
1682-
mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
1772+
mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
16831773
if (!mad_send_wr) {
16841774
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
16851775
ib_free_recv_mad(mad_recv_wc);
@@ -2408,11 +2498,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
24082498
}
24092499
}
24102500
sg_list.addr = dma_map_single(qp_info->port_priv->
2411-
device->dma_device,
2412-
&mad_priv->grh,
2413-
sizeof *mad_priv -
2414-
sizeof mad_priv->header,
2415-
DMA_FROM_DEVICE);
2501+
device->dma_device,
2502+
&mad_priv->grh,
2503+
sizeof *mad_priv -
2504+
sizeof mad_priv->header,
2505+
DMA_FROM_DEVICE);
24162506
pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
24172507
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
24182508
mad_priv->header.mad_list.mad_queue = recv_queue;

drivers/infiniband/core/mad_priv.h

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -216,7 +216,8 @@ extern kmem_cache_t *ib_mad_cache;
216216
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
217217

218218
struct ib_mad_send_wr_private *
219-
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid);
219+
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
220+
struct ib_mad_recv_wc *mad_recv_wc);
220221

221222
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
222223
struct ib_mad_send_wc *mad_send_wc);

drivers/infiniband/core/mad_rmpp.c

Lines changed: 17 additions & 37 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
/*
22
* Copyright (c) 2005 Intel Inc. All rights reserved.
3-
* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
3+
* Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
44
*
55
* This software is available to you under a choice of one of two
66
* licenses. You may choose to be licensed under the terms of the GNU
@@ -100,17 +100,6 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
100100
}
101101
}
102102

103-
static int data_offset(u8 mgmt_class)
104-
{
105-
if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
106-
return IB_MGMT_SA_HDR;
107-
else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
108-
(mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
109-
return IB_MGMT_VENDOR_HDR;
110-
else
111-
return IB_MGMT_RMPP_HDR;
112-
}
113-
114103
static void format_ack(struct ib_mad_send_buf *msg,
115104
struct ib_rmpp_mad *data,
116105
struct mad_rmpp_recv *rmpp_recv)
@@ -137,7 +126,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
137126
struct ib_mad_send_buf *msg;
138127
int ret, hdr_len;
139128

140-
hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
129+
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
141130
msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
142131
recv_wc->wc->pkey_index, 1, hdr_len,
143132
0, GFP_KERNEL);
@@ -163,7 +152,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
163152
if (IS_ERR(ah))
164153
return (void *) ah;
165154

166-
hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
155+
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
167156
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
168157
recv_wc->wc->pkey_index, 1,
169158
hdr_len, 0, GFP_KERNEL);
@@ -408,7 +397,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
408397

409398
rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
410399

411-
hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
400+
hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
412401
data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
413402
pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
414403
if (pad > IB_MGMT_RMPP_DATA || pad < 0)
@@ -562,15 +551,15 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
562551
return ib_send_mad(mad_send_wr);
563552
}
564553

565-
static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
566-
u8 rmpp_status)
554+
static void abort_send(struct ib_mad_agent_private *agent,
555+
struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
567556
{
568557
struct ib_mad_send_wr_private *mad_send_wr;
569558
struct ib_mad_send_wc wc;
570559
unsigned long flags;
571560

572561
spin_lock_irqsave(&agent->lock, flags);
573-
mad_send_wr = ib_find_send_mad(agent, tid);
562+
mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
574563
if (!mad_send_wr)
575564
goto out; /* Unmatched send */
576565

@@ -612,23 +601,21 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
612601

613602
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
614603
if (rmpp_mad->rmpp_hdr.rmpp_status) {
615-
abort_send(agent, rmpp_mad->mad_hdr.tid,
616-
IB_MGMT_RMPP_STATUS_BAD_STATUS);
604+
abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
617605
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
618606
return;
619607
}
620608

621609
seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
622610
newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
623611
if (newwin < seg_num) {
624-
abort_send(agent, rmpp_mad->mad_hdr.tid,
625-
IB_MGMT_RMPP_STATUS_W2S);
612+
abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
626613
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
627614
return;
628615
}
629616

630617
spin_lock_irqsave(&agent->lock, flags);
631-
mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
618+
mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
632619
if (!mad_send_wr)
633620
goto out; /* Unmatched ACK */
634621

@@ -639,8 +626,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
639626
if (seg_num > mad_send_wr->send_buf.seg_count ||
640627
seg_num > mad_send_wr->newwin) {
641628
spin_unlock_irqrestore(&agent->lock, flags);
642-
abort_send(agent, rmpp_mad->mad_hdr.tid,
643-
IB_MGMT_RMPP_STATUS_S2B);
629+
abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
644630
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
645631
return;
646632
}
@@ -728,12 +714,10 @@ static void process_rmpp_stop(struct ib_mad_agent_private *agent,
728714
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
729715

730716
if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
731-
abort_send(agent, rmpp_mad->mad_hdr.tid,
732-
IB_MGMT_RMPP_STATUS_BAD_STATUS);
717+
abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
733718
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
734719
} else
735-
abort_send(agent, rmpp_mad->mad_hdr.tid,
736-
rmpp_mad->rmpp_hdr.rmpp_status);
720+
abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
737721
}
738722

739723
static void process_rmpp_abort(struct ib_mad_agent_private *agent,
@@ -745,12 +729,10 @@ static void process_rmpp_abort(struct ib_mad_agent_private *agent,
745729

746730
if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
747731
rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
748-
abort_send(agent, rmpp_mad->mad_hdr.tid,
749-
IB_MGMT_RMPP_STATUS_BAD_STATUS);
732+
abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
750733
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
751734
} else
752-
abort_send(agent, rmpp_mad->mad_hdr.tid,
753-
rmpp_mad->rmpp_hdr.rmpp_status);
735+
abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
754736
}
755737

756738
struct ib_mad_recv_wc *
@@ -764,8 +746,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
764746
return mad_recv_wc;
765747

766748
if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
767-
abort_send(agent, rmpp_mad->mad_hdr.tid,
768-
IB_MGMT_RMPP_STATUS_UNV);
749+
abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
769750
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
770751
goto out;
771752
}
@@ -783,8 +764,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
783764
process_rmpp_abort(agent, mad_recv_wc);
784765
break;
785766
default:
786-
abort_send(agent, rmpp_mad->mad_hdr.tid,
787-
IB_MGMT_RMPP_STATUS_BADT);
767+
abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
788768
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
789769
break;
790770
}

drivers/infiniband/core/user_mad.c

Lines changed: 6 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -177,17 +177,6 @@ static int queue_packet(struct ib_umad_file *file,
177177
return ret;
178178
}
179179

180-
static int data_offset(u8 mgmt_class)
181-
{
182-
if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
183-
return IB_MGMT_SA_HDR;
184-
else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
185-
(mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
186-
return IB_MGMT_VENDOR_HDR;
187-
else
188-
return IB_MGMT_RMPP_HDR;
189-
}
190-
191180
static void send_handler(struct ib_mad_agent *agent,
192181
struct ib_mad_send_wc *send_wc)
193182
{
@@ -283,7 +272,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
283272
*/
284273
return -ENOSPC;
285274
}
286-
offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
275+
offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
287276
max_seg_payload = sizeof (struct ib_mad) - offset;
288277

289278
for (left = packet->length - seg_payload, buf += seg_payload;
@@ -441,21 +430,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
441430
}
442431

443432
rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
444-
if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
445-
hdr_len = IB_MGMT_SA_HDR;
446-
copy_offset = IB_MGMT_RMPP_HDR;
447-
rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
448-
IB_MGMT_RMPP_FLAG_ACTIVE;
449-
} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
450-
rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
451-
hdr_len = IB_MGMT_VENDOR_HDR;
433+
hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
434+
if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
435+
copy_offset = IB_MGMT_MAD_HDR;
436+
rmpp_active = 0;
437+
} else {
452438
copy_offset = IB_MGMT_RMPP_HDR;
453439
rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
454440
IB_MGMT_RMPP_FLAG_ACTIVE;
455-
} else {
456-
hdr_len = IB_MGMT_MAD_HDR;
457-
copy_offset = IB_MGMT_MAD_HDR;
458-
rmpp_active = 0;
459441
}
460442

461443
data_len = count - sizeof (struct ib_user_mad) - hdr_len;

0 commit comments

Comments
 (0)