Commit ed8ada3

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband updates from Roland Dreier:
 "Last batch of IB changes for 3.12: many mlx5 hardware driver fixes plus
  one trivial semicolon cleanup"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB: Remove unnecessary semicolons
  IB/mlx5: Ensure proper synchronization accessing memory
  IB/mlx5: Fix alignment of reg umr gather buffers
  IB/mlx5: Fix eq names to display nicely in /proc/interrupts
  mlx5: Fix error code translation from firmware to driver
  IB/mlx5: Fix opt param mask according to firmware spec
  mlx5: Fix opt param mask for sq err to rts transition
  IB/mlx5: Disable atomic operations
  mlx5: Fix layout of struct mlx5_init_seg
  mlx5: Keep polling to reclaim pages while any returned
  IB/mlx5: Avoid async events on invalid port number
  IB/mlx5: Decrease memory consumption of mr caches
  mlx5: Remove checksum on command interface commands
  IB/mlx5: Fix memory leak in mlx5_ib_create_srq
  IB/mlx5: Flush cache workqueue before destroying it
  IB/mlx5: Fix send work queue size calculation

2 parents d6099ae + 59b5b28

15 files changed, +126 -141 lines changed

drivers/infiniband/hw/amso1100/c2_ae.c

Lines changed: 1 addition & 1 deletion
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
 		return "C2_QP_STATE_ERROR";
 	default:
 		return "<invalid QP state>";
-	};
+	}
 }
 
 void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)

drivers/infiniband/hw/mlx5/main.c

Lines changed: 10 additions & 6 deletions
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	char name[MLX5_MAX_EQ_NAME];
 	struct mlx5_eq *eq, *n;
 	int ncomp_vec;
 	int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 			goto clean;
 		}
 
-		snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+		snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
 		err = mlx5_create_map_eq(&dev->mdev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-					 eq->name,
-					 &dev->mdev.priv.uuari.uars[0]);
+					 name, &dev->mdev.priv.uuari.uars[0]);
 		if (err) {
 			kfree(eq);
 			goto clean;
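
Note on the eq-name hunks above: per the companion mlx5_core change in this
series, mlx5_create_map_eq() receives the base name and composes the final
string (apparently base name plus PCI address) into eq->name itself, so a
name written into eq->name before the call never survives to
/proc/interrupts. Passing a local scratch buffer is the fix. A minimal
userspace sketch of that contract; create_eq() and the "%s@pci:%s" format
are illustrative stand-ins, not the driver's exact code:

#include <stdio.h>

#define MAX_EQ_NAME 32

struct eq {
	char name[MAX_EQ_NAME];
};

/* Stand-in for mlx5_create_map_eq(): it builds the final name itself,
 * so anything pre-written into eq->name would simply be overwritten. */
static int create_eq(struct eq *eq, const char *name, const char *pci)
{
	snprintf(eq->name, MAX_EQ_NAME, "%s@pci:%s", name, pci);
	return 0;
}

int main(void)
{
	struct eq eq;
	char name[MAX_EQ_NAME];	/* local scratch buffer, as in the fix */
	int i;

	for (i = 0; i < 2; i++) {
		snprintf(name, MAX_EQ_NAME, "mlx5_comp%d", i);
		create_eq(&eq, name, "0000:04:00.0");
		printf("%s\n", eq.name); /* e.g. mlx5_comp0@pci:0000:04:00.0 */
	}
	return 0;
}
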
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_srq_sge	   = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
 	props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
-	props->atomic_cap	   = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-	props->masked_atomic_cap   = IB_ATOMIC_HCA;
+	props->atomic_cap	   = IB_ATOMIC_NONE;
+	props->masked_atomic_cap   = IB_ATOMIC_NONE;
 	props->max_pkeys	   = be16_to_cpup((__be16 *)(out_mad->data + 28));
 	props->max_mcast_grp	   = 1 << dev->mdev.caps.log_max_mcg;
 	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
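
Since mlx5 now reports IB_ATOMIC_NONE, well-behaved userspace should query
the capability before posting atomic work requests. A hedged sketch against
the standard libibverbs API (device selection and error handling kept
minimal for illustration):

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	int num;
	struct ibv_device **list = ibv_get_device_list(&num);
	struct ibv_device_attr attr;
	struct ibv_context *ctx;

	if (!list || num < 1)
		return 1;
	ctx = ibv_open_device(list[0]);	/* first device, for demonstration */
	if (!ctx)
		return 1;
	if (!ibv_query_device(ctx, &attr) && attr.atomic_cap == IBV_ATOMIC_NONE)
		printf("no atomic support; avoid IBV_WR_ATOMIC_* opcodes\n");
	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}
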
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 	ibev.device	      = &ibdev->ib_dev;
 	ibev.element.port_num = port;
 
+	if (port < 1 || port > ibdev->num_ports) {
+		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+		return;
+	}
+
 	if (ibdev->ib_active)
 		ib_dispatch_event(&ibev);
 }

drivers/infiniband/hw/mlx5/mr.c

Lines changed: 33 additions & 37 deletions
@@ -42,6 +42,10 @@ enum {
 	DEF_CACHE_SIZE	= 10,
 };
 
+enum {
+	MLX5_UMR_ALIGN	= 2048
+};
+
 static __be64 *mr_align(__be64 *ptr, int align)
 {
 	unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_ib_mr *mr;
 	int npages = 1 << ent->order;
-	int size = sizeof(u64) * npages;
 	int err = 0;
 	int i;
 
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		}
 		mr->order = ent->order;
 		mr->umred = 1;
-		mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-		if (!mr->pas) {
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-		mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(ddev, mr->dma)) {
-			kfree(mr->pas);
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-
 		in->seg.status = 1 << 6;
 		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
 		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 					    sizeof(*in));
 		if (err) {
 			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
 			kfree(mr);
 			goto out;
 		}
@@ -129,11 +114,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 	int i;
 
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 
+	cancel_delayed_work(&ent->dwork);
 	while (1) {
 		spin_lock(&ent->lock);
 		if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 	int i;
 
 	dev->cache.stopped = 1;
-	destroy_workqueue(dev->cache.wq);
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mr_cache_debugfs_cleanup(dev);
 
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 
+	destroy_workqueue(dev->cache.wq);
+
 	return 0;
 }
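
The ordering above is the substance of the fix: previously the workqueue
was destroyed first, so clean_keys() could race with still-queued cache
work. The new sequence is stop, flush, free, destroy (with clean_keys()
also cancelling each entry's delayed work). A compile-only kernel-module
sketch of that shutdown order; all names here are illustrative, not the
driver's:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *cache_wq;
static bool cache_stopped;

static void cache_work_fn(struct work_struct *w)
{
	if (cache_stopped)		/* workers observe the stop flag */
		return;
	/* ... grow/shrink cache entries ... */
}
static DECLARE_WORK(cache_work, cache_work_fn);

static int __init demo_init(void)
{
	cache_wq = create_singlethread_workqueue("demo_cache");
	if (!cache_wq)
		return -ENOMEM;
	queue_work(cache_wq, &cache_work);
	return 0;
}

static void __exit demo_exit(void)
{
	cache_stopped = true;		/* 1. no new cache activity */
	flush_workqueue(cache_wq);	/* 2. wait out queued work */
	/* 3. free the cached entries here, race-free */
	destroy_workqueue(cache_wq);	/* 4. tear the queue down last */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
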

@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 				  int page_shift, int order, int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
+	int size = sizeof(u64) * npages;
 	int err;
 	int i;
 
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	if (!mr)
 		return ERR_PTR(-EAGAIN);
 
-	mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+	mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+	if (!mr->pas) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	mlx5_ib_populate_pas(dev, umem, page_shift,
+			     mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+	mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(ddev, mr->dma)) {
+		kfree(mr->pas);
+		err = -ENOMEM;
+		goto error;
+	}
 
 	memset(&wr, 0, sizeof(wr));
 	wr.wr_id = (u64)(unsigned long)mr;
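
The reg_umr() hunks implement the "Fix alignment of reg umr gather buffers"
change: kmalloc() guarantees nothing about 2 KB alignment, so the buffer is
over-allocated by MLX5_UMR_ALIGN - 1 bytes and the pointer rounded up. A
small userspace sketch of the same arithmetic (align_ptr() mirrors the
power-of-two mask trick visible in the driver's mr_align(); names and sizes
are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define UMR_ALIGN 2048	/* stand-in for MLX5_UMR_ALIGN */

/* Round ptr up to the next multiple of align (align a power of two). */
static void *align_ptr(void *ptr, size_t align)
{
	uintptr_t mask = align - 1;

	return (void *)(((uintptr_t)ptr + mask) & ~mask);
}

int main(void)
{
	size_t size = 64 * sizeof(uint64_t);
	/* Over-allocate by align - 1 so an aligned block of 'size'
	 * bytes is guaranteed to fit inside the allocation. */
	void *raw = malloc(size + UMR_ALIGN - 1);
	void *aligned;

	if (!raw)
		return 1;
	aligned = align_ptr(raw, UMR_ALIGN);
	printf("raw=%p aligned=%p\n", raw, aligned);
	free(raw);	/* always free the original pointer */
	return 0;
}
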
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	wait_for_completion(&mr->done);
 	up(&umrc->sem);
 
+	dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+	kfree(mr->pas);
+
 	if (mr->status != IB_WC_SUCCESS) {
 		mlx5_ib_warn(dev, "reg umr failed\n");
 		err = -EFAULT;

drivers/infiniband/hw/mlx5/qp.c

Lines changed: 30 additions & 50 deletions
@@ -203,28 +203,31 @@ static int sq_overhead(enum ib_qp_type qp_type)
 
 	switch (qp_type) {
 	case IB_QPT_XRC_INI:
-		size = sizeof(struct mlx5_wqe_xrc_seg);
+		size += sizeof(struct mlx5_wqe_xrc_seg);
 		/* fall through */
 	case IB_QPT_RC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_atomic_seg) +
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
+	case IB_QPT_XRC_TGT:
+		return 0;
+
 	case IB_QPT_UC:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
 	case IB_QPT_UD:
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_datagram_seg);
 		break;
 
 	case MLX5_IB_QPT_REG_UMR:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
 			sizeof(struct mlx5_mkey_seg);
 		break;
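
The switch to "size +=" in every arm matters because IB_QPT_XRC_INI falls
through into IB_QPT_RC: an assignment in a later arm would silently discard
whatever an earlier arm accumulated. A toy demonstration of the pattern
(segment sizes made up for illustration):

#include <stdio.h>

enum qp_type { QPT_RC, QPT_XRC_INI };

static int sq_overhead(enum qp_type t)
{
	int size = 0;

	switch (t) {
	case QPT_XRC_INI:
		size += 8;		/* xrc segment */
		/* fall through */
	case QPT_RC:
		size += 16 + 16 + 16;	/* "=" here would lose the 8 above */
		break;
	}
	return size;
}

int main(void)
{
	printf("rc=%d xrc_ini=%d\n",
	       sq_overhead(QPT_RC), sq_overhead(QPT_XRC_INI));	/* 48, 56 */
	return 0;
}
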
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 		return wqe_size;
 
 	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
-		mlx5_ib_dbg(dev, "\n");
+		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+			    wqe_size, dev->mdev.caps.max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+			    qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+		return -ENOMEM;
+	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
 	qp->sq.max_gs = attr->cap.max_send_sge;
-	qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+	qp->sq.max_post = wq_size / wqe_size;
+	attr->cap.max_send_wr = qp->sq.max_post;
 
 	return wq_size;
 }
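
The max_post change stops rounding the queue's capacity down to a power of
two. A worked example with made-up numbers (the helpers below are
simplified userspace stand-ins for the kernel's roundup_pow_of_two() and
ilog2()):

#include <stdio.h>

static unsigned int pow2_roundup(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static unsigned int log2_floor(unsigned int v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int wqe_size = 192, max_send_wr = 100;
	unsigned int wq_size = pow2_roundup(max_send_wr * wqe_size); /* 32768 */

	/* 32768 / 192 = 170 slots; the old code shrank that to 128 */
	printf("old max_post = %u\n", 1u << log2_floor(wq_size / wqe_size));
	printf("new max_post = %u\n", wq_size / wqe_size);
	return 0;
}
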
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 					  MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
 					   MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+					   MLX5_QP_OPTPAR_RRE		|
+					   MLX5_QP_OPTPAR_RAE		|
+					   MLX5_QP_OPTPAR_RWE		|
+					   MLX5_QP_OPTPAR_PKEY_INDEX,
 		},
 	},
 	[MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 		[MLX5_QP_STATE_RTS] = {
 			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
+			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
+					   MLX5_QP_OPTPAR_RWE		|
+					   MLX5_QP_OPTPAR_RAE		|
+					   MLX5_QP_OPTPAR_RRE,
 		},
 	},
 };
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = 0;
-	}
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
-{
-	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_atomic_seg(seg, wr);
-			seg  += sizeof(struct mlx5_wqe_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-			break;
-
 		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_masked_atomic_seg(seg, wr);
-			seg  += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-			break;
+			mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+			err = -ENOSYS;
+			*bad_wr = wr;
+			goto out;
 
 		case IB_WR_LOCAL_INV:
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
