Skip to content

Commit 4a5a553

Browse files
committed
brcmfmac: Use standard SKB list accessors in brcmf_sdiod_sglist_rw.
Instead of direct SKB list pointer accesses. The loops in this function had to be rewritten to accommodate this more easily. The first loop iterates now over the target list in the outer loop, and triggers an mmc data operation when the per-operation limits are hit. Then after the loops, if we have any residue, we trigger the last and final operation. For the page aligned workaround, where we have to copy the read data back into the original list of SKBs, we use a two-tiered loop. The outer loop stays the same and iterates over pktlist, and then we have an inner loop which uses skb_peek_next(). The break logic has been simplified because we know that the aggregate lengths of the SKBs in the source and destination lists are the same. This change also ends up fixing a bug, having to do with the maintenance of the seg_sz variable and how it drove the outermost loop. It begins as: seg_sz = target_list->qlen; i.e. the number of packets in the target_list queue. The loop structure was then: while (seg_sz) { ... while (not at end of target_list) { ... sg_cnt++ ... } ... seg_sz -= sg_cnt; The assumption built into that last statement is that sg_cnt counts how many packets from target_list have been fully processed by the inner loop. But this is not true. If we hit one of the limits, such as the max segment size or the max request size, we will break and copy a partial packet then continue back up to the top of the outermost loop. With the new loops we don't have this problem as we don't guard the loop exit with a packet count, but instead use the progression of the pkt_next SKB through the list to the end. The general structure is: sg_cnt = 0; skb_queue_walk(target_list, pkt_next) { pkt_offset = 0; ... sg_cnt++; ... 
while (pkt_offset < pkt_next->len) { pkt_offset += sg_data_size; if (queued up max per request) mmc_submit_one(); } } if (sg_cnt) mmc_submit_one(); The variables that maintain where we are in the MMC command state such as req_sz, sg_cnt, and sgl are reset when we emit one of these full sized requests. Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 6083e28 commit 4a5a553

File tree

1 file changed

+74
-63
lines changed
  • drivers/net/wireless/broadcom/brcm80211/brcmfmac

1 file changed

+74
-63
lines changed

drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c

Lines changed: 74 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -342,6 +342,37 @@ static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
342342
return err;
343343
}
344344

345+
static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
346+
struct mmc_command *mc, int sg_cnt, int req_sz,
347+
int func_blk_sz, u32 *addr,
348+
struct brcmf_sdio_dev *sdiodev,
349+
struct sdio_func *func, int write)
350+
{
351+
int ret;
352+
353+
md->sg_len = sg_cnt;
354+
md->blocks = req_sz / func_blk_sz;
355+
mc->arg |= (*addr & 0x1FFFF) << 9; /* address */
356+
mc->arg |= md->blocks & 0x1FF; /* block count */
357+
/* incrementing addr for function 1 */
358+
if (func->num == 1)
359+
*addr += req_sz;
360+
361+
mmc_set_data_timeout(md, func->card);
362+
mmc_wait_for_req(func->card->host, mr);
363+
364+
ret = mc->error ? mc->error : md->error;
365+
if (ret == -ENOMEDIUM) {
366+
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
367+
} else if (ret != 0) {
368+
brcmf_err("CMD53 sg block %s failed %d\n",
369+
write ? "write" : "read", ret);
370+
ret = -EIO;
371+
}
372+
373+
return ret;
374+
}
375+
345376
/**
346377
* brcmf_sdiod_sglist_rw - SDIO interface function for block data access
347378
* @sdiodev: brcmfmac sdio device
@@ -360,11 +391,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
360391
struct sk_buff_head *pktlist)
361392
{
362393
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
363-
unsigned int max_req_sz, orig_offset, dst_offset;
364-
unsigned short max_seg_cnt, seg_sz;
394+
unsigned int max_req_sz, src_offset, dst_offset;
365395
unsigned char *pkt_data, *orig_data, *dst_data;
366-
struct sk_buff *pkt_next = NULL, *local_pkt_next;
367396
struct sk_buff_head local_list, *target_list;
397+
struct sk_buff *pkt_next = NULL, *src;
398+
unsigned short max_seg_cnt;
368399
struct mmc_request mmc_req;
369400
struct mmc_command mmc_cmd;
370401
struct mmc_data mmc_dat;
@@ -404,9 +435,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
404435
max_req_sz = sdiodev->max_request_size;
405436
max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
406437
target_list->qlen);
407-
seg_sz = target_list->qlen;
408-
pkt_offset = 0;
409-
pkt_next = target_list->next;
410438

411439
memset(&mmc_req, 0, sizeof(struct mmc_request));
412440
memset(&mmc_cmd, 0, sizeof(struct mmc_command));
@@ -425,12 +453,12 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
425453
mmc_req.cmd = &mmc_cmd;
426454
mmc_req.data = &mmc_dat;
427455

428-
while (seg_sz) {
429-
req_sz = 0;
430-
sg_cnt = 0;
431-
sgl = sdiodev->sgtable.sgl;
432-
/* prep sg table */
433-
while (pkt_next != (struct sk_buff *)target_list) {
456+
req_sz = 0;
457+
sg_cnt = 0;
458+
sgl = sdiodev->sgtable.sgl;
459+
skb_queue_walk(target_list, pkt_next) {
460+
pkt_offset = 0;
461+
while (pkt_offset < pkt_next->len) {
434462
pkt_data = pkt_next->data + pkt_offset;
435463
sg_data_sz = pkt_next->len - pkt_offset;
436464
if (sg_data_sz > sdiodev->max_segment_size)
@@ -439,72 +467,55 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
439467
sg_data_sz = max_req_sz - req_sz;
440468

441469
sg_set_buf(sgl, pkt_data, sg_data_sz);
442-
443470
sg_cnt++;
471+
444472
sgl = sg_next(sgl);
445473
req_sz += sg_data_sz;
446474
pkt_offset += sg_data_sz;
447-
if (pkt_offset == pkt_next->len) {
448-
pkt_offset = 0;
449-
pkt_next = pkt_next->next;
475+
if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
476+
ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
477+
sg_cnt, req_sz, func_blk_sz,
478+
&addr, sdiodev, func, write);
479+
if (ret)
480+
goto exit_queue_walk;
481+
req_sz = 0;
482+
sg_cnt = 0;
483+
sgl = sdiodev->sgtable.sgl;
450484
}
451-
452-
if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
453-
break;
454-
}
455-
seg_sz -= sg_cnt;
456-
457-
if (req_sz % func_blk_sz != 0) {
458-
brcmf_err("sg request length %u is not %u aligned\n",
459-
req_sz, func_blk_sz);
460-
ret = -ENOTBLK;
461-
goto exit;
462-
}
463-
464-
mmc_dat.sg_len = sg_cnt;
465-
mmc_dat.blocks = req_sz / func_blk_sz;
466-
mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
467-
mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
468-
/* incrementing addr for function 1 */
469-
if (func->num == 1)
470-
addr += req_sz;
471-
472-
mmc_set_data_timeout(&mmc_dat, func->card);
473-
mmc_wait_for_req(func->card->host, &mmc_req);
474-
475-
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
476-
if (ret == -ENOMEDIUM) {
477-
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
478-
break;
479-
} else if (ret != 0) {
480-
brcmf_err("CMD53 sg block %s failed %d\n",
481-
write ? "write" : "read", ret);
482-
ret = -EIO;
483-
break;
484485
}
485486
}
486-
487+
if (sg_cnt)
488+
ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
489+
sg_cnt, req_sz, func_blk_sz,
490+
&addr, sdiodev, func, write);
491+
exit_queue_walk:
487492
if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
488-
local_pkt_next = local_list.next;
489-
orig_offset = 0;
493+
src = __skb_peek(&local_list);
494+
src_offset = 0;
490495
skb_queue_walk(pktlist, pkt_next) {
491496
dst_offset = 0;
492-
do {
493-
req_sz = local_pkt_next->len - orig_offset;
494-
req_sz = min_t(uint, pkt_next->len - dst_offset,
495-
req_sz);
496-
orig_data = local_pkt_next->data + orig_offset;
497+
498+
/* This is safe because we must have enough SKB data
499+
* in the local list to cover everything in pktlist.
500+
*/
501+
while (1) {
502+
req_sz = pkt_next->len - dst_offset;
503+
if (req_sz > src->len - src_offset)
504+
req_sz = src->len - src_offset;
505+
506+
orig_data = src->data + src_offset;
497507
dst_data = pkt_next->data + dst_offset;
498508
memcpy(dst_data, orig_data, req_sz);
499-
orig_offset += req_sz;
500-
dst_offset += req_sz;
501-
if (orig_offset == local_pkt_next->len) {
502-
orig_offset = 0;
503-
local_pkt_next = local_pkt_next->next;
509+
510+
src_offset += req_sz;
511+
if (src_offset == src->len) {
512+
src_offset = 0;
513+
src = skb_peek_next(src, &local_list);
504514
}
515+
dst_offset += req_sz;
505516
if (dst_offset == pkt_next->len)
506517
break;
507-
} while (!skb_queue_empty(&local_list));
518+
}
508519
}
509520
}
510521

0 commit comments

Comments
 (0)