Commit 7316b49

Author: Al Viro
Parent: fa0ca2a

aio: move sanity checks and request allocation to io_submit_one()

makes for somewhat cleaner control flow in __io_submit_one()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
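
For readers skimming the diff below, the resulting split looks roughly like the condensed C sketch here: io_submit_one() now owns the sanity checks, the aio_get_req() allocation and the request's lifetime, so __io_submit_one() can simply return an error instead of jumping to an out_put_req label. This is abbreviated from the diff, not a verbatim copy of fs/aio.c.

/* Condensed sketch of the post-commit control flow; checks and the opcode
 * dispatch are abbreviated -- see the diff for the exact code.
 */
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 bool compat)
{
	struct aio_kiocb *req;
	struct iocb iocb;
	int err;

	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
		return -EFAULT;

	/* sanity checks (aio_reserved2, aio_buf/aio_nbytes overflow) now live here */

	req = aio_get_req(ctx);			/* allocation moved here as well */
	if (unlikely(!req))
		return -EAGAIN;

	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);

	iocb_put(req);				/* drop the synchronous reference */
	if (unlikely(err)) {			/* non-zero: destroy req ourselves */
		iocb_destroy(req);
		put_reqs_available(ctx, 1);
	}
	return err;
}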


fs/aio.c

Lines changed: 53 additions & 66 deletions
@@ -1777,35 +1777,12 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 }
 
 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
-			   struct iocb __user *user_iocb, bool compat)
+			   struct iocb __user *user_iocb, struct aio_kiocb *req,
+			   bool compat)
 {
-	struct aio_kiocb *req;
-	int ret;
-
-	/* enforce forwards compatibility on users */
-	if (unlikely(iocb->aio_reserved2)) {
-		pr_debug("EINVAL: reserve field set\n");
-		return -EINVAL;
-	}
-
-	/* prevent overflows */
-	if (unlikely(
-	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
-	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
-	    ((ssize_t)iocb->aio_nbytes < 0)
-	   )) {
-		pr_debug("EINVAL: overflow check\n");
-		return -EINVAL;
-	}
-
-	req = aio_get_req(ctx);
-	if (unlikely(!req))
-		return -EAGAIN;
-
 	req->ki_filp = fget(iocb->aio_fildes);
-	ret = -EBADF;
 	if (unlikely(!req->ki_filp))
-		goto out_put_req;
+		return -EBADF;
 
 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
 		struct eventfd_ctx *eventfd;
@@ -1816,17 +1793,15 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 		 * event using the eventfd_signal() function.
 		 */
 		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
-		if (IS_ERR(eventfd)) {
-			ret = PTR_ERR(eventfd);
-			goto out_put_req;
-		}
+		if (IS_ERR(eventfd))
+			return PTR_ERR(req->ki_eventfd);
+
 		req->ki_eventfd = eventfd;
 	}
 
-	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
-	if (unlikely(ret)) {
+	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
 		pr_debug("EFAULT: aio_key\n");
-		goto out_put_req;
+		return -EFAULT;
 	}
 
 	req->ki_res.obj = (u64)(unsigned long)user_iocb;
@@ -1836,58 +1811,70 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 
 	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
-		ret = aio_read(&req->rw, iocb, false, compat);
-		break;
+		return aio_read(&req->rw, iocb, false, compat);
 	case IOCB_CMD_PWRITE:
-		ret = aio_write(&req->rw, iocb, false, compat);
-		break;
+		return aio_write(&req->rw, iocb, false, compat);
 	case IOCB_CMD_PREADV:
-		ret = aio_read(&req->rw, iocb, true, compat);
-		break;
+		return aio_read(&req->rw, iocb, true, compat);
 	case IOCB_CMD_PWRITEV:
-		ret = aio_write(&req->rw, iocb, true, compat);
-		break;
+		return aio_write(&req->rw, iocb, true, compat);
 	case IOCB_CMD_FSYNC:
-		ret = aio_fsync(&req->fsync, iocb, false);
-		break;
+		return aio_fsync(&req->fsync, iocb, false);
 	case IOCB_CMD_FDSYNC:
-		ret = aio_fsync(&req->fsync, iocb, true);
-		break;
+		return aio_fsync(&req->fsync, iocb, true);
 	case IOCB_CMD_POLL:
-		ret = aio_poll(req, iocb);
-		break;
+		return aio_poll(req, iocb);
 	default:
 		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
-
-	/* Done with the synchronous reference */
-	iocb_put(req);
-
-	/*
-	 * If ret is 0, we'd either done aio_complete() ourselves or have
-	 * arranged for that to be done asynchronously. Anything non-zero
-	 * means that we need to destroy req ourselves.
-	 */
-	if (!ret)
-		return 0;
-
-out_put_req:
-	iocb_destroy(req);
-	put_reqs_available(ctx, 1);
-	return ret;
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			 bool compat)
 {
+	struct aio_kiocb *req;
 	struct iocb iocb;
+	int err;
 
 	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
 		return -EFAULT;
 
-	return __io_submit_one(ctx, &iocb, user_iocb, compat);
+	/* enforce forwards compatibility on users */
+	if (unlikely(iocb.aio_reserved2)) {
+		pr_debug("EINVAL: reserve field set\n");
+		return -EINVAL;
+	}
+
+	/* prevent overflows */
+	if (unlikely(
+	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
+	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
+	    ((ssize_t)iocb.aio_nbytes < 0)
+	   )) {
+		pr_debug("EINVAL: overflow check\n");
+		return -EINVAL;
+	}
+
+	req = aio_get_req(ctx);
+	if (unlikely(!req))
+		return -EAGAIN;
+
+	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
+
+	/* Done with the synchronous reference */
+	iocb_put(req);
+
+	/*
+	 * If err is 0, we'd either done aio_complete() ourselves or have
+	 * arranged for that to be done asynchronously. Anything non-zero
+	 * means that we need to destroy req ourselves.
+	 */
+	if (unlikely(err)) {
+		iocb_destroy(req);
+		put_reqs_available(ctx, 1);
+	}
+	return err;
 }
 
 /* sys_io_submit:
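
For reference, this is how the fields that io_submit_one() now validates up front (aio_reserved2, aio_buf, aio_nbytes) arrive from userspace. A minimal sketch using the raw Linux AIO syscalls and <linux/aio_abi.h>; the file path and buffer size are arbitrary illustration values, not anything taken from the commit.

/* Minimal userspace sketch: submit one IOCB_CMD_PREAD and wait for it. */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;
	char buf[4096];
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0 || syscall(__NR_io_setup, 1, &ctx) < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));		/* keeps aio_reserved2 == 0 */
	cb.aio_lio_opcode = IOCB_CMD_PREAD;	/* dispatched to aio_read() in the switch above */
	cb.aio_fildes = fd;			/* fget()'d by __io_submit_one() */
	cb.aio_buf = (__u64)(unsigned long)buf;	/* subject to the overflow check */
	cb.aio_nbytes = sizeof(buf);		/* must fit in size_t/ssize_t */

	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)	/* enters io_submit_one() */
		return 1;

	if (syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
		printf("read %lld bytes\n", (long long)ev.res);

	syscall(__NR_io_destroy, ctx);
	return 0;
}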
