Commit d195a66e authored by Brian Gianforcaro, committed by Jens Axboe

io_uring: fix stale comment and a few typos

- Fix a few typos found while reading the code.

- Fix stale io_get_sqring comment referencing s->sqe, the 's' parameter
  was renamed to 'req', but the comment still holds.
Signed-off-by: Brian Gianforcaro <b.gianfo@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e31736d9
@@ -948,7 +948,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
 	/*
 	 * Now check if a free (going busy) or busy worker has the work
 	 * currently running. If we find it there, we'll return CANCEL_RUNNING
-	 * as an indication that we attempte to signal cancellation. The
+	 * as an indication that we attempt to signal cancellation. The
 	 * completion will run normally in this case.
 	 */
 	rcu_read_lock();
@@ -1178,7 +1178,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 }
 
 /*
- * Poll for a mininum of 'min' events. Note that if min == 0 we consider that a
+ * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
  * non-spinning poll check - we'll still enter the driver poll loop, but only
  * as a non-spinning completion check.
  */
@@ -2573,7 +2573,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 		/*
 		 * Adjust the reqs sequence before the current one because it
-		 * will consume a slot in the cq_ring and the the cq_tail
+		 * will consume a slot in the cq_ring and the cq_tail
 		 * pointer will be increased, otherwise other timeout reqs may
 		 * return in advance without waiting for enough wait_nr.
 		 */
@@ -3430,7 +3430,7 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
 }
 
 /*
- * Fetch an sqe, if one is available. Note that s->sqe will point to memory
+ * Fetch an sqe, if one is available. Note that req->sqe will point to memory
  * that is mapped by userspace. This means that care needs to be taken to
  * ensure that reads are stable, as we cannot rely on userspace always
  * being a good citizen. If members of the sqe are validated and then later
@@ -3694,7 +3694,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
 	struct io_ring_ctx *ctx = iowq->ctx;
 
 	/*
-	 * Wake up if we have enough events, or if a timeout occured since we
+	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */