netfs: Change the read result collector to only use one work item
Change the way netfslib collects read results to do all the collection for
a particular read request using a single work item that walks along the
subrequest queue as subrequests make progress or complete, unlocking folios
progressively rather than unlocking them in parallel as individual
subrequests complete.

The code is remodelled to be more like the write-side code, though only
using a single stream.  This makes it more directly comparable and thus
easier to duplicate fixes between the two sides.

This has a number of advantages:

 (1) It's simpler.  There doesn't need to be a complex donation mechanism
     to handle mismatches between the size and alignment of subrequests and
     folios.  The collector unlocks folios as the subrequests covering each
     folio complete.

 (2) It should cause less scheduler overhead as there's a single work item
     in play instead of one per subrequest when a read gets split up into a
     lot of subrequests.

     Whilst the parallelism is nice in theory, in practice, the vast
     majority of loads are sequential reads of the whole file, so
     committing a bunch of threads to unlocking folios out of order doesn't
     help in those cases.

 (3) It should make it easier to implement content decryption.  A folio
     cannot be decrypted until all the requests that contribute to it have
     completed - and, again, most loads are sequential and so, most of the
     time, we want to begin decryption sequentially (though it's great if
     the decryption can happen in parallel).

There is a disadvantage in that we're losing the ability to decrypt and
unlock things on an as-things-arrive basis which may affect some
applications.
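
As a rough illustration of the new model, here is a small user-space toy
(not the kernel code; all struct, field and function names below are made
up for the example): subrequests may complete in any order, but the
collector walks the queue front-to-back and only unlocks a folio once every
subrequest covering it has completed.

/*
 * Toy user-space model of a single-work-item read collector.  This is NOT
 * the kernel code: names here are illustrative only.  In the kernel the
 * completed entries are also removed from the front of the queue under the
 * request lock; the toy just skips over them.
 */
#include <stdbool.h>
#include <stdio.h>

#define FOLIO_SIZE	4096
#define SUBREQ_SIZE	2048		/* two subrequests per folio here */
#define NR_SUBREQS	4

struct toy_subreq {
	unsigned long long	start;	/* file offset covered */
	unsigned long long	len;
	bool			done;	/* set by the completion path */
};

/* Advance over completed subreqs at the front; unlock fully-covered folios. */
static void toy_collect(struct toy_subreq *q, int nr,
			unsigned long long *collected_to,
			unsigned long long *next_folio)
{
	for (int i = 0; i < nr; i++) {
		if (q[i].start + q[i].len <= *collected_to)
			continue;	/* already collected */
		if (!q[i].done)
			break;		/* front of the queue isn't finished */
		*collected_to = q[i].start + q[i].len;
		while (*next_folio + FOLIO_SIZE <= *collected_to) {
			/* Decryption, if any, would also go here, now that
			 * the folio is fully covered. */
			printf("unlock folio at %llu\n", *next_folio);
			*next_folio += FOLIO_SIZE;
		}
	}
}

int main(void)
{
	struct toy_subreq q[NR_SUBREQS];
	unsigned long long collected_to = 0, next_folio = 0;

	for (int i = 0; i < NR_SUBREQS; i++)
		q[i] = (struct toy_subreq){ .start = i * SUBREQ_SIZE,
					    .len = SUBREQ_SIZE };

	q[1].done = true;	/* out-of-order completion: nothing unlocks */
	toy_collect(q, NR_SUBREQS, &collected_to, &next_folio);
	q[0].done = true;	/* front completes: folio 0 unlocks */
	toy_collect(q, NR_SUBREQS, &collected_to, &next_folio);
	q[2].done = true;
	q[3].done = true;	/* remainder completes: folio 4096 unlocks */
	toy_collect(q, NR_SUBREQS, &collected_to, &next_folio);
	return 0;
}

Because the walk is strictly in order, unlocking (and any future decryption
step) keys off a single "collected to" position rather than per-folio
completion counting; as the comment added in netfs_issue_read() below notes,
new subrequests are only ever appended to the tail of the queue while the
collector removes completed ones from the front under the request lock.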

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-28-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
dhowells authored and brauner committed Dec 20, 2024
1 parent eddf51f commit e2d46f2
Showing 19 changed files with 819 additions and 763 deletions.
3 changes: 1 addition & 2 deletions fs/9p/vfs_addr.c
@@ -81,8 +81,7 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
if (pos + total >= i_size_read(rreq->inode))
__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);

if (!err) {
if (!err && total) {
subreq->transferred += total;
__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
}
8 changes: 6 additions & 2 deletions fs/afs/dir.c
@@ -325,8 +325,10 @@ static ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)
* haven't read it yet.
*/
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
test_bit(AFS_VNODE_DIR_READ, &dvnode->flags))
test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) {
ret = i_size;
goto valid;
}

up_read(&dvnode->validate_lock);
if (down_write_killable(&dvnode->validate_lock) < 0)
@@ -346,11 +348,13 @@ static ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)

set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
set_bit(AFS_VNODE_DIR_READ, &dvnode->flags);
} else {
ret = i_size;
}

downgrade_write(&dvnode->validate_lock);
valid:
return i_size;
return ret;

error_unlock:
up_write(&dvnode->validate_lock);
9 changes: 7 additions & 2 deletions fs/ceph/addr.c
@@ -223,10 +223,13 @@ static void finish_netfs_read(struct ceph_osd_request *req)
subreq->len, i_size_read(req->r_inode));

/* no object means success but no data */
if (err == -ENOENT)
if (err == -ENOENT) {
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
err = 0;
else if (err == -EBLOCKLISTED)
} else if (err == -EBLOCKLISTED) {
fsc->blocklisted = true;
}

if (err >= 0) {
if (sparse && err > 0)
@@ -242,6 +245,8 @@ static void finish_netfs_read(struct ceph_osd_request *req)
if (err > subreq->len)
err = subreq->len;
}
if (err > 0)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
}

if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
160 changes: 79 additions & 81 deletions fs/netfs/buffered_read.c
@@ -121,12 +121,6 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)

subreq->io_iter = rreq->buffer.iter;

if (iov_iter_is_folioq(&subreq->io_iter)) {
subreq->curr_folioq = (struct folio_queue *)subreq->io_iter.folioq;
subreq->curr_folioq_slot = subreq->io_iter.folioq_slot;
subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
}

iov_iter_truncate(&subreq->io_iter, subreq->len);
rolling_buffer_advance(&rreq->buffer, subreq->len);
return subreq->len;
@@ -147,19 +141,6 @@ static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_request *rr

}

void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async)
{
struct netfs_io_subrequest *subreq = priv;

if (transferred_or_error > 0) {
subreq->transferred += transferred_or_error;
subreq->error = 0;
} else {
subreq->error = transferred_or_error;
}
schedule_work(&subreq->work);
}

/*
* Issue a read against the cache.
* - Eats the caller's ref on subreq.
@@ -174,6 +155,47 @@ static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq,
netfs_cache_read_terminated, subreq);
}

static void netfs_issue_read(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq)
{
struct netfs_io_stream *stream = &rreq->io_streams[0];

__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

/* We add to the end of the list whilst the collector may be walking
* the list. The collector only goes nextwards and uses the lock to
* remove entries off of the front.
*/
spin_lock(&rreq->lock);
list_add_tail(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
stream->front = subreq;
if (!stream->active) {
stream->collected_to = stream->front->start;
/* Store list pointers before active flag */
smp_store_release(&stream->active, true);
}
}

spin_unlock(&rreq->lock);

switch (subreq->source) {
case NETFS_DOWNLOAD_FROM_SERVER:
rreq->netfs_ops->issue_read(subreq);
break;
case NETFS_READ_FROM_CACHE:
netfs_read_cache_to_pagecache(rreq, subreq);
break;
default:
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
subreq->error = 0;
iov_iter_zero(subreq->len, &subreq->io_iter);
subreq->transferred = subreq->len;
netfs_read_subreq_terminated(subreq);
break;
}
}

/*
* Perform a read to the pagecache from a series of sources of different types,
* slicing up the region to be read according to available cache blocks and
@@ -186,8 +208,6 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
ssize_t size = rreq->len;
int ret = 0;

atomic_inc(&rreq->nr_outstanding);

do {
struct netfs_io_subrequest *subreq;
enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
@@ -202,14 +222,6 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
subreq->start = start;
subreq->len = size;

atomic_inc(&rreq->nr_outstanding);
spin_lock(&rreq->lock);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
subreq->prev_donated = rreq->prev_donated;
rreq->prev_donated = 0;
trace_netfs_sreq(subreq, netfs_sreq_trace_added);
spin_unlock(&rreq->lock);

source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
subreq->source = source;
if (source == NETFS_DOWNLOAD_FROM_SERVER) {
@@ -237,85 +249,69 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
netfs_stat(&netfs_n_rh_download);
if (rreq->netfs_ops->prepare_read) {
ret = rreq->netfs_ops->prepare_read(subreq);
if (ret < 0)
goto prep_failed;
if (ret < 0) {
subreq->error = ret;
/* Not queued - release both refs. */
netfs_put_subrequest(subreq, false,
netfs_sreq_trace_put_cancel);
netfs_put_subrequest(subreq, false,
netfs_sreq_trace_put_cancel);
break;
}
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
}

slice = netfs_prepare_read_iterator(subreq);
if (slice < 0)
goto prep_iter_failed;

rreq->netfs_ops->issue_read(subreq);
goto done;
goto issue;
}

fill_with_zeroes:
if (source == NETFS_FILL_WITH_ZEROES) {
subreq->source = NETFS_FILL_WITH_ZEROES;
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
netfs_stat(&netfs_n_rh_zero);
slice = netfs_prepare_read_iterator(subreq);
if (slice < 0)
goto prep_iter_failed;
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
subreq->error = 0;
netfs_read_subreq_terminated(subreq);
goto done;
goto issue;
}

if (source == NETFS_READ_FROM_CACHE) {
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
slice = netfs_prepare_read_iterator(subreq);
if (slice < 0)
goto prep_iter_failed;
netfs_read_cache_to_pagecache(rreq, subreq);
goto done;
goto issue;
}

pr_err("Unexpected read source %u\n", source);
WARN_ON_ONCE(1);
break;

prep_iter_failed:
ret = slice;
prep_failed:
subreq->error = ret;
atomic_dec(&rreq->nr_outstanding);
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
break;

done:
issue:
slice = netfs_prepare_read_iterator(subreq);
if (slice < 0) {
ret = slice;
subreq->error = ret;
trace_netfs_sreq(subreq, netfs_sreq_trace_cancel);
/* Not queued - release both refs. */
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
break;
}
size -= slice;
start += slice;
if (size <= 0) {
smp_wmb(); /* Write lists before ALL_QUEUED. */
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
}

netfs_issue_read(rreq, subreq);
cond_resched();
} while (size > 0);

if (atomic_dec_and_test(&rreq->nr_outstanding))
netfs_rreq_terminated(rreq);
if (unlikely(size > 0)) {
smp_wmb(); /* Write lists before ALL_QUEUED. */
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
netfs_wake_read_collector(rreq);
}

/* Defer error return as we may need to wait for outstanding I/O. */
cmpxchg(&rreq->error, 0, ret);
}

/*
* Wait for the read operation to complete, successfully or otherwise.
*/
static int netfs_wait_for_read(struct netfs_io_request *rreq)
{
int ret;

trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
ret = rreq->error;
if (ret == 0 && rreq->submitted < rreq->len) {
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
ret = -EIO;
}

return ret;
}

/**
* netfs_readahead - Helper to manage a read request
* @ractl: The description of the readahead request
@@ -344,6 +340,8 @@ void netfs_readahead(struct readahead_control *ractl)
if (IS_ERR(rreq))
return;

__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);

ret = netfs_begin_cache_read(rreq, ictx);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto cleanup_free;
@@ -460,7 +458,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
folio_put(sink);

ret = netfs_wait_for_read(rreq);
if (ret == 0) {
if (ret >= 0) {
flush_dcache_folio(folio);
folio_mark_uptodate(folio);
}
@@ -748,7 +746,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
netfs_read_to_pagecache(rreq);
ret = netfs_wait_for_read(rreq);
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
return ret;
return ret < 0 ? ret : 0;

error_put:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);