Re: linux-next: manual merge of the vfs-brauner tree with the vfs-brauner-fixes tree
From: Darrick J. Wong
Date: Wed Mar 25 2026 - 13:39:17 EST
On Wed, Mar 25, 2026 at 01:29:37PM +0000, Mark Brown wrote:
> Hi all,
>
> Today's linux-next merge of the vfs-brauner tree got a conflict in:
>
> fs/iomap/bio.c
>
> between commit:
>
> f621324dfb3d6 ("iomap: fix lockdep complaint when reads fail")
>
> from the vfs-brauner-fixes tree and commit:
>
> e8f9cf03c9dc9 ("iomap: support ioends for buffered reads")
>
> from the vfs-brauner tree.
>
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non-trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
That looks correct to me; thanks for pointing out the merge conflict. :)
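
For anyone eyeballing the resolution, the merged completion path works
out to roughly this (reconstructed from the hunks below, so treat it as
a sketch of the merge result rather than a paste from the tree):

static void iomap_read_end_io(struct bio *bio)
{
	/* Failed reads bounce to process context. */
	if (bio->bi_status) {
		iomap_fail_buffered_read(bio);
		return;
	}

	__iomap_read_end_io(bio, 0);
}

u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend)
{
	return __iomap_read_end_io(&ioend->io_bio, ioend->io_error);
}

In other words, the deferred failure path, the inline success path, and
the ioend path all funnel through __iomap_read_end_io() with an
explicit errno, which looks like what the two commits each wanted.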
--D
> diff --cc fs/iomap/bio.c
> index edd908183058f,f989ffcaac96d..0000000000000
> --- a/fs/iomap/bio.c
> +++ b/fs/iomap/bio.c
> @@@ -8,66 -9,33 +9,78 @@@
> #include "internal.h"
> #include "trace.h"
>
> +static DEFINE_SPINLOCK(failed_read_lock);
> +static struct bio_list failed_read_list = BIO_EMPTY_LIST;
> +
> - static void __iomap_read_end_io(struct bio *bio)
> + static u32 __iomap_read_end_io(struct bio *bio, int error)
> {
> - int error = blk_status_to_errno(bio->bi_status);
> struct folio_iter fi;
> + u32 folio_count = 0;
>
> - bio_for_each_folio_all(fi, bio)
> + bio_for_each_folio_all(fi, bio) {
> iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
> + folio_count++;
> + }
> + if (bio_integrity(bio))
> + fs_bio_integrity_free(bio);
> bio_put(bio);
> + return folio_count;
> }
>
> +static void
> +iomap_fail_reads(
> + struct work_struct *work)
> +{
> + struct bio *bio;
> + struct bio_list tmp = BIO_EMPTY_LIST;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&failed_read_lock, flags);
> + bio_list_merge_init(&tmp, &failed_read_list);
> + spin_unlock_irqrestore(&failed_read_lock, flags);
> +
> + while ((bio = bio_list_pop(&tmp)) != NULL) {
> - __iomap_read_end_io(bio);
> ++ __iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
> + cond_resched();
> + }
> +}
> +
> +static DECLARE_WORK(failed_read_work, iomap_fail_reads);
> +
> +static void iomap_fail_buffered_read(struct bio *bio)
> +{
> + unsigned long flags;
> +
> + /*
> + * Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions
> + * in the fserror code. The caller no longer owns the bio reference
> + * after the spinlock drops.
> + */
> + spin_lock_irqsave(&failed_read_lock, flags);
> + if (bio_list_empty(&failed_read_list))
> + WARN_ON_ONCE(!schedule_work(&failed_read_work));
> + bio_list_add(&failed_read_list, bio);
> + spin_unlock_irqrestore(&failed_read_lock, flags);
> +}
> +
> static void iomap_read_end_io(struct bio *bio)
> {
> - __iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
> + if (bio->bi_status) {
> + iomap_fail_buffered_read(bio);
> + return;
> + }
> +
> - __iomap_read_end_io(bio);
> ++ __iomap_read_end_io(bio, 0);
> }
>
> - static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
> ++
> + u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend)
> + {
> + return __iomap_read_end_io(&ioend->io_bio, ioend->io_error);
> + }
> +
> + static void iomap_bio_submit_read(const struct iomap_iter *iter,
> + struct iomap_read_folio_ctx *ctx)
> {
> struct bio *bio = ctx->read_ctx;
>
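
Zooming out from the conflict markers: the fixes commit is the classic
pattern for getting out of bio completion (irq) context before taking
locks that can sleep. Stash failed bios on a spinlock-protected list,
kick a work item only on the empty->nonempty transition, and let the
worker drain a private copy of the list. A minimal standalone sketch of
that pattern, with made-up names (punt_failed_bio, failed_bios, etc.)
instead of the real fs/iomap/bio.c symbols:

#include <linux/bio.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(failed_lock);
static struct bio_list failed_bios = BIO_EMPTY_LIST;

/* Made-up stand-in for the real completion work. */
static void complete_one_failed_bio(struct bio *bio)
{
	pr_err("read failed: %d\n", blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}

static void drain_failed_bios(struct work_struct *work)
{
	struct bio_list tmp = BIO_EMPTY_LIST;
	struct bio *bio;
	unsigned long flags;

	/* Steal the whole list; new failures queue up behind us. */
	spin_lock_irqsave(&failed_lock, flags);
	bio_list_merge_init(&tmp, &failed_bios);
	spin_unlock_irqrestore(&failed_lock, flags);

	/* Process context, so sleeping locks are fair game here. */
	while ((bio = bio_list_pop(&tmp)) != NULL) {
		complete_one_failed_bio(bio);
		cond_resched();
	}
}
static DECLARE_WORK(failed_work, drain_failed_bios);

static void punt_failed_bio(struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&failed_lock, flags);
	/* A non-empty list means a drain is already queued. */
	if (bio_list_empty(&failed_bios))
		schedule_work(&failed_work);
	bio_list_add(&failed_bios, bio);
	spin_unlock_irqrestore(&failed_lock, flags);
}

The worker can race with punt_failed_bio() adding new entries, but that
is fine: once the work item starts executing it is no longer pending,
so the next empty->nonempty transition just queues it again and nothing
gets stranded on the list.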