diff --git a/include/os/linux/kernel/linux/mod_compat.h b/include/os/linux/kernel/linux/mod_compat.h index 8e20a9613539..039865b703ef 100644 --- a/include/os/linux/kernel/linux/mod_compat.h +++ b/include/os/linux/kernel/linux/mod_compat.h @@ -68,6 +68,7 @@ enum scope_prefix_types { zfs_trim, zfs_txg, zfs_vdev, + zfs_vdev_disk, zfs_vdev_file, zfs_vdev_mirror, zfs_vnops, diff --git a/include/sys/abd.h b/include/sys/abd.h index b48dc36423f7..1aae5cc8f6bb 100644 --- a/include/sys/abd.h +++ b/include/sys/abd.h @@ -211,14 +211,6 @@ abd_get_size(abd_t *abd) void abd_init(void); void abd_fini(void); -/* - * Linux ABD bio functions - */ -#if defined(__linux__) && defined(_KERNEL) -unsigned int abd_bio_map_off(struct bio *, abd_t *, unsigned int, size_t); -unsigned long abd_nr_pages_off(abd_t *, unsigned int, size_t); -#endif - #ifdef __cplusplus } #endif diff --git a/man/man4/zfs.4 b/man/man4/zfs.4 index 5daf27e9d536..b430a3078438 100644 --- a/man/man4/zfs.4 +++ b/man/man4/zfs.4 @@ -2,6 +2,7 @@ .\" Copyright (c) 2013 by Turbo Fredriksson . All rights reserved. .\" Copyright (c) 2019, 2021 by Delphix. All rights reserved. .\" Copyright (c) 2019 Datto Inc. +.\" Copyright (c) 2023 Klara, Inc. .\" The contents of this file are subject to the terms of the Common Development .\" and Distribution License (the "License"). You may not use this file except .\" in compliance with the License. You can obtain a copy of the license at @@ -15,7 +16,7 @@ .\" own identifying information: .\" Portions Copyright [yyyy] [name of copyright owner] .\" -.Dd July 21, 2023 +.Dd November 27, 2023 .Dt ZFS 4 .Os . @@ -1353,6 +1354,13 @@ _ 4 Driver No driver retries on driver errors. .TE . +.It Sy zfs_vdev_disk_max_segs Ns = Ns Sy 0 Pq uint +Maximum number of segments to add to a BIO (min 4). +If this is higher than the maximum allowed by the device queue or the kernel +itself, it will be clamped. +Setting it to zero will cause the kernel's ideal size to be used. +This parameter only applies on Linux. +. .It Sy zfs_expire_snapshot Ns = Ns Sy 300 Ns s Pq int Time before expiring .Pa .zfs/snapshot . diff --git a/module/os/linux/zfs/abd_os.c b/module/os/linux/zfs/abd_os.c index 90ff84cb7668..f5b389d87115 100644 --- a/module/os/linux/zfs/abd_os.c +++ b/module/os/linux/zfs/abd_os.c @@ -1028,150 +1028,8 @@ abd_cache_reap_now(void) { } -#if defined(_KERNEL) -/* - * bio_nr_pages for ABD. 
- * @off is the offset in @abd - */ -unsigned long -abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off) -{ - unsigned long pos; - - if (abd_is_gang(abd)) { - unsigned long count = 0; - - for (abd_t *cabd = abd_gang_get_offset(abd, &off); - cabd != NULL && size != 0; - cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) { - ASSERT3U(off, <, cabd->abd_size); - int mysize = MIN(size, cabd->abd_size - off); - count += abd_nr_pages_off(cabd, mysize, off); - size -= mysize; - off = 0; - } - return (count); - } - - if (abd_is_linear(abd)) - pos = (unsigned long)abd_to_buf(abd) + off; - else - pos = ABD_SCATTER(abd).abd_offset + off; - - return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) - - (pos >> PAGE_SHIFT)); -} - -static unsigned int -bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size) -{ - unsigned int offset, size, i; - struct page *page; - - offset = offset_in_page(buf_ptr); - for (i = 0; i < bio->bi_max_vecs; i++) { - size = PAGE_SIZE - offset; - - if (bio_size <= 0) - break; - - if (size > bio_size) - size = bio_size; - - if (is_vmalloc_addr(buf_ptr)) - page = vmalloc_to_page(buf_ptr); - else - page = virt_to_page(buf_ptr); - - /* - * Some network related block device uses tcp_sendpage, which - * doesn't behave well when using 0-count page, this is a - * safety net to catch them. - */ - ASSERT3S(page_count(page), >, 0); - - if (bio_add_page(bio, page, size, offset) != size) - break; - - buf_ptr += size; - bio_size -= size; - offset = 0; - } - - return (bio_size); -} - -/* - * bio_map for gang ABD. - */ -static unsigned int -abd_gang_bio_map_off(struct bio *bio, abd_t *abd, - unsigned int io_size, size_t off) -{ - ASSERT(abd_is_gang(abd)); - - for (abd_t *cabd = abd_gang_get_offset(abd, &off); - cabd != NULL; - cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) { - ASSERT3U(off, <, cabd->abd_size); - int size = MIN(io_size, cabd->abd_size - off); - int remainder = abd_bio_map_off(bio, cabd, size, off); - io_size -= (size - remainder); - if (io_size == 0 || remainder > 0) - return (io_size); - off = 0; - } - ASSERT0(io_size); - return (io_size); -} - -/* - * bio_map for ABD. - * @off is the offset in @abd - * Remaining IO size is returned - */ -unsigned int -abd_bio_map_off(struct bio *bio, abd_t *abd, - unsigned int io_size, size_t off) -{ - struct abd_iter aiter; - - ASSERT3U(io_size, <=, abd->abd_size - off); - if (abd_is_linear(abd)) - return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size)); - - ASSERT(!abd_is_linear(abd)); - if (abd_is_gang(abd)) - return (abd_gang_bio_map_off(bio, abd, io_size, off)); - - abd_iter_init(&aiter, abd); - abd_iter_advance(&aiter, off); - - for (int i = 0; i < bio->bi_max_vecs; i++) { - struct page *pg; - size_t len, sgoff, pgoff; - struct scatterlist *sg; - - if (io_size <= 0) - break; - - sg = aiter.iter_sg; - sgoff = aiter.iter_offset; - pgoff = sgoff & (PAGESIZE - 1); - len = MIN(io_size, PAGESIZE - pgoff); - ASSERT(len > 0); - - pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT); - if (bio_add_page(bio, pg, len, pgoff) != len) - break; - - io_size -= len; - abd_iter_advance(&aiter, len); - } - - return (io_size); -} +#ifdef _KERNEL /* Tunable Parameters */ module_param(zfs_abd_scatter_enabled, int, 0644); MODULE_PARM_DESC(zfs_abd_scatter_enabled, diff --git a/module/os/linux/zfs/vdev_disk.c b/module/os/linux/zfs/vdev_disk.c index 48ac55f07034..d2317927a303 100644 --- a/module/os/linux/zfs/vdev_disk.c +++ b/module/os/linux/zfs/vdev_disk.c @@ -24,6 +24,7 @@ * Rewritten for Linux by Brian Behlendorf . * LLNL-CODE-403049. 
* Copyright (c) 2012, 2019 by Delphix. All rights reserved. + * Copyright (c) 2023, Klara Inc. */ #include @@ -37,6 +38,7 @@ #include #include #include +#include #ifdef HAVE_LINUX_BLK_CGROUP_HEADER #include #endif @@ -46,6 +48,13 @@ typedef struct vdev_disk { krwlock_t vd_lock; } vdev_disk_t; +/* + * Maximum number of segments to add to a bio (min 4). If this is higher than + * the maximum allowed by the device queue or the kernel itself, it will be + * clamped. Setting it to zero will cause the kernel's ideal size to be used. + */ +uint_t vdev_disk_max_segs = 0; + /* * Unique identifier for the exclusive vdev holder. */ @@ -63,16 +72,7 @@ static uint_t zfs_vdev_open_timeout_ms = 1000; */ #define EFI_MIN_RESV_SIZE (16 * 1024) -/* - * Virtual device vector for disks. - */ -typedef struct dio_request { - zio_t *dr_zio; /* Parent ZIO */ - atomic_t dr_ref; /* References */ - int dr_error; /* Bio error */ - int dr_bio_count; /* Count of bio's */ - struct bio *dr_bio[]; /* Attached bio's */ -} dio_request_t; + /* * BIO request failfast mask. @@ -440,85 +440,6 @@ vdev_disk_close(vdev_t *v) v->vdev_tsd = NULL; } -static dio_request_t * -vdev_disk_dio_alloc(int bio_count) -{ - dio_request_t *dr = kmem_zalloc(sizeof (dio_request_t) + - sizeof (struct bio *) * bio_count, KM_SLEEP); - atomic_set(&dr->dr_ref, 0); - dr->dr_bio_count = bio_count; - dr->dr_error = 0; - - for (int i = 0; i < dr->dr_bio_count; i++) - dr->dr_bio[i] = NULL; - - return (dr); -} - -static void -vdev_disk_dio_free(dio_request_t *dr) -{ - int i; - - for (i = 0; i < dr->dr_bio_count; i++) - if (dr->dr_bio[i]) - bio_put(dr->dr_bio[i]); - - kmem_free(dr, sizeof (dio_request_t) + - sizeof (struct bio *) * dr->dr_bio_count); -} - -static void -vdev_disk_dio_get(dio_request_t *dr) -{ - atomic_inc(&dr->dr_ref); -} - -static void -vdev_disk_dio_put(dio_request_t *dr) -{ - int rc = atomic_dec_return(&dr->dr_ref); - - /* - * Free the dio_request when the last reference is dropped and - * ensure zio_interpret is called only once with the correct zio - */ - if (rc == 0) { - zio_t *zio = dr->dr_zio; - int error = dr->dr_error; - - vdev_disk_dio_free(dr); - - if (zio) { - zio->io_error = error; - ASSERT3S(zio->io_error, >=, 0); - if (zio->io_error) - vdev_disk_error(zio); - - zio_delay_interrupt(zio); - } - } -} - -BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error) -{ - dio_request_t *dr = bio->bi_private; - - if (dr->dr_error == 0) { -#ifdef HAVE_1ARG_BIO_END_IO_T - dr->dr_error = BIO_END_IO_ERROR(bio); -#else - if (error) - dr->dr_error = -(error); - else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) - dr->dr_error = EIO; -#endif - } - - /* Drop reference acquired by __vdev_disk_physio */ - vdev_disk_dio_put(dr); -} - static inline void vdev_submit_bio_impl(struct bio *bio) { @@ -670,126 +591,486 @@ vdev_bio_alloc(struct block_device *bdev, gfp_t gfp_mask, return (bio); } -static inline unsigned int -vdev_bio_max_segs(zio_t *zio, int bio_size, uint64_t abd_offset) +static inline uint_t +vdev_bio_max_segs(struct block_device *bdev) { - unsigned long nr_segs = abd_nr_pages_off(zio->io_abd, - bio_size, abd_offset); + /* + * Smallest of the device max segs and the tuneable max segs. Minimum + * 4, so there's room to finish split pages if they come up. + */ + const uint_t dev_max_segs = queue_max_segments(bdev_get_queue(bdev)); + const uint_t tune_max_segs = (vdev_disk_max_segs > 0) ? 
+ MAX(4, vdev_disk_max_segs) : dev_max_segs; + const uint_t max_segs = MIN(tune_max_segs, dev_max_segs); #ifdef HAVE_BIO_MAX_SEGS - return (bio_max_segs(nr_segs)); + return (bio_max_segs(max_segs)); #else - return (MIN(nr_segs, BIO_MAX_PAGES)); + return (MIN(max_segs, BIO_MAX_PAGES)); #endif } + +/* + * Virtual block IO object (VBIO) + * + * Linux block IO (BIO) objects have a limit on how many data segments (pages) + * they can hold. Depending on how they're allocated and structured, a large + * ZIO can require more than one BIO to be submitted to the kernel, which then + * all have to complete before we can return the completed ZIO back to ZFS. + * + * A VBIO is a wrapper around multiple BIOs, carrying everything needed to + * translate a ZIO down into the kernel block layer and back again. + * + * Note that these are only used for data ZIOs (read/write). Meta-operations + * (flush/trim) don't need multiple BIOs and so can just make the call + * directly. + */ +typedef struct { + zio_t *vbio_zio; /* parent zio */ + + struct block_device *vbio_bdev; /* blockdev to submit bios to */ + + abd_t *vbio_abd; /* abd carrying borrowed linear buf */ + + atomic_t vbio_ref; /* bio refcount */ + int vbio_error; /* error from failed bio */ + + uint_t vbio_max_segs; /* max segs per bio */ + uint_t vbio_max_bios; /* max bios (size of vbio_bio) */ + + uint_t vbio_npages; /* pages remaining */ + + uint64_t vbio_offset; /* start offset of next bio */ + + uint_t vbio_nbios; /* allocated bios */ + uint_t vbio_cur; /* current bio */ + struct bio *vbio_bio[]; /* attached bios */ +} vbio_t; + +static vbio_t * +vbio_alloc(zio_t *zio, struct block_device *bdev, uint_t pages) +{ + /* Max segments we need in each BIO to take all pages */ + uint_t max_segs = MIN(pages, vdev_bio_max_segs(bdev)); + + /* Number of BIOs we need, at max_segs segments each */ + int max_bios = (pages + max_segs - 1) / max_segs; + + vbio_t *vbio = kmem_zalloc(sizeof (vbio_t) + + sizeof (struct bio *) * max_bios, KM_SLEEP); + + vbio->vbio_zio = zio; + vbio->vbio_bdev = bdev; + atomic_set(&vbio->vbio_ref, 0); + vbio->vbio_max_segs = max_segs; + vbio->vbio_max_bios = max_bios; + vbio->vbio_npages = pages; + vbio->vbio_cur = 0; + vbio->vbio_offset = zio->io_offset; + + return (vbio); +} + static int -__vdev_disk_physio(struct block_device *bdev, zio_t *zio, - size_t io_size, uint64_t io_offset, int rw, int flags) +vbio_add_page(vbio_t *vbio, struct page *page, uint_t size, uint_t offset) +{ + struct bio *bio; + + /* + * Weird housekeeping error; shouldn't happen, but try and save the + * furniture if it happens in production. 
+ */ + ASSERT3U(vbio->vbio_npages, >, 0); + if (vbio->vbio_npages == 0) + return (SET_ERROR(ENOMEM)); + + for (;;) { + bio = vbio->vbio_bio[vbio->vbio_cur]; + if (bio == NULL) { + /* New BIO, allocate and set up */ + bio = vdev_bio_alloc(vbio->vbio_bdev, GFP_NOIO, + MIN(vbio->vbio_npages, vbio->vbio_max_segs)); + if (unlikely(bio == NULL)) + return (SET_ERROR(ENOMEM)); + BIO_BI_SECTOR(bio) = vbio->vbio_offset >> 9; + vbio->vbio_bio[vbio->vbio_cur] = bio; + vbio->vbio_nbios++; + } + + if (bio_add_page(bio, page, size, offset) == size) { + vbio->vbio_npages--; + return (0); + } + + /* No room, set up for a new BIO and loop */ + vbio->vbio_offset += BIO_BI_SIZE(bio); + vbio->vbio_cur++; + + VERIFY3U(vbio->vbio_cur, <, vbio->vbio_max_bios); + } +} + +BIO_END_IO_PROTO(vdev_disk_io_rw_completion, bio, error); +static void vbio_put(vbio_t *vbio); + +static void +vbio_submit(vbio_t *vbio, int flags) { - dio_request_t *dr; - uint64_t abd_offset; - uint64_t bio_offset; - int bio_size; - int bio_count = 16; - int error = 0; struct blk_plug plug; - unsigned short nr_vecs; /* - * Accessing outside the block device is never allowed. + * Take a reference for each BIO we're about to submit, plus one to + * protect us from BIOs completing before we're done submitting them + * all, causing vbio_put() to free vbio out from under us and/or the + * zio to be returned before all its IO has completed. */ - if (io_offset + io_size > bdev->bd_inode->i_size) { - vdev_dbgmsg(zio->io_vd, - "Illegal access %llu size %llu, device size %llu", - (u_longlong_t)io_offset, - (u_longlong_t)io_size, - (u_longlong_t)i_size_read(bdev->bd_inode)); - return (SET_ERROR(EIO)); + atomic_set(&vbio->vbio_ref, vbio->vbio_nbios + 1); + + /* + * If we're submitting more than one BIO, inform the block layer so + * it can batch them if it wants. + */ + if (vbio->vbio_nbios > 1) + blk_start_plug(&plug); + + /* Submit all the BIOs */ + for (int i = 0; i < vbio->vbio_nbios; i++) { + struct bio *bio = vbio->vbio_bio[i]; + ASSERT3P(bio, !=, NULL); + + bio->bi_end_io = vdev_disk_io_rw_completion; + bio->bi_private = vbio; + bio_set_op_attrs(bio, + vbio->vbio_zio->io_type == ZIO_TYPE_WRITE ? + WRITE : READ, flags); + vdev_submit_bio(bio); } -retry: - dr = vdev_disk_dio_alloc(bio_count); + /* Finish the batch */ + if (vbio->vbio_nbios > 1) + blk_finish_plug(&plug); - if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)) && - zio->io_vd->vdev_failfast == B_TRUE) { - bio_set_flags_failfast(bdev, &flags, zfs_vdev_failfast_mask & 1, - zfs_vdev_failfast_mask & 2, zfs_vdev_failfast_mask & 4); + /* Release the extra reference */ + vbio_put(vbio); +} + +static void +vbio_return_abd(vbio_t *vbio) +{ + zio_t *zio = vbio->vbio_zio; + if (vbio->vbio_abd == NULL || vbio->vbio_abd == zio->io_abd) + return; + + /* + * If we copied the ABD before issuing it, clean up and return the copy + * to the original ABD, with changes if appropriate. 
+ */ + void *buf = abd_to_buf(vbio->vbio_abd); + abd_free(vbio->vbio_abd); + vbio->vbio_abd = NULL; + + if (zio->io_type == ZIO_TYPE_READ) + abd_return_buf_copy(zio->io_abd, buf, zio->io_size); + else + abd_return_buf(zio->io_abd, buf, zio->io_size); +} + +static void +vbio_free(vbio_t *vbio) +{ + VERIFY0(atomic_read(&vbio->vbio_ref)); + + for (int i = 0; i < vbio->vbio_nbios; i++) + bio_put(vbio->vbio_bio[i]); + + vbio_return_abd(vbio); + + kmem_free(vbio, sizeof (vbio_t) + + sizeof (struct bio *) * vbio->vbio_max_bios); +} + +static void +vbio_put(vbio_t *vbio) +{ + if (atomic_dec_return(&vbio->vbio_ref) > 0) + return; + + /* + * This was the last reference, so the entire IO is completed. Clean + * up and submit it for processing. + */ + + /* + * Get any data buf back to the original ABD, if necessary. We do this + * now so we can get the ZIO into the pipeline as quickly as possible, + * and then do the remaining cleanup after. + */ + vbio_return_abd(vbio); + + zio_t *zio = vbio->vbio_zio; + + /* + * Set the overall error. If multiple BIOs returned an error, only the + * first will be taken; the others are dropped (see + * vdev_disk_io_rw_completion()). It's pretty much impossible for + * multiple IOs to the same device to fail with different errors, so + * there's no real risk. + */ + zio->io_error = vbio->vbio_error; + if (zio->io_error) + vdev_disk_error(zio); + + /* All done, submit for processing */ + zio_delay_interrupt(zio); + + /* Finish cleanup */ + vbio_free(vbio); +} + +BIO_END_IO_PROTO(vdev_disk_io_rw_completion, bio, error) +{ + vbio_t *vbio = bio->bi_private; + + if (vbio->vbio_error == 0) { +#ifdef HAVE_1ARG_BIO_END_IO_T + vbio->vbio_error = BIO_END_IO_ERROR(bio); +#else + if (error) + vbio->vbio_error = -(error); + else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + vbio->vbio_error = EIO; +#endif } - dr->dr_zio = zio; + /* Drop this BIO's reference acquired by vbio_submit() */ + vbio_put(vbio); +} + +static inline void +_buf_to_page_and_offset(const void *buf, struct page **pagep, uint_t *offp) +{ + struct page *page = is_vmalloc_addr(buf) ? + vmalloc_to_page(buf) : virt_to_page(buf); + + if (!PageCompound(page)) { + /* Single boring page, nothing more to see */ + *pagep = page; + *offp = offset_in_page(buf); + return; + } /* - * Since bio's can have up to BIO_MAX_PAGES=256 iovec's, each of which - * is at least 512 bytes and at most PAGESIZE (typically 4K), one bio - * can cover at least 128KB and at most 1MB. When the required number - * of iovec's exceeds this, we are forced to break the IO in multiple - * bio's and wait for them all to complete. This is likely if the - * recordsize property is increased beyond 1MB. The default - * bio_count=16 should typically accommodate the maximum-size zio of - * 16MB. + * This page is part of a "compound page", which is a group of pages + * that can be referenced from a single struct page *. It's organised as + * a "head" page, followed by a series of "tail" pages. + * + * In OpenZFS, compound pages are allocated using the __GFP_COMP flag, + * which we get from scatter ABDs and SPL vmalloc slabs (ie >16K + * allocations). So a great many of the IO buffers we get are going to + * be of this type. + * + * The tail pages are just regular PAGE_SIZE pages, and we can just + * load them into the BIO the same as we would for non-compound pages + * above, and it all works just fine. However, the head page has length + * covering itself and all the tail pages. 
If our buffer spans multiple + * pages, then we can load the head page and a >PAGE_SIZE length into + * the BIO, which is far more efficient. + * + * To do this, we need to calculate the offset of the buffer from the + * head page (offset_in_page() is the offset within its PAGE_SIZE'd + * page ie just a simple ~(PAGE_SIZE-1) mask). */ - abd_offset = 0; - bio_offset = io_offset; - bio_size = io_size; - for (int i = 0; i <= dr->dr_bio_count; i++) { + *pagep = compound_head(page); + *offp = (uint_t)((uintptr_t)(buf) & (page_size(*pagep)-1)); +} - /* Finished constructing bio's for given buffer */ - if (bio_size <= 0) - break; +/* + * Iterator callback to count ABD pages and check their size & alignment. + * + * On Linux, each BIO segment can take a page pointer, and an offset+length of + * the data within that page. A page can be arbitrarily large ("compound" + * pages) but we still have to ensure the data portion is correctly sized and + * aligned to the logical block size, to ensure that if the kernel wants to + * split the BIO, the two halves will still be properly aligned. + */ +typedef struct { + uint_t bmask; + uint_t npages; + uint_t end; +} vdev_disk_check_pages_t; + +static int +vdev_disk_check_pages_cb(void *buf, size_t len, void *priv) +{ + vdev_disk_check_pages_t *s = priv; + + struct page *page; + uint_t off; + + /* + * If we didn't finish on a block size boundary last time, then there + * would be a gap if we tried to use this ABD as-is, so abort. + */ + if (s->end != 0) + return (1); + + /* + * Note if we're taking less than a full block, so we can check it + * above on the next call. + */ + s->end = len & s->bmask; + + while (len > 0) { + _buf_to_page_and_offset(buf, &page, &off); /* - * If additional bio's are required, we have to retry, but - * this should be rare - see the comment above. + * All blocks after the first must start on a block size + * boundary. */ - if (dr->dr_bio_count == i) { - vdev_disk_dio_free(dr); - bio_count *= 2; - goto retry; - } + if (s->npages != 0 && (off & s->bmask) != 0) + return (1); - nr_vecs = vdev_bio_max_segs(zio, bio_size, abd_offset); - dr->dr_bio[i] = vdev_bio_alloc(bdev, GFP_NOIO, nr_vecs); - if (unlikely(dr->dr_bio[i] == NULL)) { - vdev_disk_dio_free(dr); - return (SET_ERROR(ENOMEM)); - } + uint_t take = MIN(len, page_size(page)-off); - /* Matching put called by vdev_disk_physio_completion */ - vdev_disk_dio_get(dr); + buf += take; + len -= take; - BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9; - dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion; - dr->dr_bio[i]->bi_private = dr; - bio_set_op_attrs(dr->dr_bio[i], rw, flags); + s->npages++; + } - /* Remaining size is returned to become the new size */ - bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd, - bio_size, abd_offset); + return (0); +} + +/* + * Check if we can submit the pages in this ABD to the kernel as-is. Returns + * the number of pages, or 0 if it can't be submitted like this. + */ +static uint_t +vdev_disk_check_pages(abd_t *abd, uint64_t size, uint_t lbs) +{ + vdev_disk_check_pages_t s = { + .bmask = lbs-1, + .npages = 0, + .end = 0 + }; + + if (abd_iterate_func(abd, 0, size, vdev_disk_check_pages_cb, &s)) + return (0); + + return (s.npages); +} - /* Advance in buffer and construct another bio if needed */ - abd_offset += BIO_BI_SIZE(dr->dr_bio[i]); - bio_offset += BIO_BI_SIZE(dr->dr_bio[i]); +/* Iterator callback to submit ABD pages to the vbio. 
*/ +static int +vdev_disk_fill_vbio_cb(void *buf, size_t len, void *priv) +{ + vbio_t *vbio = priv; + int err; + + struct page *page; + uint_t off; + + while (len > 0) { + _buf_to_page_and_offset(buf, &page, &off); + + uint_t take = MIN(len, page_size(page)-off); + + err = vbio_add_page(vbio, page, take, off); + if (err != 0) + return (err); + + buf += take; + len -= take; } - /* Extra reference to protect dio_request during vdev_submit_bio */ - vdev_disk_dio_get(dr); + return (0); +} - if (dr->dr_bio_count > 1) - blk_start_plug(&plug); +static int +vdev_disk_io_rw(zio_t *zio) +{ + vdev_t *v = zio->io_vd; + vdev_disk_t *vd = v->vdev_tsd; + struct block_device *bdev = vd->vd_bdev; + int flags = 0; + + /* + * Accessing outside the block device is never allowed. + */ + if (zio->io_offset + zio->io_size > bdev->bd_inode->i_size) { + vdev_dbgmsg(zio->io_vd, + "Illegal access %llu size %llu, device size %llu", + (u_longlong_t)zio->io_offset, + (u_longlong_t)zio->io_size, + (u_longlong_t)i_size_read(bdev->bd_inode)); + return (SET_ERROR(EIO)); + } - /* Submit all bio's associated with this dio */ - for (int i = 0; i < dr->dr_bio_count; i++) { - if (dr->dr_bio[i]) - vdev_submit_bio(dr->dr_bio[i]); + if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)) && + v->vdev_failfast == B_TRUE) { + bio_set_flags_failfast(bdev, &flags, zfs_vdev_failfast_mask & 1, + zfs_vdev_failfast_mask & 2, zfs_vdev_failfast_mask & 4); } - if (dr->dr_bio_count > 1) - blk_finish_plug(&plug); + /* + * Count the number of pages in the incoming ABD, and check its + * alignment. If any part of it would require submitting a page that is + * not aligned to the logical block size, then we take a copy into a + * linear buffer and submit that instead. This should be impossible on + * a 512b LBS, and fairly rare on 4K, usually requiring + * abnormally-small data blocks (eg gang blocks) mixed into the same + * ABD as larger ones (eg aggregated). + */ + uint_t lbs = bdev_logical_block_size(bdev); + + abd_t *abd = zio->io_abd; + uint_t npages = vdev_disk_check_pages(abd, zio->io_size, lbs); + if (npages == 0) { + void *buf; + /* We can't safely use pages from this ABD, so use a copy */ + if (zio->io_type == ZIO_TYPE_READ) + buf = abd_borrow_buf(zio->io_abd, zio->io_size); + else + buf = abd_borrow_buf_copy(zio->io_abd, zio->io_size); + + /* + * New allocation can still span multiple pages, so we need to + * count it properly. We wrap it in an abd_t so we can just use + * the iterators to count and to fill the vbio later. + */ + abd = abd_get_from_buf(buf, zio->io_size); + npages = vdev_disk_check_pages(abd, zio->io_size, lbs); - vdev_disk_dio_put(dr); + /* + * Zero here would mean the borrowed copy has an invalid + * alignment too, which would mean we've somehow been passed a + * linear ABD with an interior page that has a non-zero offset + * or a size not a multiple of PAGE_SIZE. This is not possible. + * It would mean either zio_buf_alloc() or its underlying + * allocators have done something extremely strange, or our + * math in vdev_disk_check_pages() is wrong. In either case, + * something is seriously wrong and it's not safe to continue. 
+ */ + VERIFY(npages); + } + /* Allocate vbio, with a pointer to the borrowed ABD if necessary */ + int error = 0; + vbio_t *vbio = vbio_alloc(zio, bdev, npages); + if (abd != zio->io_abd) + vbio->vbio_abd = abd; + + /* Fill it with pages */ + error = abd_iterate_func(abd, 0, zio->io_size, + vdev_disk_fill_vbio_cb, vbio); + if (error != 0) { + vbio_free(vbio); + return (error); + } + + vbio_submit(vbio, flags); return (error); } @@ -867,7 +1148,7 @@ vdev_disk_io_start(zio_t *zio) { vdev_t *v = zio->io_vd; vdev_disk_t *vd = v->vdev_tsd; - int rw, error; + int error; /* * If the vdev is closed, it's likely in the REMOVED or FAULTED state. @@ -930,13 +1211,6 @@ vdev_disk_io_start(zio_t *zio) rw_exit(&vd->vd_lock); zio_execute(zio); return; - case ZIO_TYPE_WRITE: - rw = WRITE; - break; - - case ZIO_TYPE_READ: - rw = READ; - break; case ZIO_TYPE_TRIM: zio->io_error = vdev_disk_io_trim(zio); @@ -944,23 +1218,34 @@ vdev_disk_io_start(zio_t *zio) zio_interrupt(zio); return; - default: + case ZIO_TYPE_READ: + case ZIO_TYPE_WRITE: + zio->io_target_timestamp = zio_handle_io_delay(zio); + error = vdev_disk_io_rw(zio); rw_exit(&vd->vd_lock); - zio->io_error = SET_ERROR(ENOTSUP); - zio_interrupt(zio); + if (error) { + zio->io_error = error; + zio_interrupt(zio); + } return; - } - zio->io_target_timestamp = zio_handle_io_delay(zio); - error = __vdev_disk_physio(vd->vd_bdev, zio, - zio->io_size, zio->io_offset, rw, 0); - rw_exit(&vd->vd_lock); + default: + /* + * Getting here means our parent vdev has made a very strange + * request of us, and shouldn't happen. Assert here to force a + * crash in dev builds, but in production return the IO + * unhandled. The pool will likely suspend anyway but that's + * nicer than crashing the kernel. + */ + ASSERT3S(zio->io_type, ==, -1); - if (error) { - zio->io_error = error; + rw_exit(&vd->vd_lock); + zio->io_error = SET_ERROR(ENOTSUP); zio_interrupt(zio); return; } + + __builtin_unreachable(); } static void @@ -1103,3 +1388,6 @@ ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, open_timeout_ms, UINT, ZMOD_RW, ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, failfast_mask, UINT, ZMOD_RW, "Defines failfast mask: 1 - device, 2 - transport, 4 - driver"); + +ZFS_MODULE_PARAM(zfs_vdev_disk, vdev_disk_, max_segs, UINT, ZMOD_RW, + "Maximum number of data segments to add to an IO request (min 4)");
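The segment and BIO counts above (vdev_bio_max_segs() and vbio_alloc()) reduce to a small piece of arithmetic. A minimal userspace sketch follows; the 128-segment queue limit, the 16M zio and the assumption of a page-aligned ABD are illustrative values only, and the kernel's own bio_max_segs()/BIO_MAX_PAGES clamp is left out:

/*
 * Userspace sketch of the sizing arithmetic in vdev_bio_max_segs() and
 * vbio_alloc(). The queue limit and zio size are made-up values, not
 * anything queried from a real device.
 */
#include <stdio.h>
#include <stdint.h>

#define	PAGE_SIZE	4096
#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	unsigned int dev_max_segs = 128;	/* stand-in for queue_max_segments() */
	unsigned int vdev_disk_max_segs = 0;	/* the new tunable; 0 = use device limit */
	uint64_t io_size = 16ULL * 1024 * 1024;	/* a hypothetical 16M zio */

	/* vdev_bio_max_segs(): tunable clamped to >= 4 and <= the device limit */
	unsigned int tune = (vdev_disk_max_segs > 0) ?
	    MAX(4, vdev_disk_max_segs) : dev_max_segs;
	unsigned int max_segs = MIN(tune, dev_max_segs);

	/* vbio_alloc(): segments per BIO, and how many BIOs this zio needs */
	unsigned int pages = (unsigned int)((io_size + PAGE_SIZE - 1) / PAGE_SIZE);
	unsigned int segs_per_bio = MIN(pages, max_segs);
	unsigned int max_bios = (pages + segs_per_bio - 1) / segs_per_bio;

	printf("%u pages -> up to %u BIOs of %u segments each\n",
	    pages, max_bios, segs_per_bio);
	return (0);
}

With these numbers the zio covers 4096 pages and fits in 32 BIOs of 128 segments each; raising the tunable above the device limit changes nothing, since the device limit always wins.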
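The reference scheme in vbio_submit()/vbio_put() can be sketched the same way: one reference per submitted BIO plus one extra held across submission, so the zio cannot be completed (nor the vbio freed) until the last BIO finishes and the submitter has dropped its own reference. A userspace sketch with completions simulated inline; in the kernel they arrive asynchronously via vdev_disk_io_rw_completion():

/*
 * Userspace sketch of the vbio reference counting. The completion calls
 * below are synchronous stand-ins for asynchronous BIO end_io callbacks.
 */
#include <stdio.h>
#include <stdatomic.h>

static atomic_int ref;

static void
put(const char *who)
{
	/* atomic_fetch_sub() returns the old value; old - 1 is the new count */
	if (atomic_fetch_sub(&ref, 1) - 1 > 0) {
		printf("%s: still in flight\n", who);
		return;
	}
	printf("%s: last reference, complete the zio and free the vbio\n", who);
}

int
main(void)
{
	int nbios = 3;

	/* vbio_submit(): one reference per BIO, plus one for the submitter */
	atomic_store(&ref, nbios + 1);

	/* each BIO completion drops one reference */
	for (int i = 0; i < nbios; i++)
		put("bio completion");

	/* the submitter drops its extra reference once submission is done */
	put("submitter");
	return (0);
}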
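The rule enforced by vdev_disk_check_pages_cb() is that only the last chunk of the ABD may end short of a logical block boundary, and every chunk after the first must start on one; anything else forces the linear-copy path. A simplified userspace sketch, treating each linear chunk as a single page and using a hypothetical gang-like chunk list on a 4K-LBS device:

/*
 * Userspace sketch of the alignment rule in vdev_disk_check_pages_cb().
 * The chunk lists are invented examples, not real ZFS buffers.
 */
#include <stdio.h>
#include <stddef.h>

struct chunk { size_t off; size_t len; };	/* offset within its page, length */

static int
check(const struct chunk *c, int n, size_t lbs)
{
	size_t bmask = lbs - 1;
	size_t end = 0;

	for (int i = 0; i < n; i++) {
		if (end != 0)
			return (0);	/* previous chunk ended mid-block */
		if (i != 0 && (c[i].off & bmask) != 0)
			return (0);	/* chunk does not start block-aligned */
		end = c[i].len & bmask;	/* remember a short tail, if any */
	}
	return (1);
}

int
main(void)
{
	/* a 512-byte block (think gang header) between two 4K data blocks */
	struct chunk mixed[] = { {0, 4096}, {0, 512}, {0, 4096} };
	/* second chunk starts 512 bytes into its page */
	struct chunk unaligned[] = { {0, 4096}, {512, 4096} };
	struct chunk clean[] = { {0, 4096}, {0, 4096} };

	printf("mixed: %s\n", check(mixed, 3, 4096) ? "ok" : "needs linear copy");
	printf("unaligned: %s\n", check(unaligned, 2, 4096) ? "ok" : "needs linear copy");
	printf("clean: %s\n", check(clean, 2, 4096) ? "ok" : "needs linear copy");
	return (0);
}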
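The head-page trick described in _buf_to_page_and_offset() is also just arithmetic: computing the offset against the compound (head) page rather than the individual 4K page lets a single BIO segment cover more than PAGE_SIZE. A userspace sketch with a made-up 16K compound allocation and buffer offset; it assumes the allocation is aligned to its compound size, as buddy-allocated __GFP_COMP pages are:

/*
 * Userspace sketch of the head-page offset arithmetic. All addresses and
 * sizes below are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define	PAGE_SIZE	4096UL

int
main(void)
{
	uintptr_t alloc = 0x100000;	/* start of a 16K __GFP_COMP allocation */
	uintptr_t buf = alloc + 6000;	/* caller's buffer, somewhere inside it */
	size_t compound_size = 16384;	/* what page_size(head) would report */

	/* treated as plain 4K pages: offset within the current tail page */
	size_t tail_off = buf & (PAGE_SIZE - 1);
	size_t tail_take = PAGE_SIZE - tail_off;	/* at most 4K per segment */

	/* treated as a compound page: offset from the (aligned) head page */
	size_t head_off = buf & (compound_size - 1);
	size_t head_take = compound_size - head_off;	/* can exceed PAGE_SIZE */

	printf("per-4K segment: off %zu, at most %zu bytes\n", tail_off, tail_take);
	printf("head-page segment: off %zu, at most %zu bytes\n", head_off, head_take);
	return (0);
}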