fs: ext4: fsync: optimize double-fsync() a bunch
There are cases where ext4 is a bit too conservative about sending barriers down to the disk, and cases where the transaction in progress is not the one that issued the barrier (in other words: the fsync is for a file whose IO happened some time ago and whose data has already been sent to the disk). For that case, a better-performing tradeoff can be made on SSD devices (which are able to flush their DRAM caches quickly on a power-fail event): the barrier is still sent to the disk, but we do not wait for it to complete. Any subsequent IO will correctly block behind the barrier.

Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
(cherry picked from commit 74aa09a7751e438bd15b5cd73f611021b7239240)
(cherry picked from commit fa3bdf1a32cac074ff52403cb9ce18eb18c7f7d1)
parent 8d7a99febf
commit 54bab59901
5 changed files with 60 additions and 1 deletion
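For context, the "double-fsync()" in the title refers to the common commit pattern sketched below in minimal userspace C (the file name and payload are illustrative, not from the patch). By the second fsync() the data pages have typically already reached the disk, so the only remaining work is the cache-flush barrier, which is exactly the case this change makes cheaper on non-rotational devices:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "payload";
	int fd = open("journal.db", O_WRONLY | O_CREAT, 0644);

	write(fd, buf, strlen(buf));
	fsync(fd);	/* first fsync: data write-out plus a barrier */
	fsync(fd);	/* second fsync: no new data, only the barrier is left */
	close(fd);
	return 0;
}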
block/bio.c
@@ -1186,6 +1186,25 @@ int submit_bio_wait(struct bio *bio)
 }
 EXPORT_SYMBOL(submit_bio_wait);
 
+static void submit_bio_nowait_endio(struct bio *bio)
+{
+	bio_put(bio);
+}
+
+/**
+ * submit_bio_nowait - submit a bio for fire-and-forget
+ * @bio: The &struct bio which describes the I/O
+ *
+ * Simple wrapper around submit_bio() that takes care of bio_put() on completion
+ */
+void submit_bio_nowait(struct bio *bio)
+{
+	bio->bi_end_io = submit_bio_nowait_endio;
+	bio->bi_opf |= REQ_SYNC;
+	submit_bio(bio);
+}
+EXPORT_SYMBOL(submit_bio_nowait);
+
 /**
  * bio_advance - increment/complete a bio by some number of bytes
  * @bio: bio to advance
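A note on usage (not part of the commit): blkdev_issue_flush_nowait() in the next hunk is the in-tree caller, but the contract is worth spelling out. Once submit_bio_nowait() is called, ownership of the bio passes to the block layer, and the endio handler's bio_put() may free it at any time, so a caller must not touch the bio afterwards. A hypothetical caller might look like the sketch below (the helper name and parameters are made up for illustration):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_write_page_nowait(struct block_device *bdev,
				      struct page *page, sector_t sector)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio_nowait(bio);		/* fire-and-forget: do not use bio after this */
}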
block/blk-flush.c
@@ -467,6 +467,35 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
+#include <trace/hooks/block.h>
+/**
+ * blkdev_issue_flush_nowait - queue a flush
+ * @bdev:	blockdev to issue flush for
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @error_sector:	error sector
+ *
+ * Description:
+ *    Issue a flush for the block device in question. Caller can supply
+ *    room for storing the error offset in case of a flush error, if they
+ *    wish to. If WAIT flag is not passed then caller may check only what
+ *    request was pushed in some internal queue for later handling.
+ */
+void blkdev_issue_flush_nowait(struct block_device *bdev, gfp_t gfp_mask)
+{
+	struct request_queue *q;
+	struct bio *bio;
+	if (bdev->bd_disk == NULL)
+		return;
+	q = bdev_get_queue(bdev);
+	if (!q)
+		return;
+	bio = bio_alloc(gfp_mask, 0);
+	bio_set_dev(bio, bdev);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+	submit_bio_nowait(bio);
+}
+EXPORT_SYMBOL(blkdev_issue_flush_nowait);
+
 struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
 		gfp_t flags)
 {
fs/ext4/fsync.c
@@ -174,7 +174,11 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
 
 	if (needs_barrier) {
-		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+		err = 0;
+		if (!blk_queue_nonrot(bdev_get_queue(inode->i_sb->s_bdev)))
+			err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+		else
+			blkdev_issue_flush_nowait(inode->i_sb->s_bdev, GFP_KERNEL);
 		if (!ret)
 			ret = err;
 	}
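For reference (not part of the diff): the SSD test above, blk_queue_nonrot(), is an existing helper; in kernels of this era it is simply a request-queue flag check, roughly:

/* existing macro in include/linux/blkdev.h, shown for context */
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)

The same flag is what userspace sees as /sys/block/<disk>/queue/rotational, so rotational disks keep the waiting blkdev_issue_flush() path, while non-rotational (SSD/UFS) devices take the fire-and-forget path.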
include/linux/bio.h
@@ -433,6 +433,7 @@ static inline void bio_wouldblock_error(struct bio *bio)
 struct request_queue;
 
 extern int submit_bio_wait(struct bio *bio);
+extern void submit_bio_nowait(struct bio *bio);
 extern void bio_advance(struct bio *, unsigned);
 
 extern void bio_init(struct bio *bio, struct bio_vec *table,
include/linux/blkdev.h
@@ -1341,6 +1341,8 @@ static inline long nr_blockdev_pages(void)
 
 extern void blk_io_schedule(void);
 
+extern void blkdev_issue_flush_nowait(struct block_device *, gfp_t);
+
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 
@@ -2073,6 +2075,10 @@ static inline int sync_blockdev(struct block_device *bdev)
 {
 	return 0;
 }
+
+static inline void blkdev_issue_flush_nowait(struct block_device *bdev, gfp_t gfp_mask)
+{
+}
 #endif
 int fsync_bdev(struct block_device *bdev);