block: move guard_bio_eod to bio.c
This is bio layer functionality and not related to buffer heads.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

commit 29125ed624
parent 1b4d4dbdae
committed by Jens Axboe

diff --git a/block/bio.c b/block/bio.c
@@ -588,6 +588,49 @@ void bio_truncate(struct bio *bio, unsigned new_size)
 	bio->bi_iter.bi_size = new_size;
 }
 
+/**
+ * guard_bio_eod - truncate a BIO to fit the block device
+ * @bio:	bio to truncate
+ *
+ * This allows us to do IO even on the odd last sectors of a device, even if the
+ * block size is some multiple of the physical sector size.
+ *
+ * We'll just truncate the bio to the size of the device, and clear the end of
+ * the buffer head manually.  Truly out-of-range accesses will turn into actual
+ * I/O errors, this only handles the "we need to be able to do I/O at the final
+ * sector" case.
+ */
+void guard_bio_eod(struct bio *bio)
+{
+	sector_t maxsector;
+	struct hd_struct *part;
+
+	rcu_read_lock();
+	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
+	if (part)
+		maxsector = part_nr_sects_read(part);
+	else
+		maxsector = get_capacity(bio->bi_disk);
+	rcu_read_unlock();
+
+	if (!maxsector)
+		return;
+
+	/*
+	 * If the *whole* IO is past the end of the device,
+	 * let it through, and the IO layer will turn it into
+	 * an EIO.
+	 */
+	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
+		return;
+
+	maxsector -= bio->bi_iter.bi_sector;
+	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
+		return;
+
+	bio_truncate(bio, maxsector << 9);
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio:   bio to release reference to
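
For readers less used to the bio sector arithmetic: sector counts are 512-byte units, so bi_size >> 9 converts the I/O size in bytes to sectors and maxsector << 9 converts the remaining sectors back to a byte size. Below is a minimal userspace sketch of that clamping logic, not part of the patch; the toy_* names and types are invented stand-ins for the kernel's, chosen only for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

struct toy_bio {
	sector_t bi_sector;	/* first sector of the I/O */
	unsigned int bi_size;	/* I/O size in bytes */
};

static void toy_guard_bio_eod(struct toy_bio *bio, sector_t maxsector)
{
	/* Unknown capacity, or the whole I/O is past the end: leave it alone. */
	if (!maxsector || bio->bi_sector >= maxsector)
		return;

	/* Sectors left between the start of the I/O and the end of the device. */
	maxsector -= bio->bi_sector;
	if ((bio->bi_size >> 9) <= maxsector)
		return;			/* already fits */

	bio->bi_size = maxsector << 9;	/* clamp to the end of the device */
}

int main(void)
{
	/* A 4 KiB I/O starting one sector before the end of a 1000-sector device. */
	struct toy_bio bio = { .bi_sector = 999, .bi_size = 4096 };

	toy_guard_bio_eod(&bio, 1000);
	printf("clamped to %u bytes\n", bio.bi_size);	/* prints 512 */
	return 0;
}

Running the sketch prints "clamped to 512 bytes": only the first 512-byte sector of the 4 KiB request still fits on the device.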

diff --git a/fs/buffer.c b/fs/buffer.c
@@ -3019,49 +3019,6 @@ static void end_bio_bh_io_sync(struct bio *bio)
 	bio_put(bio);
 }
 
-/*
- * This allows us to do IO even on the odd last sectors
- * of a device, even if the block size is some multiple
- * of the physical sector size.
- *
- * We'll just truncate the bio to the size of the device,
- * and clear the end of the buffer head manually.
- *
- * Truly out-of-range accesses will turn into actual IO
- * errors, this only handles the "we need to be able to
- * do IO at the final sector" case.
- */
-void guard_bio_eod(struct bio *bio)
-{
-	sector_t maxsector;
-	struct hd_struct *part;
-
-	rcu_read_lock();
-	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
-	if (part)
-		maxsector = part_nr_sects_read(part);
-	else
-		maxsector = get_capacity(bio->bi_disk);
-	rcu_read_unlock();
-
-	if (!maxsector)
-		return;
-
-	/*
-	 * If the *whole* IO is past the end of the device,
-	 * let it through, and the IO layer will turn it into
-	 * an EIO.
-	 */
-	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
-		return;
-
-	maxsector -= bio->bi_iter.bi_sector;
-	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
-		return;
-
-	bio_truncate(bio, maxsector << 9);
-}
-
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 			 enum rw_hint write_hint, struct writeback_control *wbc)
 {

diff --git a/fs/internal.h b/fs/internal.h
@@ -38,7 +38,6 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
 /*
  * buffer.c
  */
-extern void guard_bio_eod(struct bio *bio);
 extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block, struct iomap *iomap);
 

diff --git a/include/linux/bio.h b/include/linux/bio.h
@@ -471,6 +471,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
 void bio_truncate(struct bio *bio, unsigned new_size);
+void guard_bio_eod(struct bio *bio);
 
 static inline void zero_fill_bio(struct bio *bio)
 {
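
For context on how the relocated, now-public helper is used: a caller builds a bio, clamps it against end-of-device, then submits it, as submit_bh_wbc() in the fs/buffer.c context above continues to do. A hedged, illustrative sketch follows, assuming the post-patch declaration in <linux/bio.h>; example_submit() is an invented name, not kernel code.

#include <linux/bio.h>

/*
 * Illustrative only: a bio that has already been populated (device,
 * starting sector, pages) is clamped to the device size and submitted.
 */
static void example_submit(struct bio *bio)
{
	guard_bio_eod(bio);	/* trim an I/O that straddles end-of-device */
	submit_bio(bio);	/* a fully out-of-range I/O still fails with an error */
}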