Commit d652502ef4: Merge tag 'ovl-update-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs into android-mainline

A tiny step en route to v5.13-rc1

Signed-off-by: Lee Jones <lee.jones@linaro.org>
Change-Id: I049e80976042ebffc90bb080f09da0afcfd48d77
Lee Jones
2021-05-12 15:54:32 +01:00
1192 changed files with 54942 additions and 15327 deletions

mm/filemap.c

@@ -635,6 +635,49 @@ static bool mapping_needs_writeback(struct address_space *mapping)
 	return mapping->nrpages;
 }
 
+/**
+ * filemap_range_needs_writeback - check if range potentially needs writeback
+ * @mapping: address space within which to check
+ * @start_byte: offset in bytes where the range starts
+ * @end_byte: offset in bytes where the range ends (inclusive)
+ *
+ * Find at least one page in the range supplied, usually used to check if
+ * direct writing in this range will trigger a writeback. Used by O_DIRECT
+ * read/write with IOCB_NOWAIT, to see if the caller needs to do
+ * filemap_write_and_wait_range() before proceeding.
+ *
+ * Return: %true if the caller should do filemap_write_and_wait_range() before
+ * doing O_DIRECT to a page in this range, %false otherwise.
+ */
+bool filemap_range_needs_writeback(struct address_space *mapping,
+				   loff_t start_byte, loff_t end_byte)
+{
+	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
+	pgoff_t max = end_byte >> PAGE_SHIFT;
+	struct page *page;
+
+	if (!mapping_needs_writeback(mapping))
+		return false;
+	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
+	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+		return false;
+	if (end_byte < start_byte)
+		return false;
+
+	rcu_read_lock();
+	xas_for_each(&xas, page, max) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (xa_is_value(page))
+			continue;
+		if (PageDirty(page) || PageLocked(page) || PageWriteback(page))
+			break;
+	}
+	rcu_read_unlock();
+	return page != NULL;
+}
+EXPORT_SYMBOL_GPL(filemap_range_needs_writeback);
+
 /**
  * filemap_write_and_wait_range - write out & wait on a file range
  * @mapping:	the address_space for the pages
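The kernel-doc above spells out the contract: under IOCB_NOWAIT the helper is a cheap probe for "would O_DIRECT have to block here?", while callers that may block flush the range with filemap_write_and_wait_range() instead. A minimal sketch of that pattern follows; the function name dio_read_sketch and the direct_IO dispatch are illustrative only, and the real caller is the generic_file_read_iter hunk further down.

/* Hypothetical caller sketch, not part of this diff: how an O_DIRECT
 * read path is expected to use filemap_range_needs_writeback(). */
static ssize_t dio_read_sketch(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	size_t count = iov_iter_count(iter);
	int err;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* Must not block: give up if any cached page needs writeback. */
		if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
						  iocb->ki_pos + count - 1))
			return -EAGAIN;
	} else {
		/* Allowed to block: write back and wait on the range first. */
		err = filemap_write_and_wait_range(mapping, iocb->ki_pos,
						   iocb->ki_pos + count - 1);
		if (err)
			return err;
	}
	return mapping->a_ops->direct_IO(iocb, iter);
}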
@@ -1724,7 +1767,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
  * @mapping: the address_space to search
  * @index: The page cache index.
  *
- * Looks up the page cache slot at @mapping & @offset. If there is a
+ * Looks up the page cache slot at @mapping & @index. If there is a
  * page cache page, the head page is returned with an increased refcount.
  *
  * If the slot holds a shadow entry of a previously evicted page, or a
@@ -2305,8 +2348,6 @@ static int filemap_read_page(struct file *file, struct address_space *mapping,
 		return error;
 	if (PageUptodate(page))
 		return 0;
-	if (!page->mapping)	/* page truncated */
-		return AOP_TRUNCATED_PAGE;
 	shrink_readahead_size_eio(&file->f_ra);
 	return -EIO;
 }
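After this change filemap_read_page() no longer detects truncation itself; AOP_TRUNCATED_PAGE only reaches callers when ->readpage() itself returns it, and the expected response is still to redo the page-cache lookup, as the filemap_fault hunk below shows. A rough sketch of that retry pattern, assuming (consistent with the filemap_fault hunks) that filemap_read_page() takes a locked page and returns with it unlocked; the helper name read_cached_page_sketch is hypothetical:

/* Hypothetical caller sketch, not part of this diff. */
static struct page *read_cached_page_sketch(struct file *file,
		struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	int error;

retry:
	page = pagecache_get_page(mapping, index, FGP_CREAT | FGP_LOCK,
				  mapping_gfp_mask(mapping));
	if (!page)
		return ERR_PTR(-ENOMEM);
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/* Kicks off ->readpage(); the page lock is dropped when I/O completes. */
	error = filemap_read_page(file, mapping, page);
	put_page(page);
	if (error == AOP_TRUNCATED_PAGE)
		goto retry;	/* raced with truncate: look the page up again */
	if (error)
		return ERR_PTR(error);
	goto retry;		/* now uptodate: re-find the page and return it */
}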
@@ -2638,8 +2679,8 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 
 		size = i_size_read(inode);
 		if (iocb->ki_flags & IOCB_NOWAIT) {
-			if (filemap_range_has_page(mapping, iocb->ki_pos,
-						iocb->ki_pos + count - 1))
+			if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
+						iocb->ki_pos + count - 1))
 				return -EAGAIN;
 		} else {
 			retval = filemap_write_and_wait_range(mapping,
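This hunk is the user-visible core of the series: with IOCB_NOWAIT, an O_DIRECT read used to fail with -EAGAIN whenever any page of the range was cached at all; now it fails only when a cached page is dirty, locked, or under writeback, i.e. when proceeding would actually have to block. From userspace the path is exercised with preadv2() and RWF_NOWAIT on an O_DIRECT file descriptor, roughly as in the sketch below (the file name and the 4 KiB alignment are placeholder assumptions):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov;
	void *buf;
	ssize_t ret;
	int fd;

	/* O_DIRECT requires suitably aligned buffers, offsets and lengths. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	fd = open("testfile", O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;

	iov.iov_base = buf;
	iov.iov_len = 4096;

	/* RWF_NOWAIT: return -EAGAIN rather than blocking in the kernel. */
	ret = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		printf("would block: range still needs writeback\n");
	else if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes without blocking\n", ret);

	close(fd);
	free(buf);
	return 0;
}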
@@ -2937,7 +2978,6 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	struct file *file = vmf->vma->vm_file;
 	struct file *fpin = NULL;
 	struct address_space *mapping = file->f_mapping;
-	struct file_ra_state *ra = &file->f_ra;
 	struct inode *inode = mapping->host;
 	pgoff_t offset = vmf->pgoff;
 	pgoff_t max_off;
@@ -3024,14 +3064,8 @@ page_not_uptodate:
 	 * because there really aren't any performance issues here
 	 * and we need to check for errors.
 	 */
-	ClearPageError(page);
 	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-	error = mapping->a_ops->readpage(file, page);
-	if (!error) {
-		wait_on_page_locked(page);
-		if (!PageUptodate(page))
-			error = -EIO;
-	}
+	error = filemap_read_page(file, mapping, page);
 	if (fpin)
 		goto out_retry;
 	put_page(page);
@@ -3039,7 +3073,6 @@ page_not_uptodate:
 
 	if (!error || error == AOP_TRUNCATED_PAGE)
 		goto retry_find;
-	shrink_readahead_size_eio(ra);
 	return VM_FAULT_SIGBUS;
 
 out_retry: