FROMLIST: mm: implement speculative handling in filemap_map_pages()

In the speculative case, we know the page table already exists, and it
must be mapped and locked with pte_map_lock(). If no page is found for
the given address, return VM_FAULT_RETRY, which aborts the fault before
we get into the vm_ops->fault() callback. This is fine because if
filemap_map_pages() does not find the page in the page cache,
vm_ops->fault() will not find it either.
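
To make that concrete, here is a condensed sketch of the resulting
control flow (simplified from the diff below):

	vm_fault_t ret = (vmf->flags & FAULT_FLAG_SPECULATIVE) ?
		VM_FAULT_RETRY : 0;

	head = first_map_page(mapping, &xas, end_pgoff);
	if (!head)
		return ret;	/* speculative: VM_FAULT_RETRY; else 0 */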

Initialize addr and last_pgoff to correspond to the pte at the original
fault address (which was mapped with pte_map_lock()), rather than the
pte at start_pgoff. The choice of initial values doesn't matter, as
they are all adjusted together before use; they only need to be
consistent with each other, and starting from the original fault
address and pte lets us reuse pte_map_lock() without any changes to it.
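
For reference, the mapping loop only ever moves these values by the
same relative delta; this is the pre-existing upstream loop body,
quoted here slightly abridged:

	addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
	vmf->pte += xas.xa_index - last_pgoff;
	last_pgoff = xas.xa_index;

so any (addr, vmf->pte, last_pgoff) triple describing the same pte is
an equally valid starting point.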

Signed-off-by: Michel Lespinasse <michel@lespinasse.org>
Link: https://lore.kernel.org/all/20210407014502.24091-29-michel@lespinasse.org/
Bug: 161210518
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I0acf4f9626ec0126cdc9a95a7ff1cd735c1af2ca
---
diff --git a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3335,25 +3335,31 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	struct vm_area_struct *vma = vmf->vma;
 	struct file *file = vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
-	pgoff_t last_pgoff = start_pgoff;
+	pgoff_t last_pgoff;
 	unsigned long addr;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct page *head, *page;
 	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
-	vm_fault_t ret = 0;
+	vm_fault_t ret = (vmf->flags & FAULT_FLAG_SPECULATIVE) ?
+		VM_FAULT_RETRY : 0;
 
-	rcu_read_lock();
+	/* filemap_map_pages() is called within an rcu read lock already. */
 	head = first_map_page(mapping, &xas, end_pgoff);
 	if (!head)
-		goto out;
+		return ret;
 
-	if (filemap_map_pmd(vmf, head)) {
-		ret = VM_FAULT_NOPAGE;
-		goto out;
+	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
+	    filemap_map_pmd(vmf, head))
+		return VM_FAULT_NOPAGE;
+
+	if (!pte_map_lock(vmf)) {
+		unlock_page(head);
+		put_page(head);
+		return VM_FAULT_RETRY;
 	}
+	addr = vmf->address;
+	last_pgoff = vmf->pgoff;
 
-	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
 	do {
 		page = find_subpage(head, xas.xa_index);
 		if (PageHWPoison(page))
@@ -3383,8 +3389,7 @@ unlock:
 		put_page(head);
 	} while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-out:
-	rcu_read_unlock();
+	vmf->pte = NULL;
 	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
 	return ret;
 }
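
For orientation, a minimal sketch of the pte_map_lock() contract this
change relies on. The real helper is introduced earlier in this series;
the speculative revalidation details below (the trylock, and
mmap_seq_read_check() against vmf->seq) are assumptions based on that
series, not part of this patch:

	static bool pte_map_lock(struct vm_fault *vmf)
	{
		struct mm_struct *mm = vmf->vma->vm_mm;
		spinlock_t *ptl;
		pte_t *pte;

		if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
			/* Classic path: always succeeds. */
			vmf->pte = pte_offset_map_lock(mm, vmf->pmd,
						       vmf->address, &vmf->ptl);
			return true;
		}

		/* Speculative path: never block, and revalidate the mm. */
		pte = pte_offset_map(vmf->pmd, vmf->address);
		ptl = pte_lockptr(mm, vmf->pmd);
		if (!spin_trylock(ptl)) {
			pte_unmap(pte);
			return false;
		}
		if (!mmap_seq_read_check(mm, vmf->seq)) {
			pte_unmap_unlock(pte, ptl);
			return false;
		}
		vmf->pte = pte;
		vmf->ptl = ptl;
		return true;
	}

A false return here is what becomes the VM_FAULT_RETRY seen above,
sending the fault back to the non-speculative, mmap_lock-protected path.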