mmap locking API: convert mmap_sem comments
Convert comments that reference mmap_sem to reference mmap_lock instead.

[akpm@linux-foundation.org: fix up linux-next leftovers]
[akpm@linux-foundation.org: s/lockaphore/lock/, per Vlastimil]
[akpm@linux-foundation.org: more linux-next fixups, per Michel]

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-13-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c1e8d7c6a7
parent 3e4e28c5a8
committed by Linus Torvalds
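For context: this patch only rewords comments. The functional conversion happened in earlier patches of the same series, which replaced direct down_read()/up_read() calls on mm->mmap_sem with the wrappers in include/linux/mmap_lock.h. A minimal sketch of what the new comment wording refers to, assuming the v5.8-era linked VMA list (count_vmas_demo() is a hypothetical illustration, not part of this patch):

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/* Hypothetical example: walk the VMA list under the mmap read lock. */
static unsigned long count_vmas_demo(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		nr++;
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	return nr;
}

The wrapper names make read vs. write intent explicit at each call site and gave later patches a single place to change the underlying lock implementation.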
@@ -76,7 +76,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 				     PAGE_SIZE);
 		kunmap_atomic(page_kaddr);
 
-		/* fallback to copy_from_user outside mmap_sem */
+		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			ret = -ENOENT;
 			*pagep = page;
@@ -200,7 +200,7 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
 #ifdef CONFIG_HUGETLB_PAGE
 /*
  * __mcopy_atomic processing for HUGETLB vmas. Note that this routine is
- * called with mmap_sem held, it will release mmap_sem before returning.
+ * called with mmap_lock held, it will release mmap_lock before returning.
  */
 static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 					      struct vm_area_struct *dst_vma,
@@ -247,7 +247,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 
 retry:
 	/*
-	 * On routine entry dst_vma is set. If we had to drop mmap_sem and
+	 * On routine entry dst_vma is set. If we had to drop mmap_lock and
 	 * retry, dst_vma will be set to NULL and we must lookup again.
 	 */
 	if (!dst_vma) {
@@ -357,7 +357,7 @@ out:
 		 * private and shared mappings. See the routine
 		 * restore_reserve_on_error for details. Unfortunately, we
 		 * can not call restore_reserve_on_error now as it would
-		 * require holding mmap_sem.
+		 * require holding mmap_lock.
 		 *
 		 * If a reservation for the page existed in the reservation
 		 * map of a private mapping, the map was modified to indicate
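The comment changed in the first hunk describes a two-step copy: the first copy_from_user() runs under kmap_atomic(), where page faults are disabled, so it can fail fast without sleeping while mmap_lock is held; the copy is then repeated outside the lock in a context that may fault and sleep. A condensed, hypothetical sketch of that shape (simplified from the real mcopy_atomic path; demo_fill_page() is illustrative only):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/uaccess.h>

/* Hypothetical sketch: caller holds mmap_read_lock(dst_mm) on entry. */
static int demo_fill_page(struct mm_struct *dst_mm, struct page *page,
			  unsigned long src_addr)
{
	void *page_kaddr;
	unsigned long missing;

	/* Atomic kmap disables page faults: this copy cannot sleep. */
	page_kaddr = kmap_atomic(page);
	missing = copy_from_user(page_kaddr,
				 (const void __user *)src_addr, PAGE_SIZE);
	kunmap_atomic(page_kaddr);

	if (unlikely(missing)) {
		/* fallback to copy_from_user outside mmap_lock */
		mmap_read_unlock(dst_mm);

		page_kaddr = kmap(page);
		missing = copy_from_user(page_kaddr,
					 (const void __user *)src_addr,
					 PAGE_SIZE);
		kunmap(page);

		mmap_read_lock(dst_mm);	/* caller must revalidate the vma */
		if (unlikely(missing))
			return -EFAULT;
	}
	return 0;
}

In the real code the unlock, sleeping copy, and retry are spread across the caller and mcopy_atomic_pte(), which is why the second hunk's header comment documents that __mcopy_atomic_hugetlb() is called with mmap_lock held and releases it before returning.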