mm: rename migrate_pgmap_owner
MMU notifier ranges have a migrate_pgmap_owner field which is used by drivers to store a pointer. This is subsequently used by the driver callback to filter MMU_NOTIFY_MIGRATE events. Other notifier event types can also benefit from this filtering, so rename the 'migrate_pgmap_owner' field to 'owner' and create a new notifier initialisation function to initialise this field. Link: https://lkml.kernel.org/r/20210616105937.23201-6-apopple@nvidia.com Signed-off-by: Alistair Popple <apopple@nvidia.com> Suggested-by: Peter Xu <peterx@redhat.com> Reviewed-by: Peter Xu <peterx@redhat.com> Cc: Ben Skeggs <bskeggs@redhat.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Hugh Dickins <hughd@google.com> Cc: Jason Gunthorpe <jgg@nvidia.com> Cc: John Hubbard <jhubbard@nvidia.com> Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org> Cc: Ralph Campbell <rcampbell@nvidia.com> Cc: Shakeel Butt <shakeelb@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
a98a2f0c8c
commit
6b49bf6ddb
@@ -332,7 +332,7 @@ between device driver specific code and shared common code:
|
|||||||
walks to fill in the ``args->src`` array with PFNs to be migrated.
|
walks to fill in the ``args->src`` array with PFNs to be migrated.
|
||||||
The ``invalidate_range_start()`` callback is passed a
|
The ``invalidate_range_start()`` callback is passed a
|
||||||
``struct mmu_notifier_range`` with the ``event`` field set to
|
``struct mmu_notifier_range`` with the ``event`` field set to
|
||||||
``MMU_NOTIFY_MIGRATE`` and the ``migrate_pgmap_owner`` field set to
|
``MMU_NOTIFY_MIGRATE`` and the ``owner`` field set to
|
||||||
the ``args->pgmap_owner`` field passed to migrate_vma_setup(). This
|
the ``args->pgmap_owner`` field passed to migrate_vma_setup(). This
|
||||||
allows the device driver to skip the invalidation callback and only
|
allows the device driver to skip the invalidation callback and only
|
||||||
invalidate device private MMU mappings that are actually migrating.
|
invalidate device private MMU mappings that are actually migrating.
|
||||||
|
|||||||
@@ -265,7 +265,7 @@ nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
|
|||||||
* the invalidation is handled as part of the migration process.
|
* the invalidation is handled as part of the migration process.
|
||||||
*/
|
*/
|
||||||
if (update->event == MMU_NOTIFY_MIGRATE &&
|
if (update->event == MMU_NOTIFY_MIGRATE &&
|
||||||
update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev)
|
update->owner == svmm->vmm->cli->drm->dev)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
|
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
|
||||||
|
|||||||
@@ -41,7 +41,7 @@ struct mmu_interval_notifier;
|
|||||||
*
|
*
|
||||||
* @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
|
* @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
|
||||||
* a device driver to possibly ignore the invalidation if the
|
* a device driver to possibly ignore the invalidation if the
|
||||||
* migrate_pgmap_owner field matches the driver's device private pgmap owner.
|
* owner field matches the driver's device private pgmap owner.
|
||||||
*/
|
*/
|
||||||
enum mmu_notifier_event {
|
enum mmu_notifier_event {
|
||||||
MMU_NOTIFY_UNMAP = 0,
|
MMU_NOTIFY_UNMAP = 0,
|
||||||
@@ -269,7 +269,7 @@ struct mmu_notifier_range {
|
|||||||
unsigned long end;
|
unsigned long end;
|
||||||
unsigned flags;
|
unsigned flags;
|
||||||
enum mmu_notifier_event event;
|
enum mmu_notifier_event event;
|
||||||
void *migrate_pgmap_owner;
|
void *owner;
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline int mm_has_notifiers(struct mm_struct *mm)
|
static inline int mm_has_notifiers(struct mm_struct *mm)
|
||||||
@@ -521,14 +521,14 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
|
|||||||
range->flags = flags;
|
range->flags = flags;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void mmu_notifier_range_init_migrate(
|
static inline void mmu_notifier_range_init_owner(
|
||||||
struct mmu_notifier_range *range, unsigned int flags,
|
struct mmu_notifier_range *range,
|
||||||
|
enum mmu_notifier_event event, unsigned int flags,
|
||||||
struct vm_area_struct *vma, struct mm_struct *mm,
|
struct vm_area_struct *vma, struct mm_struct *mm,
|
||||||
unsigned long start, unsigned long end, void *pgmap)
|
unsigned long start, unsigned long end, void *owner)
|
||||||
{
|
{
|
||||||
mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
|
mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
|
||||||
start, end);
|
range->owner = owner;
|
||||||
range->migrate_pgmap_owner = pgmap;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
|
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
|
||||||
@@ -655,8 +655,8 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
|
|||||||
|
|
||||||
#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
|
#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
|
||||||
_mmu_notifier_range_init(range, start, end)
|
_mmu_notifier_range_init(range, start, end)
|
||||||
#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
|
#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
|
||||||
pgmap) \
|
end, owner) \
|
||||||
_mmu_notifier_range_init(range, start, end)
|
_mmu_notifier_range_init(range, start, end)
|
||||||
|
|
||||||
static inline bool
|
static inline bool
|
||||||
|
|||||||
@@ -218,7 +218,7 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
|
|||||||
* the invalidation is handled as part of the migration process.
|
* the invalidation is handled as part of the migration process.
|
||||||
*/
|
*/
|
||||||
if (range->event == MMU_NOTIFY_MIGRATE &&
|
if (range->event == MMU_NOTIFY_MIGRATE &&
|
||||||
range->migrate_pgmap_owner == dmirror->mdevice)
|
range->owner == dmirror->mdevice)
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
if (mmu_notifier_range_blockable(range))
|
if (mmu_notifier_range_blockable(range))
|
||||||
|
|||||||
10
mm/migrate.c
10
mm/migrate.c
@@ -2416,8 +2416,8 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
|
|||||||
* that the registered device driver can skip invalidating device
|
* that the registered device driver can skip invalidating device
|
||||||
* private page mappings that won't be migrated.
|
* private page mappings that won't be migrated.
|
||||||
*/
|
*/
|
||||||
mmu_notifier_range_init_migrate(&range, 0, migrate->vma,
|
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
|
||||||
migrate->vma->vm_mm, migrate->start, migrate->end,
|
migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
|
||||||
migrate->pgmap_owner);
|
migrate->pgmap_owner);
|
||||||
mmu_notifier_invalidate_range_start(&range);
|
mmu_notifier_invalidate_range_start(&range);
|
||||||
|
|
||||||
@@ -2927,9 +2927,9 @@ void migrate_vma_pages(struct migrate_vma *migrate)
|
|||||||
if (!notified) {
|
if (!notified) {
|
||||||
notified = true;
|
notified = true;
|
||||||
|
|
||||||
mmu_notifier_range_init_migrate(&range, 0,
|
mmu_notifier_range_init_owner(&range,
|
||||||
migrate->vma, migrate->vma->vm_mm,
|
MMU_NOTIFY_MIGRATE, 0, migrate->vma,
|
||||||
addr, migrate->end,
|
migrate->vma->vm_mm, addr, migrate->end,
|
||||||
migrate->pgmap_owner);
|
migrate->pgmap_owner);
|
||||||
mmu_notifier_invalidate_range_start(&range);
|
mmu_notifier_invalidate_range_start(&range);
|
||||||
}
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user