UPSTREAM: KVM: Convert the kvm->vcpus array to a xarray

At least on arm64 and x86, the vcpus array is pretty huge (up to
1024 entries on x86) and is mostly empty in the majority of the cases
(running 1k vcpu VMs is not that common).

This means that we end up with a 4kB block of unused memory in the
middle of the kvm structure.

Instead of wasting this memory, let's use an xarray,
which gives us almost the same flexibility as a normal array, but
with reduced memory usage for smaller VMs.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Message-Id: <20211116160403.4074052-6-maz@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit c5b077549136584618a66258f09d8d4b41e7409c)
Signed-off-by: Will Deacon <willdeacon@google.com>
Bug: 233587962
Bug: 233588291
Change-Id: I5c89adb8caf5b167536b0e51590c7ee7ec0363d9
This commit is contained in:
Marc Zyngier
2021-11-16 16:04:01 +00:00
committed by Will Deacon
parent e89fbf4a08
commit 37766cea76
2 changed files with 12 additions and 8 deletions

View File

@@ -31,6 +31,7 @@
#include <linux/refcount.h> #include <linux/refcount.h>
#include <linux/nospec.h> #include <linux/nospec.h>
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/xarray.h>
#include <asm/signal.h> #include <asm/signal.h>
#include <linux/kvm.h> #include <linux/kvm.h>
@@ -659,7 +660,7 @@ struct kvm {
struct mutex slots_arch_lock; struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */ struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; struct xarray vcpu_array;
/* Used to wait for completion of MMU notifiers. */ /* Used to wait for completion of MMU notifiers. */
spinlock_t mn_invalidate_lock; spinlock_t mn_invalidate_lock;
@@ -800,7 +801,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */ /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb(); smp_rmb();
return kvm->vcpus[i]; return xa_load(&kvm->vcpu_array, i);
} }
#define kvm_for_each_vcpu(idx, vcpup, kvm) \ #define kvm_for_each_vcpu(idx, vcpup, kvm) \

View File

@@ -452,7 +452,7 @@ void kvm_destroy_vcpus(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) { kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_vcpu_destroy(vcpu); kvm_vcpu_destroy(vcpu);
kvm->vcpus[i] = NULL; xa_erase(&kvm->vcpu_array, i);
} }
atomic_set(&kvm->online_vcpus, 0); atomic_set(&kvm->online_vcpus, 0);
@@ -1057,6 +1057,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
mutex_init(&kvm->slots_arch_lock); mutex_init(&kvm->slots_arch_lock);
spin_lock_init(&kvm->mn_invalidate_lock); spin_lock_init(&kvm->mn_invalidate_lock);
rcuwait_init(&kvm->mn_memslots_update_rcuwait); rcuwait_init(&kvm->mn_memslots_update_rcuwait);
xa_init(&kvm->vcpu_array);
INIT_LIST_HEAD(&kvm->devices); INIT_LIST_HEAD(&kvm->devices);
@@ -3687,7 +3688,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
} }
vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
BUG_ON(r == -EBUSY);
if (r)
goto unlock_vcpu_destroy;
/* Fill the stats id string for the vcpu */ /* Fill the stats id string for the vcpu */
snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
@@ -3697,15 +3701,14 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
kvm_get_kvm(kvm); kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu); r = create_vcpu_fd(vcpu);
if (r < 0) { if (r < 0) {
xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
kvm_put_kvm_no_destroy(kvm); kvm_put_kvm_no_destroy(kvm);
goto unlock_vcpu_destroy; goto unlock_vcpu_destroy;
} }
kvm->vcpus[vcpu->vcpu_idx] = vcpu;
/* /*
* Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu
* before kvm->online_vcpu's incremented value. * pointer before kvm->online_vcpu's incremented value.
*/ */
smp_wmb(); smp_wmb();
atomic_inc(&kvm->online_vcpus); atomic_inc(&kvm->online_vcpus);