Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/Kconfig             |  2
-rw-r--r--  virt/kvm/arm/aarch32.c       | 20
-rw-r--r--  virt/kvm/arm/arm.c           |  4
-rw-r--r--  virt/kvm/arm/mmu.c           |  2
-rw-r--r--  virt/kvm/arm/psci.c          |  2
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c  |  5
-rw-r--r--  virt/kvm/async_pf.c          |  2
-rw-r--r--  virt/kvm/eventfd.c           | 17
-rw-r--r--  virt/kvm/kvm_main.c          | 56
9 files changed, 58 insertions(+), 52 deletions(-)
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 72143cfaf6ec..ea434ddc8499 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
config KVM_COMPAT
def_bool y
- depends on KVM && COMPAT && !S390
+ depends on KVM && COMPAT && !(S390 || ARM64)
config HAVE_KVM_IRQ_BYPASS
bool
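
KVM_COMPAT is now also disabled on arm64, so 32-bit (compat) tasks there can no longer reach the native ioctl handlers; together with the KVM_COMPAT() stub introduced in the kvm_main.c hunk below, their KVM ioctls fail with -EINVAL instead. The entry as it reads after this hunk:

    config KVM_COMPAT
           def_bool y
           depends on KVM && COMPAT && !(S390 || ARM64)
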
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index efc84cbe8277..5abbe9b3c652 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -108,9 +108,9 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
unsigned long itbits, cond;
unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
+ bool is_arm = !(cpsr & PSR_AA32_T_BIT);
- if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK))
+ if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
return;
cond = (cpsr & 0xe000) >> 13;
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
else
itbits = (itbits << 1) & 0x1f;
- cpsr &= ~COMPAT_PSR_IT_MASK;
+ cpsr &= ~PSR_AA32_IT_MASK;
cpsr |= cond << 13;
cpsr |= (itbits & 0x1c) << (10 - 2);
cpsr |= (itbits & 0x3) << 25;
@@ -138,7 +138,7 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
bool is_thumb;
- is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
+ is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
if (is_thumb && !is_wide_instr)
*vcpu_pc(vcpu) += 2;
else
@@ -164,16 +164,16 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
- bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+ bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
- cpsr = mode | COMPAT_PSR_I_BIT;
+ cpsr = mode | PSR_AA32_I_BIT;
if (sctlr & (1 << 30))
- cpsr |= COMPAT_PSR_T_BIT;
+ cpsr |= PSR_AA32_T_BIT;
if (sctlr & (1 << 25))
- cpsr |= COMPAT_PSR_E_BIT;
+ cpsr |= PSR_AA32_E_BIT;
*vcpu_cpsr(vcpu) = cpsr;
@@ -192,7 +192,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
- prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+ prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
}
/*
@@ -216,7 +216,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
fsr = &vcpu_cp15(vcpu, c5_DFSR);
}
- prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+ prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);
*far = addr;
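
Every change in aarch32.c is a mechanical rename of the COMPAT_PSR_* constants to their PSR_AA32_* equivalents; values and logic are untouched. For reference, the IT-state advance that kvm_adjust_itstate() performs, modelled as a standalone userspace program (a sketch, not kernel code; IT_MASK is copied here for illustration and mirrors PSR_AA32_IT_MASK):

    #include <stdint.h>
    #include <stdio.h>

    #define IT_MASK 0x0600fc00u            /* mirrors PSR_AA32_IT_MASK */

    static uint32_t it_advance(uint32_t cpsr)
    {
        uint32_t cond   = (cpsr & 0xe000) >> 13;         /* IT[7:5] */
        uint32_t itbits = (cpsr & 0x1c00) >> (10 - 2);   /* IT[4:2] */

        itbits |= (cpsr >> 25) & 0x3;                    /* IT[1:0] */

        if ((itbits & 0x7) == 0)       /* last insn of the IT block */
            itbits = cond = 0;
        else                           /* move on to the next slot */
            itbits = (itbits << 1) & 0x1f;

        cpsr &= ~IT_MASK;
        cpsr |= cond << 13;
        cpsr |= (itbits & 0x1c) << (10 - 2);
        cpsr |= (itbits & 0x3) << 25;
        return cpsr;
    }

    int main(void)
    {
        /* an example ITSTATE value (0x91) encoded into a CPSR image */
        uint32_t cpsr = ((0x91u & 0xfc) << 8) | ((0x91u & 0x3) << 25);

        for (int i = 0; i < 5; i++) {
            printf("cpsr = %#010x\n", (unsigned)cpsr);
            cpsr = it_advance(cpsr);
        }
        return 0;
    }
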
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 04e554cae3a2..108250e4d376 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -604,7 +604,7 @@ void kvm_arm_resume_guest(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.pause = false;
- swake_up(kvm_arch_vcpu_wq(vcpu));
+ swake_up_one(kvm_arch_vcpu_wq(vcpu));
}
}
@@ -612,7 +612,7 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
- swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
(!vcpu->arch.pause)));
if (vcpu->arch.power_off || vcpu->arch.pause) {
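
The swake_up() and swait_event_interruptible() renames here (and in psci.c, async_pf.c and kvm_main.c below) follow the scheduler-side rework of simple waitqueues, which now support only exclusive, wake-one waiters and spell that out in the API names. The pattern in isolation, as a kernel-style sketch (not a standalone program; the queue and helpers come from <linux/swait.h>):

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(wq);
    static bool cond;

    static void waiter(void)
    {
            /* sleeps until cond is true; queued as an exclusive waiter */
            swait_event_interruptible_exclusive(wq, READ_ONCE(cond));
    }

    static void waker(void)
    {
            WRITE_ONCE(cond, true);
            swake_up_one(&wq);      /* wakes exactly one waiter */
    }
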
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 8d90de213ce9..1d90d79706bd 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
phys_addr_t next;
assert_spin_locked(&kvm->mmu_lock);
+ WARN_ON(size & ~PAGE_MASK);
+
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
/*
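
The WARN_ON added above catches callers handing unmap_stage2_range() a size that is not a whole number of pages, since the walk below proceeds in page-granular steps. The check itself, modelled as a userspace program with an assumed 4 KiB page size:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t sizes[] = { 4096, 8192, 12800 };   /* last one is bogus */

        for (int i = 0; i < 3; i++)
            printf("size %llu: %s\n", (unsigned long long)sizes[i],
                   (sizes[i] & ~PAGE_MASK) ? "WARN (not page-sized)" : "ok");
        return 0;
    }
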
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index c95ab4c5a475..9b73d3ad918a 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -155,7 +155,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
- swake_up(wq);
+ swake_up_one(wq);
return PSCI_RET_SUCCESS;
}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index ff7dc890941a..cdce653e3c47 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
kvm_vgic_global_state.vcpu_base = 0;
- } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
- pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
- (unsigned long long)resource_size(&info->vcpu),
- PAGE_SIZE);
- kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 57bcb27dcf30..23c2519c5b32 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -107,7 +107,7 @@ static void async_pf_execute(struct work_struct *work)
trace_kvm_async_pf_completed(addr, gva);
if (swq_has_sleeper(&vcpu->wq))
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);
mmput(mm);
kvm_put_kvm(vcpu->kvm);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 90d30fbe95ae..b20b751286fc 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
{
struct kvm_kernel_irqfd *irqfd =
container_of(work, struct kvm_kernel_irqfd, shutdown);
+ struct kvm *kvm = irqfd->kvm;
u64 cnt;
+ /* Make sure irqfd has been initialized in assign path. */
+ synchronize_srcu(&kvm->irq_srcu);
+
/*
* Synchronize with the wait-queue and unhook ourselves to prevent
* further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
idx = srcu_read_lock(&kvm->irq_srcu);
irqfd_update(kvm, irqfd);
- srcu_read_unlock(&kvm->irq_srcu, idx);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
@@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
- /*
- * do not drop the file until the irqfd is fully initialized, otherwise
- * we might race against the EPOLLHUP
- */
- fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
if (kvm_arch_has_irq_bypass()) {
irqfd->consumer.token = (void *)irqfd->eventfd;
@@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
}
#endif
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+
+ /*
+ * do not drop the file until the irqfd is fully initialized, otherwise
+ * we might race against the EPOLLHUP
+ */
+ fdput(f);
return 0;
fail:
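
Taken together, the eventfd.c hunks close a race between irqfd assignment and shutdown: kvm_irqfd_assign() now keeps the kvm->irq_srcu read lock held across the whole initialization, including the irq-bypass registration, and only drops the file reference afterwards, while irqfd_shutdown() starts with synchronize_srcu() and therefore cannot tear the irqfd down before any in-flight assignment has left its critical section. The ordering idiom, reduced to a sketch (struct object, finish_init() and destroy() are hypothetical; only the SRCU calls are real kernel API):

    #include <linux/srcu.h>

    static struct srcu_struct init_srcu;

    static void assign_path(struct object *obj)
    {
            int idx = srcu_read_lock(&init_srcu);

            finish_init(obj);               /* teardown may already be queued */
            srcu_read_unlock(&init_srcu, idx);
    }

    static void teardown_path(struct object *obj)
    {
            /* cannot return before any assign_path() section has ended */
            synchronize_srcu(&init_srcu);
            destroy(obj);
    }
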
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4c593acc4510..0df592c4f09f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
+#define KVM_COMPAT(c) .compat_ioctl = (c)
+#else
+static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
+ unsigned long arg) { return -EINVAL; }
+#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);
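
With CONFIG_KVM_COMPAT disabled, the new KVM_COMPAT() macro no longer leaves .compat_ioctl unset (which would make compat ioctls fail with -ENOTTY); every KVM file_operations instead gets a stub returning -EINVAL. For example, kvm_vm_fops further down effectively expands to:

    static struct file_operations kvm_vm_fops = {
            .release        = kvm_vm_release,
            .unlocked_ioctl = kvm_vm_ioctl,
            .llseek         = noop_llseek,
            .compat_ioctl   = kvm_no_compat_ioctl,  /* always -EINVAL */
    };
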
@@ -268,7 +273,8 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
* kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
* barrier here.
*/
- if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+ if (!kvm_arch_flush_remote_tlb(kvm)
+ || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.remote_tlb_flush;
cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
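
kvm_flush_remote_tlbs() now lets the architecture flush remote TLBs by itself: if kvm_arch_flush_remote_tlb() returns 0, the KVM_REQ_TLB_FLUSH broadcast is skipped while the statistic still counts a flush; a nonzero return short-circuits into the old request path. A sketch of the presumed generic fallback, assuming the hook defaults to "not supported" when no arch override exists:

    static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
    {
            /* nonzero: generic code must broadcast KVM_REQ_TLB_FLUSH */
            return -ENOTSUPP;
    }
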
@@ -1164,7 +1170,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
n = kvm_dirty_bitmap_bytes(memslot);
- dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+ dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
memset(dirty_bitmap_buffer, 0, n);
spin_lock(&kvm->mmu_lock);
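
The replaced expression and the new helper compute the same address: the memslot's dirty bitmap is allocated at twice kvm_dirty_bitmap_bytes(), and its second half is the scratch buffer being zeroed here. A sketch of what kvm_second_dirty_bitmap() evaluates to, matching the open-coded form it replaces:

    static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
    {
            unsigned long len = kvm_dirty_bitmap_bytes(memslot);

            return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
    }
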
@@ -1337,18 +1343,16 @@ static inline int check_user_page_hwpoison(unsigned long addr)
}
/*
- * The atomic path to get the writable pfn which will be stored in @pfn,
- * true indicates success, otherwise false is returned.
+ * The fast path to get the writable pfn which will be stored in @pfn,
+ * true indicates success, otherwise false is returned. It's also the
+ * only part that runs if we are in atomic context.
*/
-static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
- bool write_fault, bool *writable, kvm_pfn_t *pfn)
+static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
+ bool *writable, kvm_pfn_t *pfn)
{
struct page *page[1];
int npages;
- if (!(async || atomic))
- return false;
-
/*
* Fast pin a writable pfn only if it is a write fault request
* or the caller allows to map a writable pfn for a read fault
@@ -1492,7 +1496,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
/* we can do it either atomically or asynchronously, not both */
BUG_ON(atomic && async);
- if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+ if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
return pfn;
if (atomic)
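
Because __get_user_pages_fast() never sleeps, the fast path no longer needs its (async || atomic) gate and is attempted unconditionally; only the slow path stays off-limits in atomic context. The surviving function, as a simplified sketch assembled from the hunks above:

    static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
                                bool *writable, kvm_pfn_t *pfn)
    {
            struct page *page[1];

            /* fast-pin only when a writable mapping is acceptable */
            if (!(write_fault || writable))
                    return false;

            if (__get_user_pages_fast(addr, 1, 1, page) == 1) {
                    *pfn = page_to_pfn(page[0]);
                    if (writable)
                            *writable = true;
                    return true;
            }
            return false;
    }
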
@@ -2122,16 +2126,22 @@ static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
+ int ret = -EINTR;
+ int idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if (kvm_arch_vcpu_runnable(vcpu)) {
kvm_make_request(KVM_REQ_UNHALT, vcpu);
- return -EINTR;
+ goto out;
}
if (kvm_cpu_has_pending_timer(vcpu))
- return -EINTR;
+ goto out;
if (signal_pending(current))
- return -EINTR;
+ goto out;
- return 0;
+ ret = 0;
+out:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ return ret;
}
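
kvm_vcpu_check_block() now performs its runnability checks inside a kvm->srcu read-side critical section, so the arch callbacks it invokes may safely touch SRCU-protected VM state while the vCPU blocks; the single-exit rewrite keeps lock and unlock paired on every path. The idiom in isolation (precondition() is a hypothetical stand-in):

    static int checked_op(struct kvm *kvm)
    {
            int ret = -EINTR;
            int idx = srcu_read_lock(&kvm->srcu);

            if (precondition(kvm))          /* e.g. a pending signal */
                    goto out;
            ret = 0;
    out:
            srcu_read_unlock(&kvm->srcu, idx);
            return ret;
    }
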
/*
@@ -2167,7 +2177,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_blocking(vcpu);
for (;;) {
- prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
if (kvm_vcpu_check_block(vcpu) < 0)
break;
@@ -2209,7 +2219,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
wqp = kvm_arch_vcpu_wq(vcpu);
if (swq_has_sleeper(wqp)) {
- swake_up(wqp);
+ swake_up_one(wqp);
++vcpu->stat.halt_wakeup;
return true;
}
@@ -2396,11 +2406,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_KVM_COMPAT
- .compat_ioctl = kvm_vcpu_compat_ioctl,
-#endif
.mmap = kvm_vcpu_mmap,
.llseek = noop_llseek,
+ KVM_COMPAT(kvm_vcpu_compat_ioctl),
};
/*
@@ -2824,10 +2832,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
static const struct file_operations kvm_device_fops = {
.unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_KVM_COMPAT
- .compat_ioctl = kvm_device_ioctl,
-#endif
.release = kvm_device_release,
+ KVM_COMPAT(kvm_device_ioctl),
};
struct kvm_device *kvm_device_from_filp(struct file *filp)
@@ -3165,10 +3171,8 @@ static long kvm_vm_compat_ioctl(struct file *filp,
static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_KVM_COMPAT
- .compat_ioctl = kvm_vm_compat_ioctl,
-#endif
.llseek = noop_llseek,
+ KVM_COMPAT(kvm_vm_compat_ioctl),
};
static int kvm_dev_ioctl_create_vm(unsigned long type)
@@ -3259,8 +3263,8 @@ out:
static struct file_operations kvm_chardev_ops = {
.unlocked_ioctl = kvm_dev_ioctl,
- .compat_ioctl = kvm_dev_ioctl,
.llseek = noop_llseek,
+ KVM_COMPAT(kvm_dev_ioctl),
};
static struct miscdevice kvm_dev = {