Commit 084f8b0

Authored and committed by Gavin Shan
KVM: arm64: Protect vLPI translation with vgic_irq::irq_lock
JIRA: https://issues.redhat.com/browse/RHEL-93666

Though undocumented, KVM generally protects the translation of a vLPI
with the its_lock. While this makes perfectly good sense, as the ITS
itself contains the guest translation, an upcoming change will require
twiddling the vLPI mapping in an atomic context. Switch to using the
vIRQ's irq_lock to protect the translation. Use of the its_lock in
vgic_v4_unset_forwarding() is preserved for now as it still needs to
walk the ITS.

Tested-by: Sweet Tea Dorminy <[email protected]>
Signed-off-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
(cherry picked from commit fc4dafe)
Signed-off-by: Gavin Shan <[email protected]>
1 parent 9d272bc commit 084f8b0

2 files changed, 47 insertions(+), 42 deletions(-)

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 24 additions & 24 deletions
@@ -336,39 +336,34 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 		}
 	}
 
-	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-
 	if (irq->hw)
-		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+		ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
 
-	return 0;
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+	return ret;
 }
 
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
-	int ret = 0;
-	unsigned long flags;
+	struct its_vlpi_map map;
+	int ret;
 
-	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+	guard(raw_spinlock_irqsave)(&irq->irq_lock);
 	irq->target_vcpu = vcpu;
-	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
-	if (irq->hw) {
-		struct its_vlpi_map map;
-
-		ret = its_get_vlpi(irq->host_irq, &map);
-		if (ret)
-			return ret;
+	if (!irq->hw)
+		return 0;
 
-		if (map.vpe)
-			atomic_dec(&map.vpe->vlpi_count);
-		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-		atomic_inc(&map.vpe->vlpi_count);
+	ret = its_get_vlpi(irq->host_irq, &map);
+	if (ret)
+		return ret;
 
-		ret = its_map_vlpi(irq->host_irq, &map);
-	}
+	if (map.vpe)
+		atomic_dec(&map.vpe->vlpi_count);
 
-	return ret;
+	map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+	atomic_inc(&map.vpe->vlpi_count);
+	return its_map_vlpi(irq->host_irq, &map);
 }
 
 static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
@@ -786,12 +781,17 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 /* Requires the its_lock to be held. */
 static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 {
+	struct vgic_irq *irq = ite->irq;
 	list_del(&ite->ite_list);
 
 	/* This put matches the get in vgic_add_lpi. */
-	if (ite->irq) {
-		if (ite->irq->hw)
-			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+	if (irq) {
+		scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
+			if (irq->hw)
+				WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
+			irq->hw = false;
+		}
 
 		vgic_put_irq(kvm, ite->irq);
 	}
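
The hunks above lean on the kernel's scope-based lock guards, guard() and
scoped_guard() from <linux/cleanup.h>, which unlock automatically on every
scope exit. Below is a minimal userspace sketch of the same idea, built on
the compiler cleanup attribute those helpers use. It is not kernel API:
LOCK_GUARD and guard_unlock are hypothetical names, and a pthread mutex
stands in for the raw spinlock. The property it demonstrates is the one
update_affinity() now relies on: early returns such as "if (!irq->hw)
return 0;" cannot leak the lock.

/*
 * Userspace analogue of guard()/scoped_guard(); illustrative only.
 * Build with: gcc -pthread guard_demo.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int hw_mapped;	/* stands in for irq->hw */

static void guard_unlock(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
}

/* Lock on entry; the cleanup handler unlocks when the guard leaves scope. */
#define LOCK_GUARD(lock)						\
	pthread_mutex_t *__scope_guard					\
	__attribute__((cleanup(guard_unlock), unused)) =		\
		(pthread_mutex_lock(lock), (lock))

static int update_affinity_demo(void)
{
	LOCK_GUARD(&demo_lock);

	if (!hw_mapped)
		return 0;	/* early return: the guard still unlocks */

	/* ... hardware remapping would happen here, under the lock ... */
	return 1;		/* the guard unlocks on this path too */
}

int main(void)
{
	printf("%d\n", update_affinity_demo());
	return 0;
}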

arch/arm64/kvm/vgic/vgic-v4.c

Lines changed: 23 additions & 18 deletions
@@ -441,9 +441,11 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 			    irq_entry->msi.data, &irq))
 		return 0;
 
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
 	/* Silently exit if the vLPI is already mapped */
 	if (irq->hw)
-		return 0;
+		goto out_unlock_irq;
 
 	/*
 	 * Emit the mapping request. If it fails, the ITS probably
@@ -463,30 +465,30 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 
 	ret = its_map_vlpi(virq, &map);
 	if (ret)
-		return ret;
+		goto out_unlock_irq;
 
 	irq->hw = true;
 	irq->host_irq = virq;
 	atomic_inc(&map.vpe->vlpi_count);
 
 	/* Transfer pending state */
-	raw_spin_lock_irqsave(&irq->irq_lock, flags);
-	if (irq->pending_latch) {
-		ret = irq_set_irqchip_state(irq->host_irq,
-					    IRQCHIP_STATE_PENDING,
-					    irq->pending_latch);
-		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+	if (!irq->pending_latch)
+		goto out_unlock_irq;
 
-		/*
-		 * Clear pending_latch and communicate this state
-		 * change via vgic_queue_irq_unlock.
-		 */
-		irq->pending_latch = false;
-		vgic_queue_irq_unlock(kvm, irq, flags);
-	} else {
-		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-	}
+	ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
+				    irq->pending_latch);
+	WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+
+	/*
+	 * Clear pending_latch and communicate this state
+	 * change via vgic_queue_irq_unlock.
+	 */
+	irq->pending_latch = false;
+	vgic_queue_irq_unlock(kvm, irq, flags);
+	return ret;
 
+out_unlock_irq:
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	return ret;
 }
 
@@ -495,7 +497,8 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 {
 	struct vgic_its *its;
 	struct vgic_irq *irq;
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 
 	if (!vgic_supports_direct_msis(kvm))
 		return 0;
@@ -515,13 +518,15 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 	if (ret)
 		goto out;
 
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	WARN_ON(irq->hw && irq->host_irq != virq);
 	if (irq->hw) {
 		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
 		irq->hw = false;
 		ret = its_unmap_vlpi(virq);
 	}
 
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 out:
 	mutex_unlock(&its->its_lock);
 	return ret;
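
As the commit message notes, vgic_v4_unset_forwarding() keeps the its_lock
because it still has to walk the ITS, so after this change the raw irq_lock
simply nests inside it. The sketch below shows that resulting shape as a
userspace analogue: a sleepable mutex stands in for the its_lock and a
spinlock for the irq_lock, with all names illustrative rather than the
kernel's. Taking the non-sleeping lock inside the sleepable one (never the
reverse) is the ordering the kernel code preserves.

/*
 * Lock-nesting sketch for the unset-forwarding path; illustrative only.
 * Build with: gcc -pthread nesting_demo.c
 */
#include <pthread.h>

static pthread_mutex_t its_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer, sleepable */
static pthread_spinlock_t irq_lock;				/* inner, non-sleeping */
static int hw;							/* stands in for irq->hw */

static int unset_forwarding_demo(void)
{
	int ret = 0;

	pthread_mutex_lock(&its_lock);		/* walk the "ITS" tables */

	pthread_spin_lock(&irq_lock);		/* protect the vLPI translation */
	if (hw) {
		hw = 0;				/* kernel would also call its_unmap_vlpi() */
		ret = 1;
	}
	pthread_spin_unlock(&irq_lock);

	pthread_mutex_unlock(&its_lock);
	return ret;
}

int main(void)
{
	pthread_spin_init(&irq_lock, PTHREAD_PROCESS_PRIVATE);
	hw = 1;
	return !unset_forwarding_demo();
}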
