Commit 6c88046

Author: Sebastian Ott (committed)

KVM: arm64: Protect vLPI translation with vgic_irq::irq_lock
JIRA: https://issues.redhat.com/browse/RHEL-92805

Though undocumented, KVM generally protects the translation of a vLPI with the its_lock. While this makes perfectly good sense, as the ITS itself contains the guest translation, an upcoming change will require twiddling the vLPI mapping in an atomic context.

Switch to using the vIRQ's irq_lock to protect the translation. Use of the its_lock in vgic_v4_unset_forwarding() is preserved for now as it still needs to walk the ITS.

Tested-by: Sweet Tea Dorminy <[email protected]>
Signed-off-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
(cherry picked from commit fc4dafe)
Signed-off-by: Sebastian Ott <[email protected]>
1 parent cc19af6 commit 6c88046
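
Editor's note: the core of the change is easiest to see in the first hunk of vgic-its.c below. update_lpi_config() used to drop the per-IRQ lock before calling into the ITS driver; it now issues the hardware update while irq_lock is held, so the irq->hw / irq->host_irq translation cannot change across the call. A minimal userspace sketch of the resulting shape (hypothetical names: a pthread mutex stands in for the kernel's raw spinlock, hw_prop_update() for its_prop_update_vlpi()):

/* build: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdbool.h>

struct virq {
	pthread_mutex_t irq_lock;	/* stand-in for vgic_irq::irq_lock */
	bool hw;			/* vLPI forwarded to hardware? */
	int host_irq;
};

/* Stub for its_prop_update_vlpi(); assumed callable under the lock. */
static int hw_prop_update(int host_irq)
{
	(void)host_irq;
	return 0;
}

static int update_config(struct virq *irq)
{
	int ret = 0;

	/* Read the translation and issue the HW update under the lock. */
	pthread_mutex_lock(&irq->irq_lock);
	if (irq->hw)
		ret = hw_prop_update(irq->host_irq);
	pthread_mutex_unlock(&irq->irq_lock);

	return ret;
}

int main(void)
{
	struct virq irq = {
		.irq_lock = PTHREAD_MUTEX_INITIALIZER,
		.hw = true,
		.host_irq = 42,
	};

	return update_config(&irq);
}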

File tree

2 files changed: +47 -42 lines changed

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 24 additions & 24 deletions
@@ -336,39 +336,34 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 		}
 	}
 
-	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-
 	if (irq->hw)
-		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+		ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
 
-	return 0;
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+	return ret;
 }
 
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
-	int ret = 0;
-	unsigned long flags;
+	struct its_vlpi_map map;
+	int ret;
 
-	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+	guard(raw_spinlock_irqsave)(&irq->irq_lock);
 	irq->target_vcpu = vcpu;
-	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
-	if (irq->hw) {
-		struct its_vlpi_map map;
-
-		ret = its_get_vlpi(irq->host_irq, &map);
-		if (ret)
-			return ret;
+	if (!irq->hw)
+		return 0;
 
-		if (map.vpe)
-			atomic_dec(&map.vpe->vlpi_count);
-		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-		atomic_inc(&map.vpe->vlpi_count);
+	ret = its_get_vlpi(irq->host_irq, &map);
+	if (ret)
+		return ret;
 
-		ret = its_map_vlpi(irq->host_irq, &map);
-	}
+	if (map.vpe)
+		atomic_dec(&map.vpe->vlpi_count);
 
-	return ret;
+	map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+	atomic_inc(&map.vpe->vlpi_count);
+	return its_map_vlpi(irq->host_irq, &map);
 }
 
 static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
@@ -786,12 +781,17 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 /* Requires the its_lock to be held. */
 static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 {
+	struct vgic_irq *irq = ite->irq;
 	list_del(&ite->ite_list);
 
 	/* This put matches the get in vgic_add_lpi. */
-	if (ite->irq) {
-		if (ite->irq->hw)
-			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+	if (irq) {
+		scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
+			if (irq->hw)
+				WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
+			irq->hw = false;
+		}
 
 		vgic_put_irq(kvm, ite->irq);
 	}
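
Editor's note: guard() and scoped_guard() above are the kernel's scope-based locking helpers from <linux/cleanup.h>; they release the lock automatically when the enclosing scope is left, which is what makes the early `return 0`/`return ret` paths in the rewritten update_affinity() safe. A rough userspace model of the underlying mechanism (illustrative names; the GCC/Clang cleanup attribute with a pthread mutex standing in for the raw spinlock):

/* build: cc guard_sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs automatically when the guard variable goes out of scope;
 * this scope-exit hook is the idea behind guard()/scoped_guard(). */
static void unlock_cleanup(pthread_mutex_t **l)
{
	pthread_mutex_unlock(*l);
}

/* Simplified: the kernel generates a unique variable name per guard,
 * so this version allows only one GUARD per scope. */
#define GUARD(l)							\
	pthread_mutex_t *_g __attribute__((cleanup(unlock_cleanup))) = (l); \
	pthread_mutex_lock(_g)

static int lookup(int key)
{
	GUARD(&lock);		/* held until the function returns */

	if (key < 0)
		return -1;	/* early return still unlocks */

	return 2 * key;
}

int main(void)
{
	printf("%d\n", lookup(21));	/* prints 42 */
	return 0;
}

The real helpers additionally provide typed lock classes and unique identifiers; this sketch only demonstrates the scope-exit unlock.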

arch/arm64/kvm/vgic/vgic-v4.c

Lines changed: 23 additions & 18 deletions
@@ -457,9 +457,11 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 				  irq_entry->msi.data, &irq))
 		return 0;
 
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
 	/* Silently exit if the vLPI is already mapped */
 	if (irq->hw)
-		return 0;
+		goto out_unlock_irq;
 
 	/*
 	 * Emit the mapping request. If it fails, the ITS probably
@@ -479,30 +481,30 @@
 
 	ret = its_map_vlpi(virq, &map);
 	if (ret)
-		return ret;
+		goto out_unlock_irq;
 
 	irq->hw = true;
 	irq->host_irq = virq;
 	atomic_inc(&map.vpe->vlpi_count);
 
 	/* Transfer pending state */
-	raw_spin_lock_irqsave(&irq->irq_lock, flags);
-	if (irq->pending_latch) {
-		ret = irq_set_irqchip_state(irq->host_irq,
-					    IRQCHIP_STATE_PENDING,
-					    irq->pending_latch);
-		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+	if (!irq->pending_latch)
+		goto out_unlock_irq;
 
-		/*
-		 * Clear pending_latch and communicate this state
-		 * change via vgic_queue_irq_unlock.
-		 */
-		irq->pending_latch = false;
-		vgic_queue_irq_unlock(kvm, irq, flags);
-	} else {
-		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-	}
+	ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
+				    irq->pending_latch);
+	WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+
+	/*
+	 * Clear pending_latch and communicate this state
+	 * change via vgic_queue_irq_unlock.
+	 */
+	irq->pending_latch = false;
+	vgic_queue_irq_unlock(kvm, irq, flags);
+	return ret;
 
+out_unlock_irq:
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	return ret;
 }
 
@@ -511,7 +513,8 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 {
 	struct vgic_its *its;
 	struct vgic_irq *irq;
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 
 	if (!vgic_supports_direct_msis(kvm))
 		return 0;
@@ -531,13 +534,15 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 	if (ret)
 		goto out;
 
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	WARN_ON(irq->hw && irq->host_irq != virq);
 	if (irq->hw) {
 		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
 		irq->hw = false;
 		ret = its_unmap_vlpi(virq);
 	}
 
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 out:
 	mutex_unlock(&its->its_lock);
 	return ret;