Event channels should be masked while changing affinity, or else we might
get spurious/lost interrupts.
---
 sys/x86/xen/xen_intr.c | 15 ++++++++++++---
 1 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/sys/x86/xen/xen_intr.c b/sys/x86/xen/xen_intr.c
index fd36e68..bc0781e
--- a/sys/x86/xen/xen_intr.c
+++ b/sys/x86/xen/xen_intr.c
@@ -797,7 +797,7 @@ xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
 	struct evtchn_bind_vcpu bind_vcpu;
 	struct xenisrc *isrc;
 	u_int to_cpu, vcpu_id;
-	int error;
+	int error, masked;
 
 #ifdef XENHVM
 	if (xen_vector_callback_enabled == 0)
@@ -815,6 +815,12 @@ xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
 		return (EINVAL);
 	}
 
+	/*
+	 * Mask the event channel port so we don't receive spurious events
+	 * while changing affinity.
+	 */
+	masked = evtchn_test_and_set_mask(isrc->xi_port);
+
 	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
 	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
 		/*
@@ -825,8 +831,7 @@ xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
 		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
 		isrc->xi_cpu = to_cpu;
 		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
-		mtx_unlock(&xen_intr_isrc_lock);
-		return (0);
+		goto out;
 	}
 
 	bind_vcpu.port = isrc->xi_port;
@@ -848,6 +853,10 @@ xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
 			evtchn_cpu_mask_port(to_cpu, isrc->xi_port);
 		}
 	}
+
+out:
+	if (!masked)
+		evtchn_unmask_port(isrc->xi_port);
 	mtx_unlock(&xen_intr_isrc_lock);
 	return (0);
 #else
-- 
1.7.7.5 (Apple Git-26)
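
For reference, below is a minimal, self-contained C sketch of the pattern the patch follows: record whether the port was already masked, keep it masked for the duration of the affinity change, and unmask it afterwards only if we were the ones who masked it. The evtchn_* helpers and assign_cpu() here are hypothetical stand-ins modelled with a plain bitmask, not the real FreeBSD/Xen primitives.

/*
 * Sketch of the mask/change/conditional-unmask pattern.  All helpers
 * below are stand-ins for illustration only.
 */
#include <stdio.h>

static unsigned long mask_word;		/* bit N set => port N masked */

/* Set the mask bit for a port and report whether it was already set. */
static int
evtchn_test_and_set_mask(unsigned int port)
{
	int was_masked = (mask_word >> port) & 1;

	mask_word |= 1UL << port;
	return (was_masked);
}

static void
evtchn_unmask_port(unsigned int port)
{
	mask_word &= ~(1UL << port);
}

static void
assign_cpu(unsigned int port)
{
	int masked;

	/* Mask so no event is delivered while the binding is in flux. */
	masked = evtchn_test_and_set_mask(port);

	/* ... rebind the port to the new vCPU here ... */

	/* Only undo the masking we did ourselves. */
	if (!masked)
		evtchn_unmask_port(port);
}

int
main(void)
{
	assign_cpu(3);
	printf("port 3 masked after assign: %d\n",
	    (int)((mask_word >> 3) & 1));
	return (0);
}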