File: src/sys/arch/amd64/amd64/spl.S

Revision 1.43.4.3, Fri Apr 10 14:41:59 2020 UTC, by bouyer
Branch: bouyer-xenpvh
Changes since 1.43.4.2: +7 -2 lines

spllower(): Also check Xen pending events
hypervisor_pvhvm_callback(): exit via Xdoreti, so that pending interrupts
are checked.
Disable __HAVE_FAST_SOFTINTS only for XENPV; it now works for PVHVM.
We still have to disable PREEMPTION until we support MULTIPROCESSOR.

/*	$NetBSD: spl.S,v 1.43.4.3 2020/04/10 14:41:59 bouyer Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_kasan.h"
#include "opt_kmsan.h"

#define ALIGN_TEXT	.align 16,0x90

#include <machine/asm.h>
#include <machine/trap.h>
#include <machine/segments.h>
#include <machine/frameasm.h>

#include "assym.h"

	.text

/*
 * int splraise(int s);
 */
ENTRY(splraise)
	movl	CPUVAR(ILEVEL),%eax
	cmpl	%edi,%eax
	cmoval	%eax,%edi
	movl	%edi,CPUVAR(ILEVEL)
	KMSAN_INIT_RET(4)
	ret
END(splraise)
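
/*
 * Rough C equivalent of splraise() (a sketch, not part of the build;
 * ci_ilevel stands for the CPUVAR(ILEVEL) per-CPU field):
 *
 *	int splraise(int s)
 *	{
 *		int old = curcpu()->ci_ilevel;
 *
 *		if (s > old)		// the cmov keeps the higher level
 *			curcpu()->ci_ilevel = s;
 *		return old;		// previous level, returned in %eax
 *	}
 *
 * KMSAN_INIT_RET(4) only marks the 4-byte return value as initialized
 * for KMSAN; it does not change the result.
 */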

#ifndef XENPV
/*
 * Xsoftintr()
 *
 * Switch to the LWP assigned to handle interrupts from the given
 * source.  We borrow the VM context from the interrupted LWP.
 *
 * On entry:
 *
 *	%rax		intrsource
 *	%r13		address to return to
 */
IDTVEC(softintr)
	/* set up struct switchframe */
	pushq	$_C_LABEL(softintr_ret)
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	movl	$IPL_HIGH,CPUVAR(ILEVEL)
	movq	CPUVAR(CURLWP),%r15
	movq	IS_LWP(%rax),%rdi	/* switch to handler LWP */
	movq	L_PCB(%rdi),%rdx
	movq	L_PCB(%r15),%rcx
	movq	%rdi,CPUVAR(CURLWP)

#ifdef KASAN
	/* clear the new stack */
	pushq	%rax
	pushq	%rdx
	pushq	%rcx
	callq	_C_LABEL(kasan_softint)
	popq	%rcx
	popq	%rdx
	popq	%rax
#endif

#ifdef KMSAN
	pushq	%rax
	pushq	%rdx
	pushq	%rcx
	callq	_C_LABEL(kmsan_softint)
	popq	%rcx
	popq	%rdx
	popq	%rax
#endif

	/* save old context */
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)

	/* switch to the new stack */
	movq	PCB_RSP0(%rdx),%rsp

	/* dispatch */
	sti
	movq	%r15,%rdi		/* interrupted LWP */
	movl	IS_MAXLEVEL(%rax),%esi	/* ipl to run at */
	call	_C_LABEL(softint_dispatch)/* run handlers */
	cli

	/* restore old context */
	movq	L_PCB(%r15),%rcx
	movq	PCB_RSP(%rcx),%rsp

	xchgq	%r15,CPUVAR(CURLWP)	/* must be globally visible */
	popq	%r15			/* unwind switchframe */
	addq	$(5 * 8),%rsp
	jmp	*%r13			/* back to Xspllower/Xdoreti */
IDTVEC_END(softintr)
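
/*
 * In rough pseudo-C, Xsoftintr() amounts to the following (a sketch;
 * the lwp/pcb field names approximate the assym.h offsets used above):
 *
 *	// on entry: %rax = struct intrsource *is, %r13 = resume address
 *	push switchframe (softintr_ret, %rbx, %r12, %r13, %r14, %r15);
 *	ci->ci_ilevel = IPL_HIGH;
 *	pinned = ci->ci_curlwp;			// interrupted LWP
 *	l = is->is_lwp;				// softint handler LWP
 *	ci->ci_curlwp = l;
 *	save %rsp and %rbp in pinned's pcb;	// old context
 *	%rsp = l's pcb_rsp0;			// switch to handler stack
 *	x86_enable_intr();
 *	softint_dispatch(pinned, is->is_maxlevel);
 *	x86_disable_intr();
 *	%rsp = pinned's saved pcb_rsp;		// back to the old stack
 *	ci->ci_curlwp = pinned;
 *	pop switchframe;
 *	goto *%r13;				// Xspllower()/Xdoreti() loop
 */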

/*
 * softintr_ret()
 *
 * Trampoline function that gets returned to by cpu_switchto() when
 * an interrupt handler blocks.  On entry:
 *
 *	%rax		prevlwp from cpu_switchto()
 */
ENTRY(softintr_ret)
	incl	CPUVAR(MTX_COUNT)	/* re-adjust after mi_switch */
	cli
	jmp	*%r13			/* back to Xspllower/Xdoreti */
END(softintr_ret)

/*
 * void softint_trigger(uintptr_t machdep);
 *
 * Software interrupt registration.
 */
ENTRY(softint_trigger)
	orl	%edi,CPUVAR(IPENDING)	/* atomic on local cpu */
	ret
END(softint_trigger)
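
/*
 * A sketch of the equivalent C (ci_ipending stands for CPUVAR(IPENDING)):
 *
 *	void softint_trigger(uintptr_t machdep)
 *	{
 *		curcpu()->ci_ipending |= machdep;
 *	}
 *
 * A single or-to-memory instruction is used, so the update cannot be
 * torn by an interrupt on the local CPU.
 */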


/*
 * Xrecurse_preempt()
 *
 * Handles preemption interrupts via Xspllower().
 */
IDTVEC(recurse_preempt)
	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)
	sti
	xorq	%rdi,%rdi
	KMSAN_INIT_ARG(8)
	call	_C_LABEL(kpreempt)
	cli
	jmp	*%r13			/* back to Xspllower */
IDTVEC_END(recurse_preempt)

/*
 * Xresume_preempt()
 *
 * Handles preemption interrupts via Xdoreti().
 */
IDTVEC(resume_preempt)
	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)
	sti
	testq	$SEL_RPL,TF_CS(%rsp)
	jnz	1f
	movq	TF_RIP(%rsp),%rdi
	KMSAN_INIT_ARG(8)
	call	_C_LABEL(kpreempt)	/* from kernel */
	cli
	jmp	*%r13			/* back to Xdoreti */
1:
	call	_C_LABEL(preempt)	/* from user */
	cli
	jmp	*%r13			/* back to Xdoreti */
IDTVEC_END(resume_preempt)
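
/*
 * In outline (a sketch): Xresume_preempt() drops ILEVEL to IPL_PREEMPT
 * and then does
 *
 *	if (ISSET(frame->tf_cs, SEL_RPL))
 *		preempt();			// interrupted user mode
 *	else
 *		kpreempt(frame->tf_rip);	// interrupted kernel mode
 *
 * before returning to the Xdoreti() loop via %r13.  Xrecurse_preempt()
 * has no trapframe to inspect and always calls kpreempt(0).
 */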
#endif /* !XENPV */

#ifndef XENPV
/*
 * void spllower(int s);
 *
 * Must be the same size as cx8_spllower().  This must use
 * pushf/cli/popf as it is used early in boot where interrupts
 * are disabled via eflags/IE.
 */
ENTRY(spllower)
	cmpl	CPUVAR(ILEVEL),%edi
	jae	1f
	movl	CPUVAR(IUNMASK)(,%rdi,4),%edx
	pushf
	cli
	testl	CPUVAR(IPENDING),%edx
	jnz	2f
#if defined(XEN)
	movl	CPUVAR(XUNMASK)(,%rdi,4),%edx
	testl	CPUVAR(XPENDING),%edx
	jnz	2f
#endif
	movl	%edi,CPUVAR(ILEVEL)
	popf
1:
	ret
	ret
2:
	popf
	jmp	_C_LABEL(Xspllower)
3:
	.space 16
	.align	16
END(spllower)
LABEL(spllower_end)
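
/*
 * A rough C sketch of spllower() (field names approximate the CPUVAR()
 * offsets; not part of the build):
 *
 *	void spllower(int s)
 *	{
 *		struct cpu_info *ci = curcpu();
 *
 *		if (s >= ci->ci_ilevel)
 *			return;
 *		// pushf/cli: remember and mask the interrupt flag
 *		if ((ci->ci_ipending & ci->ci_iunmask[s]) != 0)
 *			return Xspllower(s);	// popf, run deferred interrupts
 *	#if defined(XEN)
 *		if ((ci->ci_xpending & ci->ci_xunmask[s]) != 0)
 *			return Xspllower(s);	// popf, run deferred Xen events
 *	#endif
 *		ci->ci_ilevel = s;
 *		// popf: restore the saved interrupt flag
 *	}
 */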

/*
 * void	cx8_spllower(int s);
 *
 * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
 *
 * edx : eax = old level / old ipending
 * ecx : ebx = new level / old ipending
 */
ENTRY(cx8_spllower)
	movl	CPUVAR(ILEVEL),%edx
	movq	%rbx,%r8
	cmpl	%edx,%edi			/* new level is lower? */
	jae	1f
0:
	movl	CPUVAR(IPENDING),%eax
	movl	%edi,%ecx
	testl	%eax,CPUVAR(IUNMASK)(,%rcx,4)/* deferred interrupts? */
	movl	%eax,%ebx
	/*
	 * On the P4 this jump is cheaper than patching in junk
	 * using cmov.  Is cmpxchg expensive if it fails?
	 */
	jnz	2f
	cmpxchg8b CPUVAR(ISTATE)		/* swap in new ilevel */
	jnz	0b
1:
	movq	%r8,%rbx
	ret
2:
	movq	%r8,%rbx
	.type	_C_LABEL(cx8_spllower_patch), @function
LABEL(cx8_spllower_patch)
	jmp	_C_LABEL(Xspllower)

	.align	16
END(cx8_spllower_patch)
END(cx8_spllower)
LABEL(cx8_spllower_end)
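
/*
 * cx8_spllower() avoids touching the interrupt flag: ILEVEL and
 * IPENDING share one 64-bit word (CPUVAR(ISTATE)) and cmpxchg8b swaps
 * the pair atomically.  Roughly (a sketch; the field names are
 * illustrative only):
 *
 *	do {
 *		old = ci's {ilevel, ipending} pair;
 *		if ((old.ipending & ci->ci_iunmask[s]) != 0)
 *			return Xspllower(s);	// deferred work to run
 *		new.ilevel = s;
 *		new.ipending = old.ipending;
 *	} while (cmpxchg8b of the pair from old to new fails);
 *
 * If IPENDING changes between the load and the cmpxchg8b, the swap
 * fails and the loop retries with the updated pending set.
 */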
#endif /* !XENPV */

/*
 * void Xspllower(int s);
 *
 * Process pending interrupts.
 *
 * Important registers:
 *   ebx - cpl
 *   r13 - address to resume loop at
 *
 * It is important that the bit scan instruction is bsr: it picks up the
 * highest 2 bits (currently the IPI and clock handlers) first, to avoid
 * deadlocks where one CPU sends an IPI, another one is at splhigh() and
 * defers it, lands in here via splx(), and handles a lower-prio one
 * first, which needs to take the kernel lock --> the sending CPU will
 * never see that CPU accept the IPI (see pmap_tlb_shootnow).
 */
	nop
	.align	4	/* Avoid confusion with cx8_spllower_end */

IDTVEC(spllower)
	pushq	%rbx
	pushq	%r13
	pushq	%r12
	movl	%edi,%ebx
	leaq	1f(%rip),%r13		/* address to resume loop at */
1:
#if !defined(XENPV)
	movl	%ebx,%eax		/* get cpl */
	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
	CLI(si)
	andl	CPUVAR(IPENDING),%eax	/* any non-masked bits left? */
	jz	2f
	bsrl	%eax,%eax
	btrl	%eax,CPUVAR(IPENDING)
	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
	jmp	*IS_RECURSE(%rax)
#endif
2:
#if defined(XEN)
	movl	%ebx,%eax		/* get cpl */
	movl	CPUVAR(XUNMASK)(,%rax,4),%eax
	CLI(si)
	andl	CPUVAR(XPENDING),%eax	/* any non-masked bits left? */
	jz	3f
	bsrl	%eax,%eax
	btrl	%eax,CPUVAR(XPENDING)
	movq	CPUVAR(XSOURCES)(,%rax,8),%rax
	jmp	*IS_RECURSE(%rax)
#endif
3:
	movl	%ebx,CPUVAR(ILEVEL)
	STI(si)
	popq	%r12
	popq	%r13
	popq	%rbx
	ret
IDTVEC_END(spllower)
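
/*
 * The loop above, roughly, in C (a sketch; %r13 holds the address of
 * label 1 so the recurse stubs can jump back into the loop):
 *
 *	for (;;) {
 *		x86_disable_intr();
 *		pending = ci->ci_ipending & ci->ci_iunmask[cpl];
 *		// plus an equivalent XPENDING/XUNMASK pass on Xen
 *		if (pending == 0)
 *			break;
 *		bit = fls32(pending) - 1;	// bsr: highest bit first
 *		ci->ci_ipending &= ~(1U << bit);
 *		goto *ci->ci_isources[bit]->is_recurse;	// comes back via %r13
 *	}
 *	ci->ci_ilevel = cpl;
 *	x86_enable_intr();
 */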

/*
 * void Xdoreti(void);
 *
 * Handle return from interrupt after device handler finishes.
 *
 * Important registers:
 *   ebx - cpl to restore
 *   r13 - address to resume loop at
 */
IDTVEC(doreti)
	popq	%rbx			/* get previous priority */
	decl	CPUVAR(IDEPTH)
	leaq	1f(%rip),%r13
1:
#if !defined(XENPV)
	movl    %ebx,%eax
	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
	CLI(si)
	andl	CPUVAR(IPENDING),%eax
	jz	2f
	bsrl	%eax,%eax		/* slow, but not worth optimizing */
	btrl	%eax,CPUVAR(IPENDING)
	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
	jmp	*IS_RESUME(%rax)
#endif
2:
#if defined(XEN)
	movl    %ebx,%eax
	movl	CPUVAR(XUNMASK)(,%rax,4),%eax
	CLI(si)
	andl	CPUVAR(XPENDING),%eax
	jz	3f
	bsrl	%eax,%eax		/* slow, but not worth optimizing */
	btrl	%eax,CPUVAR(XPENDING)
	movq	CPUVAR(XSOURCES)(,%rax,8),%rax
	jmp	*IS_RESUME(%rax)
#endif
3:	/* Check for ASTs on exit to user mode. */
	movl	%ebx,CPUVAR(ILEVEL)
5:
	testb	$SEL_RPL,TF_CS(%rsp)
	jz	6f

	.type	_C_LABEL(doreti_checkast), @function
LABEL(doreti_checkast)
	movq	CPUVAR(CURLWP),%r14
	CHECK_ASTPENDING(%r14)
	je	3f
	CLEAR_ASTPENDING(%r14)
	STI(si)
	movl	$T_ASTFLT,TF_TRAPNO(%rsp)	/* XXX undo later.. */
	/* Pushed T_ASTFLT into tf_trapno on entry. */
	movq	%rsp,%rdi
	KMSAN_INIT_ARG(8)
	call	_C_LABEL(trap)
	CLI(si)
	jmp	doreti_checkast
3:
	CHECK_DEFERRED_SWITCH
	jnz	9f
	HANDLE_DEFERRED_FPU
6:
	INTRFASTEXIT
9:
	STI(si)
	call	_C_LABEL(do_pmap_load)
	CLI(si)
	jmp	doreti_checkast		/* recheck ASTs */
END(doreti_checkast)
IDTVEC_END(doreti)
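
/*
 * The tail of Xdoreti(), from label 3 on, in rough C (a sketch; field
 * names are approximate):
 *
 *	ci->ci_ilevel = cpl;
 *	if (ISSET(frame->tf_cs, SEL_RPL)) {	// returning to user mode
 *		for (;;) {
 *			if (l->l_md.md_astpending) {
 *				l->l_md.md_astpending = 0;
 *				x86_enable_intr();
 *				frame->tf_trapno = T_ASTFLT;
 *				trap(frame);
 *				x86_disable_intr();
 *				continue;
 *			}
 *			if (a pmap switch was deferred) {
 *				x86_enable_intr();
 *				do_pmap_load();
 *				x86_disable_intr();
 *				continue;	// recheck ASTs
 *			}
 *			break;
 *		}
 *		// HANDLE_DEFERRED_FPU runs before the final exit
 *	}
 *	INTRFASTEXIT;	// restore registers and iret
 */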