61c86a8668
[ Upstream commit ee0e39a63b78849f8abbef268b13e4838569f646 ] Move is_vsyscall_vaddr() into asm/vsyscall.h to make it available for copy_from_kernel_nofault_allowed() in arch/x86/mm/maccess.c. Reviewed-by: Sohil Mehta <sohil.mehta@intel.com> Signed-off-by: Hou Tao <houtao1@huawei.com> Link: https://lore.kernel.org/r/20240202103935.3154011-2-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Sasha Levin <sashal@kernel.org>
37 lines
988 B
C
Executable file
37 lines
988 B
C
Executable file
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _ASM_X86_VSYSCALL_H
|
|
#define _ASM_X86_VSYSCALL_H
|
|
|
|
#include <linux/seqlock.h>
|
|
#include <uapi/asm/vsyscall.h>
|
|
#include <asm/page_types.h>
|
|
|
|
#ifdef CONFIG_X86_VSYSCALL_EMULATION
|
|
extern void map_vsyscall(void);
|
|
extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
|
|
|
|
/*
|
|
* Called on instruction fetch fault in vsyscall page.
|
|
* Returns true if handled.
|
|
*/
|
|
extern bool emulate_vsyscall(unsigned long error_code,
|
|
struct pt_regs *regs, unsigned long address);
|
|
#else
|
|
/* With CONFIG_X86_VSYSCALL_EMULATION disabled there is nothing to map. */
static inline void map_vsyscall(void)
{
}
|
|
/*
 * Stub for CONFIG_X86_VSYSCALL_EMULATION=n builds: with emulation
 * compiled out, no vsyscall-page fault is ever handled, so always
 * report "not handled".
 */
static inline bool emulate_vsyscall(unsigned long error_code,
				    struct pt_regs *regs, unsigned long address)
{
	return false;
}
|
|
#endif
|
|
|
|
/*
|
|
* The (legacy) vsyscall page is the long page in the kernel portion
|
|
* of the address space that has user-accessible permissions.
|
|
*/
|
|
static inline bool is_vsyscall_vaddr(unsigned long vaddr)
|
|
{
|
|
return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
|
|
}
|
|
|
|
#endif /* _ASM_X86_VSYSCALL_H */
|