Skip to content

Commit

Permalink
FROMLIST: BACKPORT: [PATCH v2 2/3] arm64: compat: Split the sigreturn…
Browse files Browse the repository at this point in the history
… trampolines and kuser helpers (assembler sources)

(cherry picked from url http://lkml.iu.edu/hypermail/linux/kernel/1709.1/01902.html)

AArch32 processes are currently provided with a special [vectors] page that
contains the sigreturn trampolines and the kuser helpers, at the fixed
address mandated by the kuser helpers ABI.

Having both functionalities in the same page has become problematic,
because:

* It makes it impossible to disable the kuser helpers (the sigreturn
  trampolines cannot be removed), which is possible on arm.

* A future 32-bit vDSO would provide the sigreturn trampolines itself,
  making those in [vectors] redundant.

This patch addresses the problem by moving the sigreturn trampoline
sources into their own file.  The comments were wrapped to reduce the
wrath of checkpatch.pl.

Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Signed-off-by: Mark Salyzyn <salyzyn@android.com>
Bug: 9674955
Bug: 63737556
Bug: 20045882
Change-Id: I1d7b96e7cfbe979ecf4cb4996befd1f3ae0e64fd
Signed-off-by: khusika <khusikadhamar@gmail.com>
  • Loading branch information
kevin-brodsky-arm authored and khusika committed Oct 13, 2018
1 parent bf79d6b commit 754a4f4
Show file tree
Hide file tree
Showing 3 changed files with 73 additions and 46 deletions.
4 changes: 3 additions & 1 deletion arch/arm64/kernel/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,11 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
cpufeature.o alternative.o psci-call.o \
smp.o smp_spin_table.o topology.o cacheinfo.o

arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
arm64-obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_COMPAT) += sigreturn32.o
arm64-obj-$(CONFIG_COMPAT) += kuser32.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
Expand Down
48 changes: 3 additions & 45 deletions arch/arm64/kernel/kuser32.S
Original file line number Diff line number Diff line change
Expand Up @@ -20,16 +20,13 @@
*
* AArch32 user helpers.
*
* Each segment is 32-byte aligned and will be moved to the top of the high
* vector page. New segments (if ever needed) must be added in front of
* existing ones. This mechanism should be used only for things that are
* really small and justified, and not be abused freely.
* These helpers are provided for compatibility with AArch32 binaries that
* still need them. They are installed at a fixed address by
* aarch32_setup_additional_pages().
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/

#include <asm/unistd.h>

.align 5
.globl __kuser_helper_start
__kuser_helper_start:
Expand Down Expand Up @@ -77,42 +74,3 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:

/*
* AArch32 sigreturn code
*
* For ARM syscalls, the syscall number has to be loaded into r7.
* We do not support an OABI userspace.
*
* For Thumb syscalls, we also pass the syscall number via r7. We therefore
* need two 16-bit instructions.
*
* NOTE(review): the .byte sequences below are little-endian instruction
* encodings, so the immediate (syscall number) byte comes first and the
* opcode byte(s) follow:
*   ARM:   0xe3a070NN = mov r7, #NN    0xef0000NN = svc #NN
*   Thumb: 0x27NN     = movs r7, #NN   0xdfNN     = svc #NN
* The original comments on the Thumb pairs were swapped (the 0x27 pair is
* the mov, the 0xdf pair is the svc); corrected below. Execution order
* (mov r7 first, then svc) was always right — only the labels were wrong.
*/
.globl __aarch32_sigret_code_start
__aarch32_sigret_code_start:

/*
* ARM Code
*/
.byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
.byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn

/*
* Thumb code
*/
.byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn
.byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn

/*
* ARM code
*/
.byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
.byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn

/*
* Thumb code
*/
.byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn
.byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn

.globl __aarch32_sigret_code_end
__aarch32_sigret_code_end:
67 changes: 67 additions & 0 deletions arch/arm64/kernel/sigreturn32.S
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
/*
* sigreturn trampolines for AArch32.
*
* Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* AArch32 sigreturn code
*
* For ARM syscalls, the syscall number has to be loaded into r7.
* We do not support an OABI userspace.
*
* For Thumb syscalls, we also pass the syscall number via r7. We therefore
* need two 16-bit instructions.
*
* NOTE(review): each .byte sequence is a little-endian instruction
* encoding — the immediate (syscall number) byte comes first, then the
* opcode byte(s):
*   ARM:   0xe3a070NN = mov r7, #NN    0xef0000NN = svc #NN
*   Thumb: 0x27NN     = movs r7, #NN   0xdfNN     = svc #NN
* The comments on the Thumb pairs below were swapped in the original
* (the 0x27 pair is the mov, the 0xdf pair is the svc); corrected here.
* The byte order itself (mov r7 before svc) was already correct.
*/

#include <asm/unistd.h>

.globl __aarch32_sigret_code_start
__aarch32_sigret_code_start:

/*
* ARM Code
*/
// mov r7, #__NR_compat_sigreturn
.byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3
// svc #__NR_compat_sigreturn
.byte __NR_compat_sigreturn, 0x00, 0x00, 0xef

/*
* Thumb code
*/
// mov r7, #__NR_compat_sigreturn
.byte __NR_compat_sigreturn, 0x27
// svc #__NR_compat_sigreturn
.byte __NR_compat_sigreturn, 0xdf

/*
* ARM code
*/
// mov r7, #__NR_compat_rt_sigreturn
.byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3
// svc #__NR_compat_rt_sigreturn
.byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef

/*
* Thumb code
*/
// mov r7, #__NR_compat_rt_sigreturn
.byte __NR_compat_rt_sigreturn, 0x27
// svc #__NR_compat_rt_sigreturn
.byte __NR_compat_rt_sigreturn, 0xdf

.globl __aarch32_sigret_code_end
__aarch32_sigret_code_end:

0 comments on commit 754a4f4

Please sign in to comment.