kernel_arpi/arch/x86/lib/memmove_32.S
Nick Desaulniers 38bfd3357f FROMLIST: x86, mem: move memmove to out of line assembler
When building ARCH=i386 with CONFIG_LTO_CLANG_FULL=y, it's possible
(depending on additional configs which I have not been able to isolate)
to observe a failure during register allocation:

  error: inline assembly requires more registers than available

when memmove is inlined into tcp_v4_fill_cb() or tcp_v6_fill_cb().

memmove is quite large and probably shouldn't be inlined due to size
alone. A noinline function attribute would be the simplest fix, but
there are a few things that stand out with the current definition:

In addition to having complex constraints that can't always be
resolved, the clobber list seems to be missing %bx and %dx, and
possibly %cl. Because the asm uses numbered operands rather than
symbolic operands, the constraints are quite obnoxious to refactor.
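
For illustration only (this snippet is not from the patch; dst and src
are placeholder variables), symbolic operands let constraints be
referenced by name rather than by position:

  /* numbered operands */
  asm ("movl %1, %0" : "=r" (dst) : "r" (src));
  /* symbolic operands */
  asm ("movl %[in], %[out]" : [out] "=r" (dst) : [in] "r" (src));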

Having a large function be 99% inline asm is a code smell; such a
function should simply be written in stand-alone, out-of-line
assembler. That gives the opportunity for other cleanups, such as
fixing the inconsistent use of tabs vs. spaces, the inconsistent
instruction suffixes, and the label 3 appearing twice. Symbolic
operands and local labels would give this code a fresh coat of paint.

Moving this to out-of-line assembler guarantees that the compiler
cannot inline calls to memmove.

This has been done previously for 64b:
commit 9599ec0471 ("x86-64, mem: Convert memmove() to assembly file
and fix return value bug")

Bug: 247605214
Link: https://lore.kernel.org/llvm/20220927210248.3950201-1-ndesaulniers@google.com/
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Change-Id: I5fde7a76d915c20a594dd9e0d409015855e731b2
2022-09-28 00:27:52 +00:00

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/export.h>
SYM_FUNC_START(memmove)
/*
 * void *memmove(void *dest, const void *src, size_t n)
 * -mregparm=3 passes these in registers:
 */
.set dest, %eax
.set src, %edx
.set n, %ecx
/*
 * Need 3 scratch registers. These need to be saved+restored. Section 3.2.1
 * Footnote 7 of the System V Application Binary Interface Version 1.0 aka
 * "psABI" notes:
 * "Note that in contrast to the Intel386 ABI, %rdi, and %rsi belong to the
 * called function, not the caller."
 * i.e. %edi and %esi are callee-saved for i386 (because they belong to the
 * caller).
 */
.set tmp0, %edi
.set tmp0w, %di
.set tmp1, %ebx
.set tmp1w, %bx
.set tmp2, %esi
.set tmp3b, %cl
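/*
 * Save the incoming dest so it can be popped back into %eax as the return
 * value at .Ldone, along with the callee-saved scratch registers.
 */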
	pushl	%ebp
	movl	%esp, %ebp
	pushl	dest
	pushl	tmp0
	pushl	tmp1
	pushl	tmp2
/* Copies of at least 16 bytes are handled by the loops below. */
	cmpl	$0x10, n
	jb	.L16_byteswap

/* Decide forward/backward copy mode. */
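/*
 * When src is below dest the regions may overlap such that a forward copy
 * would clobber not-yet-read source bytes, so copy backward in that case.
 */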
	cmpl	dest, src
	jb	.Lbackwards_header

/*
 * The movs instruction has a high startup latency, so small copies are
 * handled with plain register moves instead.
 */
	cmpl	$680, n
	jb	.Ltoo_small_forwards

/*
 * The movs instruction is only worthwhile when src and dest share the same
 * alignment (equal low address bytes).
 */
	movl	src, tmp0
	xorl	dest, tmp0
	andl	$0xff, tmp0
	jz	.Lforward_movs
.Ltoo_small_forwards:
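/*
 * Pre-bias the count by 16: combined with the subl at the top of the loop,
 * CF ends up set once fewer than 16 bytes remain, and the addl after the
 * loop removes the bias again.
 */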
	subl	$0x10, n
/*
 * We gobble 16 bytes forward in each loop.
 */
.L16_byteswap_forwards_loop:
	subl	$0x10, n
	movl	0*4(src), tmp0
	movl	1*4(src), tmp1
	movl	tmp0, 0*4(dest)
	movl	tmp1, 1*4(dest)
	movl	2*4(src), tmp0
	movl	3*4(src), tmp1
	movl	tmp0, 2*4(dest)
	movl	tmp1, 3*4(dest)
	leal	0x10(src), src
	leal	0x10(dest), dest
	jae	.L16_byteswap_forwards_loop
	addl	$0x10, n
	jmp	.L16_byteswap
/*
 * Handle data forward by movs. rep movsl implicitly copies %ecx dwords
 * from (%esi) to (%edi), so stage src and dest there first; a saved copy
 * of the source's last dword covers the 1-3 trailing bytes when n is not
 * a multiple of four.
 */
.p2align 4
.Lforward_movs:
	movl	src, tmp2		/* %esi: source for rep movsl */
	movl	dest, tmp0		/* %edi: destination for rep movsl */
	movl	-4(src, n), tmp1	/* save the last dword of the source */
	leal	-4(dest, n), dest	/* dest now points at the last dword slot */
	shrl	$2, n
	rep movsl
	movl	tmp1, (dest)
	jmp	.Ldone
/*
 * Handle data backward by movs. With the direction flag set, rep movsl
 * walks downward from the last dword; a saved copy of the source's first
 * dword covers the 1-3 leading bytes when n is not a multiple of four.
 */
.p2align 4
.Lbackwards_movs:
	movl	(src), tmp1		/* save the first dword of the source */
	leal	-4(src, n), tmp2	/* %esi: last dword of the source */
	leal	-4(dest, n), tmp0	/* %edi: last dword of the destination */
	shrl	$2, n
	std
	rep movsl
	movl	tmp1, (dest)
	cld
	jmp	.Ldone
/*
 * Start to prepare for backward copy.
 */
.p2align 4
.Lbackwards_header:
	cmpl	$680, n
	jb	.Ltoo_small_backwards
	movl	src, tmp0
	xorl	dest, tmp0
	andl	$0xff, tmp0
	jz	.Lbackwards_movs

/*
 * Point src and dest at the tail; the loop below copies backward from there.
 */
.Ltoo_small_backwards:
	addl	n, src
	addl	n, dest
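/* Same 16-byte pre-bias trick as in the forward loop above. */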
	subl	$0x10, n
/*
 * We gobble 16 bytes backward in each loop.
 */
.L16_byteswap_backwards_loop:
	subl	$0x10, n
	movl	-1*4(src), tmp0
	movl	-2*4(src), tmp1
	movl	tmp0, -1*4(dest)
	movl	tmp1, -2*4(dest)
	movl	-3*4(src), tmp0
	movl	-4*4(src), tmp1
	movl	tmp0, -3*4(dest)
	movl	tmp1, -4*4(dest)
	leal	-0x10(src), src
	leal	-0x10(dest), dest
	jae	.L16_byteswap_backwards_loop

/*
 * Undo the bias and point src/dest back at the head of the remaining bytes.
 */
	addl	$0x10, n
	subl	n, src
	subl	n, dest
/*
 * Move data from 8 bytes to 15 bytes.
 */
.p2align 4
.L16_byteswap:
	cmpl	$8, n
	jb	.L8_byteswap
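/*
 * Copy the first and last eight bytes; all loads happen before any store,
 * so overlapping src/dest (and the two halves overlapping each other when
 * n < 16) stays correct. The 4-7 and 2-3 byte cases below use the same
 * trick.
 */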
	movl	0*4(src), tmp0
	movl	1*4(src), tmp1
	movl	-2*4(src, n), tmp2
	movl	-1*4(src, n), src
	movl	tmp0, 0*4(dest)
	movl	tmp1, 1*4(dest)
	movl	tmp2, -2*4(dest, n)
	movl	src, -1*4(dest, n)
	jmp	.Ldone
/*
 * Move data from 4 bytes to 7 bytes.
 */
.p2align 4
.L8_byteswap:
	cmpl	$4, n
	jb	.L4_byteswap
	movl	0*4(src), tmp0
	movl	-1*4(src, n), tmp1
	movl	tmp0, 0*4(dest)
	movl	tmp1, -1*4(dest, n)
	jmp	.Ldone
/*
 * Move data from 2 bytes to 3 bytes.
 */
.p2align 4
.L4_byteswap:
	cmpl	$2, n
	jb	.Lbyteswap
	movw	0*2(src), tmp0w
	movw	-1*2(src, n), tmp1w
	movw	tmp0w, 0*2(dest)
	movw	tmp1w, -1*2(dest, n)
	jmp	.Ldone

/*
 * Move data for 1 byte.
 */
.p2align 4
.Lbyteswap:
	cmpl	$1, n
	jb	.Ldone
	movb	(src), tmp3b
	movb	tmp3b, (dest)
.p2align 4
.Ldone:
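/*
 * Restore the scratch registers and return the original dest (pushed in the
 * prologue) in %eax.
 */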
	popl	tmp2
	popl	tmp1
	popl	tmp0
	popl	%eax
	popl	%ebp
	RET
SYM_FUNC_END(memmove)
EXPORT_SYMBOL(memmove)