x86: Cleanup pthread_spin_{try}lock.S

Save a jmp on the lock path coming from an initial failure in
pthread_spin_lock.S.  This costs 4 bytes of code, but since the
function still fits in the same number of 16-byte blocks (the default
function alignment) it does not affect the total binary size of
libc.so (unchanged after this commit).
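
(Not part of the commit: for reference, a rough C-level sketch of this
control flow using GCC's __atomic builtins. The lock protocol, 1 means
free and <= 0 means taken, and the retry structure come from the
assembly below; the function name and memory orders are illustrative
only.)

/* Sketch only: 1 == free, <= 0 == taken, matching the `lock decl`
   protocol in pthread_spin_lock.S.  */
static void spin_lock_sketch (int *lock)
{
  /* Fast path: a single atomic decrement; old value 1 means we now own
     the lock.  */
  if (__atomic_fetch_sub (lock, 1, __ATOMIC_ACQUIRE) == 1)
    return;

  for (;;)
    {
      /* Spin with `pause` until the lock value becomes positive again.  */
      do
        __builtin_ia32_pause ();
      while (__atomic_load_n (lock, __ATOMIC_RELAXED) <= 0);

      /* Retry the atomic decrement right here instead of jumping back
         to the function entry; dropping that back-edge jmp is the point
         of the change, at the cost of a few bytes of duplicated code.  */
      if (__atomic_fetch_sub (lock, 1, __ATOMIC_ACQUIRE) == 1)
        return;
    }
}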

pthread_spin_trylock was using a CAS, which is often more expensive,
where a simple xchg suffices.
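
(Not part of the commit: the difference can be sketched in C with GCC's
__atomic builtins. The function names are illustrative; the real code
is the hand-written assembly in the second diff, which likewise returns
EBUSY on contention.)

#include <errno.h>

/* Old approach: CAS 1 -> 0, which only stores if the lock currently
   reads as 1 (free).  `lock cmpxchg` is often the pricier instruction.  */
static int trylock_cas_sketch (int *lock)
{
  int expected = 1;
  if (__atomic_compare_exchange_n (lock, &expected, 0, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return 0;
  return EBUSY;
}

/* New approach: unconditionally exchange 0 (taken) into the lock and
   test the old value; we acquired it iff the old value was 1 (free).
   Storing 0 into an already-taken lock (value <= 0) is harmless for
   this protocol, since only positive values count as free.  */
static int trylock_xchg_sketch (int *lock)
{
  return __atomic_exchange_n (lock, 0, __ATOMIC_ACQUIRE) == 1 ? 0 : EBUSY;
}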

Full check passes on x86-64.
Noah Goldstein 2022-09-30 21:13:27 -07:00
parent 10c779f44a
commit 653c12c7d8
2 changed files with 29 additions and 12 deletions

pthread_spin_lock.S

@@ -19,18 +19,27 @@
 #include <shlib-compat.h>
 
 ENTRY(__pthread_spin_lock)
-1:	LOCK
-	decl	0(%rdi)
-	jne	2f
+	/* Always return zero.  */
 	xor	%eax, %eax
+	LOCK
+	decl	0(%rdi)
+	jne	1f
 	ret
 
 	.align	16
-2:	rep
+1:
+	/* `rep nop` == `pause`.  */
+	rep
 	nop
-	cmpl	$0, 0(%rdi)
-	jg	1b
-	jmp	2b
+	cmpl	%eax, 0(%rdi)
+	jle	1b
+	/* Just repeat the `lock decl` logic here.  The code-size saving
+	   from jumping back to the entry doesn't change how many 16-byte
+	   chunks (default function alignment) the code fits in.  */
+	LOCK
+	decl	0(%rdi)
+	jne	1b
+	ret
 END(__pthread_spin_lock)
 
 versioned_symbol (libc, __pthread_spin_lock, pthread_spin_lock, GLIBC_2_34)

pthread_spin_trylock.S

@@ -20,13 +20,21 @@
 #include <shlib-compat.h>
 
 ENTRY(__pthread_spin_trylock)
-	movl	$1, %eax
 	xorl	%ecx, %ecx
-	lock
-	cmpxchgl %ecx, (%rdi)
+	/* xchg has implicit LOCK prefix.  */
+	xchgl	%ecx, (%rdi)
+
+	/* Branch on result.  The expectation is that users of trylock
+	   will branch on success/failure, so this branch can be used
+	   to predict the coming branch.  It also has the benefit of
+	   breaking the likely expensive memory dependency on (%rdi).  */
+	cmpl	$1, %ecx
+	jnz	1f
+	xorl	%eax, %eax
+	ret
+1:
 	movl	$EBUSY, %eax
-	cmovel	%ecx, %eax
-	retq
+	ret
 END(__pthread_spin_trylock)
 versioned_symbol (libc, __pthread_spin_trylock, pthread_spin_trylock,
		   GLIBC_2_34)
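
(Not part of the commit: the branch-prediction comment above assumes the
usual calling pattern, where the caller immediately tests the return
value. A hypothetical caller in plain POSIX C:)

#include <errno.h>
#include <pthread.h>

/* Hypothetical caller: only enter the critical section if the lock is
   immediately available; otherwise report that it was busy.  */
static int try_do_work (pthread_spinlock_t *lock)
{
  if (pthread_spin_trylock (lock) != 0)  /* EBUSY when already locked.  */
    return 0;
  /* ... critical section ...  */
  pthread_spin_unlock (lock);
  return 1;
}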