RISC-V: Atomic and Locking Routines

This patch implements various atomic and locking routines on RISC-V.  We
mandate the A extension on Linux-capable RISC-V systems, so this can
rely on always having the various atomic instructions available.

2018-01-29  Palmer Dabbelt  <palmer@sifive.com>

        * sysdeps/riscv/nptl/bits/pthreadtypes-arch.h: New file.
        * sysdeps/riscv/nptl/bits/semaphore.h: Likewise.
        * sysdeps/riscv/nptl/libc-lowlevellock.c: Likewise.
        * sysdeps/unix/sysv/linux/riscv/atomic-machine.h: Likewise.
Palmer Dabbelt 2018-01-29 10:27:17 -08:00
parent b2cb5e0298
commit d1c09b2471
5 changed files with 307 additions and 0 deletions
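
The message above leans on the A extension being mandatory: with it, GCC's atomic builtins expand inline to AMO/LR-SC instructions instead of library fallbacks. As a rough illustration (not part of this commit; fetch_add_acquire is a made-up name), a fetch-and-add through the builtin typically becomes a single amoadd.w.aq when compiled with an -march string that includes A, such as rv64gc:

/* Illustration only: with the A extension available, GCC normally emits
   one amoadd.w.aq here; the exact output depends on the compiler and
   -march settings.  */
int
fetch_add_acquire (int *counter)
{
  return __atomic_fetch_add (counter, 1, __ATOMIC_ACQUIRE);
}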

ChangeLog

@@ -92,6 +92,10 @@
* sysdeps/riscv/rvf/s_roundevenf.c: Likewise.
* sysdeps/riscv/rvf/s_roundf.c: Likewise.
* sysdeps/riscv/rvf/s_truncf.c: Likewise.
* sysdeps/riscv/nptl/bits/pthreadtypes-arch.h: New file.
* sysdeps/riscv/nptl/bits/semaphore.h: Likewise.
* sysdeps/riscv/nptl/libc-lowlevellock.c: Likewise.
* sysdeps/unix/sysv/linux/riscv/atomic-machine.h: Likewise.
2018-01-29 Florian Weimer <fweimer@redhat.com>

sysdeps/riscv/nptl/bits/pthreadtypes-arch.h

@@ -0,0 +1,68 @@
/* Machine-specific pthread type layouts. RISC-V version.
Copyright (C) 2011-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<http://www.gnu.org/licenses/>. */
#ifndef _BITS_PTHREADTYPES_ARCH_H
#define _BITS_PTHREADTYPES_ARCH_H 1
#include <endian.h>
#if __riscv_xlen == 64
# define __SIZEOF_PTHREAD_ATTR_T 56
# define __SIZEOF_PTHREAD_MUTEX_T 40
# define __SIZEOF_PTHREAD_MUTEXATTR_T 4
# define __SIZEOF_PTHREAD_COND_T 48
# define __SIZEOF_PTHREAD_CONDATTR_T 4
# define __SIZEOF_PTHREAD_RWLOCK_T 56
# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8
# define __SIZEOF_PTHREAD_BARRIER_T 32
# define __SIZEOF_PTHREAD_BARRIERATTR_T 4
#else
# error "rv32i-based systems are not supported"
#endif
#define __PTHREAD_COMPAT_PADDING_MID
#define __PTHREAD_COMPAT_PADDING_END
#define __PTHREAD_MUTEX_LOCK_ELISION 0
#define __PTHREAD_MUTEX_USE_UNION 0
#define __PTHREAD_MUTEX_NUSERS_AFTER_KIND 0
#define __LOCK_ALIGNMENT
#define __ONCE_ALIGNMENT
/* There is a lot of padding in this structure. While it's not strictly
necessary on RISC-V, we're going to leave it in to be on the safe side in
case it's needed in the future. Most other architectures have the padding,
so this gives us the same extensibility as everyone else has. */
struct __pthread_rwlock_arch_t
{
  unsigned int __readers;
  unsigned int __writers;
  unsigned int __wrphase_futex;
  unsigned int __writers_futex;
  unsigned int __pad3;
  unsigned int __pad4;
  int __cur_writer;
  int __shared;
  unsigned long int __pad1;
  unsigned long int __pad2;
  unsigned int __flags;
};
#define __PTHREAD_RWLOCK_ELISION_EXTRA 0
#endif /* bits/pthreadtypes.h */
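
The padding comment above is what keeps this layout future-proof; the hard requirement is only that the struct fit inside the __SIZEOF_PTHREAD_RWLOCK_T bytes reserved in the public pthread_rwlock_t. A minimal sketch of that invariant (hypothetical check, not part of the commit): on the RV64 LP64 ABI the members add up to 52 bytes, padded to 56 by the 8-byte alignment of the unsigned longs, which is exactly the advertised size.

#include <pthread.h>

/* Hypothetical consistency check, assuming RV64 LP64 type sizes:
   6 unsigned ints + 2 ints = 32 bytes, 2 unsigned longs = 16 bytes,
   1 unsigned int = 4 bytes, padded to an 8-byte boundary -> 56.  */
_Static_assert (sizeof (struct __pthread_rwlock_arch_t)
                <= __SIZEOF_PTHREAD_RWLOCK_T,
                "__pthread_rwlock_arch_t must fit in pthread_rwlock_t");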

sysdeps/riscv/nptl/bits/semaphore.h

@@ -0,0 +1,33 @@
/* Machine-specific POSIX semaphore type layouts. RISC-V version.
Copyright (C) 2002-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<http://www.gnu.org/licenses/>. */
#ifndef _SEMAPHORE_H
# error "Never use <bits/semaphore.h> directly; include <semaphore.h> instead."
#endif
#define __SIZEOF_SEM_T (4 * __SIZEOF_POINTER__)
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
typedef union
{
  char __size[__SIZEOF_SEM_T];
  long int __align;
} sem_t;
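
sem_t is deliberately opaque: four pointers' worth of storage (32 bytes on RV64) with `long int' alignment so the real semaphore structure glibc keeps inside it is naturally aligned. A quick sanity sketch (hypothetical, not part of the commit):

#include <semaphore.h>

/* Hypothetical check of the layout promises made above (on RV64,
   pointers are 8 bytes, so __SIZEOF_SEM_T is 32).  */
_Static_assert (sizeof (sem_t) == __SIZEOF_SEM_T, "sem_t size");
_Static_assert (_Alignof (sem_t) == _Alignof (long int), "sem_t alignment");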

sysdeps/riscv/nptl/libc-lowlevellock.c

@@ -0,0 +1,8 @@
/* This kludge works around a libpthread static linking problem:
https://sourceware.org/bugzilla/show_bug.cgi?id=15648. */
#ifndef SHARED
# define __lll_lock_wait_private weak_function __lll_lock_wait_private
#endif
#include <lowlevellock.c>
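
The kludge above relies on ordinary ELF weak/strong symbol resolution: static libc provides __lll_lock_wait_private only as a weak definition, so a strong definition linked in from libpthread wins (see the referenced bug 15648). A minimal illustration of the mechanism with made-up names, independent of glibc:

/* fallback.c -- weak default definition.  */
__attribute__ ((weak)) int
impl (void)
{
  return 0;   /* used only when no strong `impl' is linked in */
}

/* strong.c -- when this object is also linked, the linker resolves
   every call to `impl' to this strong definition instead.  */
int
impl (void)
{
  return 1;
}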

sysdeps/unix/sysv/linux/riscv/atomic-machine.h

@@ -0,0 +1,194 @@
/* Low-level functions for atomic operations. RISC-V version.
Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<http://www.gnu.org/licenses/>. */
#ifndef _LINUX_RISCV_BITS_ATOMIC_H
#define _LINUX_RISCV_BITS_ATOMIC_H 1
#include <stdint.h>
typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
#define atomic_full_barrier() __sync_synchronize ()
#ifdef __riscv_atomic
# define __HAVE_64B_ATOMICS (__riscv_xlen >= 64)
# define USE_ATOMIC_COMPILER_BUILTINS 1
# define ATOMIC_EXCHANGE_USES_CAS 0
/* Compare and exchange.
For all "bool" routines, we return FALSE if exchange succesful. */
# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
                                  model, __ATOMIC_RELAXED); \
  })
# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
                                  model, __ATOMIC_RELAXED); \
  })
# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
                                  model, __ATOMIC_RELAXED); \
  })
# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
                                  model, __ATOMIC_RELAXED); \
  })
# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
                                 model, __ATOMIC_RELAXED); \
    __oldval; \
  })
# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
                                 model, __ATOMIC_RELAXED); \
    __oldval; \
  })
# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
                                 model, __ATOMIC_RELAXED); \
    __oldval; \
  })
# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
                                 model, __ATOMIC_RELAXED); \
    __oldval; \
  })
/* Atomic compare and exchange. */
# define atomic_compare_and_exchange_bool_acq(mem, new, old) \
__atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
mem, new, old, __ATOMIC_ACQUIRE)
# define atomic_compare_and_exchange_val_acq(mem, new, old) \
__atomic_val_bysize (__arch_compare_and_exchange_val, int, \
mem, new, old, __ATOMIC_ACQUIRE)
# define atomic_compare_and_exchange_val_rel(mem, new, old) \
__atomic_val_bysize (__arch_compare_and_exchange_val, int, \
mem, new, old, __ATOMIC_RELEASE)
/* Atomic exchange (without compare). */
# define __arch_exchange_8_int(mem, newval, model) \
__atomic_exchange_n (mem, newval, model)
# define __arch_exchange_16_int(mem, newval, model) \
__atomic_exchange_n (mem, newval, model)
# define __arch_exchange_32_int(mem, newval, model) \
__atomic_exchange_n (mem, newval, model)
# define __arch_exchange_64_int(mem, newval, model) \
__atomic_exchange_n (mem, newval, model)
# define atomic_exchange_acq(mem, value) \
__atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
# define atomic_exchange_rel(mem, value) \
__atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
/* Atomically add value and return the previous (unincremented) value. */
# define __arch_exchange_and_add_8_int(mem, value, model) \
__atomic_fetch_add (mem, value, model)
# define __arch_exchange_and_add_16_int(mem, value, model) \
__atomic_fetch_add (mem, value, model)
# define __arch_exchange_and_add_32_int(mem, value, model) \
__atomic_fetch_add (mem, value, model)
# define __arch_exchange_and_add_64_int(mem, value, model) \
__atomic_fetch_add (mem, value, model)
# define atomic_exchange_and_add_acq(mem, value) \
__atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
__ATOMIC_ACQUIRE)
# define atomic_exchange_and_add_rel(mem, value) \
__atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
__ATOMIC_RELEASE)
/* Miscellaneous. */
# define asm_amo(which, ordering, mem, value) ({ \
  __atomic_check_size (mem); \
  typeof (*mem) __tmp; \
  if (sizeof (__tmp) == 4) \
    asm volatile (which ".w" ordering "\t%0, %z2, %1" \
                  : "=r" (__tmp), "+A" (* (mem)) \
                  : "rJ" (value)); \
  else if (sizeof (__tmp) == 8) \
    asm volatile (which ".d" ordering "\t%0, %z2, %1" \
                  : "=r" (__tmp), "+A" (* (mem)) \
                  : "rJ" (value)); \
  else \
    abort (); \
  __tmp; })
# define atomic_max(mem, value) asm_amo ("amomaxu", ".aq", mem, value)
# define atomic_min(mem, value) asm_amo ("amominu", ".aq", mem, value)
# define atomic_bit_test_set(mem, bit) \
  ({ typeof (*mem) __mask = (typeof (*mem))1 << (bit); \
     asm_amo ("amoor", ".aq", mem, __mask) & __mask; })
# define catomic_exchange_and_add(mem, value) \
atomic_exchange_and_add (mem, value)
# define catomic_max(mem, value) atomic_max (mem, value)
#else /* __riscv_atomic */
# error "ISAs that do not subsume the A extension are not supported"
#endif /* !__riscv_atomic */
#endif /* bits/atomic.h */
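
Two usage notes, with made-up identifiers, to make the conventions above concrete (neither snippet is part of the commit). First, the "bool" compare-and-exchange macros return 0 (false) when the exchange succeeded, so a spin-acquire loop keeps retrying while the macro returns non-zero:

static int lock_word;   /* hypothetical: 0 = free, 1 = held */

static void
spin_acquire (void)
{
  /* Exits as soon as the CAS swaps 0 -> 1, i.e. when the macro
     reports "false" per the convention documented above.  */
  while (atomic_compare_and_exchange_bool_acq (&lock_word, 1, 0))
    continue;
}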
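
Second, the asm_amo-based helpers return the value that was in memory before the AMO executed; atomic_bit_test_set ORs in a one-bit mask with amoor.w.aq (amoor.d.aq for 8-byte operands) and masks the old value, so the result says whether the bit was already set:

static unsigned int flags_word;   /* hypothetical flag bitmap */

/* Non-zero: bit 0 was already set by someone else.
   Zero: this call is the one that set it.  */
static int
try_claim_bit0 (void)
{
  return atomic_bit_test_set (&flags_word, 0) != 0;
}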