Use C11 atomics instead of atomic_increment(_val)

Replace atomic_increment and atomic_increment_val with atomic_fetch_add_relaxed.
One case in sem_post.c uses release semantics (see comment above it).
The others are simple counters and do not protect any shared data from
concurrent accesses.

Passes regress on AArch64.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
This commit is contained in:
Wilco Dijkstra 2022-09-22 15:32:40 +01:00
parent 8114b95cef
commit d1babeb32d
9 changed files with 12 additions and 12 deletions

View file

@@ -228,7 +228,7 @@ __pthread_create_internal (struct __pthread **thread,
the number of threads from within the new thread isn't an option the number of threads from within the new thread isn't an option
since this thread might return and call `pthread_exit' before the since this thread might return and call `pthread_exit' before the
new thread runs. */ new thread runs. */
atomic_increment (&__pthread_total); atomic_fetch_add_relaxed (&__pthread_total, 1);
/* Store a pointer to this thread in the thread ID lookup table. We /* Store a pointer to this thread in the thread ID lookup table. We
could use __thread_setid, however, we only lock for reading as no could use __thread_setid, however, we only lock for reading as no

View file

@@ -85,7 +85,7 @@ by @theglibc{}.
@deftypefun int sem_wait (sem_t *@var{sem}); @deftypefun int sem_wait (sem_t *@var{sem});
@safety{@prelim{}@mtsafe{}@assafe{}@acunsafe{@acucorrupt{}}} @safety{@prelim{}@mtsafe{}@assafe{}@acunsafe{@acucorrupt{}}}
@c atomic_increment (nwaiters) acucorrupt @c atomic_fetch_add_relaxed (nwaiters) acucorrupt
@c @c
@c Given the use atomic operations this function seems @c Given the use atomic operations this function seems
@c to be AS-safe. It is AC-unsafe because there is still @c to be AS-safe. It is AC-unsafe because there is still

View file

@@ -2528,7 +2528,7 @@ aiocb64}, since the LFS transparently replaces the old interface.
@c _dl_allocate_tls_init ok @c _dl_allocate_tls_init ok
@c GET_DTV ok @c GET_DTV ok
@c mmap ok @c mmap ok
@c atomic_increment_val ok @c atomic_fetch_add_relaxed ok
@c munmap ok @c munmap ok
@c change_stack_perm ok @c change_stack_perm ok
@c mprotect ok @c mprotect ok
@@ -2567,7 +2567,7 @@ aiocb64}, since the LFS transparently replaces the old interface.
@c do_clone @asulock @ascuheap @aculock @acsmem @c do_clone @asulock @ascuheap @aculock @acsmem
@c PREPARE_CREATE ok @c PREPARE_CREATE ok
@c lll_lock (pd->lock) @asulock @aculock @c lll_lock (pd->lock) @asulock @aculock
@c atomic_increment ok @c atomic_fetch_add_relaxed ok
@c clone ok @c clone ok
@c atomic_fetch_add_relaxed ok @c atomic_fetch_add_relaxed ok
@c atomic_exchange_acq ok @c atomic_exchange_acq ok

View file

@@ -163,7 +163,7 @@ setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
/* If this failed, it must have had not started yet or else exited. */ /* If this failed, it must have had not started yet or else exited. */
if (!INTERNAL_SYSCALL_ERROR_P (val)) if (!INTERNAL_SYSCALL_ERROR_P (val))
{ {
atomic_increment (&cmdp->cntr); atomic_fetch_add_relaxed (&cmdp->cntr, 1);
return 1; return 1;
} }
else else

View file

@@ -759,7 +759,7 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
we momentarily store a false value; this doesn't matter because there we momentarily store a false value; this doesn't matter because there
is no kosher thing a signal handler interrupting us right here can do is no kosher thing a signal handler interrupting us right here can do
that cares whether the thread count is correct. */ that cares whether the thread count is correct. */
atomic_increment (&__nptl_nthreads); atomic_fetch_add_relaxed (&__nptl_nthreads, 1);
/* Our local value of stopped_start and thread_ran can be accessed at /* Our local value of stopped_start and thread_ran can be accessed at
any time. The PD->stopped_start may only be accessed if we have any time. The PD->stopped_start may only be accessed if we have

View file

@@ -91,7 +91,7 @@ __old_sem_post (sem_t *sem)
/* We must need to synchronize with consumers of this token, so the atomic /* We must need to synchronize with consumers of this token, so the atomic
increment must have release MO semantics. */ increment must have release MO semantics. */
atomic_write_barrier (); atomic_write_barrier ();
(void) atomic_increment_val (futex); atomic_fetch_add_release (futex, 1);
/* We always have to assume it is a shared semaphore. */ /* We always have to assume it is a shared semaphore. */
futex_wake (futex, 1, LLL_SHARED); futex_wake (futex, 1, LLL_SHARED);
return 0; return 0;

View file

@@ -192,7 +192,7 @@ cache_add (int type, const void *key, size_t len, struct datahead *packet,
/* We depend on this value being correct and at least as high as the /* We depend on this value being correct and at least as high as the
real number of entries. */ real number of entries. */
atomic_increment (&table->head->nentries); atomic_fetch_add_relaxed (&table->head->nentries, 1);
/* It does not matter that we are not loading the just increment /* It does not matter that we are not loading the just increment
value, this is just for statistics. */ value, this is just for statistics. */

View file

@@ -425,7 +425,7 @@ __nscd_get_map_ref (request_type type, const char *name,
0)) 0))
cur = NO_MAPPING; cur = NO_MAPPING;
else else
atomic_increment (&cur->counter); atomic_fetch_add_relaxed (&cur->counter, 1);
} }
} }

View file

@@ -72,8 +72,8 @@ static uint32_t nl_timestamp;
uint32_t uint32_t
__bump_nl_timestamp (void) __bump_nl_timestamp (void)
{ {
if (atomic_increment_val (&nl_timestamp) == 0) if (atomic_fetch_add_relaxed (&nl_timestamp, 1) + 1 == 0)
atomic_increment (&nl_timestamp); atomic_fetch_add_relaxed (&nl_timestamp, 1);
return nl_timestamp; return nl_timestamp;
} }
@@ -309,7 +309,7 @@ __check_pf (bool *seen_ipv4, bool *seen_ipv6,
if (cache_valid_p ()) if (cache_valid_p ())
{ {
data = cache; data = cache;
atomic_increment (&cache->usecnt); atomic_fetch_add_relaxed (&cache->usecnt, 1);
} }
else else
{ {