hurd: Reimplement libc locks using mach's gsync

* hurd/Makefile (routines): Add hurdlock.
	* hurd/Versions (GLIBC_PRIVATE): Add a new entry to export the above
	interface.
	(HURD_CTHREADS_0.3): Remove __libc_getspecific.
	* hurd/hurdpid.c: Include <lowlevellock.h>.
	(_S_msg_proc_newids): Use lll_wait to synchronize.
	* hurd/hurdsig.c (reauth_proc): Use __mutex_lock and __mutex_unlock.
	* hurd/setauth.c: Include <hurdlock.h>, use integer for synchronization.
	* mach/Makefile (lock-headers): Remove machine-lock.h.
	* mach/lock-intern.h: Include <lowlevellock.h> instead of
	<machine-lock.h>.
	(__spin_lock_t): New type.
	(__SPIN_LOCK_INITIALIZER): New macro.
	(__spin_lock, __spin_unlock, __spin_try_lock, __spin_lock_locked,
	__mutex_init, __mutex_lock_solid, __mutex_unlock_solid, __mutex_lock,
	__mutex_unlock, __mutex_trylock): Use lll to implement locks.
	* mach/mutex-init.c: Include <lowlevellock.h> instead of <cthreads.h>.
	(__mutex_init): Initialize with lll.
	* manual/errno.texi (EOWNERDEAD, ENOTRECOVERABLE): New errno values.
	* sysdeps/mach/Makefile: Add libmachuser as a dependency for the
	libraries that need lll.
	* sysdeps/mach/hurd/bits/errno.h: Regenerate.
	* sysdeps/mach/hurd/cthreads.c (__libc_getspecific): Remove function.
	* sysdeps/mach/hurd/bits/libc-lock.h: Remove file.
	* sysdeps/mach/hurd/setpgid.c: Include <lowlevellock.h>.
	(__setpgid): Use lll for synchronization.
	* sysdeps/mach/hurd/setsid.c: Likewise with __setsid.
	* sysdeps/mach/bits/libc-lock.h: Include <tls.h> and <lowlevellock.h>
	instead of <cthreads.h>.
	(_IO_lock_inexpensive): New macro.
	(__libc_lock_recursive_t, __rtld_lock_recursive_t): New structures.
	(__libc_lock_self0): New declaration.
	(__libc_lock_owner_self): New macro.
	(__libc_key_t): Remove type.
	(_LIBC_LOCK_INITIALIZER): New macro.
	(__libc_lock_define_initialized, __libc_lock_init, __libc_lock_fini,
	__libc_lock_fini_recursive, __rtld_lock_fini_recursive,
	__libc_lock_lock, __libc_lock_trylock, __libc_lock_unlock,
	__libc_lock_define_initialized_recursive,
	__rtld_lock_define_initialized_recursive,
	__libc_lock_init_recursive, __libc_lock_trylock_recursive,
	__libc_lock_lock_recursive, __libc_lock_unlock_recursive,
	__rtld_lock_initialize, __rtld_lock_trylock_recursive,
	__rtld_lock_lock_recursive, __rtld_lock_unlock_recursive,
	__libc_once_define, __libc_mutex_unlock): Reimplement with lll.
	(__libc_lock_define_recursive, __rtld_lock_define_recursive,
	_LIBC_LOCK_RECURSIVE_INITIALIZER, _RTLD_LOCK_RECURSIVE_INITIALIZER):
	New macros.
	Include <libc-lockP.h> to reimplement libc_key* with pthread_key*.
	* hurd/hurdlock.c: New file.
	* hurd/hurdlock.h: New file.
	* mach/lowlevellock.h: New file.
Agustina Arzille, 2018-03-18 18:22:55 +01:00 (committed by Samuel Thibault)
parent 542c20a171
commit fb4cc8a0c2
20 changed files with 689 additions and 314 deletions

ChangeLog

@@ -6,6 +6,58 @@
 	__libc_cleanup_end): Rewrite implementation using
 	__attribute__ ((__cleanup__)).
 	(__libc_cleanup_push, __libc_cleanup_pop): New macros.
+	[the ChangeLog entry quoted in the commit message above, inserted verbatim]
 2018-03-18  Samuel Thibault  <samuel.thibault@ens-lyon.org>

hurd/Makefile

@@ -54,6 +54,7 @@ routines = hurdstartup hurdinit \
 	    vpprintf \
 	    ports-get ports-set hurdports hurdmsg \
 	    errno-loc \
+	    hurdlock \
 	    $(sig) $(dtable) $(inlines) port-cleanup report-wait xattr
 sig = hurdsig hurdfault siginfo hurd-raise preempt-sig \
       trampoline longjmp-ts catch-exc exc2signal hurdkill sigunwind \

hurd/Versions

@@ -147,6 +147,13 @@ libc {
     # cthreads functions with stubs in libc
     __cthread_keycreate; __cthread_getspecific; __cthread_setspecific;
-    __libc_getspecific;
   }
+
+  GLIBC_PRIVATE {
+    # Used by other libs.
+    __lll_abstimed_wait; __lll_abstimed_xwait;
+    __lll_abstimed_lock; __lll_robust_lock;
+    __lll_robust_abstimed_lock; __lll_robust_trylock;
+    __lll_robust_unlock;
+  }
 }

hurd/hurdlock.c (new file, 215 lines)

@@ -0,0 +1,215 @@
/* Hurd helpers for lowlevellocks.
   Copyright (C) 1999-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "hurdlock.h"
#include <hurd.h>
#include <hurd/hurd.h>
#include <time.h>
#include <errno.h>
#include <unistd.h>

/* Convert an absolute timeout in nanoseconds to a relative
   timeout in milliseconds.  */
static inline int __attribute__ ((gnu_inline))
compute_reltime (const struct timespec *abstime, clockid_t clk)
{
  struct timespec ts;
  __clock_gettime (clk, &ts);

  ts.tv_sec = abstime->tv_sec - ts.tv_sec;
  ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;

  if (ts.tv_nsec < 0)
    {
      --ts.tv_sec;
      ts.tv_nsec += 1000000000;
    }

  return (ts.tv_sec < 0 ? -1 :
    (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000));
}

int __lll_abstimed_wait (void *ptr, int val,
  const struct timespec *tsp, int flags, int clk)
{
  int mlsec = compute_reltime (tsp, clk);
  return (mlsec < 0 ? KERN_TIMEDOUT :
    lll_timed_wait (ptr, val, mlsec, flags));
}

int __lll_abstimed_xwait (void *ptr, int lo, int hi,
  const struct timespec *tsp, int flags, int clk)
{
  int mlsec = compute_reltime (tsp, clk);
  return (mlsec < 0 ? KERN_TIMEDOUT :
    lll_timed_xwait (ptr, lo, hi, mlsec, flags));
}

int __lll_abstimed_lock (void *ptr,
  const struct timespec *tsp, int flags, int clk)
{
  if (lll_trylock (ptr) == 0)
    return (0);

  while (1)
    {
      if (atomic_exchange_acq ((int *)ptr, 2) == 0)
        return (0);
      else if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
        return (EINVAL);

      int mlsec = compute_reltime (tsp, clk);
      if (mlsec < 0 || lll_timed_wait (ptr,
          2, mlsec, flags) == KERN_TIMEDOUT)
        return (ETIMEDOUT);
    }
}

/* Robust locks.  */

/* Test if a given process id is still valid.  */
static inline int valid_pid (int pid)
{
  task_t task = __pid2task (pid);
  if (task == MACH_PORT_NULL)
    return (0);

  __mach_port_deallocate (__mach_task_self (), task);
  return (1);
}

/* Robust locks have currently no support from the kernel; they
   are simply implemented with periodic polling.  When sleeping, the
   maximum blocking time is determined by this constant.  */
#define MAX_WAIT_TIME   1500

int __lll_robust_lock (void *ptr, int flags)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  int wait_time = 25;
  unsigned int val;

  /* Try to set the lock word to our PID if it's clear.  Otherwise,
     mark it as having waiters.  */
  while (1)
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
      else if (atomic_compare_and_exchange_bool_acq (iptr,
          val | LLL_WAITERS, val) == 0)
        break;
    }

  for (id |= LLL_WAITERS ; ; )
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
      else if (val && !valid_pid (val & LLL_OWNER_MASK))
        {
          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
            return (EOWNERDEAD);
        }
      else
        {
          lll_timed_wait (iptr, val, wait_time, flags);
          if (wait_time < MAX_WAIT_TIME)
            wait_time <<= 1;
        }
    }
}

int __lll_robust_abstimed_lock (void *ptr,
  const struct timespec *tsp, int flags, int clk)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  int wait_time = 25;
  unsigned int val;

  while (1)
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
      else if (atomic_compare_and_exchange_bool_acq (iptr,
          val | LLL_WAITERS, val) == 0)
        break;
    }

  for (id |= LLL_WAITERS ; ; )
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
      else if (val && !valid_pid (val & LLL_OWNER_MASK))
        {
          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
            return (EOWNERDEAD);
        }
      else
        {
          int mlsec = compute_reltime (tsp, clk);
          if (mlsec < 0)
            return (ETIMEDOUT);
          else if (mlsec > wait_time)
            mlsec = wait_time;

          int res = lll_timed_wait (iptr, val, mlsec, flags);
          if (res == KERN_TIMEDOUT)
            return (ETIMEDOUT);
          else if (wait_time < MAX_WAIT_TIME)
            wait_time <<= 1;
        }
    }
}

int __lll_robust_trylock (void *ptr)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  unsigned int val = *iptr;

  if (!val)
    {
      if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
    }
  else if (!valid_pid (val & LLL_OWNER_MASK) &&
      atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
    return (EOWNERDEAD);

  return (EBUSY);
}

void __lll_robust_unlock (void *ptr, int flags)
{
  unsigned int val = atomic_load_relaxed ((unsigned int *)ptr);
  while (1)
    {
      if (val & LLL_WAITERS)
        {
          lll_set_wake (ptr, 0, flags);
          break;
        }
      else if (atomic_compare_exchange_weak_release
                 ((unsigned int *)ptr, &val, 0))
        break;
    }
}
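
A note on the robust-lock API above: a caller that receives EOWNERDEAD has in fact acquired the lock, but the previous owner's process died while holding it, so the protected state may need repair. A minimal, hypothetical usage sketch (the lock variable and the repair helper are invented for illustration, not part of this commit):

/* Sketch only: how a client of __lll_robust_lock might handle
   EOWNERDEAD.  'object_lock' and 'repair_object_state' are
   hypothetical names.  */
static unsigned int object_lock = LLL_INITIALIZER;

static void
lock_object (void)
{
  int err = __lll_robust_lock (&object_lock, 0);
  if (err == EOWNERDEAD)
    /* We own the lock now, but the last owner died holding it:
       restore the invariants of the data it protects.  */
    repair_object_state ();
}

static void
unlock_object (void)
{
  __lll_robust_unlock (&object_lock, 0);
}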

hurd/hurdlock.h (new file, 125 lines)

@@ -0,0 +1,125 @@
/* Low-level lock implementation.  High-level Hurd helpers.
   Copyright (C) 1999-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _HURD_LOCK_H
#define _HURD_LOCK_H   1

#include <mach/lowlevellock.h>

struct timespec;

/* Flags for robust locks.  */
#define LLL_WAITERS      (1U << 31)
#define LLL_DEAD_OWNER   (1U << 30)

#define LLL_OWNER_MASK   ~(LLL_WAITERS | LLL_DEAD_OWNER)

/* Wait on 64-bit address PTR, without blocking if its contents
   are different from the pair <LO, HI>.  */
#define lll_xwait(ptr, lo, hi, flags) \
  __gsync_wait (__mach_task_self (), \
    (vm_offset_t)ptr, lo, hi, 0, flags | GSYNC_QUAD)

/* Same as 'lll_wait', but only block for MLSEC milliseconds.  */
#define lll_timed_wait(ptr, val, mlsec, flags) \
  __gsync_wait (__mach_task_self (), \
    (vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED)

/* Same as 'lll_xwait', but only block for MLSEC milliseconds.  */
#define lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
  __gsync_wait (__mach_task_self (), (vm_offset_t)ptr, \
    lo, hi, mlsec, flags | GSYNC_TIMED | GSYNC_QUAD)

/* Same as 'lll_wait', but only block until TSP elapses,
   using clock CLK.  */
extern int __lll_abstimed_wait (void *__ptr, int __val,
  const struct timespec *__tsp, int __flags, int __clk);

/* Same as 'lll_xwait', but only block until TSP elapses,
   using clock CLK.  */
extern int __lll_abstimed_xwait (void *__ptr, int __lo, int __hi,
  const struct timespec *__tsp, int __flags, int __clk);

/* Same as 'lll_lock', but return with an error if TSP elapses,
   using clock CLK.  */
extern int __lll_abstimed_lock (void *__ptr,
  const struct timespec *__tsp, int __flags, int __clk);

/* Acquire the lock at PTR, but return with an error if
   the process containing the owner thread dies.  */
extern int __lll_robust_lock (void *__ptr, int __flags);

/* Same as '__lll_robust_lock', but only block until TSP
   elapses, using clock CLK.  */
extern int __lll_robust_abstimed_lock (void *__ptr,
  const struct timespec *__tsp, int __flags, int __clk);

/* Same as '__lll_robust_lock', but return with an error
   if the lock cannot be acquired without blocking.  */
extern int __lll_robust_trylock (void *__ptr);

/* Wake one or more threads waiting on address PTR,
   setting its value to VAL before doing so.  */
#define lll_set_wake(ptr, val, flags) \
  __gsync_wake (__mach_task_self (), \
    (vm_offset_t)ptr, val, flags | GSYNC_MUTATE)

/* Release the robust lock at PTR.  */
extern void __lll_robust_unlock (void *__ptr, int __flags);

/* Rearrange threads waiting on address SRC to instead wait on
   DST, waking one of them if WAIT_ONE is non-zero.  */
#define lll_requeue(src, dst, wake_one, flags) \
  __gsync_requeue (__mach_task_self (), (vm_offset_t)src, \
    (vm_offset_t)dst, (boolean_t)wake_one, flags)

/* The following are hacks that allow us to simulate optional
   parameters in C, to avoid having to pass the clock id for
   every one of these calls, defaulting to CLOCK_REALTIME if
   no argument is passed.  */

#define lll_abstimed_wait(ptr, val, tsp, flags, ...) \
  ({ \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
     __lll_abstimed_wait ((ptr), (val), (tsp), (flags), \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
   })

#define lll_abstimed_xwait(ptr, lo, hi, tsp, flags, ...) \
  ({ \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
     __lll_abstimed_xwait ((ptr), (lo), (hi), (tsp), (flags), \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
   })

#define lll_abstimed_lock(ptr, tsp, flags, ...) \
  ({ \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
     __lll_abstimed_lock ((ptr), (tsp), (flags), \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
   })

#define lll_robust_abstimed_lock(ptr, tsp, flags, ...) \
  ({ \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
     __lll_robust_abstimed_lock ((ptr), (tsp), (flags), \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
   })

#endif
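
The variadic macros above simulate an optional clock argument: the last element of the local __clk array is either the CLOCK_REALTIME default or the clock id the caller appended. A brief usage sketch (the deadline computation is illustrative only; TS must be an absolute deadline on whichever clock is ultimately used):

/* Sketch only: calling lll_abstimed_wait with and without an
   explicit clock id.  */
unsigned int word = LLL_INITIALIZER;
struct timespec ts;
__clock_gettime (CLOCK_REALTIME, &ts);
ts.tv_sec += 1;   /* Wait at most roughly one second.  */

lll_abstimed_wait (&word, 0, &ts, 0);                   /* CLOCK_REALTIME.  */
lll_abstimed_wait (&word, 0, &ts, 0, CLOCK_MONOTONIC);  /* Explicit clock.  */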

hurd/hurdpid.c

@@ -16,6 +16,8 @@
    <http://www.gnu.org/licenses/>.  */

 #include <hurd.h>
+#include <lowlevellock.h>
+
 pid_t _hurd_pid, _hurd_ppid, _hurd_pgrp;
 int _hurd_orphaned;

@@ -66,6 +68,7 @@ _S_msg_proc_newids (mach_port_t me,

   /* Notify any waiting user threads that the id change has been completed.  */
   ++_hurd_pids_changed_stamp;
+  lll_wake (&_hurd_pids_changed_stamp, GSYNC_BROADCAST);

   return 0;
 }
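
The broadcast added above pairs with the lll_wait loops introduced in __setpgid and __setsid further below. Because lll_wait blocks only while the word still holds the sampled value, a wake that arrives between the sample and the wait is never lost. A condensed sketch of the protocol, using the names from the diffs:

/* Waiter side (cf. __setpgid below): sample the generation counter,
   perform the RPC, then sleep until the signal thread bumps it.  */
int stamp = _hurd_pids_changed_stamp;
/* ... issue the proc RPC that changes the ids ... */
while (_hurd_pids_changed_stamp == stamp)
  lll_wait (&_hurd_pids_changed_stamp, stamp, 0);

/* Signal-thread side (_S_msg_proc_newids above): publish the new
   ids, then wake every waiter.  */
++_hurd_pids_changed_stamp;
lll_wake (&_hurd_pids_changed_stamp, GSYNC_BROADCAST);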

hurd/hurdsig.c

@@ -1362,14 +1362,14 @@ reauth_proc (mach_port_t new)
       __mach_port_destroy (__mach_task_self (), ref);

   /* Set the owner of the process here too.  */
-  mutex_lock (&_hurd_id.lock);
+  __mutex_lock (&_hurd_id.lock);
   if (!_hurd_check_ids ())
     HURD_PORT_USE (&_hurd_ports[INIT_PORT_PROC],
		   __proc_setowner (port,
				    (_hurd_id.gen.nuids
				     ? _hurd_id.gen.uids[0] : 0),
				    !_hurd_id.gen.nuids));
-  mutex_unlock (&_hurd_id.lock);
+  __mutex_unlock (&_hurd_id.lock);

   (void) &reauth_proc;	/* Silence compiler warning.  */
 }

hurd/setauth.c

@@ -18,14 +18,13 @@
 #include <hurd.h>
 #include <hurd/port.h>
 #include <hurd/id.h>
+#include <hurdlock.h>
 #include "set-hooks.h"

 /* Things in the library which want to be run when the auth port changes.  */
 DEFINE_HOOK (_hurd_reauth_hook, (auth_t new_auth));

-#include <cthreads.h>
-static struct mutex reauth_lock = MUTEX_INITIALIZER;
+static unsigned int reauth_lock = LLL_INITIALIZER;

 /* Set the auth port to NEW, and reauthenticate
    everything used by the library.  */

mach/Makefile

@@ -23,7 +23,7 @@ headers = mach_init.h mach.h mach_error.h mach-shortcuts.h mach/mach_traps.h \
	  $(interface-headers) mach/mach.h mach/mig_support.h mach/error.h \
	  $(lock-headers) machine-sp.h bits/mach/param.h
 lock = spin-solid spin-lock mutex-init mutex-solid
-lock-headers = lock-intern.h machine-lock.h spin-lock.h
+lock-headers = lock-intern.h spin-lock.h
 routines = $(mach-syscalls) $(mach-shortcuts) \
	   mach_init mig_strncpy msg \
	   mig-alloc mig-dealloc mig-reply \

mach/lock-intern.h

@@ -19,12 +19,19 @@
 #define _LOCK_INTERN_H

 #include <sys/cdefs.h>
-#include <machine-lock.h>
+#if defined __USE_EXTERN_INLINES && defined _LIBC
+# include <lowlevellock.h>
+#endif

 #ifndef _EXTERN_INLINE
 #define _EXTERN_INLINE __extern_inline
 #endif

+/* The type of a spin lock variable.  */
+typedef unsigned int __spin_lock_t;
+
+/* Static initializer for spinlocks.  */
+#define __SPIN_LOCK_INITIALIZER   LLL_INITIALIZER
+
 /* Initialize LOCK.  */

@@ -50,32 +57,48 @@ extern void __spin_lock (__spin_lock_t *__lock);
 _EXTERN_INLINE void
 __spin_lock (__spin_lock_t *__lock)
 {
-  if (! __spin_try_lock (__lock))
-    __spin_lock_solid (__lock);
+  lll_lock (__lock, 0);
+}
+#endif
+
+/* Unlock LOCK.  */
+extern void __spin_unlock (__spin_lock_t *__lock);
+
+#if defined __USE_EXTERN_INLINES && defined _LIBC
+_EXTERN_INLINE void
+__spin_unlock (__spin_lock_t *__lock)
+{
+  lll_unlock (__lock, 0);
+}
+#endif
+
+/* Try to lock LOCK; return nonzero if we locked it, zero if another has.  */
+extern int __spin_try_lock (__spin_lock_t *__lock);
+
+#if defined __USE_EXTERN_INLINES && defined _LIBC
+_EXTERN_INLINE int
+__spin_try_lock (__spin_lock_t *__lock)
+{
+  return (lll_trylock (__lock) == 0);
+}
+#endif
+
+/* Return nonzero if LOCK is locked.  */
+extern int __spin_lock_locked (__spin_lock_t *__lock);
+
+#if defined __USE_EXTERN_INLINES && defined _LIBC
+_EXTERN_INLINE int
+__spin_lock_locked (__spin_lock_t *__lock)
+{
+  return (*(volatile __spin_lock_t *)__lock != 0);
 }
 #endif

-/* Name space-clean internal interface to mutex locks.
-   Code internal to the C library uses these functions to lock and unlock
-   mutex locks.  These locks are of type `struct mutex', defined in
-   <cthreads.h>.  The functions here are name space-clean.  If the program
-   is linked with the cthreads library, `__mutex_lock_solid' and
-   `__mutex_unlock_solid' will invoke the corresponding cthreads functions
-   to implement real mutex locks.  If not, simple stub versions just use
-   spin locks.  */
+/* Name space-clean internal interface to mutex locks.  */

 /* Initialize the newly allocated mutex lock LOCK for further use.  */
 extern void __mutex_init (void *__lock);

-/* Lock LOCK, blocking if we can't get it.  */
-extern void __mutex_lock_solid (void *__lock);
-
-/* Finish unlocking LOCK, after the spin lock LOCK->held has already been
-   unlocked.  This function will wake up any thread waiting on LOCK.  */
-extern void __mutex_unlock_solid (void *__lock);
-
 /* Lock the mutex lock LOCK.  */
 extern void __mutex_lock (void *__lock);

@@ -84,8 +107,7 @@ extern void __mutex_lock (void *__lock);
 _EXTERN_INLINE void
 __mutex_lock (void *__lock)
 {
-  if (! __spin_try_lock ((__spin_lock_t *) __lock))
-    __mutex_lock_solid (__lock);
+  __spin_lock ((__spin_lock_t *)__lock);
 }
 #endif

@@ -97,8 +119,7 @@ extern void __mutex_unlock (void *__lock);
 _EXTERN_INLINE void
 __mutex_unlock (void *__lock)
 {
-  __spin_unlock ((__spin_lock_t *) __lock);
-  __mutex_unlock_solid (__lock);
+  __spin_unlock ((__spin_lock_t *)__lock);
 }
 #endif

@@ -109,7 +130,7 @@ extern int __mutex_trylock (void *__lock);
 _EXTERN_INLINE int
 __mutex_trylock (void *__lock)
 {
-  return __spin_try_lock ((__spin_lock_t *) __lock);
+  return (__spin_try_lock ((__spin_lock_t *)__lock));
 }
 #endif
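
After this change a spin lock and a libc mutex share the same word-sized representation, so the __mutex_* inlines are thin wrappers over the __spin_* inlines, which in turn expand to lll operations. A minimal sketch of what client code looks like (the lock variable and function are hypothetical):

/* Sketch only: both call families now operate on one lock word.  */
static __spin_lock_t lock = __SPIN_LOCK_INITIALIZER;

static void
with_lock (void)
{
  __mutex_lock (&lock);     /* -> __spin_lock -> lll_lock.  */
  /* ... critical section ... */
  __mutex_unlock (&lock);   /* -> __spin_unlock -> lll_unlock.  */
}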

mach/lowlevellock.h (new file, 81 lines)

@@ -0,0 +1,81 @@
/* Low-level lock implementation.  Mach gsync-based version.
   Copyright (C) 1994-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _MACH_LOWLEVELLOCK_H
#define _MACH_LOWLEVELLOCK_H   1

#include <mach/gnumach.h>
#include <atomic.h>

/* Gsync flags.  */
#ifndef GSYNC_SHARED
#define GSYNC_SHARED      0x01
#define GSYNC_QUAD        0x02
#define GSYNC_TIMED       0x04
#define GSYNC_BROADCAST   0x08
#define GSYNC_MUTATE      0x10
#endif

/* Static initializer for low-level locks.  */
#define LLL_INITIALIZER   0

/* Wait on address PTR, without blocking if its contents
   are different from VAL.  */
#define lll_wait(ptr, val, flags)   \
  __gsync_wait (__mach_task_self (),   \
    (vm_offset_t)(ptr), (val), 0, 0, (flags))

/* Wake one or more threads waiting on address PTR.  */
#define lll_wake(ptr, flags)   \
  __gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags))

/* Acquire the lock at PTR.  */
#define lll_lock(ptr, flags)   \
  ({   \
     int *__iptr = (int *)(ptr);   \
     int __flags = (flags);   \
     if (*__iptr != 0 ||   \
         atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) != 0)   \
       while (1)   \
         {   \
           if (atomic_exchange_acq (__iptr, 2) == 0)   \
             break;   \
           lll_wait (__iptr, 2, __flags);   \
         }   \
     (void)0;   \
   })

/* Try to acquire the lock at PTR, without blocking.
   Evaluates to zero on success.  */
#define lll_trylock(ptr)   \
  ({   \
     int *__iptr = (int *)(ptr);   \
     *__iptr == 0 &&   \
       atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) == 0 ? 0 : -1;   \
   })

/* Release the lock at PTR.  */
#define lll_unlock(ptr, flags)   \
  ({   \
     int *__iptr = (int *)(ptr);   \
     if (atomic_exchange_rel (__iptr, 0) == 2)   \
       lll_wake (__iptr, (flags));   \
     (void)0;   \
   })

#endif
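
The lock word in the macros above takes one of three values: 0 (free), 1 (held, no waiters), and 2 (held, possible waiters), which lets an uncontended lll_unlock skip the gsync wake RPC entirely. A short sketch restating the protocol (client code, not part of the header):

/* Sketch only: fast and slow paths of the gsync-based lock.  */
static int word = LLL_INITIALIZER;

static void
example (void)
{
  /* Uncontended: the CAS takes word from 0 to 1.  Contended: a
     waiter stores 2 and sleeps in __gsync_wait until the holder's
     unlock (which observes 2) issues __gsync_wake.  */
  lll_lock (&word, 0);
  /* ... critical section ... */
  lll_unlock (&word, 0);   /* Exchange to 0; wake only if it was 2.  */
}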

mach/mutex-init.c

@@ -17,13 +17,10 @@
    <http://www.gnu.org/licenses/>.  */

 #include <lock-intern.h>
-#include <cthreads.h>
+#include <lowlevellock.h>

 void
 __mutex_init (void *lock)
 {
-  /* This happens to be name space-safe because it is a macro.
-     It invokes only spin_lock_init, which is a macro for __spin_lock_init;
-     and cthread_queue_init, which is a macro for some simple code.  */
-  mutex_init ((struct mutex *) lock);
+  *(int *)lock = LLL_INITIALIZER;
 }

manual/errno.texi

@@ -882,6 +882,16 @@ the normal result is for the operations affected to complete with this
 error; @pxref{Cancel AIO Operations}.
 @end deftypevr

+@deftypevr Macro int EOWNERDEAD
+@standards{GNU, errno.h}
+@errno{EOWNERDEAD, 120, Owner died}
+@end deftypevr
+
+@deftypevr Macro int ENOTRECOVERABLE
+@standards{GNU, errno.h}
+@errno{ENOTRECOVERABLE, 121, State not recoverable}
+@end deftypevr
+
 @emph{The following error codes are defined by the Linux/i386 kernel.
 They are not yet documented.}

sysdeps/mach/Makefile

@@ -50,4 +50,30 @@ mach-before-compile:

 before-compile += $(mach-before-compile)
 endif

+ifeq (crypt,$(subdir))
+LDLIBS-crypt.so += $(objdir)/mach/libmachuser.so
+else ifeq (dlfcn,$(subdir))
+LDLIBS-dl.so += $(objdir)/mach/libmachuser.so
+else ifeq (nis,$(subdir))
+LDLIBS-nsl.so += $(objdir)/mach/libmachuser.so
+LDLIBS-nss_nis.so += $(objdir)/mach/libmachuser.so
+LDLIBS-nss_nisplus.so += $(objdir)/mach/libmachuser.so
+LDLIBS-nss_compat.so += $(objdir)/mach/libmachuser.so
+else ifeq (nss,$(subdir))
+LDLIBS-nss.so += $(objdir)/mach/libmachuser.so
+LDLIBS-nss_files.so += $(objdir)/mach/libmachuser.so
+LDLIBS-nss_db.so += $(objdir)/mach/libmachuser.so
+LDLIBS-nss_compat.so += $(objdir)/mach/libmachuser.so
+else ifeq (hesiod,$(subdir))
+LDLIBS-nss_hesiod.so += $(objdir)/mach/libmachuser.so
+else ifeq (posix,$(subdir))
+LDLIBS-tst-rfc3484 += $(objdir)/mach/libmachuser.so
+LDLIBS-tst-rfc3484-2 += $(objdir)/mach/libmachuser.so
+LDLIBS-tst-rfc3484-3 += $(objdir)/mach/libmachuser.so
+else ifeq (nscd,$(subdir))
+LDLIBS-nscd += $(objdir)/mach/libmachuser.so
+endif
+
+LDLIBS-pthread.so += $(objdir)/mach/libmachuser.so

 endif	# in-Makerules

sysdeps/mach/hurd/bits/errno.h

@@ -127,6 +127,8 @@ enum __error_t_codes
	EPROTO          = 0x40000074,	/* Protocol error */
	ETIME           = 0x40000075,	/* Timer expired */
	ECANCELED       = 0x40000077,	/* Operation canceled */
+	EOWNERDEAD      = 0x40000078,	/* Robust mutex owner died */
+	ENOTRECOVERABLE = 0x40000079,	/* Robust mutex irrecoverable */

	/* Errors from <mach/message.h>.  */
	EMACH_SEND_IN_PROGRESS          = 0x10000001,

@@ -322,6 +324,8 @@ enum __error_t_codes
 #define EPROTO          0x40000074
 #define ETIME           0x40000075
 #define ECANCELED       0x40000077
+#define EOWNERDEAD      0x40000078
+#define ENOTRECOVERABLE 0x40000079

 /* Errors from <mach/message.h>.  */
 #define EMACH_SEND_IN_PROGRESS          0x10000001

@@ -405,6 +409,6 @@ enum __error_t_codes
 #define ED_NO_MEMORY           2508
 #define ED_READ_ONLY           2509

-#define _HURD_ERRNOS 120
+#define _HURD_ERRNOS 122

 #endif /* bits/errno.h.  */

sysdeps/mach/hurd/cthreads.c

@@ -52,13 +52,3 @@ __cthread_setspecific (cthread_key_t key, void *val)
   __set_errno (ENOSYS);
   return -1;
 }
-
-/* Call cthread_getspecific which gets a pointer to the return value instead
-   of just returning it.  */
-void *
-__libc_getspecific (cthread_key_t key)
-{
-  void *val;
-  __cthread_getspecific (key, &val);
-  return val;
-}

sysdeps/mach/hurd/bits/libc-lock.h (deleted file, 216 lines)

@@ -1,216 +0,0 @@
/* libc-internal interface for mutex locks. Hurd version using Mach cthreads.
Copyright (C) 1996-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#ifndef _LIBC_LOCK_H
#define _LIBC_LOCK_H 1
#if (_LIBC - 0) || (_CTHREADS_ - 0)
#include <cthreads.h>
/* The locking here is very inexpensive, even for inlining. */
#define _IO_lock_inexpensive 1
typedef struct mutex __libc_lock_t;
typedef struct
{
struct mutex mutex;
void *owner;
int count;
} __libc_lock_recursive_t;
typedef __libc_lock_recursive_t __rtld_lock_recursive_t;
extern char __libc_lock_self0[0];
#define __libc_lock_owner_self() (__LIBC_NO_TLS () ? &__libc_lock_self0 : THREAD_SELF)
#else
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
#endif
/* Define a lock variable NAME with storage class CLASS. The lock must be
initialized with __libc_lock_init before it can be used (or define it
with __libc_lock_define_initialized, below). Use `extern' for CLASS to
declare a lock defined in another module. In public structure
definitions you must use a pointer to the lock structure (i.e., NAME
begins with a `*'), because its storage size will not be known outside
of libc. */
#define __libc_lock_define(CLASS,NAME) \
CLASS __libc_lock_t NAME;
/* Define an initialized lock variable NAME with storage class CLASS. */
#define _LIBC_LOCK_INITIALIZER MUTEX_INITIALIZER
#define __libc_lock_define_initialized(CLASS,NAME) \
CLASS __libc_lock_t NAME = _LIBC_LOCK_INITIALIZER;
/* Initialize the named lock variable, leaving it in a consistent, unlocked
state. */
#define __libc_lock_init(NAME) __mutex_init (&(NAME))
/* Finalize the named lock variable, which must be locked. It cannot be
used again until __libc_lock_init is called again on it. This must be
called on a lock variable before the containing storage is reused. */
#define __libc_lock_fini(NAME) __mutex_unlock (&(NAME))
#define __libc_lock_fini_recursive(NAME) __mutex_unlock (&(NAME).mutex)
#define __rtld_lock_fini_recursive(NAME) __mutex_unlock (&(NAME).mutex)
/* Lock the named lock variable. */
#define __libc_lock_lock(NAME) __mutex_lock (&(NAME))
/* Lock the named lock variable. */
#define __libc_lock_trylock(NAME) (!__mutex_trylock (&(NAME)))
/* Unlock the named lock variable. */
#define __libc_lock_unlock(NAME) __mutex_unlock (&(NAME))
#define __libc_lock_define_recursive(CLASS,NAME) \
CLASS __libc_lock_recursive_t NAME;
#define _LIBC_LOCK_RECURSIVE_INITIALIZER { MUTEX_INITIALIZER, 0, 0 }
#define __libc_lock_define_initialized_recursive(CLASS,NAME) \
CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
#define __rtld_lock_define_recursive(CLASS,NAME) \
__libc_lock_define_recursive (CLASS, NAME)
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
_LIBC_LOCK_RECURSIVE_INITIALIZER
#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
__libc_lock_define_initialized_recursive (CLASS, NAME)
#define __libc_lock_init_recursive(NAME) \
({ __libc_lock_recursive_t *const __lock = &(NAME); \
__lock->owner = 0; mutex_init (&__lock->mutex); })
#define __libc_lock_trylock_recursive(NAME) \
({ __libc_lock_recursive_t *const __lock = &(NAME); \
void *__self = __libc_lock_owner_self (); \
__mutex_trylock (&__lock->mutex) \
? (__lock->owner = __self, __lock->count = 1, 0) \
: __lock->owner == __self ? (++__lock->count, 0) : 1; })
#define __libc_lock_lock_recursive(NAME) \
({ __libc_lock_recursive_t *const __lock = &(NAME); \
void *__self = __libc_lock_owner_self (); \
if (__mutex_trylock (&__lock->mutex) \
|| (__lock->owner != __self \
&& (__mutex_lock (&__lock->mutex), 1))) \
__lock->owner = __self, __lock->count = 1; \
else \
++__lock->count; \
})
#define __libc_lock_unlock_recursive(NAME) \
({ __libc_lock_recursive_t *const __lock = &(NAME); \
if (--__lock->count == 0) \
{ \
__lock->owner = 0; \
__mutex_unlock (&__lock->mutex); \
} \
})
#define __rtld_lock_initialize(NAME) \
(void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
#define __rtld_lock_trylock_recursive(NAME) \
__libc_lock_trylock_recursive (NAME)
#define __rtld_lock_lock_recursive(NAME) \
__libc_lock_lock_recursive(NAME)
#define __rtld_lock_unlock_recursive(NAME) \
__libc_lock_unlock_recursive (NAME)
/* XXX for now */
#define __libc_rwlock_define __libc_lock_define
#define __libc_rwlock_define_initialized __libc_lock_define_initialized
#define __libc_rwlock_init __libc_lock_init
#define __libc_rwlock_fini __libc_lock_fini
#define __libc_rwlock_rdlock __libc_lock_lock
#define __libc_rwlock_wrlock __libc_lock_lock
#define __libc_rwlock_tryrdlock __libc_lock_trylock
#define __libc_rwlock_trywrlock __libc_lock_trylock
#define __libc_rwlock_unlock __libc_lock_unlock
/* Start a critical region with a cleanup function */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
{ \
typeof (***(FCT)) *__save_FCT = (DOIT) ? (FCT) : 0; \
typeof (ARG) __save_ARG = ARG; \
/* close brace is in __libc_cleanup_region_end below. */
/* End a critical region started with __libc_cleanup_region_start. */
#define __libc_cleanup_region_end(DOIT) \
if ((DOIT) && __save_FCT != 0) \
(*__save_FCT)(__save_ARG); \
}
/* Sometimes we have to exit the block in the middle. */
#define __libc_cleanup_end(DOIT) \
if ((DOIT) && __save_FCT != 0) \
(*__save_FCT)(__save_ARG); \
#define __libc_cleanup_push(fct, arg) __libc_cleanup_region_start (1, fct, arg)
#define __libc_cleanup_pop(execute) __libc_cleanup_region_end (execute)
#if (_CTHREADS_ - 0)
/* Use mutexes as once control variables. */
struct __libc_once
{
__libc_lock_t lock;
int done;
};
#define __libc_once_define(CLASS,NAME) \
CLASS struct __libc_once NAME = { MUTEX_INITIALIZER, 0 }
/* Call handler iff the first call. */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
do { \
__libc_lock_lock (ONCE_CONTROL.lock); \
if (!ONCE_CONTROL.done) \
(INIT_FUNCTION) (); \
ONCE_CONTROL.done = 1; \
__libc_lock_unlock (ONCE_CONTROL.lock); \
} while (0)
/* Get once control variable. */
#define __libc_once_get(ONCE_CONTROL) ((ONCE_CONTROL).done != 0)
#ifdef _LIBC
/* We need portable names for some functions. E.g., when they are
used as argument to __libc_cleanup_region_start. */
#define __libc_mutex_unlock __mutex_unlock
#endif
/* Type for key of thread specific data. */
typedef cthread_key_t __libc_key_t;
#define __libc_key_create(KEY,DEST) __cthread_keycreate (KEY)
#define __libc_setspecific(KEY,VAL) __cthread_setspecific (KEY, VAL)
void *__libc_getspecific (__libc_key_t key);
#endif /* _CTHREADS_ */
/* Hide the definitions which are only supposed to be used inside libc in
a separate file. This file is not present in the installation! */
#ifdef _LIBC
# include <libc-lockP.h>
#endif
#endif /* libc-lock.h */

sysdeps/mach/hurd/setpgid.c

@@ -19,6 +19,7 @@
 #include <unistd.h>
 #include <hurd.h>
 #include <hurd/port.h>
+#include <lowlevellock.h>

 /* Set the process group ID of the process matching PID to PGID.
    If PID is zero, the current process's process group ID is set.

@@ -38,14 +39,7 @@ __setpgid (pid_t pid, pid_t pgid)
   /* Synchronize with the signal thread to make sure we have
      received and processed proc_newids before returning to the user.  */
   while (_hurd_pids_changed_stamp == stamp)
-    {
-#ifdef noteven
-      /* XXX we have no need for a mutex, but cthreads demands one.  */
-      __condition_wait (&_hurd_pids_changed_sync, NULL);
-#else
-      __swtch_pri (0);
-#endif
-    }
+    lll_wait (&_hurd_pids_changed_stamp, stamp, 0);

   return 0;
sysdeps/mach/hurd/setsid.c

@@ -21,6 +21,7 @@
 #include <hurd/port.h>
 #include <hurd/fd.h>
 #include <hurd/ioctl.h>
+#include <lowlevellock.h>

 /* Create a new session with the calling process as its leader.
    The process group IDs of the session and the calling process

@@ -55,14 +56,7 @@ __setsid (void)
	 returned by `getpgrp ()' in other threads) has been updated before
	 we return.  */
      while (_hurd_pids_changed_stamp == stamp)
-	{
-#ifdef noteven
-	  /* XXX we have no need for a mutex, but cthreads demands one.  */
-	  __condition_wait (&_hurd_pids_changed_sync, NULL);
-#else
-	  __swtch_pri (0);
-#endif
-	}
+	lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
    }
  HURD_CRITICAL_END;

sysdeps/mach/bits/libc-lock.h

@@ -20,15 +20,32 @@
 #define _LIBC_LOCK_H 1

 #ifdef _LIBC
-#include <cthreads.h>
-#define __libc_lock_t struct mutex
+
+#include <tls.h>
+#include <lowlevellock.h>
+
+/* The locking here is very inexpensive, even for inlining.  */
+#define _IO_lock_inexpensive   1
+
+typedef unsigned int __libc_lock_t;
+typedef struct
+{
+  __libc_lock_t lock;
+  int cnt;
+  void *owner;
+} __libc_lock_recursive_t;
+
+typedef __libc_lock_recursive_t __rtld_lock_recursive_t;
+
+extern char __libc_lock_self0[0];
+#define __libc_lock_owner_self()   \
+  (__LIBC_NO_TLS () ? (void *)&__libc_lock_self0 : THREAD_SELF)
+
 #else
 typedef struct __libc_lock_opaque__ __libc_lock_t;
+typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
 #endif

-/* Type for key of thread specific data.  */
-typedef cthread_key_t __libc_key_t;
-
 /* Define a lock variable NAME with storage class CLASS.  The lock must be
    initialized with __libc_lock_init before it can be used (or define it
    with __libc_lock_define_initialized, below).  Use `extern' for CLASS to

@@ -40,27 +57,97 @@ typedef cthread_key_t __libc_key_t;
   CLASS __libc_lock_t NAME;

 /* Define an initialized lock variable NAME with storage class CLASS.  */
+#define _LIBC_LOCK_INITIALIZER LLL_INITIALIZER
 #define __libc_lock_define_initialized(CLASS,NAME) \
-  CLASS __libc_lock_t NAME = MUTEX_INITIALIZER;
+  CLASS __libc_lock_t NAME = LLL_INITIALIZER;

 /* Initialize the named lock variable, leaving it in a consistent, unlocked
    state.  */
-#define __libc_lock_init(NAME) __mutex_init (&(NAME))
+#define __libc_lock_init(NAME) (NAME) = LLL_INITIALIZER

 /* Finalize the named lock variable, which must be locked.  It cannot be
    used again until __libc_lock_init is called again on it.  This must be
    called on a lock variable before the containing storage is reused.  */
-#define __libc_lock_fini(NAME) __mutex_unlock (&(NAME))
+#define __libc_lock_fini             __libc_lock_unlock
+#define __libc_lock_fini_recursive   __libc_lock_unlock_recursive
+#define __rtld_lock_fini_recursive   __rtld_lock_unlock_recursive

 /* Lock the named lock variable.  */
-#define __libc_lock_lock(NAME) __mutex_lock (&(NAME))
+#define __libc_lock_lock(NAME)   \
+  ({ lll_lock (&(NAME), 0); 0; })

 /* Lock the named lock variable.  */
-#define __libc_lock_trylock(NAME) (!__mutex_trylock (&(NAME)))
+#define __libc_lock_trylock(NAME) lll_trylock (&(NAME))

 /* Unlock the named lock variable.  */
-#define __libc_lock_unlock(NAME) __mutex_unlock (&(NAME))
+#define __libc_lock_unlock(NAME)   \
+  ({ lll_unlock (&(NAME), 0); 0; })
+
+#define __libc_lock_define_recursive(CLASS,NAME)   \
+  CLASS __libc_lock_recursive_t NAME;
+
+#define _LIBC_LOCK_RECURSIVE_INITIALIZER { LLL_INITIALIZER, 0, 0 }
+
+#define __libc_lock_define_initialized_recursive(CLASS,NAME)   \
+  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
+
+#define __rtld_lock_define_recursive(CLASS,NAME)   \
+  __libc_lock_define_recursive (CLASS, NAME)
+
+#define _RTLD_LOCK_RECURSIVE_INITIALIZER   \
+  _LIBC_LOCK_RECURSIVE_INITIALIZER
+
+#define __rtld_lock_define_initialized_recursive(CLASS,NAME)   \
+  __libc_lock_define_initialized_recursive (CLASS, NAME)
+
+#define __libc_lock_init_recursive(NAME)   \
+  ({   \
+     (NAME) = (__libc_lock_recursive_t)_LIBC_LOCK_RECURSIVE_INITIALIZER;   \
+     0;   \
+   })
+
+#define __libc_lock_trylock_recursive(NAME)   \
+  ({   \
+     __libc_lock_recursive_t *const __lock = &(NAME);   \
+     void *__self = __libc_lock_owner_self ();   \
+     int __r = 0;   \
+     if (__self == __lock->owner)   \
+       ++__lock->cnt;   \
+     else if ((__r = lll_trylock (&__lock->lock)) == 0)   \
+       __lock->owner = __self, __lock->cnt = 1;   \
+     __r;   \
+   })
+
+#define __libc_lock_lock_recursive(NAME)   \
+  ({   \
+     __libc_lock_recursive_t *const __lock = &(NAME);   \
+     void *__self = __libc_lock_owner_self ();   \
+     if (__self != __lock->owner)   \
+       {   \
+         lll_lock (&__lock->lock, 0);   \
+         __lock->owner = __self;   \
+       }   \
+     ++__lock->cnt;   \
+     (void)0;   \
+   })
+
+#define __libc_lock_unlock_recursive(NAME)   \
+  ({   \
+     __libc_lock_recursive_t *const __lock = &(NAME);   \
+     if (--__lock->cnt == 0)   \
+       {   \
+         __lock->owner = 0;   \
+         lll_unlock (&__lock->lock, 0);   \
+       }   \
+   })
+
+#define __rtld_lock_initialize(NAME)   \
+  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
+
+#define __rtld_lock_trylock_recursive(NAME)   \
+  __libc_lock_trylock_recursive (NAME)
+
+#define __rtld_lock_lock_recursive(NAME)   \
+  __libc_lock_lock_recursive (NAME)
+
+#define __rtld_lock_unlock_recursive(NAME)   \
+  __libc_lock_unlock_recursive (NAME)

 /* XXX for now */
 #define __libc_rwlock_define		__libc_lock_define

@@ -95,7 +182,7 @@ __libc_cleanup_fct (struct __libc_cleanup_frame *framep)
     __attribute__ ((__cleanup__ (__libc_cleanup_fct)))   \
     = { .__fct = (FCT), .__argp = (ARG), .__doit = (DOIT) };

 /* This one closes the brace above.  */
 #define __libc_cleanup_region_end(DOIT)   \
     __cleanup.__doit = (DOIT);   \
   }   \

@@ -106,7 +193,7 @@ __libc_cleanup_fct (struct __libc_cleanup_frame *framep)
 #define __libc_cleanup_push(fct, arg) __libc_cleanup_region_start (1, fct, arg)
 #define __libc_cleanup_pop(execute) __libc_cleanup_region_end (execute)

 /* Use mutexes as once control variables.  */
 struct __libc_once
 {

@@ -115,8 +202,7 @@ struct __libc_once
 };

 #define __libc_once_define(CLASS,NAME) \
-  CLASS struct __libc_once NAME = { MUTEX_INITIALIZER, 0 }
+  CLASS struct __libc_once NAME = { _LIBC_LOCK_INITIALIZER, 0 }

 /* Call handler iff the first call.  */
 #define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \

@@ -134,25 +220,11 @@ struct __libc_once
 #ifdef _LIBC
 /* We need portable names for some functions.  E.g., when they are
    used as argument to __libc_cleanup_region_start.  */
-#define __libc_mutex_unlock __mutex_unlock
+#define __libc_mutex_unlock __libc_lock_unlock
+
+/* Hide the definitions which are only supposed to be used inside libc in
+   a separate file.  This file is not present in the installation!  */
+# include <libc-lockP.h>
 #endif

-#define __libc_key_create(KEY,DEST) __cthread_keycreate (KEY)
-#define __libc_setspecific(KEY,VAL) __cthread_setspecific (KEY, VAL)
-void *__libc_getspecific (__libc_key_t key);
-
-/* XXX until cthreads supports recursive locks */
-#define __libc_lock_define_initialized_recursive __libc_lock_define_initialized
-#define __libc_lock_init_recursive __libc_lock_init
-#define __libc_lock_fini_recursive __libc_lock_fini
-#define __libc_lock_trylock_recursive __libc_lock_trylock
-#define __libc_lock_unlock_recursive __libc_lock_unlock
-#define __libc_lock_lock_recursive __libc_lock_lock
-#define __rtld_lock_define_initialized_recursive __libc_lock_define_initialized
-#define __rtld_lock_fini_recursive __libc_lock_fini
-#define __rtld_lock_trylock_recursive __libc_lock_trylock
-#define __rtld_lock_unlock_recursive __libc_lock_unlock
-#define __rtld_lock_lock_recursive __libc_lock_lock

 #endif	/* libc-lock.h */
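
The recursive variants above record the owning thread with __libc_lock_owner_self () and a depth counter, so re-entry by the owner only increments cnt and never blocks. A hypothetical sketch of the resulting behavior (caller code invented for illustration):

/* Sketch only: nesting a recursive libc lock.  */
__libc_lock_define_initialized_recursive (static, rlock)

static void
inner (void)
{
  __libc_lock_lock_recursive (rlock);     /* Owner re-entry: cnt 1 -> 2.  */
  /* ... */
  __libc_lock_unlock_recursive (rlock);   /* cnt 2 -> 1; still held.  */
}

static void
outer (void)
{
  __libc_lock_lock_recursive (rlock);     /* Takes the lll lock; cnt = 1.  */
  inner ();
  __libc_lock_unlock_recursive (rlock);   /* cnt 1 -> 0; lll released.  */
}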