2003-07-23  Jakub Jelinek  <jakub@redhat.com>

	* sysdeps/pthread/lio_listio.c (LIO_OPCODE_BASE): Define.
	(lio_listio): Use it.
	* sysdeps/pthread/lio_listio64.c: Include lio_listio.c after a few
	defines instead of duplicating the code.
Ulrich Drepper 2003-07-23 18:50:26 +00:00
parent 9d79e0377b
commit 6bc0b95489
4 changed files with 19 additions and 152 deletions
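
The ChangeLog entry above boils down to a standard glibc code-sharing pattern: the generic source provides a default for a flavour macro, and the 64-bit variant redefines a few names and then includes the generic file, so the logic is written once but compiled twice. The two-file sketch below illustrates the idea with made-up names (do_io.c, do_io64.c, OPCODE_BASE); it mirrors the shape of this diff, not glibc's actual sources.

/* ---- file: do_io.c (generic implementation, cf. lio_listio.c) ---- */
#include <stdio.h>

#ifndef do_io                   /* not being built as the 64-bit flavour */
# define OPCODE_BASE 0          /* cf. LIO_OPCODE_BASE 0 */
#endif

int
do_io (int opcode)
{
  /* Tag the request with this flavour's base, as lio_listio now does
     with aio_lio_opcode | LIO_OPCODE_BASE.  */
  int tagged = opcode | OPCODE_BASE;
  printf ("enqueue opcode %d\n", tagged);
  return tagged;
}

/* ---- file: do_io64.c (thin wrapper, cf. lio_listio64.c) ---- */
#define do_io       do_io64     /* cf. #define lio_listio lio_listio64 */
#define OPCODE_BASE 128         /* cf. #define LIO_OPCODE_BASE 128 */
#include "do_io.c"              /* cf. #include <lio_listio.c> */

Compiling both files into one program yields do_io and do_io64 from a single copy of the code, which is what the large deletion in lio_listio64.c below achieves for lio_listio.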

ChangeLog

@@ -1,3 +1,10 @@
2003-07-23 Jakub Jelinek <jakub@redhat.com>
* sysdeps/pthread/lio_listio.c (LIO_OPCODE_BASE): Define.
(lio_listio): Use it.
* sysdeps/pthread/lio_listio64.c: Include lio_listio.c after a few
defines instead of duplicating the code.
2003-07-22 Jakub Jelinek <jakub@redhat.com>
* include/stdio.h (__libc_fatal): Add libc_hidden_proto.

Banner

@@ -1 +1 @@
NPTL 0.54 by Ulrich Drepper
NPTL 0.55 by Ulrich Drepper

sysdeps/pthread/lio_listio.c

@@ -1,5 +1,5 @@
/* Enqueue and list of read or write requests.
Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
Copyright (C) 1997,1998,1999,2000,2001,2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
@@ -18,6 +18,7 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#ifndef lio_listio
#include <aio.h>
#include <assert.h>
#include <errno.h>
@@ -26,6 +27,8 @@
#include "aio_misc.h"
#define LIO_OPCODE_BASE 0
#endif
/* We need this special structure to handle asynchronous I/O. */
struct async_waitlist
@@ -72,7 +75,8 @@ lio_listio (mode, list, nent, sig)
{
list[cnt]->aio_sigevent.sigev_notify = SIGEV_NONE;
requests[cnt] = __aio_enqueue_request ((aiocb_union *) list[cnt],
list[cnt]->aio_lio_opcode);
(list[cnt]->aio_lio_opcode
| LIO_OPCODE_BASE));
if (requests[cnt] != NULL)
/* Successfully enqueued. */
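
The OR with LIO_OPCODE_BASE matters because the shared enqueue path has to know, per request, whether the control block came in through the 32-bit or the 64-bit entry point. The standalone sketch below only illustrates that kind of tag-bit dispatch; the structs and the enqueue () stand-in are hypothetical, not glibc's __aio_enqueue_request.

#include <stdint.h>
#include <stdio.h>

#define OP_READ   0     /* stands in for LIO_READ */
#define OP_WRITE  1     /* stands in for LIO_WRITE */
#define OP_BASE64 128   /* stands in for LIO_OPCODE_BASE in the 64-bit build */

struct req32 { int32_t offset; };   /* hypothetical 32-bit request */
struct req64 { int64_t offset; };   /* hypothetical 64-bit request */
union req { struct req32 r32; struct req64 r64; };

/* One enqueue routine serves both flavours: the low bits say read or
   write, the tag bit says which union member is live.  */
static void
enqueue (union req *req, int opcode)
{
  long long offset = (opcode & OP_BASE64)
                     ? (long long) req->r64.offset
                     : (long long) req->r32.offset;
  printf ("%s, %s offsets, offset=%lld\n",
          (opcode & ~OP_BASE64) == OP_READ ? "read" : "write",
          (opcode & OP_BASE64) ? "64-bit" : "32-bit", offset);
}

int
main (void)
{
  union req a = { .r32 = { .offset = 42 } };
  union req b = { .r64 = { .offset = 1LL << 40 } };

  enqueue (&a, OP_READ);                /* via lio_listio: base is 0 */
  enqueue (&b, OP_READ | OP_BASE64);    /* via lio_listio64: base is 128 */
  return 0;
}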

sysdeps/pthread/lio_listio64.c

@@ -19,158 +19,14 @@
02111-1307 USA. */
#include <aio.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include "aio_misc.h"
/* We need this special structure to handle asynchronous I/O. */
struct async_waitlist
{
int counter;
struct sigevent sigev;
struct waitlist list[0];
};
int
lio_listio64 (mode, list, nent, sig)
int mode;
struct aiocb64 *const list[];
int nent;
struct sigevent *sig;
{
struct sigevent defsigev;
struct requestlist *requests[nent];
int cnt;
volatile int total = 0;
int result = 0;
/* Check arguments. */
if (mode != LIO_WAIT && mode != LIO_NOWAIT)
{
__set_errno (EINVAL);
return -1;
}
if (sig == NULL)
{
defsigev.sigev_notify = SIGEV_NONE;
sig = &defsigev;
}
/* Request the mutex. */
pthread_mutex_lock (&__aio_requests_mutex);
/* Now we can enqueue all requests. Since we already acquired the
mutex the enqueue function need not do this. */
for (cnt = 0; cnt < nent; ++cnt)
if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
{
requests[cnt] = __aio_enqueue_request ((aiocb_union *) list[cnt],
(list[cnt]->aio_lio_opcode
| 128));
if (requests[cnt] != NULL)
/* Successfully enqueued. */
++total;
else
/* Signal that we've seen an error. `errno' and the error code
of the aiocb will tell more. */
result = -1;
}
if (total == 0)
{
/* We don't have anything to do except signalling if we work
asynchronously. */
/* Release the mutex. We do this before raising a signal since the
signal handler might do a `siglongjmp' and then the mutex is
locked forever. */
pthread_mutex_unlock (&__aio_requests_mutex);
if (mode == LIO_NOWAIT)
__aio_notify_only (sig,
sig->sigev_notify == SIGEV_SIGNAL ? getpid () : 0);
return result;
}
else if (mode == LIO_WAIT)
{
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
struct waitlist waitlist[nent];
int oldstate;
total = 0;
for (cnt = 0; cnt < nent; ++cnt)
if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP
&& requests[cnt] != NULL)
{
waitlist[cnt].cond = &cond;
waitlist[cnt].next = requests[cnt]->waiting;
waitlist[cnt].counterp = &total;
waitlist[cnt].sigevp = NULL;
waitlist[cnt].caller_pid = 0; /* Not needed. */
requests[cnt]->waiting = &waitlist[cnt];
++total;
}
/* Since `pthread_cond_wait'/`pthread_cond_timedwait' are cancelation
points we must be careful. We added entries to the waiting lists
which we must remove. So defer cancelation for now. */
pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
while (total > 0)
pthread_cond_wait (&cond, &__aio_requests_mutex);
/* Now it's time to restore the cancelation state. */
pthread_setcancelstate (oldstate, NULL);
/* Release the conditional variable. */
if (pthread_cond_destroy (&cond) != 0)
/* This must never happen. */
abort ();
}
else
{
struct async_waitlist *waitlist;
waitlist = (struct async_waitlist *)
malloc (sizeof (struct async_waitlist)
+ (nent * sizeof (struct waitlist)));
if (waitlist == NULL)
{
__set_errno (EAGAIN);
result = -1;
}
else
{
pid_t caller_pid = sig->sigev_notify == SIGEV_SIGNAL ? getpid () : 0;
total = 0;
for (cnt = 0; cnt < nent; ++cnt)
if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP
&& requests[cnt] != NULL)
{
waitlist->list[cnt].cond = NULL;
waitlist->list[cnt].next = requests[cnt]->waiting;
waitlist->list[cnt].counterp = &waitlist->counter;
waitlist->list[cnt].sigevp = &waitlist->sigev;
waitlist->list[cnt].caller_pid = caller_pid;
requests[cnt]->waiting = &waitlist->list[cnt];
++total;
}
waitlist->counter = total;
waitlist->sigev = *sig;
}
}
/* Release the mutex. */
pthread_mutex_unlock (&__aio_requests_mutex);
return result;
}
#define lio_listio lio_listio64
#define aiocb aiocb64
#define LIO_OPCODE_BASE 128
#include <lio_listio.c>
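
For completeness, a minimal usage sketch of the entry point this file builds. It assumes a readable placeholder file (/etc/hostname here), the LFS transitional API (_LARGEFILE64_SOURCE), and linking with -lrt on older glibc; none of that comes from the diff itself.

#define _LARGEFILE64_SOURCE
#include <aio.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main (void)
{
  char buf[256];
  int fd = open ("/etc/hostname", O_RDONLY);    /* placeholder file */
  if (fd < 0)
    return 1;

  struct aiocb64 cb;
  memset (&cb, 0, sizeof cb);
  cb.aio_fildes = fd;
  cb.aio_buf = buf;
  cb.aio_nbytes = sizeof buf - 1;
  cb.aio_offset = 0;
  cb.aio_lio_opcode = LIO_READ;   /* the 64-bit tag bit is applied internally */
  cb.aio_sigevent.sigev_notify = SIGEV_NONE;

  struct aiocb64 *list[1] = { &cb };

  /* LIO_WAIT: return only once every request in the list has completed.  */
  if (lio_listio64 (LIO_WAIT, list, 1, NULL) != 0)
    {
      perror ("lio_listio64");
      close (fd);
      return 1;
    }

  ssize_t n = aio_return64 (&cb);
  if (n >= 0)
    {
      buf[n] = '\0';
      printf ("read %zd bytes\n", n);
    }

  close (fd);
  return 0;
}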