#include "pthread_impl.h"
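
/* Default no-op definitions; the real implementations (rsyscall locking,
 * TSD destructor runner) override these weak aliases when linked in. */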
static void dummy_0()
{
}
weak_alias(dummy_0, __rsyscall_lock);
weak_alias(dummy_0, __rsyscall_unlock);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
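
/* If __pthread_unwind_next was already defined as a macro (by pthread_impl.h),
 * remap it so the function below is emitted as __pthread_unwind_next_3. */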
#ifdef __pthread_unwind_next
#undef __pthread_unwind_next
#define __pthread_unwind_next __pthread_unwind_next_3
#endif
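
/* Called with the current cancellation-cleanup context. If another context is
 * chained after it, unwind into that one; otherwise this is the final exit
 * path for the calling thread. */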
void __pthread_unwind_next(struct __ptcb *cb)
{
	pthread_t self = pthread_self();
	int n;
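
	/* A chained cleanup context exists: pop it and longjmp back into it. */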
	if (cb->__next) {
		self->cancelbuf = cb->__next->__next;
		longjmp((void *)cb->__next->__jb, 1);
	}
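
	/* No more cleanup contexts: the thread is really exiting. Take its
	 * exit lock, then run any TSD destructors. */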
	LOCK(&self->exitlock);

	__pthread_tsd_run_dtors();

	/* Mark this thread dead before decrementing count */
	self->dead = 1;
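
	/* Atomically decrement the count of additional threads; if it was
	 * already zero, this was the last thread, so exit the process. */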
	do n = libc.threads_minus_1;
	while (n && a_cas(&libc.threads_minus_1, n, n-1)!=n);
	if (!n) exit(0);
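
	/* A detached thread frees its own stack. Block all signals first,
	 * since nothing can be handled safely once the stack is unmapped. */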
	if (self->detached && self->map_base) {
		__syscall(SYS_rt_sigprocmask, SIG_BLOCK, (uint64_t[]){-1},0,8);
		__unmapself(self->map_base, self->map_size);
	}
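
	/* SYS_exit terminates only this thread, not the whole process. */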
	__syscall(SYS_exit, 0);
}
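
/* Entry point for new threads: runs the user start function and passes its
 * return value to pthread_exit. */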
static int start(void *p)
{
	struct pthread *self = p;
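	/* The creator sets unblock_cancel when it has a cancellation pending; in
	 * that case the inherited signal mask has the cancel signal blocked, so
	 * unblock it before entering user code. */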
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, 8);
	pthread_exit(self->start(self->start_arg));
	return 0;
}
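
/* __uniclone() (arch-specific asm) invokes clone() with the flags needed to
 * start a new thread running the given function on the given stack. */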
int __uniclone(void *, int (*)(), void *);
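
/* Round up to a whole number of pages. */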
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
int pthread_create(pthread_t *res, const pthread_attr_t *attr, void *(*entry)(void *), void *arg)
{
	int ret;
	size_t size, guard;
	struct pthread *self = pthread_self(), *new;
	unsigned char *map, *stack, *tsd;
	const pthread_attr_t default_attr = { 0 };
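
	/* A null self means the thread pointer could not be initialized;
	 * report that threads are unsupported. */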
	if (!self) return ENOSYS;
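	/* First new thread: unblock the implementation-internal signals and
	 * mark the process as multithreaded. */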
	if (!libc.threaded) {
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, 8);
		libc.threaded = 1;
	}
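
	/* Apply default attributes, then size the mapping: page-rounded guard
	 * area and stack, plus space for thread-specific data (TSD). */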
	if (!attr) attr = &default_attr;
	guard = ROUND(attr->_a_guardsize + DEFAULT_GUARD_SIZE);
	size = guard + ROUND(attr->_a_stacksize + DEFAULT_STACK_SIZE);
	size += __pthread_tsd_size;
	map = mmap(0, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANON, -1, 0);
	if (map == MAP_FAILED) return EAGAIN;
	if (guard) mprotect(map, guard, PROT_NONE);
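
	/* Lay out the block from the top down: TSD at the very top, the thread
	 * descriptor just below it, and the stack (16-byte aligned) growing
	 * down from below the descriptor. */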
	tsd = map + size - __pthread_tsd_size;
	new = (void *)(tsd - sizeof *new - PAGE_SIZE%sizeof *new);
	new->map_base = map;
	new->map_size = size;
	new->pid = self->pid;
	new->errno_ptr = &new->errno_val;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	new->detached = attr->_a_detach;
	new->attr = *attr;
	new->unblock_cancel = self->cancel;
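	/* Copy the creator's TLS descriptor, point its base address at the new
	 * thread descriptor, and place the initial stack pointer below it. */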
	memcpy(new->tlsdesc, self->tlsdesc, sizeof new->tlsdesc);
	new->tlsdesc[1] = (uintptr_t)new;
	stack = (void *)((uintptr_t)new-1 & ~(uintptr_t)15);
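
	/* Hold the rsyscall lock across clone() so thread creation cannot race
	 * with an in-progress rsyscall broadcast (e.g. set*id). */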
	__rsyscall_lock();

	a_inc(&libc.threads_minus_1);
	ret = __uniclone(stack, start, new);

	__rsyscall_unlock();
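
	/* On clone failure, roll back the thread count and release the mapping. */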
	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		munmap(map, size);
		return EAGAIN;
	}
	*res = new;
	return 0;
}
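
/* pthread_exit records the result and enters the unwind path, which runs any
 * cancellation cleanup handlers and TSD destructors before the thread exits. */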
void pthread_exit(void *result)
{
	struct pthread *self = pthread_self();
	struct __ptcb cb = { .__next = self->cancelbuf };
	self->result = result;
	__pthread_unwind_next(&cb);
}