#include "pthread_impl.h"

void __vm_lock_impl(int);
void __vm_unlock_impl(void);
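
/* Slow path for process-shared barriers: synchronization happens entirely
 * through the barrier object itself, using non-private futexes, since an
 * in-process instance object cannot be shared across processes. _b_limit
 * holds the thread count minus one, with the sign bit marking the barrier
 * as process-shared, so masking with INT_MAX and adding one recovers the
 * full count. */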
static int pshared_barrier_wait(pthread_barrier_t *b)
{
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	if (++b->_b_count == limit) {
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}
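
	/* Each thread takes a vm lock so the mapping containing the barrier
	 * stays in place while threads are still inside the wait, supporting
	 * the self-synchronized destruction sequence below. */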
	__vm_lock_impl(+1);

	/* Ensure all threads have a vm lock before proceeding */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Perform a recursive unlock suitable for self-sync'd destruction */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

	__vm_unlock_impl();

	return ret;
}
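
/* For non-shared barriers, each use of the barrier is synchronized through
 * an instance object that lives on the stack of the first thread to arrive
 * (the "instance owner"). That thread cannot return until the count drops
 * back to zero, so the instance stays valid for exactly one barrier round
 * while the barrier object itself is immediately reusable. */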
struct instance
{
	int count;
	int last;
	int waiters;
	int finished;
};

int pthread_barrier_wait(pthread_barrier_t *b)
{
	int limit = b->_b_limit;
	struct instance *inst;

	/* Trivial case: count was set at 1 */
	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Process-shared barriers require a separate, inefficient wait */
	if (limit < 0) return pshared_barrier_wait(b);

	/* Otherwise we need a lock on the barrier object */
	while (a_swap(&b->_b_lock, 1))
		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);
	inst = b->_b_inst;

	/* First thread to enter the barrier becomes the "instance owner" */
	if (!inst) {
		struct instance new_inst = { 0 };
		int spins = 200;
		b->_b_inst = inst = &new_inst;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
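		/* Spin briefly waiting for the round to complete before
		 * falling back to sleeping on the futex. */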
		while (spins-- && !inst->finished)
			a_spin();
		a_inc(&inst->finished);
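		/* FUTEX_WAIT|128 requests a private (process-local) futex
		 * wait; if the kernel lacks private-futex support and the
		 * call fails with ENOSYS, retry with a plain FUTEX_WAIT. */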
		while (inst->finished == 1)
			__syscall(SYS_futex,&inst->finished,FUTEX_WAIT|128,1,0) != -ENOSYS
			|| __syscall(SYS_futex,&inst->finished,FUTEX_WAIT,1,0);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	/* Last thread to enter the barrier wakes all non-instance-owners */
	if (++inst->count == limit) {
		b->_b_inst = 0;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		a_store(&inst->last, 1);
		if (inst->waiters)
			__wake(&inst->last, -1, 1);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		__wait(&inst->last, &inst->waiters, 0, 1);
	}

	/* Last thread to exit the barrier wakes the instance owner */
	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
		__wake(&inst->finished, 1, 1);

	return 0;
}