the memory model we use internally for atomics permits plain loads of values which may be subject to concurrent modification without requiring that a special load function be used. since a compiler is free to make transformations that alter the number of loads or the way in which loads are performed, the compiler is theoretically free to break this usage. the most obvious concern is with atomic cas constructs: something of the form tmp=*p;a_cas(p,tmp,f(tmp)); could be transformed to a_cas(p,*p,f(*p)); where the latter is intended to show multiple loads of *p whose resulting values might fail to be equal; this would break the atomicity of the whole operation. but even more fundamental breakage is possible.

with the changes being made now, objects that may be modified by atomics are modeled as volatile, and the atomic operations performed on them by other threads are modeled as asynchronous stores by hardware which happens to be acting on the request of another thread. such modeling of course does not itself address memory synchronization between cores/cpus, but that aspect was already handled. this all seems less than ideal, but it's the best we can do without mandating a C11 compiler and using the C11 model for atomics.

in the case of pthread_once_t, the ABI type of the underlying object is not volatile-qualified. so we are assuming that accessing the object through a volatile-qualified lvalue via casts yields volatile access semantics. the language of the C standard is somewhat unclear on this matter, but this is an assumption the linux kernel also makes, and seems to be the correct interpretation of the standard.
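as a concrete illustration of the cas concern above, here is a minimal sketch; it uses the gcc-style __sync_val_compare_and_swap builtin as a stand-in for the internal a_cas, and atomic_inc is a hypothetical example function, not musl source:

/* sketch only: __sync_val_compare_and_swap stands in for musl's
 * internal a_cas; atomic_inc is a hypothetical example. */
static void atomic_inc(volatile int *p)
{
	int tmp;
	do tmp = *p; /* volatile: exactly one load, so the cas compares
	              * against the same value used to compute tmp+1 */
	while (__sync_val_compare_and_swap(p, tmp, tmp+1) != tmp);
}

if p pointed to a plain (non-volatile) int, the compiler could legally rematerialize tmp as a second load of *p, yielding the broken a_cas(p,*p,f(*p)) shape described above. the pthread_once_t case amounts to accessing the plain object as *(volatile int *)&obj and relying on that cast to yield volatile access semantics.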
#include "pthread_impl.h"

void __vm_lock_impl(int);
void __vm_unlock_impl(void);

static int pshared_barrier_wait(pthread_barrier_t *b)
{
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	if (++b->_b_count == limit) {
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	__vm_lock_impl(+1);

	/* Ensure all threads have a vm lock before proceeding */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Perform a recursive unlock suitable for self-sync'd destruction */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

	__vm_unlock_impl();

	return ret;
}

struct instance
{
	volatile int count;
	volatile int last;
	volatile int waiters;
	volatile int finished;
};

int pthread_barrier_wait(pthread_barrier_t *b)
{
	int limit = b->_b_limit;
	struct instance *inst;

	/* Trivial case: count was set at 1 */
	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Process-shared barriers require a separate, inefficient wait */
	if (limit < 0) return pshared_barrier_wait(b);

	/* Otherwise we need a lock on the barrier object */
	while (a_swap(&b->_b_lock, 1))
		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);
	inst = b->_b_inst;

	/* First thread to enter the barrier becomes the "instance owner" */
	if (!inst) {
		/* The instance object lives on this thread's stack and
		 * must remain valid until all waiters are done with it;
		 * hence the handshake on inst->finished below. */
		struct instance new_inst = { 0 };
		int spins = 200;
		b->_b_inst = inst = &new_inst;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		while (spins-- && !inst->finished)
			a_spin();
		a_inc(&inst->finished);
		/* 128 is the private-futex flag; retry without it on
		 * kernels too old to support private futexes */
		while (inst->finished == 1)
			__syscall(SYS_futex,&inst->finished,FUTEX_WAIT|128,1,0) != -ENOSYS
			|| __syscall(SYS_futex,&inst->finished,FUTEX_WAIT,1,0);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	/* Last thread to enter the barrier wakes all non-instance-owners */
	if (++inst->count == limit) {
		b->_b_inst = 0;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		a_store(&inst->last, 1);
		if (inst->waiters)
			__wake(&inst->last, -1, 1);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		__wait(&inst->last, &inst->waiters, 0, 1);
	}

	/* Last thread to exit the barrier wakes the instance owner */
	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
		__wake(&inst->finished, 1, 1);

	return 0;
}
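as a usage note, a minimal caller of this interface might look like the following; this is a sketch against the standard POSIX barrier api, not part of this file:

/* usage sketch, not part of this file: N threads meet at a barrier;
 * exactly one of them observes PTHREAD_BARRIER_SERIAL_THREAD. */
#include <pthread.h>
#include <stdio.h>

#define N 4

static pthread_barrier_t barrier;

static void *worker(void *arg)
{
	if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
		printf("thread %d was the serial thread\n", (int)(long)arg);
	return 0;
}

int main(void)
{
	pthread_t t[N];
	int i;
	pthread_barrier_init(&barrier, 0, N);
	for (i = 0; i < N; i++)
		pthread_create(&t[i], 0, worker, (void *)(long)i);
	for (i = 0; i < N; i++)
		pthread_join(&t[i], 0);
	pthread_barrier_destroy(&barrier);
	return 0;
}

build with -pthread; every call to pthread_barrier_wait blocks until N threads have arrived, and exactly one caller per round receives the serial-thread return value.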