Mirror of https://github.com/fluencelabs/musl, synced 2025-06-29 06:32:16 +00:00
make futex operations use private-futex mode when possible
private-futex uses the virtual address of the futex int directly as the hash key rather than requiring the kernel to resolve the address to an underlying backing for the mapping in which it lies. for certain usage patterns it improves performance significantly.

in many places, the code using futex __wake and __wait operations was already passing a correct fixed zero or nonzero flag for the priv argument, so no change was needed at the site of the call, only in the __wake and __wait functions themselves. in other places, especially where the process-shared attribute for a synchronization object was not previously tracked, additional new code is needed. for mutexes, the only place to store the flag is in the type field, so additional bit masking logic is needed for accessing the type.

for non-process-shared condition variable broadcasts, the futex requeue operation is unable to requeue from a private futex to a process-shared one in the mutex structure, so requeue is simply disabled in this case by waking all waiters.

for robust mutexes, the kernel always performs a non-private wake when the owner dies. in order not to introduce a behavioral regression in non-process-shared robust mutexes (when the owning thread dies), they are simply forced to be treated as process-shared for now, giving correct behavior at the expense of performance. this can be fixed by adding explicit code to pthread_exit to do the right thing for non-shared robust mutexes in userspace rather than relying on the kernel to do it, and will be fixed in this way later.

since not all supported kernels have private futex support, the new code detects EINVAL from the futex syscall and falls back to making the call without the private flag. no attempt to cache the result is made; caching it and using the cached value efficiently is somewhat difficult, and not worth the complexity when the benefits would be seen only on ancient kernels which have numerous other limitations and bugs anyway.
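as a concrete illustration of the fallback described above, the sketch below performs a wake through the public Linux futex interface and retries without the private flag when the kernel reports EINVAL. the helper name wake_waiters and the use of errno are assumptions made for a standalone example; the in-tree helpers in the diff below instead test the raw -EINVAL return of the internal __syscall wrapper.

/* illustrative sketch only: try a private-futex wake first, and if the
 * kernel predates private futexes (EINVAL), retry without the flag. */
#define _GNU_SOURCE
#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>   /* FUTEX_WAKE, FUTEX_PRIVATE_FLAG (0x80 == 128) */

static void wake_waiters(volatile int *addr, int cnt, int priv)
{
	if (priv) priv = FUTEX_PRIVATE_FLAG;
	if (cnt < 0) cnt = INT_MAX;
	if (syscall(SYS_futex, addr, FUTEX_WAKE | priv, cnt) == -1
	    && errno == EINVAL)
		syscall(SYS_futex, addr, FUTEX_WAKE, cnt);
}

the same try-private-then-fall-back shape appears for FUTEX_WAIT and FUTEX_REQUEUE throughout the diff; no caching of the detection result is attempted, as discussed above.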
@@ -76,6 +76,7 @@ struct __timer {
 #define _c_destroy __u.__i[8]
 #define _rw_lock __u.__i[0]
 #define _rw_waiters __u.__i[1]
+#define _rw_shared __u.__i[2]
 #define _b_lock __u.__i[0]
 #define _b_waiters __u.__i[1]
 #define _b_limit __u.__i[2]
@@ -108,8 +109,13 @@ void __unmapself(void *, size_t);
 
 int __timedwait(volatile int *, int, clockid_t, const struct timespec *, void (*)(void *), void *, int);
 void __wait(volatile int *, volatile int *, int, int);
-#define __wake(addr, cnt, priv) \
-	__syscall(SYS_futex, addr, FUTEX_WAKE, (cnt)<0?INT_MAX:(cnt))
+static inline void __wake(volatile void *addr, int cnt, int priv)
+{
+	if (priv) priv = 128;
+	if (cnt<0) cnt = INT_MAX;
+	__syscall(SYS_futex, addr, FUTEX_WAKE|priv, cnt) != -EINVAL ||
+	__syscall(SYS_futex, addr, FUTEX_WAKE, cnt);
+}
 
 void __acquire_ptc();
 void __release_ptc();
@@ -4,12 +4,15 @@
 #include "futex.h"
 #include "syscall.h"
 
-static int do_wait(volatile int *addr, int val,
-	clockid_t clk, const struct timespec *at, int priv)
+int __timedwait(volatile int *addr, int val,
+	clockid_t clk, const struct timespec *at,
+	void (*cleanup)(void *), void *arg, int priv)
 {
-	int r;
+	int r, cs;
 	struct timespec to, *top=0;
 
+	if (priv) priv = 128;
+
 	if (at) {
 		if (at->tv_nsec >= 1000000000UL) return EINVAL;
 		if (clock_gettime(clk, &to)) return EINVAL;
@@ -22,21 +25,12 @@ static int do_wait(volatile int *addr, int val,
 		top = &to;
 	}
 
-	r = -__syscall_cp(SYS_futex, addr, FUTEX_WAIT, val, top);
-	if (r == EINTR || r == EINVAL || r == ETIMEDOUT) return r;
-	return 0;
-}
-
-int __timedwait(volatile int *addr, int val,
-	clockid_t clk, const struct timespec *at,
-	void (*cleanup)(void *), void *arg, int priv)
-{
-	int r, cs;
-
 	if (!cleanup) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
 	pthread_cleanup_push(cleanup, arg);
 
-	r = do_wait(addr, val, clk, at, priv);
+	r = -__syscall_cp(SYS_futex, addr, FUTEX_WAIT|priv, val, top);
+	if (r == EINVAL) r = -__syscall_cp(SYS_futex, addr, FUTEX_WAIT, val, top);
+	if (r != EINTR && r != ETIMEDOUT) r = 0;
 
 	pthread_cleanup_pop(0);
 	if (!cleanup) pthread_setcancelstate(cs, 0);
@@ -3,13 +3,15 @@
 void __wait(volatile int *addr, volatile int *waiters, int val, int priv)
 {
 	int spins=10000;
-	if (priv) priv = 128; priv=0;
+	if (priv) priv = 128;
 	while (spins--) {
 		if (*addr==val) a_spin();
 		else return;
 	}
 	if (waiters) a_inc(waiters);
-	while (*addr==val)
-		__syscall(SYS_futex, addr, FUTEX_WAIT|priv, val, 0);
+	while (*addr==val) {
+		__syscall(SYS_futex, addr, FUTEX_WAIT|priv, val, 0) != -EINVAL
+		|| __syscall(SYS_futex, addr, FUTEX_WAIT, val, 0);
+	}
 	if (waiters) a_dec(waiters);
 }
@@ -75,7 +75,7 @@ int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *restrict a, int *re
 }
 int pthread_mutexattr_getpshared(const pthread_mutexattr_t *restrict a, int *restrict pshared)
 {
-	*pshared = a->__attr>>31;
+	*pshared = a->__attr / 128U % 2;
 	return 0;
 }
 
@@ -87,7 +87,8 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 		a_spin();
 		a_inc(&inst->finished);
 		while (inst->finished == 1)
-			__syscall(SYS_futex, &inst->finished, FUTEX_WAIT,1,0);
+			__syscall(SYS_futex,&inst->finished,FUTEX_WAIT|128,1,0) != -EINTR
+			|| __syscall(SYS_futex,&inst->finished,FUTEX_WAIT,1,0);
 		return PTHREAD_BARRIER_SERIAL_THREAD;
 	}
 
@@ -27,13 +27,17 @@ int pthread_cond_broadcast(pthread_cond_t *c)
 
 	/* Perform the futex requeue, waking one waiter unless we know
 	 * that the calling thread holds the mutex. */
+	int wake_cnt = !(m->_m_type & 3)
+		|| (m->_m_lock&INT_MAX)!=__pthread_self()->tid;
+	if (m->_m_type & 128) wake_cnt = INT_MAX;
+	__syscall(SYS_futex, &c->_c_seq, FUTEX_REQUEUE | 128,
+		wake_cnt, INT_MAX, &m->_m_lock) != -EINVAL ||
 	__syscall(SYS_futex, &c->_c_seq, FUTEX_REQUEUE,
-		!m->_m_type || (m->_m_lock&INT_MAX)!=__pthread_self()->tid,
-		INT_MAX, &m->_m_lock);
+		wake_cnt, INT_MAX, &m->_m_lock);
 
 out:
 	a_store(&c->_c_lock, 0);
-	if (c->_c_lockwait) __wake(&c->_c_lock, 1, 0);
+	if (c->_c_lockwait) __wake(&c->_c_lock, 1, 1);
 
 	return 0;
 }
@@ -4,6 +4,6 @@ int pthread_cond_signal(pthread_cond_t *c)
 {
 	if (!c->_c_waiters) return 0;
 	a_inc(&c->_c_seq);
-	if (c->_c_waiters) __wake(&c->_c_seq, 1, 0);
+	if (c->_c_waiters) __wake(&c->_c_seq, 1, c->_c_mutex!=(void*)-1);
 	return 0;
 }
@@ -41,7 +41,7 @@ int pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict
 	struct cm cm = { .c=c, .m=m };
 	int r, e=0, seq;
 
-	if (m->_m_type && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
+	if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
 		return EPERM;
 
 	if (ts && ts->tv_nsec >= 1000000000UL)
@@ -64,7 +64,8 @@ int pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict
 
 	pthread_mutex_unlock(m);
 
-	do e = __timedwait(&c->_c_seq, seq, c->_c_clock, ts, cleanup, &cm, 0);
+	do e = __timedwait(&c->_c_seq, seq, c->_c_clock, ts, cleanup, &cm,
+		c->_c_mutex != (void *)-1);
 	while (c->_c_seq == seq && (!e || e==EINTR));
 	if (e == EINTR) e = 0;
 
@@ -2,7 +2,7 @@
 
 int pthread_mutex_consistent(pthread_mutex_t *m)
 {
-	if (m->_m_type < 8) return EINVAL;
+	if ((m->_m_type & 15) < 8) return EINVAL;
 	if ((m->_m_lock & 0x3fffffff) != __pthread_self()->tid)
 		return EPERM;
 	m->_m_type -= 8;
@@ -3,6 +3,7 @@
 int pthread_mutex_init(pthread_mutex_t *restrict m, const pthread_mutexattr_t *restrict a)
 {
 	*m = (pthread_mutex_t){0};
-	if (a) m->_m_type = a->__attr & 7;
+	if (a) m->_m_type = a->__attr;
+	if (m->_m_type & 4) m->_m_type |= 128U;
 	return 0;
 }
@@ -2,7 +2,8 @@
 
 int pthread_mutex_lock(pthread_mutex_t *m)
 {
-	if (m->_m_type == PTHREAD_MUTEX_NORMAL && !a_cas(&m->_m_lock, 0, EBUSY))
+	if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL
+	    && !a_cas(&m->_m_lock, 0, EBUSY))
 		return 0;
 
 	return pthread_mutex_timedlock(m, 0);
@@ -2,11 +2,12 @@
 
 int pthread_mutex_timedlock(pthread_mutex_t *restrict m, const struct timespec *restrict at)
 {
-	int r, t;
-	if (m->_m_type == PTHREAD_MUTEX_NORMAL && !a_cas(&m->_m_lock, 0, EBUSY))
+	if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL
+	    && !a_cas(&m->_m_lock, 0, EBUSY))
 		return 0;
 
+	int r, t, priv = (m->_m_type & 128) ^ 128;
 
 	while ((r=pthread_mutex_trylock(m)) == EBUSY) {
 		if (!(r=m->_m_lock) || (r&0x40000000)) continue;
 		if ((m->_m_type&3) == PTHREAD_MUTEX_ERRORCHECK
@@ -16,7 +17,7 @@ int pthread_mutex_timedlock(pthread_mutex_t *restrict m, const struct timespec *
 		a_inc(&m->_m_waiters);
 		t = r | 0x80000000;
 		a_cas(&m->_m_lock, r, t);
-		r = __timedwait(&m->_m_lock, t, CLOCK_REALTIME, at, 0, 0, 0);
+		r = __timedwait(&m->_m_lock, t, CLOCK_REALTIME, at, 0, 0, priv);
 		a_dec(&m->_m_waiters);
 		if (r && r != EINTR) break;
 	}
@@ -1,17 +1,13 @@
 #include "pthread_impl.h"
 
-int pthread_mutex_trylock(pthread_mutex_t *m)
+int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 {
-	int tid, old, own;
-	pthread_t self;
+	int old, own;
+	int type = m->_m_type & 15;
+	pthread_t self = __pthread_self();
+	int tid = self->tid;
 
-	if (m->_m_type == PTHREAD_MUTEX_NORMAL)
-		return a_cas(&m->_m_lock, 0, EBUSY) & EBUSY;
-
-	self = __pthread_self();
-	tid = self->tid;
-
-	if (m->_m_type >= 4) {
+	if (type >= 4) {
 		if (!self->robust_list.off)
 			__syscall(SYS_set_robust_list,
 				&self->robust_list, 3*sizeof(long));
@@ -21,7 +17,7 @@ int pthread_mutex_trylock(pthread_mutex_t *m)
 
 	old = m->_m_lock;
 	own = old & 0x7fffffff;
-	if (own == tid && (m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE) {
+	if (own == tid && (type&3) == PTHREAD_MUTEX_RECURSIVE) {
 		if ((unsigned)m->_m_count >= INT_MAX) return EAGAIN;
 		m->_m_count++;
 		return 0;
@@ -30,9 +26,9 @@ int pthread_mutex_trylock(pthread_mutex_t *m)
 	if ((own && !(own & 0x40000000)) || a_cas(&m->_m_lock, old, tid)!=old)
 		return EBUSY;
 
-	if (m->_m_type < 4) return 0;
+	if (type < 4) return 0;
 
-	if (m->_m_type >= 8) {
+	if (type >= 8) {
 		m->_m_lock = 0;
 		return ENOTRECOVERABLE;
 	}
@@ -50,3 +46,10 @@ int pthread_mutex_trylock(pthread_mutex_t *m)
 
 	return 0;
 }
+
+int pthread_mutex_trylock(pthread_mutex_t *m)
+{
+	if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL)
+		return a_cas(&m->_m_lock, 0, EBUSY) & EBUSY;
+	return __pthread_mutex_trylock_owner(m);
+}
@@ -9,16 +9,18 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 	int waiters = m->_m_waiters;
 	int cont;
 	int robust = 0;
+	int type = m->_m_type & 15;
+	int priv = (m->_m_type & 128) ^ 128;
 
-	if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
+	if (type != PTHREAD_MUTEX_NORMAL) {
 		if (!m->_m_lock)
 			return EPERM;
 		self = __pthread_self();
 		if ((m->_m_lock&0x1fffffff) != self->tid)
 			return EPERM;
-		if ((m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE && m->_m_count)
+		if ((type&3) == PTHREAD_MUTEX_RECURSIVE && m->_m_count)
 			return m->_m_count--, 0;
-		if (m->_m_type >= 4) {
+		if (type >= 4) {
 			robust = 1;
 			self->robust_list.pending = &m->_m_next;
 			*(void **)m->_m_prev = m->_m_next;
@@ -32,6 +34,6 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 		__vm_unlock_impl();
 	}
 	if (waiters || cont<0)
-		__wake(&m->_m_lock, 1, 0);
+		__wake(&m->_m_lock, 1, priv);
 	return 0;
 }
@@ -3,7 +3,7 @@
 int pthread_mutexattr_setpshared(pthread_mutexattr_t *a, int pshared)
 {
 	if (pshared > 1U) return EINVAL;
-	a->__attr &= 0x7fffffff;
-	a->__attr |= pshared<<31;
+	a->__attr &= ~128U;
+	a->__attr |= pshared<<7;
 	return 0;
 }
@@ -3,7 +3,7 @@
 static void undo(void *control)
 {
 	a_store(control, 0);
-	__wake(control, 1, 0);
+	__wake(control, 1, 1);
 }
 
 int pthread_once(pthread_once_t *control, void (*init)(void))
@@ -25,10 +25,10 @@ int pthread_once(pthread_once_t *control, void (*init)(void))
 		pthread_cleanup_pop(0);
 
 		a_store(control, 2);
-		if (waiters) __wake(control, -1, 0);
+		if (waiters) __wake(control, -1, 1);
 		return 0;
 	case 1:
-		__wait(control, &waiters, 1, 0);
+		__wait(control, &waiters, 1, 1);
 		continue;
 	case 2:
 		return 0;
@@ -3,7 +3,6 @@
 int pthread_rwlock_init(pthread_rwlock_t *restrict rw, const pthread_rwlockattr_t *restrict a)
 {
 	*rw = (pthread_rwlock_t){0};
-	if (a) {
-	}
+	if (a) rw->_rw_shared = a->__attr[0]*128;
 	return 0;
 }
@@ -8,7 +8,7 @@ int pthread_rwlock_timedrdlock(pthread_rwlock_t *restrict rw, const struct times
 		t = r | 0x80000000;
 		a_inc(&rw->_rw_waiters);
 		a_cas(&rw->_rw_lock, r, t);
-		r = __timedwait(&rw->_rw_lock, t, CLOCK_REALTIME, at, 0, 0, 0);
+		r = __timedwait(&rw->_rw_lock, t, CLOCK_REALTIME, at, 0, 0, rw->_rw_shared^128);
 		a_dec(&rw->_rw_waiters);
 		if (r && r != EINTR) return r;
 	}
@@ -8,7 +8,7 @@ int pthread_rwlock_timedwrlock(pthread_rwlock_t *restrict rw, const struct times
 		t = r | 0x80000000;
 		a_inc(&rw->_rw_waiters);
 		a_cas(&rw->_rw_lock, r, t);
-		r = __timedwait(&rw->_rw_lock, t, CLOCK_REALTIME, at, 0, 0, 0);
+		r = __timedwait(&rw->_rw_lock, t, CLOCK_REALTIME, at, 0, 0, rw->_rw_shared^128);
 		a_dec(&rw->_rw_waiters);
 		if (r && r != EINTR) return r;
 	}
@@ -2,7 +2,7 @@
 
 int pthread_rwlock_unlock(pthread_rwlock_t *rw)
 {
-	int val, cnt, waiters, new;
+	int val, cnt, waiters, new, priv = rw->_rw_shared^128;
 
 	do {
 		val = rw->_rw_lock;
@@ -12,7 +12,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rw)
 	} while (a_cas(&rw->_rw_lock, val, new) != val);
 
 	if (!new && (waiters || val<0))
-		__wake(&rw->_rw_lock, cnt, 0);
+		__wake(&rw->_rw_lock, cnt, priv);
 
 	return 0;
 }
@@ -10,5 +10,6 @@ int sem_init(sem_t *sem, int pshared, unsigned value)
 	}
 	sem->__val[0] = value;
 	sem->__val[1] = 0;
+	sem->__val[2] = pshared ? 0 : 128;
 	return 0;
 }
@@ -3,7 +3,7 @@
 
 int sem_post(sem_t *sem)
 {
-	int val, waiters;
+	int val, waiters, priv = sem->__val[2];
 	do {
 		val = sem->__val[0];
 		waiters = sem->__val[1];
@@ -12,6 +12,6 @@ int sem_post(sem_t *sem)
 			return -1;
 		}
 	} while (a_cas(sem->__val, val, val+1+(val<0)) != val);
-	if (val<0 || waiters) __wake(sem->__val, 1, 0);
+	if (val<0 || waiters) __wake(sem->__val, 1, priv);
 	return 0;
 }
@@ -12,7 +12,7 @@ int sem_timedwait(sem_t *restrict sem, const struct timespec *restrict at)
 	int r;
 	a_inc(sem->__val+1);
 	a_cas(sem->__val, 0, -1);
-	r = __timedwait(sem->__val, -1, CLOCK_REALTIME, at, cleanup, sem->__val+1, 0);
+	r = __timedwait(sem->__val, -1, CLOCK_REALTIME, at, cleanup, sem->__val+1, sem->__val[2]);
 	a_dec(sem->__val+1);
 	if (r) {
 		errno = r;