Re: Fatal error 'mutex is on list' at line 139 in file /usr/src/lib/libthr/thread/thr_mutex.c (errno = 35)

From: Oleg V. Nauman <oleg@opentransfer.com>
Date: Tue, 22 Mar 2016 12:01:38 +0200
On Tuesday 22 March 2016 09:53:23 Konstantin Belousov wrote:
> On Tue, Mar 22, 2016 at 08:06:17AM +0200, Oleg V. Nauman wrote:
> >  After applying the patch:
> > Mar 22 07:34:37 asus kernel: pid 1928 creating existing key 1
> > Mar 22 07:34:58 asus kernel: pid 1928 (akonadi_baloo_index), uid 1001:
> > exited on signal 6 (core dumped)
> 
> Good, thank you.  Please try this one.
> 
> diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
> index 3342c9f..865e4cf 100644
> --- a/lib/libthr/thread/thr_mutex.c
> +++ b/lib/libthr/thread/thr_mutex.c
> @@ -38,6 +38,7 @@
>   * $FreeBSD$
>   */
> 
> +#include <stdbool.h>
>  #include "namespace.h"
>  #include <stdlib.h>
>  #include <errno.h>
> @@ -264,6 +265,51 @@ set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
>  	m->m_lock.m_ceilings[1] = -1;
>  }
> 
> +static void
> +shared_mutex_init(struct pthread_mutex *pmtx, const struct
> +    pthread_mutex_attr *mutex_attr)
> +{
> +	static const struct pthread_mutex_attr foobar_mutex_attr = {
> +		.m_type = PTHREAD_MUTEX_DEFAULT,
> +		.m_protocol = PTHREAD_PRIO_NONE,
> +		.m_ceiling = 0,
> +		.m_pshared = PTHREAD_PROCESS_SHARED
> +	};
> +	bool done;
> +
> +	/*
> +	 * Hack to allow multiple pthread_mutex_init() calls on the
> +	 * same process-shared mutex.  We rely on kernel allocating
> +	 * zeroed offpage for the mutex, i.e. the
> +	 * PMUTEX_INITSTAGE_ALLOC value must be zero.
> +	 */
> +	for (done = false; !done;) {
> +		switch (pmtx->m_ps) {
> +		case PMUTEX_INITSTAGE_DONE:
> +			atomic_thread_fence_acq();
> +			done = true;
> +			break;
> +		case PMUTEX_INITSTAGE_ALLOC:
> +			if (atomic_cmpset_int(&pmtx->m_ps,
> +			    PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) {
> +				if (mutex_attr == NULL)
> +					mutex_attr = &foobar_mutex_attr;
> +				mutex_init_body(pmtx, mutex_attr);
> +				atomic_store_rel_int(&pmtx->m_ps,
> +				    PMUTEX_INITSTAGE_DONE);
> +				done = true;
> +			}
> +			break;
> +		case PMUTEX_INITSTAGE_BUSY:
> +			_pthread_yield();
> +			break;
> +		default:
> +			PANIC("corrupted offpage");
> +			break;
> +		}
> +	}
> +}
> +
>  int
>  __pthread_mutex_init(pthread_mutex_t *mutex,
>      const pthread_mutexattr_t *mutex_attr)
> @@ -285,7 +331,7 @@ __pthread_mutex_init(pthread_mutex_t *mutex,
>  	if (pmtx == NULL)
>  		return (EFAULT);
>  	*mutex = THR_PSHARED_PTR;
> -	mutex_init_body(pmtx, *mutex_attr);
> +	shared_mutex_init(pmtx, *mutex_attr);
>  	return (0);
>  }
> 
> @@ -426,6 +472,7 @@ check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
>  		*m = __thr_pshared_offpage(mutex, 0);
>  		if (*m == NULL)
>  			ret = EINVAL;
> +		shared_mutex_init(*m, NULL);
>  	} else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
>  		if (*m == THR_MUTEX_DESTROYED) {
>  			ret = EINVAL;
> @@ -588,6 +635,7 @@ _pthread_mutex_unlock(pthread_mutex_t *mutex)
>  		mp = __thr_pshared_offpage(mutex, 0);
>  		if (mp == NULL)
>  			return (EINVAL);
> +		shared_mutex_init(mp, NULL);
>  	} else {
>  		mp = *mutex;
>  	}
> @@ -815,6 +863,7 @@ _pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
>  		m = __thr_pshared_offpage(mutex, 0);
>  		if (m == NULL)
>  			return (EINVAL);
> +		shared_mutex_init(m, NULL);
>  	} else {
>  		m = *mutex;
>  		if (m <= THR_MUTEX_DESTROYED)
> @@ -839,6 +888,7 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
>  		m = __thr_pshared_offpage(mutex, 0);
>  		if (m == NULL)
>  			return (EINVAL);
> +		shared_mutex_init(m, NULL);
>  	} else {
>  		m = *mutex;
>  		if (m <= THR_MUTEX_DESTROYED)
> @@ -942,12 +992,13 @@ __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
>  int
>  _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
>  {
> -	struct pthread_mutex	*m;
> +	struct pthread_mutex *m;
> 
>  	if (*mutex == THR_PSHARED_PTR) {
>  		m = __thr_pshared_offpage(mutex, 0);
>  		if (m == NULL)
>  			return (0);
> +		shared_mutex_init(m, NULL);
>  	} else {
>  		m = *mutex;
>  		if (m <= THR_MUTEX_DESTROYED)
> diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
> index 7ee1fbf..0db2dad 100644
> --- a/lib/libthr/thread/thr_private.h
> +++ b/lib/libthr/thread/thr_private.h
> @@ -146,6 +146,13 @@ TAILQ_HEAD(mutex_queue, pthread_mutex);
> 
>  #define MAX_DEFER_WAITERS       50
> 
> +/*
> + * Values for pthread_mutex m_ps indicator.
> + */
> +#define	PMUTEX_INITSTAGE_ALLOC	0
> +#define	PMUTEX_INITSTAGE_BUSY	1
> +#define	PMUTEX_INITSTAGE_DONE	2
> +
>  struct pthread_mutex {
>  	/*
>  	 * Lock for accesses to this structure.
> @@ -156,6 +163,7 @@ struct pthread_mutex {
>  	int				m_count;
>  	int				m_spinloops;
>  	int				m_yieldloops;
> +	int				m_ps;	/* pshared init stage */
>  	/*
>  	 * Link for all mutexes a thread currently owns, of the same
>  	 * prio type.

 After applying this patch:

No core dumps were produced after three subsequent KDE session restarts.

Thank you!
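
For reference, a minimal standalone sketch of the scenario this patch addresses (hypothetical; not taken from the akonadi/baloo code path, and the error handling is only illustrative): two processes call pthread_mutex_init() on the same process-shared mutex living in MAP_SHARED memory, which is exactly the repeated-init case that shared_mutex_init() now tolerates.

/*
 * Hypothetical reproducer sketch: parent and child both run
 * pthread_mutex_init() on one process-shared mutex in shared memory.
 * Before the patch the second init re-ran mutex_init_body() on the
 * already-initialized offpage; with the patch it observes
 * PMUTEX_INITSTAGE_DONE and becomes a no-op.
 */
#include <sys/mman.h>
#include <sys/wait.h>

#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t *mtx;
	pid_t pid;

	/* Place the pthread_mutex_t itself in memory shared across fork(). */
	mtx = mmap(NULL, sizeof(*mtx), PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_ANON, -1, 0);
	if (mtx == MAP_FAILED)
		err(1, "mmap");

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);

	/* First init: allocates and initializes the shared offpage. */
	if (pthread_mutex_init(mtx, &attr) != 0)
		errx(1, "parent pthread_mutex_init failed");

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		/* Second init on the very same shared mutex. */
		if (pthread_mutex_init(mtx, &attr) != 0)
			errx(1, "child pthread_mutex_init failed");
		pthread_mutex_lock(mtx);
		pthread_mutex_unlock(mtx);
		_exit(0);
	}

	pthread_mutex_lock(mtx);
	pthread_mutex_unlock(mtx);
	waitpid(pid, NULL, 0);
	printf("done\n");
	return (0);
}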
Received on Tue Mar 22 2016 - 09:01:47 UTC