Using the vnode interlock in fifo_open() causes lock order problems when
combined with some of the network stack locking changes that are in
progress. The patch below modifies fifo_open() to use a private mutex in
place of the vnode interlock. There is also a minor optimization of a
couple of the calls to fifo_cleanup(). This patch has passed my torture
testing, but it could probably use some more testing by anyone who makes
heavy use of named pipes, especially with select(), kqueue(), and SIGIO.

Index: sys/fs/fifofs/fifo_vnops.c
===================================================================
RCS file: /home/ncvs/src/sys/fs/fifofs/fifo_vnops.c,v
retrieving revision 1.93
diff -u -r1.93 fifo_vnops.c
--- sys/fs/fifofs/fifo_vnops.c  7 Apr 2004 20:45:59 -0000       1.93
+++ sys/fs/fifofs/fifo_vnops.c  11 May 2004 03:03:39 -0000
@@ -122,6 +122,9 @@
 
 VNODEOP_SET(fifo_vnodeop_opv_desc);
 
+struct mtx fifo_mtx;
+MTX_SYSINIT(fifo, &fifo_mtx, "fifo mutex", MTX_DEF);
+
 int
 fifo_vnoperate(ap)
         struct vop_generic_args /* {
@@ -217,28 +220,25 @@
          * the vnode lock.
          *
          * Protect the increment of fi_readers and fi_writers and the
-         * associated calls to wakeup() with the vnode interlock in
-         * addition to the vnode lock.  This allows the vnode lock to
-         * be dropped for the msleep() calls below, and using the vnode
-         * interlock as the msleep() mutex prevents the wakeup from
-         * being missed.
+         * associated calls to wakeup() with the fifo mutex in addition
+         * to the vnode lock.  This allows the vnode lock to be dropped
+         * for the msleep() calls below, and using the fifo mutex with
+         * msleep() prevents the wakeup from being missed.
          */
-        VI_LOCK(vp);
+        mtx_lock(&fifo_mtx);
         if (ap->a_mode & FREAD) {
                 fip->fi_readers++;
                 if (fip->fi_readers == 1) {
                         fip->fi_writesock->so_state &= ~SS_CANTSENDMORE;
                         if (fip->fi_writers > 0) {
                                 wakeup(&fip->fi_writers);
-                                VI_UNLOCK(vp);
                                 sowwakeup(fip->fi_writesock);
-                                VI_LOCK(vp);
                         }
                 }
         }
         if (ap->a_mode & FWRITE) {
                 if ((ap->a_mode & O_NONBLOCK) && fip->fi_readers == 0) {
-                        VI_UNLOCK(vp);
+                        mtx_unlock(&fifo_mtx);
                         return (ENXIO);
                 }
                 fip->fi_writers++;
@@ -246,26 +246,25 @@
                         fip->fi_readsock->so_state &= ~SS_CANTRCVMORE;
                         if (fip->fi_readers > 0) {
                                 wakeup(&fip->fi_readers);
-                                VI_UNLOCK(vp);
                                 sorwakeup(fip->fi_writesock);
-                                VI_LOCK(vp);
                         }
                 }
         }
         if ((ap->a_mode & O_NONBLOCK) == 0) {
                 if ((ap->a_mode & FREAD) && fip->fi_writers == 0) {
                         VOP_UNLOCK(vp, 0, td);
-                        error = msleep(&fip->fi_readers, VI_MTX(vp),
-                            PCATCH | PSOCK, "fifoor", 0);
-                        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
+                        error = msleep(&fip->fi_readers, &fifo_mtx,
+                            PDROP | PCATCH | PSOCK, "fifoor", 0);
+                        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
                         if (error) {
                                 fip->fi_readers--;
-                                if (fip->fi_readers == 0)
+                                if (fip->fi_readers == 0) {
                                         socantsendmore(fip->fi_writesock);
-                                fifo_cleanup(vp);
+                                        fifo_cleanup(vp);
+                                }
                                 return (error);
                         }
-                        VI_LOCK(vp);
+                        mtx_lock(&fifo_mtx);
                         /*
                          * We must have got woken up because we had a writer.
                          * That (and not still having one) is the condition
@@ -274,15 +273,15 @@
                 }
                 if ((ap->a_mode & FWRITE) && fip->fi_readers == 0) {
                         VOP_UNLOCK(vp, 0, td);
-                        error = msleep(&fip->fi_writers, VI_MTX(vp),
-                            PCATCH | PSOCK, "fifoow", 0);
-                        vn_lock(vp,
-                            LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
+                        error = msleep(&fip->fi_writers, &fifo_mtx,
+                            PDROP | PCATCH | PSOCK, "fifoow", 0);
+                        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
                         if (error) {
                                 fip->fi_writers--;
-                                if (fip->fi_writers == 0)
+                                if (fip->fi_writers == 0) {
                                         socantrcvmore(fip->fi_readsock);
-                                fifo_cleanup(vp);
+                                        fifo_cleanup(vp);
+                                }
                                 return (error);
                         }
                         /*
@@ -293,7 +292,7 @@
                         return (0);
                 }
         }
-        VI_UNLOCK(vp);
+        mtx_unlock(&fifo_mtx);
         return (0);
 }
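As a footnote for anyone less familiar with the missed wakeup problem
this patch is working around, here is a minimal userland sketch of the
same pattern. This is my own illustration, not code from the patch: a
pthread mutex and condition variable stand in for fifo_mtx and
msleep()/wakeup(), and fi_readers/fi_writers are plain counters. The
key point is that the sleeping side tests the counter and blocks while
holding the same private mutex that the waking side holds around its
increment and its wakeup, so the wakeup cannot fall into the window
between the test and the sleep:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fifo_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t fifo_cv = PTHREAD_COND_INITIALIZER;
static int fi_readers, fi_writers;

/* Analogue of a blocking FIFO open for reading: wait for a writer. */
static void *
reader_open(void *arg)
{

        pthread_mutex_lock(&fifo_mtx);
        fi_readers++;
        while (fi_writers == 0)
                pthread_cond_wait(&fifo_cv, &fifo_mtx); /* like msleep() */
        pthread_mutex_unlock(&fifo_mtx);
        printf("reader: a writer has arrived\n");
        return (NULL);
}

/* Analogue of a FIFO open for writing: bump the count, wake sleepers. */
static void *
writer_open(void *arg)
{

        pthread_mutex_lock(&fifo_mtx);
        fi_writers++;
        pthread_cond_broadcast(&fifo_cv);       /* like wakeup() */
        pthread_mutex_unlock(&fifo_mtx);
        return (NULL);
}

int
main(void)
{
        pthread_t r, w;

        pthread_create(&r, NULL, reader_open, NULL);
        pthread_create(&w, NULL, writer_open, NULL);
        pthread_join(r, NULL);
        pthread_join(w, NULL);
        return (0);
}

The kernel code in the patch gets the same guarantee by passing
&fifo_mtx to msleep(). The PDROP flag just tells msleep() to leave the
mutex released when it returns, which is why the patched code can call
vn_lock() immediately afterward without an explicit mtx_unlock().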