Re: zfs-related(?) panic in cache_enter: wrong vnode type

From: Andriy Gapon <avg_at_FreeBSD.org>
Date: Wed, 07 Dec 2011 19:33:08 +0200
A detail that may or may not be useful.
It seems that the panic happened when I tried to resume a vim process.  While the
process was suspended, its current directory and the file being edited/viewed may
have already been removed.
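
In userland terms I imagine something like the sketch below (purely hypothetical:
I have not checked whether this minimal program by itself is enough to trigger the
panic; the suspend/resume and the removed file may well matter, and the directory
would have to live on ZFS, not on the /tmp placeholder used here):

    #include <err.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int
    main(void)
    {
            char buf[PATH_MAX];
            char tmpl[] = "/tmp/cwd-gone.XXXXXX";

            if (mkdtemp(tmpl) == NULL)
                    err(1, "mkdtemp");
            if (chdir(tmpl) == -1)
                    err(1, "chdir");
            /* Remove the current directory behind the process's back. */
            if (rmdir(tmpl) == -1)
                    err(1, "rmdir");
            /*
             * Ask for the current directory, like the resumed vim does;
             * this enters the kernel via __getcwd(), as in frames #16/#17
             * of the backtrace below.
             */
            if (getcwd(buf, sizeof(buf)) == NULL)
                    warn("getcwd");    /* expected to fail, not to panic */
            else
                    printf("cwd: %s\n", buf);
            return (0);
    }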

on 07/12/2011 18:50 Andriy Gapon said the following:
> 
> (kgdb) bt
> #0  doadump (textdump=1) at pcpu.h:224
> #1  0xffffffff804f6d3b in kern_reboot (howto=260) at
> /usr/src/sys/kern/kern_shutdown.c:447
> #2  0xffffffff804f63e9 in panic (fmt=0x104 <Address 0x104 out of bounds>) at
> /usr/src/sys/kern/kern_shutdown.c:635
> #3  0xffffffff80585f46 in cache_enter (dvp=0xfffffe003d4763c0,
> vp=0xfffffe0142517000, cnp=0xffffff82393b3708) at /usr/src/sys/kern/vfs_cache.c:726
> #4  0xffffffff81a90900 in zfs_lookup (dvp=0xfffffe003d4763c0,
> nm=0xffffff82393b3140 "..", vpp=0xffffff82393b36e0, cnp=0xffffff82393b3708,
> nameiop=0, cr=0xfffffe0042e88100, td=0xfffffe000fdfa480,
>     flags=0) at
> /usr/src/sys/modules/zfs/../../cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c:1470
> #5  0xffffffff81a91570 in zfs_freebsd_lookup (ap=0xffffff82393b32c0) at
> /usr/src/sys/modules/zfs/../../cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c:5858
> #6  0xffffffff8073f054 in VOP_CACHEDLOOKUP_APV (vop=0xffffffff81b05a20,
> a=0xffffff82393b32c0) at vnode_if.c:187
> #7  0xffffffff80586bf4 in vfs_cache_lookup (ap=Variable "ap" is not available.
> ) at vnode_if.h:80
> #8  0xffffffff80740a5c in VOP_LOOKUP_APV (vop=0xffffffff81b05a20,
> a=0xffffff82393b33a0) at vnode_if.c:123
> #9  0xffffffff8058e42c in lookup (ndp=0xffffff82393b36a0) at vnode_if.h:54
> #10 0xffffffff8058f17e in namei (ndp=0xffffff82393b36a0) at
> /usr/src/sys/kern/vfs_lookup.c:312
> #11 0xffffffff805a890d in vn_open_cred (ndp=0xffffff82393b36a0,
> flagp=0xffffff82393b3918, cmode=0, vn_open_flags=Variable "vn_open_flags" is not
> available.
> ) at /usr/src/sys/kern/vfs_vnops.c:195
> #12 0xffffffff80589e7e in vop_stdvptocnp (ap=Variable "ap" is not available.
> ) at /usr/src/sys/kern/vfs_default.c:774
> #13 0xffffffff8073b012 in VOP_VPTOCNP_APV (vop=0xffffffff80a99140,
> a=0xffffff82393b39b0) at vnode_if.c:3479
> #14 0xffffffff80584665 in vn_vptocnp_locked (vp=0xffffff82393b3a50,
> cred=0xfffffe0042e88100,
>     buf=0xfffffe000ca06000 <garbage bytes>..., buflen=0xffffff82393b3a4c) at vnode_if.h:1564
> #15 0xffffffff80584bab in vn_fullpath1 (td=0xfffffe000fdfa480,
> vp=0xfffffe003d4763c0, rdir=0xfffffe000cd4d000,
>     buf=0xfffffe000ca06000 <garbage bytes>..., retbuf=0xffffff82393b3ab0, buflen=1023) at /usr/src/sys/kern/vfs_cache.c:1218
> #16 0xffffffff8058526a in kern___getcwd (td=0xfffffe000fdfa480, buf=0x80880a000
> <Address 0x80880a000 out of bounds>, bufseg=UIO_USERSPACE, buflen=1024) at
> /usr/src/sys/kern/vfs_cache.c:960
> #17 0xffffffff805853f4 in sys___getcwd (td=Variable "td" is not available.
> ) at /usr/src/sys/kern/vfs_cache.c:934
> #18 0xffffffff806d2069 in amd64_syscall (td=0xfffffe000fdfa480, traced=0) at
> subr_syscall.c:131
> #19 0xffffffff806bb4e7 in Xfast_syscall () at /usr/src/sys/amd64/amd64/exception.S:387
> #20 0x00000008031adb2c in ?? ()
> Previous frame inner to this frame (corrupt stack?)
> (kgdb) fr 3
> #3  0xffffffff80585f46 in cache_enter (dvp=0xfffffe003d4763c0,
> vp=0xfffffe0142517000, cnp=0xffffff82393b3708) at /usr/src/sys/kern/vfs_cache.c:726
> 726                     KASSERT(vp == NULL || vp->v_type == VDIR,
> (kgdb) list
> 721                     if (dvp->v_cache_dd != NULL) {
> 722                         CACHE_WUNLOCK();
> 723                         cache_free(ncp);
> 724                         return;
> 725                     }
> 726                     KASSERT(vp == NULL || vp->v_type == VDIR,
> 727                         ("wrong vnode type %p", vp));
> 728                     dvp->v_cache_dd = ncp;
> 729             }
> 730
> (kgdb) p *vp
> $1 = {v_type = VREG, v_tag = 0xffffffff81afe449 "zfs", v_op = 0xffffffff81b05a20,
> v_data = 0xfffffe020d9a8320, v_mount = 0xfffffe001a283600, v_nmntvnodes =
> {tqe_next = 0xfffffe00347c6d20,
>     tqe_prev = 0xfffffe013b0575c8}, v_un = {vu_mount = 0x0, vu_socket = 0x0,
> vu_cdev = 0x0, vu_fifoinfo = 0x0}, v_hashlist = {le_next = 0x0, le_prev = 0x0},
> v_hash = 0, v_cache_src = {
>     lh_first = 0x0}, v_cache_dst = {tqh_first = 0xfffffe003dfcf690, tqh_last =
> 0xfffffe003dfcf6b0}, v_cache_dd = 0x0, v_cstart = 0, v_lasta = 0, v_lastw = 0,
> v_clen = 0, v_lock = {lock_object = {
>       lo_name = 0xffffffff81afe449 "zfs", lo_flags = 91947008, lo_data = 0,
> lo_witness = 0xffffff800066e380}, lk_lock = 18446741874952610944, lk_exslpfail =
> 0, lk_timo = 51, lk_pri = 96},
>   v_interlock = {lock_object = {lo_name = 0xffffffff807e610a "vnode interlock",
> lo_flags = 16973824, lo_data = 0, lo_witness = 0xffffff8000665600}, mtx_lock = 4},
> v_vnlock = 0xfffffe0142517098,
>   v_holdcnt = 2, v_usecount = 1, v_iflag = 0, v_vflag = 0, v_writecount = 0,
> v_freelist = {tqe_next = 0xfffffe00347c6d20, tqe_prev = 0xfffffe02110d3e30},
> v_bufobj = {bo_mtx = {lock_object = {
>         lo_name = 0xffffffff807efdfa "bufobj interlock", lo_flags = 16973824,
> lo_data = 0, lo_witness = 0xffffff800066c300}, mtx_lock = 4}, bo_clean = {bv_hd =
> {tqh_first = 0x0,
>         tqh_last = 0xfffffe0142517140}, bv_root = 0x0, bv_cnt = 0}, bo_dirty =
> {bv_hd = {tqh_first = 0x0, tqh_last = 0xfffffe0142517160}, bv_root = 0x0, bv_cnt =
> 0}, bo_numoutput = 0, bo_flag = 0,
>     bo_ops = 0xffffffff80a95ec0, bo_bsize = 131072, bo_object =
> 0xfffffe01fccb5620, bo_synclist = {le_next = 0x0, le_prev = 0x0}, bo_private =
> 0xfffffe0142517000, __bo_vnode = 0xfffffe0142517000},
>   v_pollinfo = 0x0, v_label = 0x0, v_lockf = 0x0}
> (kgdb)
> 
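
For reference, this is how I read the branch that fires, taken from the listing in
frame 3 above; given the nm=".." in frame 4 this looks like the handling of ".."
namecache entries, and the inline comments are mine, not from vfs_cache.c:

    if (dvp->v_cache_dd != NULL) {
            /* another lookup already installed a ".." entry for dvp */
            CACHE_WUNLOCK();
            cache_free(ncp);
            return;
    }
    /*
     * A ".." entry may only point to a directory (or be negative),
     * but according to "p *vp" above zfs_lookup() handed back a VREG
     * vnode here.
     */
    KASSERT(vp == NULL || vp->v_type == VDIR,
        ("wrong vnode type %p", vp));
    dvp->v_cache_dd = ncp;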


-- 
Andriy Gapon