Re: r268621: panic: shadowed tmpfs v_object [with dump]

From: Mattia Rossi <mattia.rossi.mate@gmail.com>
Date: Wed, 23 Jul 2014 22:56:46 +0200
Got the same panic. Is this fix getting committed, or has it already 
been committed?

Mat

On 23/07/14 18:12, Bryan Drewery wrote:
> On 7/23/14, 7:11 AM, Konstantin Belousov wrote:
>> On Tue, Jul 22, 2014 at 02:53:56PM -0700, Bryan Drewery wrote:
>>> On 7/22/14, 2:26 PM, Bryan Drewery wrote:
>>>> On 7/22/14, 2:07 PM, Bryan Drewery wrote:
>>>>> Meant to send to current@, moving there.
>>>>>
>>>>> On 7/22/14, 2:07 PM, Bryan Drewery wrote:
>>>>>> On r268621:
>>>>>>
>>>>>>> panic: shadowed tmpfs v_object 0xfffff807a7f96600
>>>>>>> cpuid = 0
>>>>>>> KDB: stack backtrace:
>>>>>>> db_trace_self_wrapper() at db_trace_self_wrapper+0x2b/frame
>>>>>>> 0xfffffe1247d67390
>>>>>>> kdb_backtrace() at kdb_backtrace+0x39/frame 0xfffffe1247d67440
>>>>>>> vpanic() at vpanic+0x126/frame 0xfffffe1247d67480
>>>>>>> kassert_panic() at kassert_panic+0x139/frame 0xfffffe1247d674f0
>>>>>>> vm_object_deallocate() at vm_object_deallocate+0x236/frame
>>>>>>> 0xfffffe1247d67550
>>>>>>> tmpfs_free_node() at tmpfs_free_node+0x138/frame 0xfffffe1247d67580
>>>>>>> tmpfs_reclaim() at tmpfs_reclaim+0x17d/frame 0xfffffe1247d675c0
>>>>>>> VOP_RECLAIM_APV() at VOP_RECLAIM_APV+0xf7/frame 0xfffffe1247d675f0
>>>>>>> vgonel() at vgonel+0x1a1/frame 0xfffffe1247d67660
>>>>>>> vrecycle() at vrecycle+0x3e/frame 0xfffffe1247d67690
>>>>>>> tmpfs_inactive() at tmpfs_inactive+0x4c/frame 0xfffffe1247d676b0
>>>>>>> VOP_INACTIVE_APV() at VOP_INACTIVE_APV+0xf7/frame 
>>>>>>> 0xfffffe1247d676e0
>>>>>>> vinactive() at vinactive+0xc6/frame 0xfffffe1247d67730
>>>>>>> vputx() at vputx+0x27a/frame 0xfffffe1247d67790
>>>>>>> tmpfs_rename() at tmpfs_rename+0xf5/frame 0xfffffe1247d67860
>>>>>>> VOP_RENAME_APV() at VOP_RENAME_APV+0xfc/frame 0xfffffe1247d67890
>>>>>>> kern_renameat() at kern_renameat+0x3ef/frame 0xfffffe1247d67ae0
>>>>>>> amd64_syscall() at amd64_syscall+0x25a/frame 0xfffffe1247d67bf0
>>>>>>> Xfast_syscall() at Xfast_syscall+0xfb/frame 0xfffffe1247d67bf0
>>>>>>> --- syscall (128, FreeBSD ELF64, sys_rename), rip = 0x80088b74a, 
>>>>>>> rsp =
>>>>>>> 0x7fffffffe238, rbp = 0x7fffffffe710 ---
>>>>>>> Uptime: 6d4h0m3s
>>>>>>>
>>>>>>> Dump failed. Partition too small.
>>>>>>
>>>>>> Unfortunately I have no dump to debug.
>>>>>>
>>>>>
>>>> Running poudriere again after boot hit the issue right away:
>>>>
>>>>
>>>>> (kgdb) bt
>>>>> #0  doadump (textdump=1) at pcpu.h:219
>>>>> #1  0xffffffff809122a7 in kern_reboot (howto=260) at
>>>>> /usr/src/sys/kern/kern_shutdown.c:445
>>>>> #2  0xffffffff809127e5 in vpanic (fmt=<value optimized out>, 
>>>>> ap=<value
>>>>> optimized out>) at /usr/src/sys/kern/kern_shutdown.c:744
>>>>> #3  0xffffffff80912679 in kassert_panic (fmt=<value optimized 
>>>>> out>) at
>>>>> /usr/src/sys/kern/kern_shutdown.c:632
>>>>> #4  0xffffffff80ba7996 in vm_object_deallocate (object=<value
>>>>> optimized out>) at /usr/src/sys/vm/vm_object.c:562
>>>>> #5  0xffffffff820a75a8 in tmpfs_free_node (tmp=0xfffff800b5155980,
>>>>> node=0xfffff802716ba740) at
>>>>> /usr/src/sys/modules/tmpfs/../../fs/tmpfs/tmpfs_subr.c:335
>>>>> #6  0xffffffff820a363d in tmpfs_reclaim (v=<value optimized out>) at
>>>>> /usr/src/sys/modules/tmpfs/../../fs/tmpfs/tmpfs_vnops.c:1276
>>>>> #7  0xffffffff80e48717 in VOP_RECLAIM_APV (vop=<value optimized out>,
>>>>> a=<value optimized out>) at vnode_if.c:2017
>>>>> #8  0xffffffff809c1381 in vgonel (vp=0xfffff802716b61d8) at
>>>>> vnode_if.h:830
>>>>> #9  0xffffffff809c18be in vrecycle (vp=0xfffff802716b61d8) at
>>>>> /usr/src/sys/kern/vfs_subr.c:2655
>>>>> #10 0xffffffff820a61cc in tmpfs_inactive (v=<value optimized out>) at
>>>>> /usr/src/sys/modules/tmpfs/../../fs/tmpfs/tmpfs_vnops.c:1242
>>>>> #11 0xffffffff80e485b7 in VOP_INACTIVE_APV (vop=<value optimized 
>>>>> out>,
>>>>> a=<value optimized out>) at vnode_if.c:1951
>>>>> #12 0xffffffff809bfd36 in vinactive (vp=0xfffff802716b61d8,
>>>>> td=0xfffff80187e29920) at vnode_if.h:807
>>>>> #13 0xffffffff809c012a in vputx (vp=0xfffff802716b61d8, func=2) at
>>>>> /usr/src/sys/kern/vfs_subr.c:2267
>>>>> #14 0xffffffff820a47c5 in tmpfs_rename (v=<value optimized out>) at
>>>>> /usr/src/sys/modules/tmpfs/../../fs/tmpfs/tmpfs_vnops.c:1023
>>>>> #15 0xffffffff80e47d3c in VOP_RENAME_APV (vop=<value optimized out>,
>>>>> a=<value optimized out>) at vnode_if.c:1544
>>>>> #16 0xffffffff809cc77f in kern_renameat (td=<value optimized out>,
>>>>> oldfd=<value optimized out>, old=<value optimized out>, newfd=<value
>>>>> optimized out>, new=<value optimized out>,
>>>>>      pathseg=<value optimized out>) at vnode_if.h:636
>>>>> #17 0xffffffff80d280fa in amd64_syscall (td=0xfffff80187e29920,
>>>>> traced=0) at subr_syscall.c:133
>>>>> #18 0xffffffff80d0a64b in Xfast_syscall () at
>>>>> /usr/src/sys/amd64/amd64/exception.S:407
>>>>> (kgdb) p *(vm_object_t)0xfffff8027169f500
>>>>> $1 = {lock = {lock_object = {lo_name = 0xffffffff80fe89f6 "vm 
>>>>> object",
>>>>> lo_flags = 90374144, lo_data = 0, lo_witness = 0xfffffe00006e7680},
>>>>> rw_lock = 18446735284191271200}, object_list = {
>>>>>      tqe_next = 0xfffff8027169f400, tqe_prev = 0xfffff8027169f620},
>>>>> shadow_head = {lh_first = 0xfffff801b8489e00}, shadow_list = {le_next
>>>>> = 0x0, le_prev = 0x0}, memq = {tqh_first = 0xfffff811d966bc08,
>>>>>      tqh_last = 0xfffff811d966bc18}, rtree = {rt_root =
>>>>> 18446735354278362121, rt_flags = 0 '\0'}, size = 1, generation = 1,
>>>>> ref_count = 1, shadow_count = 1, memattr = 6 '\006', type = 1 '\001',
>>>>>    flags = 528, pg_color = 0, paging_in_progress = 0,
>>>>> resident_page_count = 1, backing_object = 0x0, 
>>>>> backing_object_offset =
>>>>> 0, pager_object_list = {tqe_next = 0x0, tqe_prev = 0x0}, rvq = {
>>>>>      lh_first = 0x0}, cache = {rt_root = 0, rt_flags = 0 '\0'}, 
>>>>> handle
>>>>> = 0x0, un_pager = {vnp = {vnp_size = 0, writemappings = 0}, devp =
>>>>> {devp_pglist = {tqh_first = 0x0, tqh_last = 0x0}, ops = 0x0,
>>>>>        dev = 0x0}, sgp = {sgp_pglist = {tqh_first = 0x0, tqh_last =
>>>>> 0x0}}, swp = {swp_tmpfs = 0x0, swp_bcount = 0}}, cred = 0x0, 
>>>>> charge = 0}
>>>>> (kgdb) frame 8
>>>>> #8  0xffffffff809c1381 in vgonel (vp=0xfffff802716b61d8) at
>>>>> vnode_if.h:830
>>>>> 830             return (VOP_RECLAIM_APV(vp->v_op, &a));
>>>>> (kgdb) p *vp
>>>>> $2 = {v_tag = 0xffffffff820abf96 "tmpfs", v_op = 0xffffffff820ac938,
>>>>> v_data = 0x0, v_mount = 0xfffff8004733a000, v_nmntvnodes = 
>>>>> {tqe_next =
>>>>> 0xfffff802716b6000, tqe_prev = 0xfffff802716b63d0}, v_un = {
>>>>>      vu_mount = 0x0, vu_socket = 0x0, vu_cdev = 0x0, vu_fifoinfo =
>>>>> 0x0}, v_hashlist = {le_next = 0x0, le_prev = 0x0}, v_cache_src =
>>>>> {lh_first = 0x0}, v_cache_dst = {tqh_first = 0x0,
>>>>>      tqh_last = 0xfffff802716b6228}, v_cache_dd = 0x0, v_lock =
>>>>> {lock_object = {lo_name = 0xffffffff820abf96 "tmpfs", lo_flags =
>>>>> 116588544, lo_data = 0, lo_witness = 0xfffffe0000711980},
>>>>>      lk_lock = 18446735284191271200, lk_exslpfail = 0, lk_timo = 51,
>>>>> lk_pri = 96}, v_interlock = {lock_object = {lo_name =
>>>>> 0xffffffff80fafc26 "vnode interlock", lo_flags = 16973824, lo_data 
>>>>> = 0,
>>>>>        lo_witness = 0xfffffe00006e7500}, mtx_lock = 4}, v_vnlock =
>>>>> 0xfffff802716b6240, v_actfreelist = {tqe_next = 0xfffff80271898588,
>>>>> tqe_prev = 0xfffff8004733a078}, v_bufobj = {bo_lock = {
>>>>>        lock_object = {lo_name = 0xffffffff80fb8084 "bufobj 
>>>>> interlock",
>>>>> lo_flags = 86179840, lo_data = 0, lo_witness = 0xfffffe00006ef380},
>>>>> rw_lock = 1}, bo_ops = 0xffffffff814942a0, bo_object = 0x0,
>>>>>      bo_synclist = {le_next = 0x0, le_prev = 0x0}, bo_private =
>>>>> 0xfffff802716b61d8, __bo_vnode = 0xfffff802716b61d8, bo_clean = 
>>>>> {bv_hd
>>>>> = {tqh_first = 0x0, tqh_last = 0xfffff802716b62f8}, bv_root = {
>>>>>          pt_root = 0}, bv_cnt = 0}, bo_dirty = {bv_hd = {tqh_first =
>>>>> 0x0, tqh_last = 0xfffff802716b6318}, bv_root = {pt_root = 0}, 
>>>>> bv_cnt =
>>>>> 0}, bo_numoutput = 0, bo_flag = 0, bo_bsize = 4096},
>>>>>    v_pollinfo = 0x0, v_label = 0x0, v_lockf = 0x0, v_rl = 
>>>>> {rl_waiters =
>>>>> {tqh_first = 0x0, tqh_last = 0xfffff802716b6360}, rl_currdep = 0x0},
>>>>> v_cstart = 0, v_lasta = 0, v_lastw = 0, v_clen = 0,
>>>>>    v_holdcnt = 2, v_usecount = 0, v_iflag = 2688, v_vflag = 0,
>>>>> v_writecount = 0, v_hash = 40987489, v_type = VREG}
>>>>> (kgdb) info locals
>>>>> mp = (struct mount *) 0xfffff8004733a000
>>>>> fromnd = {ni_dirp = 0x801006080 <Address 0x801006080 out of bounds>,
>>>>> ni_segflg = UIO_USERSPACE, ni_rightsneeded = {cr_rights =
>>>>> {144115188142965760, 288230376151711744}},
>>>>>    ni_startdir = 0xfffff802716b63b0, ni_rootdir = 0xfffff8026b01a760,
>>>>> ni_topdir = 0xfffff8026b01a760, ni_dirfd = -100, ni_strictrelative =
>>>>> 0, ni_filecaps = {fc_rights = {cr_rights = {0, 0}},
>>>>>      fc_ioctls = 0x0, fc_nioctls = -1, fc_fcntls = 0}, ni_vp =
>>>>> 0xfffff80271898588, ni_dvp = 0xfffff802716b63b0, ni_pathlen = 1,
>>>>> ni_next = 0xfffff80061ea501f "", ni_loopcnt = 0, ni_cnd = {cn_nameiop
>>>>> = 2,
>>>>>      cn_flags = 67148812, cn_thread = 0xfffff80187e29920, cn_cred =
>>>>> 0xfffff80038911800, cn_lkflags = 524288, cn_pnbuf = 
>>>>> 0xfffff80061ea5000
>>>>> "/var/run/ld-elf.so.hints.HTjP6A",
>>>>>      cn_nameptr = 0xfffff80061ea5009 "ld-elf.so.hints.HTjP6A",
>>>>> cn_namelen = 22, cn_consume = 0}}
>>>>> tond = {ni_dirp = 0x403e66 <Address 0x403e66 out of bounds>, 
>>>>> ni_segflg
>>>>> = UIO_USERSPACE, ni_rightsneeded = {cr_rights = {144115188080051200,
>>>>> 288230376151711744}}, ni_startdir = 0xfffff802716b63b0,
>>>>>    ni_rootdir = 0xfffff8026b01a760, ni_topdir = 0xfffff8026b01a760,
>>>>> ni_dirfd = -100, ni_strictrelative = 0, ni_filecaps = {fc_rights =
>>>>> {cr_rights = {0, 0}}, fc_ioctls = 0x0, fc_nioctls = -1,
>>>>>      fc_fcntls = 0}, ni_vp = 0xfffff802716b61d8, ni_dvp =
>>>>> 0xfffff802716b63b0, ni_pathlen = 1, ni_next = 0xfffff80038d69418 "",
>>>>> ni_loopcnt = 0, ni_cnd = {cn_nameiop = 3, cn_flags = 134257708,
>>>>>      cn_thread = 0xfffff80187e29920, cn_cred = 0xfffff80038911800,
>>>>> cn_lkflags = 524288, cn_pnbuf = 0xfffff80038d69400
>>>>> "/var/run/ld-elf.so.hints", cn_nameptr = 0xfffff80038d69409
>>>>> "ld-elf.so.hints",
>>>>>      cn_namelen = 15, cn_consume = 0}}
>>>>> rights = {cr_rights = {144115188080051200, 288230376151711744}}
>>>>> mp = (struct mount *) 0xfffff8004733a000
>>>>> error = <value optimized out>
>>>>> fvp = <value optimized out>
>>>>> tvp = <value optimized out>
>>>>> tdvp = <value optimized out>
>>>>> (kgdb) p *mp
>>>>> $9 = {mnt_mtx = {lock_object = {lo_name = 0xffffffff80f8fcec "struct
>>>>> mount mtx", lo_flags = 16973824, lo_data = 0, lo_witness =
>>>>> 0xfffffe00006e7a00}, mtx_lock = 4}, mnt_gen = 1, mnt_list = {
>>>>>      tqe_next = 0xfffff80038fa9cc0, tqe_prev = 0xfffff80187b74ce8},
>>>>> mnt_op = 0xffffffff820ace60, mnt_vfc = 0xffffffff820acf80,
>>>>> mnt_vnodecovered = 0xfffff801b853e760, mnt_syncer = 
>>>>> 0xfffff8026b01a588,
>>>>>    mnt_ref = 13206, mnt_nvnodelist = {tqh_first = 0xfffff8026b01a760,
>>>>> tqh_last = 0xfffff802718985a8}, mnt_nvnodelistsize = 13205,
>>>>> mnt_activevnodelist = {tqh_first = 0xfffff802716b61d8,
>>>>>      tqh_last = 0xfffff8026b01a648}, mnt_activevnodelistsize = 730,
>>>>> mnt_writeopcount = 1, mnt_kern_flag = 0, mnt_flag = 4096, mnt_opt =
>>>>> 0xfffff8000e59cc30, mnt_optnew = 0xfffff8001b9ea050,
>>>>>    mnt_maxsymlinklen = 0, mnt_stat = {f_version = 537068824, f_type =
>>>>> 135, f_flags = 4096, f_bsize = 4096, f_iosize = 4096, f_blocks =
>>>>> 1835008, f_bfree = 1738991, f_bavail = 1738991, f_files = 25690112,
>>>>>      f_ffree = 25676911, f_syncwrites = 0, f_asyncwrites = 0,
>>>>> f_syncreads = 0, f_asyncreads = 0, f_spare = {0, 0, 0, 0, 0, 0, 0, 0,
>>>>> 0, 0}, f_namemax = 255, f_owner = 0, f_fsid = {val = {-2029977843,
>>>>>          135}}, f_charspare = '\0' <repeats 79 times>, f_fstypename =
>>>>> "tmpfs\000\000\000\000\000\000\000\000\000\000", f_mntfromname =
>>>>> "tmpfs", '\0' <repeats 82 times>,
>>>>>      f_mntonname = "/poudriere/data/.m/exp-10amd64-commit-test/01",
>>>>> '\0' <repeats 42 times>}, mnt_cred = 0xfffff80047478700, mnt_data =
>>>>> 0xfffff800b5155980, mnt_time = 0, mnt_iosize_max = 65536,
>>>>>    mnt_export = 0x0, mnt_label = 0x0, mnt_hashseed = 1147308587,
>>>>> mnt_lockref = 0, mnt_secondary_writes = 0, mnt_secondary_accwrites =
>>>>> 0, mnt_susp_owner = 0x0, mnt_gjprovider = 0x0, mnt_explock = {
>>>>>      lock_object = {lo_name = 0xffffffff80f8fd0f "explock", 
>>>>> lo_flags =
>>>>> 108199936, lo_data = 0, lo_witness = 0xfffffe000070ef80}, lk_lock 
>>>>> = 1,
>>>>> lk_exslpfail = 0, lk_timo = 0, lk_pri = 96},
>>>>>    mnt_upper_link = {tqe_next = 0x0, tqe_prev = 0x0}, mnt_uppers =
>>>>> {tqh_first = 0x0, tqh_last = 0xfffff8004733a320}}
>>>>
>>>
>>> Shadowed object:
>>>
>>>> (kgdb) p *$1->shadow_head->lh_first
>>>> $3 = {lock = {lock_object = {lo_name = 0xffffffff80fe89f6 "vm 
>>>> object", lo_flags = 90374144, lo_data = 0, lo_witness = 
>>>> 0xfffffe00006e7680}, rw_lock = 1}, object_list = {tqe_next = 
>>>> 0xfffff801b8b3ae00,
>>>>      tqe_prev = 0xfffff802717bb120}, shadow_head = {lh_first = 
>>>> 0x0}, shadow_list = {le_next = 0x0, le_prev = 0xfffff8027169f530}, 
>>>> memq = {tqh_first = 0xfffff811da2c75f8, tqh_last = 
>>>> 0xfffff811da2c7608},
>>>>    rtree = {rt_root = 18446735354291320313, rt_flags = 0 '\0'}, 
>>>> size = 1, generation = 1, ref_count = 1, shadow_count = 0, memattr 
>>>> = 6 '\006', type = 0 '\0', flags = 12288, pg_color = 1598,
>>>>    paging_in_progress = 0, resident_page_count = 1, backing_object 
>>>> = 0xfffff8027169f500, backing_object_offset = 0, pager_object_list 
>>>> = {tqe_next = 0x0, tqe_prev = 0x0}, rvq = {lh_first = 0x0}, cache = {
>>>>      rt_root = 0, rt_flags = 0 '\0'}, handle = 0x0, un_pager = {vnp 
>>>> = {vnp_size = 0, writemappings = 0}, devp = {devp_pglist = 
>>>> {tqh_first = 0x0, tqh_last = 0x0}, ops = 0x0, dev = 0x0}, sgp = {
>>>>        sgp_pglist = {tqh_first = 0x0, tqh_last = 0x0}}, swp = 
>>>> {swp_tmpfs = 0x0, swp_bcount = 0}}, cred = 0xfffff80038911800, 
>>>> charge = 4096}
>>>
>>
>> Try this
>>
>> diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
>> index 1b97bdf..bb01f00 100644
>> --- a/sys/vm/vm_object.c
>> +++ b/sys/vm/vm_object.c
>> @@ -559,8 +559,6 @@ vm_object_deallocate(vm_object_t object)
>>                   (object->handle == NULL) &&
>>                   (object->type == OBJT_DEFAULT ||
>>                    object->type == OBJT_SWAP)) {
>> -                KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
>> -                    ("shadowed tmpfs v_object %p", object));
>>                   vm_object_t robject;
>>
>>                   robject = LIST_FIRST(&object->shadow_head);
>> @@ -568,6 +566,8 @@ vm_object_deallocate(vm_object_t object)
>>                       ("vm_object_deallocate: ref_count: %d, 
>> shadow_count: %d",
>>                        object->ref_count,
>>                        object->shadow_count));
>> +                KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0,
>> +                    ("shadowed tmpfs v_object %p", object));
>>                   if (!VM_OBJECT_TRYWLOCK(robject)) {
>>                       /*
>>                        * Avoid a potential deadlock.
>> @@ -637,6 +637,8 @@ retry:
>>   doterm:
>>           temp = object->backing_object;
>>           if (temp != NULL) {
>> +            KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
>> +                ("shadowed tmpfs v_object 2 %p", object));
>>               VM_OBJECT_WLOCK(temp);
>>               LIST_REMOVE(object, shadow_list);
>>               temp->shadow_count--;
>>
>
> Yup this avoids the panic.
>
> Thanks!
>