I got this panic on a 12 CPU sparc64 system running -CURRENT from about a month ago:

panic: trap: fast data access mmu miss
cpuid = 8
KDB: enter: panic
[thread pid 68865 tid 100379 ]
Stopped at      kdb_enter+0x68: ta              %xcc, 1
db> wh
Tracing pid 68865 tid 100379 td 0xfffff800eeb91710
panic() at panic+0x248
trap() at trap+0x4e8
-- fast data access mmu miss tar=0 %o7=0xc01c9b94 --
swp_pager_meta_ctl() at swp_pager_meta_ctl+0x8
swap_pager_getpages() at swap_pager_getpages+0x1c
mdstart_swap() at mdstart_swap+0x2b8
md_kthread() at md_kthread+0x19c
fork_exit() at fork_exit+0x84
fork_trampoline() at fork_trampoline+0x8

#10 0x00000000c01d5b30 in panic (fmt=0xc04bbe08 "trap: %s") at ../../../kern/kern_shutdown.c:551
#11 0x00000000c03b0510 in trap (tf=0xecebd2f0) at ../../../sparc64/sparc64/trap.c:378
#12 0x00000000c0060fe0 in tl1_trap ()
#13 0x00000000c034abe8 in swp_pager_meta_ctl (object=0x0, pindex=0, flags=-1068777864) at ../../../vm/swap_pager.c:1906
#14 0x00000000c01c9b9c in _mtx_unlock_flags (m=0x0, opts=130487, file=0x0, line=421) at ../../../kern/kern_mutex.c:163
#15 0x00000000c03492a4 in swap_pager_getpages (object=0xfffff800e2aaee88, m=0xecebd628, count=1, reqpage=0) at ../../../vm/swap_pager.c:995
#16 0x00000000c0104b20 in mdstart_swap (sc=0xfffff800cf641800, bp=0xfffff8008271a798) at vm_pager.h:130
#17 0x00000000c0104ee4 in md_kthread (arg=0xfffff800cf641800) at ../../../dev/md/md.c:720
#18 0x00000000c01b9f8c in fork_exit (callout=0xc0104d40 <md_kthread>, arg=0xfffff800cf641800, frame=0xecebd880) at ../../../kern/kern_fork.c:816
#19 0x00000000c00611d0 in fork_trampoline ()
#20 0x00000000c00611d0 in fork_trampoline ()
Previous frame identical to this frame (corrupt stack?)
kgdb seems to have corrupted the trace (a spurious extra frame, and different arguments in frame 13), but if you believe the arguments in frame 15:

995             blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

(kgdb) print mreq->object
$8 = 0xfffff800aeb21930
(kgdb) print *((vm_object_t)0xfffff800aeb21930)
$7 = {mtx = {mtx_object = {lo_name = 0xc04b3b58 "vm object",
      lo_type = 0xc04b4dd0 "standard object", lo_flags = 21168128,
      lo_profile_obj = {lpo_acqtime = 0, lpo_waittime = 0, lpo_filename = 0x0,
        lpo_namehash = 4063449288, lpo_lineno = 0,
        lpo_type = 0xc049f940 "sleep mutex", lpo_contest_holding = 0,
        lpo_contest_locking = 119}, lo_witness_data = {lod_list = {
          stqe_next = 0x0}, lod_witness = 0x0}}, mtx_lock = 4,
    mtx_recurse = 0}, object_list = {tqe_next = 0xfffff800f3281360,
    tqe_prev = 0xfffff800d706e250}, shadow_head = {lh_first = 0x0},
  shadow_list = {le_next = 0x0, le_prev = 0xfffff800d7cb8168}, memq = {
    tqh_first = 0xfffff801387bf950, tqh_last = 0xfffff80139c06cb8},
  root = 0xfffff80139c06ca8, size = 10, generation = 23, ref_count = 1,
  shadow_count = 0, type = 0 '\0', flags = 8448, pg_color = 8,
  paging_in_progress = 0, resident_page_count = 10, backing_object = 0x0,
  backing_object_offset = 0, pager_object_list = {tqe_next = 0x0,
    tqe_prev = 0x0}, handle = 0x0, un_pager = {vnp = {vnp_size = 0}, devp = {
      devp_pglist = {tqh_first = 0x0, tqh_last = 0x0}}, swp = {
      swp_bcount = 0}}}
(kgdb) print ((vm_object_t)0xfffff800aeb21930)->mtx
$10 = {mtx_object = {lo_name = 0xc04b3b58 "vm object",
    lo_type = 0xc04b4dd0 "standard object", lo_flags = 21168128,
    lo_profile_obj = {lpo_acqtime = 0, lpo_waittime = 0, lpo_filename = 0x0,
      lpo_namehash = 4063449288, lpo_lineno = 0,
      lpo_type = 0xc049f940 "sleep mutex", lpo_contest_holding = 0,
      lpo_contest_locking = 119}, lo_witness_data = {lod_list = {
        stqe_next = 0x0}, lod_witness = 0x0}}, mtx_lock = 4, mtx_recurse = 0}
(kgdb) print ((vm_object_t)0xfffff800aeb21930)->mtx->mtx_lock
$11 = 4

This means the mutex is unowned (mtx_lock == 4, i.e. MTX_UNOWNED), and the assertion here
failed:

(kgdb) list 1913
1908            struct swblock *swap;
1909            daddr_t r1;
1910            int idx;
1911
1912            VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1913            /*
1914             * The meta data only exists of the object is OBJT_SWAP
1915             * and even then might not be allocated yet.
1916             */
1917            if (object->type != OBJT_SWAP)

Kris
This archive was generated by hypermail 2.4.0 : Wed May 19 2021 - 11:39:04 UTC