--- kern_timeout.c.orig	Mon Nov  7 17:26:56 2005
+++ kern_timeout.c	Wed Nov  9 02:14:50 2005
@@ -110,6 +110,47 @@
 static struct cv callout_wait;
 static int wakeup_done_ctr;
 
+static void callout_check_callwheel(void);
+
+/*
+ * callout_check_callwheel() - check all the callwheel for deadc0de'd entries
+ *
+ * Walks every bucket of the callwheel and panics as soon as a callout
+ * link carries the free-memory poison pattern (debug aid for catching
+ * use-after-free of callout structures).
+ *
+ * callout_lock must be held by the caller.
+ *
+ */
+static void
+callout_check_callwheel(void)
+{
+	struct callout *c, *c2;
+	struct callout_tailq *bucket;
+	int i;
+	static int in_panic = 0;	/* don't recurse from the panic path */
+
+	if (in_panic) return;
+
+	for (i = 0; i < callwheelsize; ++i) {
+		bucket = &callwheel[i];
+		c = TAILQ_FIRST(bucket);
+		if ((uintptr_t)c == (uintptr_t)0xdeadc0de) {	/* XXX 32-bit poison word */
+			in_panic = 1;
+			panic("deadc0de found at the beginning of bucket");
+			break;
+		}
+		while (c) {
+			c2 = TAILQ_NEXT(c, c_links.tqe);
+			if ((uintptr_t)c2 == (uintptr_t)0xdeadc0de) {	/* XXX 32-bit poison word */
+				in_panic = 1;
+				panic("c->TAILQ_NEXT = 0xdeadc0de");
+				break;
+			}
+			c = c2;
+		}
+	}
+}
+
 /*
  * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
  *
@@ -206,6 +247,9 @@
 	depth = 0;
 	steps = 0;
 	mtx_lock_spin(&callout_lock);
+
+	callout_check_callwheel();
+
 	while (softticks != ticks) {
 		softticks++;
 		/*
@@ -335,6 +379,9 @@
 	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
 	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
 	nextsoftcheck = NULL;
+
+	callout_check_callwheel();
+
 	mtx_unlock_spin(&callout_lock);
 }
 
@@ -365,16 +412,23 @@
 
 	mtx_lock_spin(&callout_lock);
 
+	callout_check_callwheel();
+
 	/* Fill in the next free callout structure. */
 	new = SLIST_FIRST(&callfree);
 	if (new == NULL)
 		/* XXX Attempt to malloc first */
 		panic("timeout table full");
 	SLIST_REMOVE_HEAD(&callfree, c_links.sle);
-	
+
+	callout_check_callwheel();
+
 	callout_reset(new, to_ticks, ftn, arg);
 
 	handle.callout = new;
+
+	callout_check_callwheel();
+
 	mtx_unlock_spin(&callout_lock);
 	return (handle);
 }
@@ -395,8 +449,14 @@
 
 		return;
 	mtx_lock_spin(&callout_lock);
+
+	callout_check_callwheel();
+
 	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
 		callout_stop(handle.callout);
+
+	callout_check_callwheel();
+
 	mtx_unlock_spin(&callout_lock);
 }
 
@@ -437,6 +497,9 @@
 #endif
 
 	mtx_lock_spin(&callout_lock);
+
+	callout_check_callwheel();
+
 	if (c == curr_callout) {
 		/*
 		 * We're being asked to reschedule a callout which is
@@ -463,6 +526,8 @@
 
 		cancelled = 1;
 
+		callout_check_callwheel();
+
 		/*
 		 * Part of the normal "stop a pending callout" process
 		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
@@ -481,14 +546,20 @@
 	if (to_ticks <= 0)
 		to_ticks = 1;
 
+	callout_check_callwheel();
+
 	c->c_arg = arg;
 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
 	c->c_func = ftn;
 	c->c_time = ticks + to_ticks;
+
 	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
 			  c, c_links.tqe);
-	mtx_unlock_spin(&callout_lock);
+
+	callout_check_callwheel();
+
+	mtx_unlock_spin(&callout_lock);
+
 	return (cancelled);
 }
 
@@ -511,6 +582,9 @@
 
 	}
 	mtx_lock_spin(&callout_lock);
+
+	callout_check_callwheel();
+
 	/*
 	 * Don't attempt to delete a callout that's not on the queue.
 	 */
@@ -547,15 +621,25 @@
 	}
 	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
 
+	callout_check_callwheel();
+
 	if (nextsoftcheck == c) {
 		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
 	}
+
+	callout_check_callwheel();
+
 	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
 
+	callout_check_callwheel();
+
 	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
 		c->c_func = NULL;
 		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
 	}
+
+	callout_check_callwheel();
+
 	mtx_unlock_spin(&callout_lock);
 	return (1);
 }
@@ -641,6 +725,9 @@
 
 	/* don't collide with softclock() */
 	mtx_lock_spin(&callout_lock);
+
+	callout_check_callwheel();
+
 	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
 		p->c_time -= delta_ticks;
 
@@ -651,6 +738,9 @@
 		/* take back the ticks the timer didn't use (p->c_time <= 0) */
 		delta_ticks = -p->c_time;
 	}
+
+	callout_check_callwheel();
+
 	mtx_unlock_spin(&callout_lock);
 
 	return;