Index: kern/kern_switch.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_switch.c,v
retrieving revision 1.136
diff -p -u -r1.136 kern_switch.c
--- kern/kern_switch.c	20 Sep 2007 20:38:43 -0000	1.136
+++ kern/kern_switch.c	2 Oct 2007 21:41:10 -0000
@@ -133,16 +133,6 @@ choosethread(void)
 {
 	struct thread *td;
 
-#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
-	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
-		/* Shutting down, run idlethread on AP's */
-		td = PCPU_GET(idlethread);
-		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
-		TD_SET_RUNNING(td);
-		return (td);
-	}
-#endif
-
 retry:
 	td = sched_choose();
 
@@ -184,7 +174,7 @@ critical_exit(void)
 	td = curthread;
 	KASSERT(td->td_critnest != 0,
 	    ("critical_exit: td_critnest == 0"));
-#ifdef PREEMPTION
+
 	if (td->td_critnest == 1) {
 		td->td_critnest = 0;
 		if (td->td_owepreempt) {
@@ -196,7 +186,6 @@ critical_exit(void)
 			thread_unlock(td);
 		}
 	} else
-#endif
 		td->td_critnest--;
 
 	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
Index: kern/kern_synch.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_synch.c,v
retrieving revision 1.301
diff -p -u -r1.301 kern_synch.c
--- kern/kern_synch.c	17 Sep 2007 05:27:20 -0000	1.301
+++ kern/kern_synch.c	2 Oct 2007 08:18:19 -0000
@@ -553,8 +553,11 @@ synch_setup(dummy)
 int
 yield(struct thread *td, struct yield_args *uap)
 {
-	mtx_assert(&Giant, MA_NOTOWNED);
-	(void)uap;
-	sched_relinquish(td);
+
+	thread_lock(td);
+	sched_prio(td, PRI_MAX_TIMESHARE);
+	mi_switch(SW_VOL, NULL);
+	thread_unlock(td);
+	td->td_retval[0] = 0;
 	return (0);
 }
Index: kern/p1003_1b.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/p1003_1b.c,v
retrieving revision 1.35
diff -p -u -r1.35 p1003_1b.c
--- kern/p1003_1b.c	5 Mar 2007 13:10:57 -0000	1.35
+++ kern/p1003_1b.c	2 Oct 2007 21:55:48 -0000
@@ -241,7 +241,8 @@ int
 sched_yield(struct thread *td, struct sched_yield_args *uap)
 {
 
-	return (ksched_yield(ksched));
+	sched_relinquish(curthread);
+	return 0;
 }
 
 int
Index: kern/sched_4bsd.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/sched_4bsd.c,v
retrieving revision 1.105
diff -p -u -r1.105 sched_4bsd.c
--- kern/sched_4bsd.c	21 Sep 2007 04:10:23 -0000	1.105
+++ kern/sched_4bsd.c	2 Oct 2007 08:08:36 -0000
@@ -1324,8 +1324,6 @@ void
 sched_relinquish(struct thread *td)
 {
 	thread_lock(td);
-	if (td->td_pri_class == PRI_TIMESHARE)
-		sched_prio(td, PRI_MAX_TIMESHARE);
 	SCHED_STAT_INC(switch_relinquish);
 	mi_switch(SW_VOL, NULL);
 	thread_unlock(td);
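The four files above converge yield(2) and POSIX sched_yield(2) on a plain voluntary switch: yield() now drops to PRI_MAX_TIMESHARE itself before calling mi_switch(), while sched_yield() simply calls sched_relinquish(), which no longer demotes the priority on its own. As a point of reference only (standard POSIX API, not part of this patch), a minimal userland caller of the affected syscall looks like this:

/* Minimal userland exerciser for sched_yield(2); illustrative only. */
#include <sched.h>
#include <stdio.h>

int
main(void)
{
	/* Ask the kernel to put us at the back of our run queue. */
	if (sched_yield() != 0)
		perror("sched_yield");
	else
		printf("yielded the CPU once\n");
	return (0);
}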
Index: kern/sched_ule.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/sched_ule.c,v
retrieving revision 1.209
diff -p -u -r1.209 sched_ule.c
--- kern/sched_ule.c	24 Sep 2007 00:28:54 -0000	1.209
+++ kern/sched_ule.c	5 Oct 2007 00:35:41 -0000
@@ -374,8 +374,12 @@ tdq_print(int cpu)
 static __inline void
 tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
 {
+	u_char pri;
+
+	pri = ts->ts_thread->td_priority;
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
+	TD_SET_RUNQ(ts->ts_thread);
 #ifdef SMP
 	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
 		tdq->tdq_transferable++;
@@ -383,15 +387,15 @@ tdq_runq_add(struct tdq *tdq, struct td_
 		ts->ts_flags |= TSF_XFERABLE;
 	}
 #endif
-	if (ts->ts_runq == &tdq->tdq_timeshare) {
-		u_char pri;
-
-		pri = ts->ts_thread->td_priority;
+	if (pri <= PRI_MAX_REALTIME) {
+		ts->ts_runq = &tdq->tdq_realtime;
+	} else if (pri <= PRI_MAX_TIMESHARE) {
+		ts->ts_runq = &tdq->tdq_timeshare;
 		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
 			("Invalid priority %d on timeshare runq", pri));
 		/*
 		 * This queue contains only priorities between MIN and MAX
-		 * realtime.  Use the whole queue to represent these values.
+		 * timeshare.  Use the whole queue to represent these values.
 		 */
 		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
 			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
@@ -407,8 +411,10 @@ tdq_runq_add(struct tdq *tdq, struct td_
 		} else
 			pri = tdq->tdq_ridx;
 		runq_add_pri(ts->ts_runq, ts, pri, flags);
+		return;
 	} else
-		runq_add(ts->ts_runq, ts, flags);
+		ts->ts_runq = &tdq->tdq_idle;
+	runq_add(ts->ts_runq, ts, flags);
 }
 
 /*
@@ -434,13 +440,6 @@ tdq_runq_rem(struct tdq *tdq, struct td_
 			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
 		else
 			runq_remove_idx(ts->ts_runq, ts, NULL);
-		/*
-		 * For timeshare threads we update the priority here so
-		 * the priority reflects the time we've been sleeping.
-		 */
-		ts->ts_ltick = ticks;
-		sched_pctcpu_update(ts);
-		sched_priority(ts->ts_thread);
 	} else
 		runq_remove(ts->ts_runq, ts);
 }
@@ -1565,15 +1564,20 @@ static void
 sched_thread_priority(struct thread *td, u_char prio)
 {
 	struct td_sched *ts;
+	struct tdq *tdq;
 
 	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
 	    curthread->td_proc->p_comm);
 	ts = td->td_sched;
+	tdq = TDQ_CPU(ts->ts_cpu);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	if (td->td_priority == prio)
 		return;
-
+#ifdef SMP
+	if (prio < tdq->tdq_lowpri)
+		tdq->tdq_lowpri = prio;
+#endif
 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
 		/*
 		 * If the priority has been elevated due to priority
@@ -1584,16 +1588,8 @@ sched_thread_priority(struct thread *td,
 		sched_rem(td);
 		td->td_priority = prio;
 		sched_add(td, SRQ_BORROWING);
-	} else {
-#ifdef SMP
-		struct tdq *tdq;
-
-		tdq = TDQ_CPU(ts->ts_cpu);
-		if (prio < tdq->tdq_lowpri)
-			tdq->tdq_lowpri = prio;
-#endif
+	} else
 		td->td_priority = prio;
-	}
 }
 
 /*
@@ -1739,6 +1735,8 @@ sched_switch_migrate(struct tdq *tdq, st
 
 	tdn = TDQ_CPU(td->td_sched->ts_cpu);
 #ifdef SMP
+	/* The load is being removed from the current cpu. */
+	tdq_load_rem(tdq, td->td_sched);
 	/*
 	 * Do the lock dance required to avoid LOR.  We grab an extra
 	 * spinlock nesting to prevent preemption while we're
@@ -1830,12 +1828,11 @@ sched_switch(struct thread *td, struct t
 		TD_SET_CAN_RUN(td);
 	} else if (TD_IS_RUNNING(td)) {
 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
-		tdq_load_rem(tdq, ts);
 		srqflag = (flags & SW_PREEMPT) ?
 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 		    SRQ_OURSELF|SRQ_YIELDING;
 		if (ts->ts_cpu == cpuid)
-			tdq_add(tdq, td, srqflag);
+			tdq_runq_add(tdq, ts, srqflag);
 		else
 			mtx = sched_switch_migrate(tdq, td, srqflag);
 	} else {
@@ -1949,7 +1946,6 @@ sched_wakeup(struct thread *td)
 		ts->ts_slptime += hzticks;
 		sched_interact_update(td);
 		sched_pctcpu_update(ts);
-		sched_priority(td);
 	}
 	/* Reset the slice value after we sleep. */
 	ts->ts_slice = sched_slice;
@@ -2154,16 +2150,17 @@ sched_clock(struct thread *td)
 	 */
 	td->td_sched->ts_runtime += tickincr;
 	sched_interact_update(td);
+	sched_priority(td);
 	/*
 	 * We used up one time slice.
 	 */
	if (--ts->ts_slice > 0)
 		return;
 	/*
-	 * We're out of time, recompute priorities and requeue.
+	 * We're out of time, force a requeue later.
 	 */
-	sched_priority(td);
 	td->td_flags |= TDF_NEEDRESCHED;
+	ts->ts_slice = sched_slice;
 }
 
 /*
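The tdq_add() hunks that follow delete that function's private copy of the priority-to-run-queue selection; after this patch the choice of realtime, timeshare, or idle queue is made only in tdq_runq_add() (first sched_ule.c hunk above). A self-contained sketch of that three-way classification follows; the numeric bounds are placeholders for illustration, not the real <sys/priority.h> values, and only their ordering matters:

/*
 * Standalone sketch of the priority -> run-queue classification that
 * tdq_runq_add() now performs and tdq_add() used to duplicate.
 * The bounds below are illustrative placeholders only.
 */
#include <stdio.h>

#define	PRI_MAX_REALTIME	127	/* illustrative, not the kernel value */
#define	PRI_MAX_TIMESHARE	223	/* illustrative, not the kernel value */

enum runq_kind { RUNQ_REALTIME, RUNQ_TIMESHARE, RUNQ_IDLE };

static enum runq_kind
classify(int pri)
{

	if (pri <= PRI_MAX_REALTIME)
		return (RUNQ_REALTIME);
	else if (pri <= PRI_MAX_TIMESHARE)
		return (RUNQ_TIMESHARE);
	return (RUNQ_IDLE);
}

int
main(void)
{
	static const char *name[] = { "realtime", "timeshare", "idle" };
	int pri;

	for (pri = 0; pri <= 255; pri += 64)
		printf("priority %3d -> %s queue\n", pri, name[classify(pri)]);
	return (0);
}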
@@ -2284,11 +2281,10 @@ void
 tdq_add(struct tdq *tdq, struct thread *td, int flags)
 {
 	struct td_sched *ts;
-	int class;
 #ifdef SMP
+	int class;
 	int cpumask;
 #endif
-
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	KASSERT((td->td_inhibitors == 0),
 	    ("sched_add: trying to run inhibited thread"));
@@ -2298,20 +2294,10 @@ tdq_add(struct tdq *tdq, struct thread *
 	    ("sched_add: thread swapped out"));
 
 	ts = td->td_sched;
-	class = PRI_BASE(td->td_pri_class);
-	TD_SET_RUNQ(td);
-	if (ts->ts_slice == 0)
-		ts->ts_slice = sched_slice;
-	/*
-	 * Pick the run queue based on priority.
-	 */
-	if (td->td_priority <= PRI_MAX_REALTIME)
-		ts->ts_runq = &tdq->tdq_realtime;
-	else if (td->td_priority <= PRI_MAX_TIMESHARE)
-		ts->ts_runq = &tdq->tdq_timeshare;
-	else
-		ts->ts_runq = &tdq->tdq_idle;
+	tdq_runq_add(tdq, ts, flags);
+	tdq_load_add(tdq, ts);
 #ifdef SMP
+	class = PRI_BASE(td->td_pri_class);
 	cpumask = 1 << ts->ts_cpu;
 	/*
 	 * If we had been idle, clear our bit in the group and potentially
@@ -2334,8 +2320,6 @@ tdq_add(struct tdq *tdq, struct thread *
 	if (td->td_priority < tdq->tdq_lowpri)
 		tdq->tdq_lowpri = td->td_priority;
 #endif
-	tdq_runq_add(tdq, ts, flags);
-	tdq_load_add(tdq, ts);
 }
 
 /*
@@ -2502,8 +2486,6 @@ void
 sched_relinquish(struct thread *td)
 {
 	thread_lock(td);
-	if (td->td_pri_class == PRI_TIMESHARE)
-		sched_prio(td, PRI_MAX_TIMESHARE);
 	SCHED_STAT_INC(switch_relinquish);
 	mi_switch(SW_VOL, NULL);
 	thread_unlock(td);
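Finally, the corrected comment in tdq_runq_add() ("timeshare", not "realtime") refers to the circular indexing used for the timeshare queue: a priority in the timeshare range is scaled into a bucket and, assuming the usual ULE arrangement, rotated by the queue's current insertion index modulo the number of buckets. The sketch below illustrates that arithmetic with placeholder constants rather than the kernel's real ones:

/*
 * Sketch of the circular timeshare-queue indexing kept by tdq_runq_add().
 * All constants are illustrative placeholders, not kernel values.
 */
#include <stdio.h>

#define	PRI_MIN_TIMESHARE	160	/* illustrative */
#define	PRI_MAX_TIMESHARE	223	/* illustrative */
#define	RQ_NQS			64	/* illustrative bucket count */
#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)

static int
ts_bucket(int pri, int tdq_idx)
{
	int b;

	/* Scale the priority into a bucket, then rotate by the insert index. */
	b = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
	return ((b + tdq_idx) % RQ_NQS);
}

int
main(void)
{
	int pri;

	/* With an insertion index of 10, buckets wrap around modulo RQ_NQS. */
	for (pri = PRI_MIN_TIMESHARE; pri <= PRI_MAX_TIMESHARE; pri += 16)
		printf("priority %3d -> bucket %2d\n", pri, ts_bucket(pri, 10));
	return (0);
}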