This change exports each individual thread's resource usage via sysctl when individual threads are requested via KERN_PROC_INC_THREAD. This generally works correctly with 'top -m io' after the previous change to revert top(1) back to using KERN_PROC_PROC when threads are not enabled. There is one issue in that top doesn't necessarily DTRT when disabling/enabling threads via 'H' at runtime while in io mode. I may do some further work to clean that up. However, for just top run it will now show per-thread stats instead of duplicating the per-process stats for each thread. It also fixes a bug in that the code in calcru() to try to account for the current thread's runtime wasn't correctly charging that time to the current thread. Index: sys/kern/kern_proc.c =================================================================== --- sys/kern/kern_proc.c (revision 224058) +++ sys/kern/kern_proc.c (working copy) _at__at_ -848,6 +848,8 _at__at_ kp->ki_tdaddr = td; PROC_LOCK_ASSERT(p, MA_OWNED); + if (preferthread) + PROC_SLOCK(p); thread_lock(td); if (td->td_wmesg != NULL) strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg)); _at__at_ -899,6 +901,7 _at__at_ kp->ki_pri.pri_user = td->td_user_pri; if (preferthread) { + rufetchtd(td, &kp->ki_rusage); kp->ki_runtime = cputick2usec(td->td_rux.rux_runtime); kp->ki_pctcpu = sched_pctcpu(td); kp->ki_estcpu = td->td_estcpu; _at__at_ -911,6 +914,8 _at__at_ kp->ki_siglist = td->td_siglist; kp->ki_sigmask = td->td_sigmask; thread_unlock(td); + if (preferthread) + PROC_SUNLOCK(p); } /* Index: sys/kern/kern_resource.c =================================================================== --- sys/kern/kern_resource.c (revision 224058) +++ sys/kern/kern_resource.c (working copy) _at__at_ -813,7 +813,7 _at__at_ calcru(struct proc *p, struct timeval *up, struct timeval *sp) { struct thread *td; - uint64_t u; + uint64_t runtime, u; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); _at__at_ -826,7 +826,9 _at__at_ td = 
curthread; if (td->td_proc == p) { u = cpu_ticks(); - p->p_rux.rux_runtime += u - PCPU_GET(switchtime); + runtime = u - PCPU_GET(switchtime); + td->td_runtime += runtime; + td->td_incruntime += runtime; PCPU_SET(switchtime, u); } /* Make sure the per-thread stats are current. */ _at__at_ -838,6 +840,34 _at__at_ calcru1(p, &p->p_rux, up, sp); } +/* Collect resource usage for a single thread. */ +void +rufetchtd(struct thread *td, struct rusage *ru) +{ + struct proc *p; + uint64_t runtime, u; + + p = td->td_proc; + PROC_SLOCK_ASSERT(p, MA_OWNED); + THREAD_LOCK_ASSERT(td, MA_OWNED); + /* + * If we are getting stats for the current thread, then add in the + * stats that this thread has accumulated in its current time slice. + * We reset the thread and CPU state as if we had performed a context + * switch right here. + */ + if (td == curthread) { + u = cpu_ticks(); + runtime = u - PCPU_GET(switchtime); + td->td_runtime += runtime; + td->td_incruntime += runtime; + PCPU_SET(switchtime, u); + } + ruxagg(p, td); + *ru = td->td_ru; + calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime); +} + static void calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up, struct timeval *sp) _at__at_ -955,12 +985,10 _at__at_ case RUSAGE_THREAD: PROC_SLOCK(p); - ruxagg(p, td); - PROC_SUNLOCK(p); thread_lock(td); - *rup = td->td_ru; - calcru1(p, &td->td_rux, &rup->ru_utime, &rup->ru_stime); + rufetchtd(td, rup); thread_unlock(td); + PROC_SUNLOCK(p); break; default: Index: sys/sys/resourcevar.h =================================================================== --- sys/sys/resourcevar.h (revision 224058) +++ sys/sys/resourcevar.h (working copy) _at__at_ -136,6 +136,7 _at__at_ void rufetch(struct proc *p, struct rusage *ru); void rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up, struct timeval *sp); +void rufetchtd(struct thread *td, struct rusage *ru); void ruxagg(struct proc *p, struct thread *td); int suswintr(void *base, int word); struct uidinfo -- John 
BaldwinReceived on Fri Jul 15 2011 - 15:43:41 UTC
This archive was generated by hypermail 2.4.0 : Wed May 19 2021 - 11:40:15 UTC