Index: sys/kern/subr_epoch.c
===================================================================
--- sys/kern/subr_epoch.c	(revision 336962)
+++ sys/kern/subr_epoch.c	(working copy)
@@ -232,33 +232,14 @@
 	struct epoch_thread *tdwait;
 	struct turnstile *ts;
 	struct lock_object *lock;
-	int spincount, gen;
 	int locksheld __unused;
 
 	record = __containerof(cr, struct epoch_record, er_record);
 	td = curthread;
 	locksheld = td->td_locks;
-	spincount = 0;
 	counter_u64_add(block_count, 1);
 	if (record->er_cpuid != curcpu) {
 		/*
-		 * If the head of the list is running, we can wait for it
-		 * to remove itself from the list and thus save us the
-		 * overhead of a migration
-		 */
-		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
-		    TD_IS_RUNNING(tdwait->et_td)) {
-			gen = record->er_gen;
-			thread_unlock(td);
-			do {
-				cpu_spinwait();
-			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
-			    gen == record->er_gen && TD_IS_RUNNING(tdwait->et_td) &&
-			    spincount++ < MAX_ADAPTIVE_SPIN);
-			thread_lock(td);
-			return;
-		}
-		/*
 		 * Being on the same CPU as that of the record on which
 		 * we need to wait allows us access to the thread
 		 * list associated with that CPU. We can then examine the