Martin Nilsson wrote:
> Julian Elischer wrote:
>

try this patch

Index: sys/vm/vm_pageout.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_pageout.c,v
retrieving revision 1.268
diff -u -r1.268 vm_pageout.c
--- sys/vm/vm_pageout.c	7 Jan 2005 02:29:27 -0000	1.268
+++ sys/vm/vm_pageout.c	30 Jul 2005 03:12:37 -0000
@@ -5,6 +5,8 @@
  * All rights reserved.
  * Copyright (c) 1994 David Greenman
  * All rights reserved.
+ * Copyright (c) 2005 Yahoo! Technologies Norway AS
+ * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
@@ -210,6 +212,16 @@
 static void vm_pageout_page_stats(void);
 
 /*
+ * Experimental VM_PAGEOUT_FORCE_BLOCKING_OBJLOCK option, which will cause
+ * the pagedaemon to fall back to blocking locking of vm objects if a
+ * nonblocking lock attempt fails.  Lock order violation is avoided by
+ * unlocking the page queues before locking the object.  Marker pages are
+ * used to detect changes and allow for continued page queue traversal
+ * even when changes have occurred.
+ */
+#define VM_PAGEOUT_FORCE_BLOCKING_OBJLOCK
+
+/*
  * vm_pageout_clean:
  *
  * Clean the page and remove it from the laundry.
@@ -750,8 +762,37 @@
 		 * queue, most likely are being paged out.
 		 */
 		if (!VM_OBJECT_TRYLOCK(object)) {
+#ifdef VM_PAGEOUT_FORCE_BLOCKING_OBJLOCK
+			/*
+			 * Cannot lock object while holding page queue lock.
+			 * Depend on both struct vm_object and normal
+			 * struct vm_page being type stable and sanity
+			 * check after reobtaining page queue lock.
+			 */
+			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
+			    m, &marker, pageq);
+			vm_page_unlock_queues();
+			VM_OBJECT_LOCK(object);
+			vm_page_lock_queues();
+			/* Page queue might have changed. */
+			next = TAILQ_NEXT(&marker, pageq);
+			if (m->queue != PQ_INACTIVE ||
+			    m->object != object ||
+			    m->hold_count != 0 ||
+			    &marker != TAILQ_NEXT(m, pageq)) {
+				/* Page changed. */
+				VM_OBJECT_UNLOCK(object);
+				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
+				    &marker, pageq);
+				addl_page_shortage++;
+				continue;
+			}
+			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
+			    &marker, pageq);
+#else
 			addl_page_shortage++;
 			continue;
+#endif
 		}
 		if (m->busy || (m->flags & PG_BUSY)) {
 			VM_OBJECT_UNLOCK(object);
@@ -1024,10 +1065,44 @@
 		next = TAILQ_NEXT(m, pageq);
 
 		object = m->object;
+#ifdef VM_PAGEOUT_FORCE_BLOCKING_OBJLOCK
+		if ((m->flags & PG_MARKER) != 0) {
+			m = next;
+			continue;
+		}
+#endif
 		if (!VM_OBJECT_TRYLOCK(object)) {
+#ifdef VM_PAGEOUT_FORCE_BLOCKING_OBJLOCK
+			/*
+			 * Cannot lock object while holding page queue lock.
+			 * Depend on both struct vm_object and normal
+			 * struct vm_page being type stable and sanity
+			 * check after reobtaining page queue lock.
+			 */
+			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE].pl,
+			    m, &marker, pageq);
+			vm_page_unlock_queues();
+			VM_OBJECT_LOCK(object);
+			vm_page_lock_queues();
+			/* Page queue might have changed. */
+			next = TAILQ_NEXT(&marker, pageq);
+			if (m->queue != PQ_ACTIVE ||
+			    m->object != object ||
+			    &marker != TAILQ_NEXT(m, pageq)) {
+				/* Page changed. */
+				VM_OBJECT_UNLOCK(object);
+				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl,
+				    &marker, pageq);
+				m = next;
+				continue;
+			}
+			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl,
+			    &marker, pageq);
+#else
 			vm_pageq_requeue(m);
 			m = next;
 			continue;
+#endif
 		}
 
 		/*
@@ -1264,6 +1339,9 @@
 vm_pageout_page_stats()
 {
 	vm_object_t object;
+#ifdef VM_PAGEOUT_FORCE_BLOCKING_OBJLOCK
+	struct vm_page marker;
+#endif
 	vm_page_t m,next;
 	int pcount,tpcount;		/* Number of pages to check */
 	static int fullintervalcount = 0;
@@ -1287,6 +1365,16 @@
 		fullintervalcount = 0;
 	}
 
+#ifdef VM_PAGEOUT_FORCE_BLOCKING_OBJLOCK
+	/*
+	 * Initialize our marker
+	 */
+	bzero(&marker, sizeof(marker));
+	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
+	marker.queue = PQ_INACTIVE;
+	marker.wire_count = 1;
+#endif
+
 	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
 	while ((m != NULL) && (pcount-- > 0)) {
 		int actcount;
@@ -1296,10 +1384,45 @@
 		next = TAILQ_NEXT(m, pageq);
 
 		object = m->object;
+
+#ifdef VM_PAGEOUT_FORCE_BLOCKING_OBJLOCK
+		if ((m->flags & PG_MARKER) != 0) {
+			m = next;
+			continue;
+		}
+#endif
 		if (!VM_OBJECT_TRYLOCK(object)) {
+#ifdef VM_PAGEOUT_FORCE_BLOCKING_OBJLOCK
+			/*
+			 * Cannot lock object while holding page queue lock.
+			 * Depend on both struct vm_object and normal
+			 * struct vm_page being type stable and sanity
+			 * check after reobtaining page queue lock.
+			 */
+			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE].pl,
+			    m, &marker, pageq);
+			vm_page_unlock_queues();
+			VM_OBJECT_LOCK(object);
+			vm_page_lock_queues();
+			/* Page queue might have changed. */
+			next = TAILQ_NEXT(&marker, pageq);
+			if (m->queue != PQ_ACTIVE ||
+			    m->object != object ||
+			    &marker != TAILQ_NEXT(m, pageq)) {
+				/* Page changed. */
+				VM_OBJECT_UNLOCK(object);
+				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl,
+				    &marker, pageq);
+				m = next;
+				continue;
+			}
+			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl,
+			    &marker, pageq);
+#else
 			vm_pageq_requeue(m);
 			m = next;
 			continue;
+#endif
 		}
 
 		/*
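For anyone trying to follow the marker trick outside the pagedaemon, here is a
minimal userland sketch of the same pattern, assuming a TAILQ protected by a
single queue mutex plus per-element "object" mutexes that sometimes have to be
taken in blocking mode.  All of the names here (struct elem, scan, visit,
queue_lock, obj_lock, on_queue) are made up for illustration and are not from
the patch, and the sketch leans on the same assumption the patch comments call
out: element memory must stay type stable, i.e. remain a valid struct elem
even if the element is removed from the queue while we sleep.

#include <pthread.h>
#include <stdbool.h>
#include <sys/queue.h>

struct elem {
	TAILQ_ENTRY(elem) link;
	bool		is_marker;	/* scans must skip marker entries */
	bool		on_queue;	/* cleared under queue_lock on removal */
	pthread_mutex_t	*obj_lock;	/* lock we may have to sleep on */
};

TAILQ_HEAD(elemq, elem);
static struct elemq	queue = TAILQ_HEAD_INITIALIZER(queue);
static pthread_mutex_t	queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void
scan(void (*visit)(struct elem *))
{
	struct elem marker = { .is_marker = true };
	struct elem *m, *next;

	pthread_mutex_lock(&queue_lock);
	for (m = TAILQ_FIRST(&queue); m != NULL; m = next) {
		next = TAILQ_NEXT(m, link);
		if (m->is_marker)	/* some other scanner's marker */
			continue;
		if (pthread_mutex_trylock(m->obj_lock) != 0) {
			/*
			 * Cannot block on obj_lock while holding queue_lock.
			 * Park a marker behind m, drop the queue lock and
			 * take the object lock in blocking mode.
			 */
			TAILQ_INSERT_AFTER(&queue, m, &marker, link);
			pthread_mutex_unlock(&queue_lock);
			pthread_mutex_lock(m->obj_lock);  /* m is type stable */
			pthread_mutex_lock(&queue_lock);
			/* Resume from the marker; the queue may have changed. */
			next = TAILQ_NEXT(&marker, link);
			if (!m->on_queue || TAILQ_NEXT(m, link) != &marker) {
				/* m was moved or removed while we slept. */
				pthread_mutex_unlock(m->obj_lock);
				TAILQ_REMOVE(&queue, &marker, link);
				continue;
			}
			TAILQ_REMOVE(&queue, &marker, link);
		}
		visit(m);	/* queue_lock and m->obj_lock both held */
		pthread_mutex_unlock(m->obj_lock);
	}
	pthread_mutex_unlock(&queue_lock);
}

The marker buys two things: a stable place to resume the traversal after the
queue lock has been dropped, and a cheap revalidation test (m still on the
queue, marker still directly behind m) that tells us whether m was touched in
the meantime.  The patch's versions of that test additionally recheck
m->object and, for the inactive queue, m->hold_count, and on failure simply
skip the page, just as the addl_page_shortage++ / vm_pageq_requeue()
fallbacks do.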