Re: panic at vm_page_wire with FreeBSD 9.0 Beta 3

From: Kostik Belousov <kostikbel@gmail.com>
Date: Thu, 3 Nov 2011 15:24:37 +0200
On Thu, Nov 03, 2011 at 12:40:08AM -0500, Alan Cox wrote:
> On 11/02/2011 05:32, Andriy Gapon wrote:
> >[restored cc: to the original poster]
> >As Bruce Evans has pointed out to me privately [I am not sure why
> >privately], there is already an example in the i386 and amd64 atomic.h
> >headers, where operations are inlined for a kernel build but presented
> >as real (external) functions for a module build.  You can search e.g.
> >sys/amd64/include/atomic.h for KLD_MODULE.
> >
> >I think that the same treatment could/should be applied to vm_page_*lock*
> >operations defined in sys/vm/vm_page.h.
> *snip*
> 
> Yes, it should be.  There are without question legitimate reasons for a 
> module to acquire a page lock.

I agree. Also, I think we should use the opportunity to isolate the
modules from changes to the struct vm_page layout. As an example, I
converted nfsclient.ko.

The patch is not tested.
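
For reference, the atomic.h trick looks roughly like this (a simplified
sketch, not the literal header text -- the real header generates the
declarations and the inlines with ATOMIC_ASM() macros):

	#ifdef KLD_MODULE
	/* Module build: a real function, resolved against the kernel. */
	void	atomic_add_int(volatile u_int *p, u_int v);
	#else
	/* Kernel build: inlined at the call site. */
	static __inline void
	atomic_add_int(volatile u_int *p, u_int v)
	{
		__asm __volatile("lock; addl %1,%0"
		    : "+m" (*p)
		    : "ir" (v)
		    : "cc");
	}
	#endif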

diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 305c189..7264cd1 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -128,7 +128,7 @@ nfs_getpages(struct vop_getpages_args *ap)
 	 * can only occur at the file EOF.
 	 */
 	VM_OBJECT_LOCK(object);
-	if (pages[ap->a_reqpage]->valid != 0) {
+	if (vm_page_read_valid(pages[ap->a_reqpage]) != 0) {
 		for (i = 0; i < npages; ++i) {
 			if (i != ap->a_reqpage) {
 				vm_page_lock(pages[i]);
@@ -198,16 +198,16 @@ nfs_getpages(struct vop_getpages_args *ap)
 			/*
 			 * Read operation filled an entire page
 			 */
-			m->valid = VM_PAGE_BITS_ALL;
-			KASSERT(m->dirty == 0,
+			vm_page_write_valid(m, VM_PAGE_BITS_ALL);
+			KASSERT(vm_page_read_dirty(m) == 0,
 			    ("nfs_getpages: page %p is dirty", m));
 		} else if (size > toff) {
 			/*
 			 * Read operation filled a partial page.
 			 */
-			m->valid = 0;
+			vm_page_write_valid(m, 0);
 			vm_page_set_valid(m, 0, size - toff);
-			KASSERT(m->dirty == 0,
+			KASSERT(vm_page_read_dirty(m) == 0,
 			    ("nfs_getpages: page %p is dirty", m));
 		} else {
 			/*
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index f14da4a..5b8b4e3 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2677,6 +2677,66 @@ vm_page_test_dirty(vm_page_t m)
 		vm_page_dirty(m);
 }
 
+void
+vm_page_lock_func(vm_page_t m, const char *file, int line)
+{
+
+#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
+	_mtx_lock_flags(vm_page_lockptr(m), 0, file, line);
+#else
+	__mtx_lock(vm_page_lockptr(m), curthread, 0, file, line);
+#endif
+}
+
+void
+vm_page_unlock_func(vm_page_t m, const char *file, int line)
+{
+
+#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
+	_mtx_unlock_flags(vm_page_lockptr(m), 0, file, line);
+#else
+	__mtx_unlock(vm_page_lockptr(m), curthread, 0, file, line);
+#endif
+}
+
+int
+vm_page_trylock_func(vm_page_t m, const char *file, int line)
+{
+
+	return (_mtx_trylock(vm_page_lockptr(m), 0, file, line));
+}
+
+void
+vm_page_lock_assert_func(vm_page_t m, int a, const char *file, int line)
+{
+
+#ifdef INVARIANTS
+	_mtx_assert(vm_page_lockptr(m), a, file, line);
+#endif
+}
+
+vm_page_bits_t
+vm_page_read_dirty_func(vm_page_t m)
+{
+
+	return (m->dirty);
+}
+
+vm_page_bits_t
+vm_page_read_valid_func(vm_page_t m)
+{
+
+	return (m->valid);
+}
+
+void
+vm_page_write_valid_func(vm_page_t m, vm_page_bits_t v)
+{
+
+	m->valid = v;
+}
+
+
 int so_zerocp_fullpage = 0;
 
 /*
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 23637bb..618ba2b 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -113,6 +113,21 @@
 
 TAILQ_HEAD(pglist, vm_page);
 
+#if PAGE_SIZE == 4096
+#define VM_PAGE_BITS_ALL 0xffu
+typedef uint8_t vm_page_bits_t;
+#elif PAGE_SIZE == 8192
+#define VM_PAGE_BITS_ALL 0xffffu
+typedef uint16_t vm_page_bits_t;
+#elif PAGE_SIZE == 16384
+#define VM_PAGE_BITS_ALL 0xffffffffu
+typedef uint32_t vm_page_bits_t;
+#elif PAGE_SIZE == 32768
+#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
+typedef uint64_t vm_page_bits_t;
+#endif
+
+
 struct vm_page {
 	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (Q) */
 	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) 	*/
@@ -138,19 +153,8 @@ struct vm_page {
 	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
 	/* so, on normal X86 kernels, they must be at least 8 bits wide */
 	/* In reality, support for 32KB pages is not fully implemented. */
-#if PAGE_SIZE == 4096
-	uint8_t	valid;			/* map of valid DEV_BSIZE chunks (O) */
-	uint8_t	dirty;			/* map of dirty DEV_BSIZE chunks (M) */
-#elif PAGE_SIZE == 8192
-	uint16_t valid;			/* map of valid DEV_BSIZE chunks (O) */
-	uint16_t dirty;			/* map of dirty DEV_BSIZE chunks (M) */
-#elif PAGE_SIZE == 16384
-	uint32_t valid;			/* map of valid DEV_BSIZE chunks (O) */
-	uint32_t dirty;			/* map of dirty DEV_BSIZE chunks (M) */
-#elif PAGE_SIZE == 32768
-	uint64_t valid;			/* map of valid DEV_BSIZE chunks (O) */
-	uint64_t dirty;			/* map of dirty DEV_BSIZE chunks (M) */
-#endif
+	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
+	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
 };
 
 /*
@@ -216,12 +220,50 @@ extern struct vpglocks pa_lock[];
 
 #define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
 
+#ifdef KLD_MODULE
+#define	vm_page_lock(m)		vm_page_lock_func((m), LOCK_FILE, LOCK_LINE)
+#define	vm_page_unlock(m)	vm_page_unlock_func((m), LOCK_FILE, LOCK_LINE)
+#define	vm_page_trylock(m)	vm_page_trylock_func((m), LOCK_FILE, LOCK_LINE)
+#ifdef INVARIANTS
+#define	vm_page_lock_assert(m, a)	\
+    vm_page_lock_assert_func((m), (a), LOCK_FILE, LOCK_LINE)
+#else
+#define	vm_page_lock_assert(m, a)
+#endif
+
+#define	vm_page_read_dirty(m)	vm_page_read_dirty_func((m))
+#define	vm_page_read_valid(m)	vm_page_read_valid_func((m))
+#define	vm_page_write_valid(m, v)	vm_page_write_valid_func((m), (v))
+
+#else	/* KLD_MODULE */
 #define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
 #define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
 #define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
 #define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
 #define	vm_page_lock_assert(m, a)	mtx_assert(vm_page_lockptr((m)), (a))
 
+static inline vm_page_bits_t
+vm_page_read_dirty(vm_page_t m)
+{
+
+	return (m->dirty);
+}
+
+static inline vm_page_bits_t
+vm_page_read_valid(vm_page_t m)
+{
+
+	return (m->valid);
+}
+
+static inline void
+vm_page_write_valid(vm_page_t m, vm_page_bits_t v)
+{
+
+	m->valid = v;
+}
+#endif
+
 #define	vm_page_queue_free_mtx	vm_page_queue_free_lock.data
 /*
  * These are the flags defined for vm_page.
@@ -322,16 +364,6 @@ extern struct vpglocks vm_page_queue_lock;
 #define vm_page_lock_queues()   mtx_lock(&vm_page_queue_mtx)
 #define vm_page_unlock_queues() mtx_unlock(&vm_page_queue_mtx)
 
-#if PAGE_SIZE == 4096
-#define VM_PAGE_BITS_ALL 0xffu
-#elif PAGE_SIZE == 8192
-#define VM_PAGE_BITS_ALL 0xffffu
-#elif PAGE_SIZE == 16384
-#define VM_PAGE_BITS_ALL 0xffffffffu
-#elif PAGE_SIZE == 32768
-#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
-#endif
-
 /* page allocation classes: */
 #define VM_ALLOC_NORMAL		0
 #define VM_ALLOC_INTERRUPT	1
@@ -411,6 +443,15 @@ void vm_page_cowfault (vm_page_t);
 int vm_page_cowsetup(vm_page_t);
 void vm_page_cowclear (vm_page_t);
 
+void vm_page_lock_func(vm_page_t m, const char *file, int line);
+void vm_page_unlock_func(vm_page_t m, const char *file, int line);
+int vm_page_trylock_func(vm_page_t m, const char *file, int line);
+void vm_page_lock_assert_func(vm_page_t m, int a, const char *file, int line);
+
+vm_page_bits_t vm_page_read_dirty_func(vm_page_t m);
+vm_page_bits_t vm_page_read_valid_func(vm_page_t m);
+void vm_page_write_valid_func(vm_page_t m, vm_page_bits_t v);
+
 #ifdef INVARIANTS
 void vm_page_object_lock_assert(vm_page_t m);
 #define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
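
To illustrate the effect: when nfsclient.ko is built with KLD_MODULE
defined, a statement from the conversion above such as

	if (vm_page_read_valid(pages[ap->a_reqpage]) != 0)

compiles into a call to vm_page_read_valid_func() inside the kernel, so
the module binary no longer hard-codes the offset or the width of the
valid field and keeps working across struct vm_page layout changes,
while a kernel build still gets the inline accessor at no cost.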
