On Fri, Nov 04, 2011 at 10:09:09AM -0500, Alan Cox wrote:
> On 11/04/2011 05:08, Kostik Belousov wrote:
> >On Thu, Nov 03, 2011 at 12:51:10PM -0500, Alan Cox wrote:
> >>I would suggest introducing the vm_page_bits_t change first. If, at the
> >>same time, you change the return type from the function vm_page_bits()
> >>to use vm_page_bits_t, then I believe it is straightforward to fix all
> >>of the places in vm_page.c that don't properly handle a 32 KB page size.
> >Ok, I think this is orthogonal to the ABI issue. The vm_page_bits_t
> >change is applied.
>
> Agreed, which is why I wanted to separate the two things.
>
> I've made a few comments below.
...
> Looks good.

I will run make universe with the patch below. Any further notes?

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index f14da4a..f398453 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -137,7 +137,7 @@ SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
 
 static uma_zone_t fakepg_zone;
 
-static void vm_page_clear_dirty_mask(vm_page_t m, int pagebits);
+static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_queue_remove(int queue, vm_page_t m);
 static void vm_page_enqueue(int queue, vm_page_t m);
 static void vm_page_init_fakepg(void *dummy);
@@ -2350,7 +2350,7 @@ retrylookup:
  *
  * Inputs are required to range within a page.
  */
-int
+vm_page_bits_t
 vm_page_bits(int base, int size)
 {
 	int first_bit;
@@ -2367,7 +2367,8 @@ vm_page_bits(int base, int size)
 	first_bit = base >> DEV_BSHIFT;
 	last_bit = (base + size - 1) >> DEV_BSHIFT;
 
-	return ((2 << last_bit) - (1 << first_bit));
+	return (((vm_page_bits_t)2 << last_bit) -
+	    ((vm_page_bits_t)1 << first_bit));
 }
 
 /*
@@ -2426,7 +2427,7 @@ vm_page_set_valid(vm_page_t m, int base, int size)
  *
  * Clear the given bits from the specified page's dirty field.
  */
 static __inline void
-vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
+vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
 {
 	uintptr_t addr;
 #if PAGE_SIZE < 16384
@@ -2455,7 +2456,6 @@ vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
 		 */
 		addr = (uintptr_t)&m->dirty;
 #if PAGE_SIZE == 32768
-#error pagebits too short
 		atomic_clear_64((uint64_t *)addr, pagebits);
 #elif PAGE_SIZE == 16384
 		atomic_clear_32((uint32_t *)addr, pagebits);
@@ -2492,8 +2492,8 @@ vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
 void
 vm_page_set_validclean(vm_page_t m, int base, int size)
 {
-	u_long oldvalid;
-	int endoff, frag, pagebits;
+	vm_page_bits_t oldvalid, pagebits;
+	int endoff, frag;
 
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (size == 0)		/* handle degenerate case */
@@ -2505,7 +2505,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	 * first block.
 	 */
 	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
-	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
+	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, frag, base - frag);
 
 	/*
@@ -2515,7 +2515,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	 */
 	endoff = base + size;
 	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
-	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
+	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, endoff,
 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 
@@ -2585,7 +2585,7 @@ vm_page_clear_dirty(vm_page_t m, int base, int size)
 void
 vm_page_set_invalid(vm_page_t m, int base, int size)
 {
-	int bits;
+	vm_page_bits_t bits;
 
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -2625,7 +2625,7 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 	 */
 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
-		    (m->valid & (1 << i))
+		    (m->valid & ((vm_page_bits_t)1 << i))
 		) {
 			if (i > b) {
 				pmap_zero_page_area(m,
@@ -2656,9 +2656,10 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 int
 vm_page_is_valid(vm_page_t m, int base, int size)
 {
-	int bits = vm_page_bits(base, size);
+	vm_page_bits_t bits;
 
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	bits = vm_page_bits(base, size);
 	if (m->valid && ((m->valid & bits) == bits))
 		return 1;
 	else
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 23637bb..e3eb08c 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -113,6 +113,20 @@ TAILQ_HEAD(pglist, vm_page);
 
+#if PAGE_SIZE == 4096
+#define VM_PAGE_BITS_ALL 0xffu
+typedef uint8_t vm_page_bits_t;
+#elif PAGE_SIZE == 8192
+#define VM_PAGE_BITS_ALL 0xffffu
+typedef uint16_t vm_page_bits_t;
+#elif PAGE_SIZE == 16384
+#define VM_PAGE_BITS_ALL 0xffffffffu
+typedef uint32_t vm_page_bits_t;
+#elif PAGE_SIZE == 32768
+#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
+typedef uint64_t vm_page_bits_t;
+#endif
+
 struct vm_page {
 	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (Q) */
 	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
@@ -137,20 +151,8 @@ struct vm_page {
 	u_char	busy;			/* page busy count (O) */
 	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
 	/* so, on normal X86 kernels, they must be at least 8 bits wide */
-	/* In reality, support for 32KB pages is not fully implemented. */
-#if PAGE_SIZE == 4096
-	uint8_t	valid;			/* map of valid DEV_BSIZE chunks (O) */
-	uint8_t	dirty;			/* map of dirty DEV_BSIZE chunks (M) */
-#elif PAGE_SIZE == 8192
-	uint16_t valid;			/* map of valid DEV_BSIZE chunks (O) */
-	uint16_t dirty;			/* map of dirty DEV_BSIZE chunks (M) */
-#elif PAGE_SIZE == 16384
-	uint32_t valid;			/* map of valid DEV_BSIZE chunks (O) */
-	uint32_t dirty;			/* map of dirty DEV_BSIZE chunks (M) */
-#elif PAGE_SIZE == 32768
-	uint64_t valid;			/* map of valid DEV_BSIZE chunks (O) */
-	uint64_t dirty;			/* map of dirty DEV_BSIZE chunks (M) */
-#endif
+	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
+	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
 };
 
 /*
@@ -403,7 +405,7 @@ void vm_page_clear_dirty (vm_page_t, int, int);
 void vm_page_set_invalid (vm_page_t, int, int);
 int vm_page_is_valid (vm_page_t, int, int);
 void vm_page_test_dirty (vm_page_t);
-int vm_page_bits (int, int);
+vm_page_bits_t vm_page_bits(int base, int size);
 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
 void vm_page_free_toq(vm_page_t m);
 void vm_page_zero_idle_wakeup(void);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index e3222cb..cd2658d 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -486,15 +486,16 @@ vnode_pager_input_smlfs(object, m)
 	vm_object_t object;
 	vm_page_t m;
 {
-	int bits, i;
 	struct vnode *vp;
 	struct bufobj *bo;
 	struct buf *bp;
 	struct sf_buf *sf;
 	daddr_t fileaddr;
 	vm_offset_t bsize;
-	int error = 0;
+	vm_page_bits_t bits;
+	int error, i;
 
+	error = 0;
 	vp = object->handle;
 	if (vp->v_iflag & VI_DOOMED)
 		return VM_PAGER_BAD;
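The casts added to vm_page_bits() above are the heart of the 32 KB case: with PAGE_SIZE == 32768 and DEV_BSIZE == 512 the valid/dirty map needs 64 bits, and evaluating "2 << last_bit" in plain int over-shifts once last_bit passes 31. The standalone sketch below is not part of the patch; the sketch_* names, the hard-coded shift of 9, and the test values are invented for illustration, but it reproduces the widened arithmetic so it can be tried outside the kernel:

/*
 * Standalone illustration (hypothetical names, not FreeBSD code) of the
 * arithmetic in vm_page_bits() when the bit map is 64 bits wide, i.e.
 * PAGE_SIZE == 32768 and DEV_BSIZE == 512.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_DEV_BSHIFT	9		/* log2(512) */
typedef uint64_t sketch_bits_t;			/* stand-in for vm_page_bits_t */

static sketch_bits_t
sketch_page_bits(int base, int size)
{
	int first_bit, last_bit;

	first_bit = base >> SKETCH_DEV_BSHIFT;
	last_bit = (base + size - 1) >> SKETCH_DEV_BSHIFT;

	/*
	 * The casts force the shifts to happen in the 64-bit type.
	 * Done in int, last_bit >= 31 would shift past the width of
	 * int and corrupt the upper half of the mask.
	 */
	return (((sketch_bits_t)2 << last_bit) -
	    ((sketch_bits_t)1 << first_bit));
}

int
main(void)
{
	/* Whole 32 KB page: all 64 bits set. */
	printf("%#jx\n", (uintmax_t)sketch_page_bits(0, 32768));
	/* Last 512-byte block only: bit 63 set. */
	printf("%#jx\n", (uintmax_t)sketch_page_bits(32768 - 512, 512));
	return (0);
}

Compiled with any C99 compiler this prints 0xffffffffffffffff and 0x8000000000000000; the unsigned type also makes the full-page case well defined, since (sketch_bits_t)2 << 63 wraps to zero and the subtraction then yields the all-ones mask.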