Re: vm_page_t related KBI [Was: Re: panic at vm_page_wire with FreeBSD 9.0 Beta 3]

From: Arnaud Lacombe <lacombar@gmail.com>
Date: Sun, 6 Nov 2011 23:21:19 -0500
Hi,

On Sat, Nov 5, 2011 at 10:13 AM, Kostik Belousov <kostikbel@gmail.com> wrote:
> On Fri, Nov 04, 2011 at 06:03:39PM +0200, Kostik Belousov wrote:
>
> Below is the KBI patch after the vm_page_bits_t merge is done.
> Again, I did not spend time converting all in-tree consumers
> from the (potentially) loadable modules to the new KPI until it
> is agreed upon.
>
> diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
> index 305c189..7264cd1 100644
> --- a/sys/nfsclient/nfs_bio.c
> +++ b/sys/nfsclient/nfs_bio.c
> @@ -128,7 +128,7 @@ nfs_getpages(struct vop_getpages_args *ap)
>         * can only occur at the file EOF.
>         */
>        VM_OBJECT_LOCK(object);
> -       if (pages[ap->a_reqpage]->valid != 0) {
> +       if (vm_page_read_valid(pages[ap->a_reqpage]) != 0) {
>                for (i = 0; i < npages; ++i) {
>                        if (i != ap->a_reqpage) {
>                                vm_page_lock(pages[i]);
> @@ -198,16 +198,16 @@ nfs_getpages(struct vop_getpages_args *ap)
>                        /*
>                         * Read operation filled an entire page
>                         */
> -                       m->valid = VM_PAGE_BITS_ALL;
> -                       KASSERT(m->dirty == 0,
> +                       vm_page_write_valid(m, VM_PAGE_BITS_ALL);
> +                       KASSERT(vm_page_read_dirty(m) == 0,
>                            ("nfs_getpages: page %p is dirty", m));
>                } else if (size > toff) {
>                        /*
>                         * Read operation filled a partial page.
>                         */
> -                       m->valid = 0;
> +                       vm_page_write_valid(m, 0);
>                        vm_page_set_valid(m, 0, size - toff);
> -                       KASSERT(m->dirty == 0,
> +                       KASSERT(vm_page_read_dirty(m) == 0,
>                            ("nfs_getpages: page %p is dirty", m));
>                } else {
>                        /*
> diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
> index 389aea5..2f41e70 100644
> --- a/sys/vm/vm_page.c
> +++ b/sys/vm/vm_page.c
> @@ -2677,6 +2677,66 @@ vm_page_test_dirty(vm_page_t m)
>                vm_page_dirty(m);
>  }
>
> +void
> +vm_page_lock_func(vm_page_t m, const char *file, int line)
> +{
> +
> +#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
> +       _mtx_lock_flags(vm_page_lockptr(m), 0, file, line);
> +#else
> +       __mtx_lock(vm_page_lockptr(m), 0, file, line);
> +#endif
> +}
> +
Why reinvent the wheel? The whole point of these accessors is to hide
implementation details. IMO, you should restrict yourself to the
documented mutex(9) API, only.
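
To make the suggestion concrete, here is a minimal sketch (mine, not the
patch author's code) of the accessors written against nothing but the
documented mutex(9) interface; vm_page_lockptr() is the macro from the
quoted vm_page.h hunk:

void
vm_page_lock_func(vm_page_t m)
{

	/* mtx_lock() already selects the debug or fast path itself. */
	mtx_lock(vm_page_lockptr(m));
}

void
vm_page_unlock_func(vm_page_t m)
{

	mtx_unlock(vm_page_lockptr(m));
}

int
vm_page_trylock_func(vm_page_t m)
{

	return (mtx_trylock(vm_page_lockptr(m)));
}

The obvious cost is that WITNESS/LOCK_DEBUG kernels would then record
the file and line inside vm_page.c rather than the module caller's
location, which is presumably why the patch threads file/line through
explicitly.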

Oh, wait, you end up using LOCK_FILE instead of just __FILE__, but
LOCK_FILE is either __FILE__ or NULL depending on LOCK_DEBUG, and you
would not have those functions without INVARIANTS anyway... This whole
LOCK_FILE/LOCK_LINE handling seems completely fracked-up. If you want
this code to depend on LOCK_DEBUG rather than INVARIANTS, then make it
live only in the LOCK_DEBUG case.
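
For context, the definitions being complained about look roughly like
this (a paraphrase of the behaviour described above, not a verbatim copy
of sys/sys/lock.h; the real conditional may involve additional options):

/* Sketch of the LOCK_FILE/LOCK_LINE behaviour under discussion. */
#if LOCK_DEBUG > 0
#define	LOCK_FILE	__FILE__	/* caller's location is recorded */
#define	LOCK_LINE	__LINE__
#else
#define	LOCK_FILE	NULL		/* location is thrown away */
#define	LOCK_LINE	0
#endif

So in a build where LOCK_DEBUG is off, the new wrappers are handed
NULL/0 anyway, which is the inconsistency being pointed out.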

Btw, let me also question the use of non-inline functions.

> +void
> +vm_page_unlock_func(vm_page_t m, const char *file, int line)
> +{
> +
> +#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
> +       _mtx_unlock_flags(vm_page_lockptr(m), 0, file, line);
> +#else
> +       __mtx_unlock(vm_page_lockptr(m), curthread, 0, file, line);
> +#endif
> +}
> +
> +int
> +vm_page_trylock_func(vm_page_t m, const char *file, int line)
> +{
> +
> +       return (_mtx_trylock(vm_page_lockptr(m), 0, file, line));
> +}
> +
> +void
> +vm_page_lock_assert_func(vm_page_t m, int a, const char *file, int line)
> +{
> +
> +#ifdef INVARIANTS
> +       _mtx_assert(vm_page_lockptr(m), a, file, line);
> +#endif
> +}
> +
Same remark applies to all of the above.

> +vm_page_bits_t
> +vm_page_read_dirty_func(vm_page_t m)
> +{
> +
> +       return (m->dirty);
> +}
> +
> +vm_page_bits_t
> +vm_page_read_valid_func(vm_page_t m)
> +{
> +
> +       return (m->valid);
> +}
> +
> +void
> +vm_page_write_valid_func(vm_page_t m, vm_page_bits_t v)
> +{
> +
> +       m->valid = v;
> +}
> +
> +
>  int so_zerocp_fullpage = 0;
>
>  /*
> diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
> index 7099b70..4f8f71e 100644
> --- a/sys/vm/vm_page.h
> +++ b/sys/vm/vm_page.h
> @@ -218,12 +218,50 @@ extern struct vpglocks pa_lock[];
>
>  #define        PA_LOCK_ASSERT(pa, a)   mtx_assert(PA_LOCKPTR(pa), (a))
>
> +#ifdef KLD_MODULE
> +#define        vm_page_lock(m)         vm_page_lock_func((m), LOCK_FILE, LOCK_LINE)
> +#define        vm_page_unlock(m)       vm_page_unlock_func((m), LOCK_FILE, LOCK_LINE)
> +#define        vm_page_trylock(m)      vm_page_trylock_func((m), LOCK_FILE, LOCK_LINE)
> +#ifdef INVARIANTS
> +#define        vm_page_lock_assert(m, a)       \
> +    vm_page_lock_assert_func((m), (a), LOCK_FILE, LOCK_LINE)
> +#else
> +#define        vm_page_lock_assert(m, a)
> +#endif
> +
> +#define        vm_page_read_dirty(m)   vm_page_read_dirty_func((m))
> +#define        vm_page_read_valid(m)   vm_page_read_valid_func((m))
> +#define        vm_page_write_valid(m, v)       vm_page_write_valid_func((m), (v))
> +
> +#else  /* KLD_MODULE */
>  #define        vm_page_lockptr(m)      (PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
>  #define        vm_page_lock(m)         mtx_lock(vm_page_lockptr((m)))
>  #define        vm_page_unlock(m)       mtx_unlock(vm_page_lockptr((m)))
>  #define        vm_page_trylock(m)      mtx_trylock(vm_page_lockptr((m)))
>  #define        vm_page_lock_assert(m, a)       mtx_assert(vm_page_lockptr((m)), (a))
>
> +static inline vm_page_bits_t
> +vm_page_read_dirty(vm_page_t m)
> +{
> +
> +       return (m->dirty);
> +}
> +
> +static inline vm_page_bits_t
> +vm_page_read_valid(vm_page_t m)
> +{
> +
> +       return (m->valid);
> +}
> +
> +static inline void
> +vm_page_write_valid(vm_page_t m, vm_page_bits_t v)
> +{
> +
> +       m->valid = v;
> +}
> +#endif
> +
>  #define        vm_page_queue_free_mtx  vm_page_queue_free_lock.data
>  /*
>  * These are the flags defined for vm_page.
> @@ -403,6 +441,15 @@ void vm_page_cowfault (vm_page_t);
>  int vm_page_cowsetup(vm_page_t);
>  void vm_page_cowclear (vm_page_t);
>
> +void vm_page_lock_func(vm_page_t m, const char *file, int line);
> +void vm_page_unlock_func(vm_page_t m, const char *file, int line);
> +int vm_page_trylock_func(vm_page_t m, const char *file, int line);
> +void vm_page_lock_assert_func(vm_page_t m, int a, const char *file, int line);
> +
> +vm_page_bits_t vm_page_read_dirty_func(vm_page_t m);
> +vm_page_bits_t vm_page_read_valid_func(vm_page_t m);
> +void vm_page_write_valid_func(vm_page_t m, vm_page_bits_t v);
> +
>  #ifdef INVARIANTS
>  void vm_page_object_lock_assert(vm_page_t m);
>  #define        VM_PAGE_OBJECT_LOCK_ASSERT(m)   vm_page_object_lock_assert(m)
>