Index: src/sys/vm/memguard.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/memguard.c,v
retrieving revision 1.1
diff -u -r1.1 memguard.c
--- src/sys/vm/memguard.c	21 Jan 2005 18:09:17 -0000	1.1
+++ src/sys/vm/memguard.c	21 Jan 2005 21:21:38 -0000
@@ -52,6 +52,13 @@
 #include
 
 /*
+ * The maximum number of pages allowed per allocation. If you're using
+ * MemGuard to override very large items (> MAX_PAGES_PER_ITEM in size),
+ * you need to increase MAX_PAGES_PER_ITEM.
+ */
+#define MAX_PAGES_PER_ITEM	10
+
+/*
  * Global MemGuard data.
  */
 static vm_map_t memguard_map;
@@ -61,13 +68,20 @@
 	STAILQ_ENTRY(memguard_entry) entries;
 	void *ptr;
 };
-static STAILQ_HEAD(memguard_fifo, memguard_entry) memguard_fifo_pool;
+static struct memguard_fifo {
+	struct memguard_entry *stqh_first;
+	struct memguard_entry **stqh_last;
+	int index;
+} memguard_fifo_pool[MAX_PAGES_PER_ITEM];
 
 /*
  * Local prototypes.
  */
-static void memguard_guard(void *addr);
-static void memguard_unguard(void *addr);
+static void memguard_guard(void *addr, int numpgs);
+static void memguard_unguard(void *addr, int numpgs);
+static struct memguard_fifo *vtomgfifo(vm_offset_t va);
+static void vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo);
+static void vclrmgfifo(vm_offset_t va);
 
 /*
  * Local macros. MemGuard data is global, so replace these with whatever
@@ -89,6 +103,7 @@
 memguard_init(vm_map_t parent_map, unsigned long size)
 {
 	char *base, *limit;
+	int i;
 
 	/* size must be multiple of PAGE_SIZE */
 	size /= PAGE_SIZE;
@@ -103,7 +118,10 @@
 
 	MEMGUARD_CRIT_SECTION_INIT;
 	MEMGUARD_CRIT_SECTION_ENTER;
-	STAILQ_INIT(&memguard_fifo_pool);
+	for (i = 0; i < MAX_PAGES_PER_ITEM; i++) {
+		STAILQ_INIT(&memguard_fifo_pool[i]);
+		memguard_fifo_pool[i].index = i;
+	}
 	MEMGUARD_CRIT_SECTION_EXIT;
 
 	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
@@ -121,10 +139,11 @@
 {
 	void *obj = NULL;
 	struct memguard_entry *e = NULL;
+	int i;
 
-	/* XXX: MemGuard does not handle > PAGE_SIZE objects. */
-	if (size > PAGE_SIZE)
-		panic("MEMGUARD: Cannot handle objects > PAGE_SIZE");
+	i = size / PAGE_SIZE;
+	if (i >= MAX_PAGES_PER_ITEM)
+		panic("MEMGUARD: You must increase MAX_PAGES_PER_ITEM");
 
 	/*
 	 * If we haven't exhausted the memguard_map yet, allocate from
@@ -137,16 +156,17 @@
 	 */
 	MEMGUARD_CRIT_SECTION_ENTER;
 	if (memguard_mapused >= memguard_mapsize) {
-		e = STAILQ_FIRST(&memguard_fifo_pool);
+		e = STAILQ_FIRST(&memguard_fifo_pool[i]);
 		if (e != NULL) {
-			STAILQ_REMOVE(&memguard_fifo_pool, e,
+			STAILQ_REMOVE(&memguard_fifo_pool[i], e,
 			    memguard_entry, entries);
 			MEMGUARD_CRIT_SECTION_EXIT;
 			obj = e->ptr;
 			free(e, M_TEMP);
-			memguard_unguard(obj);
+			memguard_unguard(obj, i+1);
 			if (flags & M_ZERO)
-				bzero(obj, PAGE_SIZE);
+				bzero(obj, PAGE_SIZE * (i+1));
+			vsetmgfifo((vm_offset_t)obj, &memguard_fifo_pool[i]);
 			return obj;
 		}
 		MEMGUARD_CRIT_SECTION_EXIT;
@@ -155,18 +175,19 @@
 		    "memguard_map too small");
 		return NULL;
 	} else
-		memguard_mapused += PAGE_SIZE;
+		memguard_mapused += (PAGE_SIZE * (i+1));
 	MEMGUARD_CRIT_SECTION_EXIT;
 
 	if (obj == NULL)
-		obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE, flags);
+		obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE * (i+1), flags);
 	if (obj != NULL) {
-		memguard_unguard(obj);
+		memguard_unguard(obj, i+1);
+		vsetmgfifo((vm_offset_t)obj, &memguard_fifo_pool[i]);
 		if (flags & M_ZERO)
-			bzero(obj, PAGE_SIZE);
+			bzero(obj, PAGE_SIZE * (i+1));
 	} else {
 		MEMGUARD_CRIT_SECTION_ENTER;
-		memguard_mapused -= PAGE_SIZE;
+		memguard_mapused -= (PAGE_SIZE * (i+1));
 		MEMGUARD_CRIT_SECTION_EXIT;
 	}
 	return obj;
@@ -179,20 +200,25 @@
 memguard_free(void *addr)
 {
 	struct memguard_entry *e;
+	struct memguard_fifo *mgfifo;
+	int idx;
 
-	memguard_guard(addr);
+	mgfifo = vtomgfifo((vm_offset_t)addr);
+	idx = mgfifo->index;
+	memguard_guard(addr, idx + 1);
+	vclrmgfifo((vm_offset_t)addr);
 	e = malloc(sizeof(struct memguard_entry), M_TEMP, M_NOWAIT);
 	if (e == NULL) {
 		MEMGUARD_CRIT_SECTION_ENTER;
-		memguard_mapused -= PAGE_SIZE;
+		memguard_mapused -= (PAGE_SIZE * (idx+1));
 		MEMGUARD_CRIT_SECTION_EXIT;
 		kmem_free(memguard_map, (vm_offset_t)round_page(
-		    (unsigned long)addr), PAGE_SIZE);
+		    (unsigned long)addr), PAGE_SIZE * (idx+1));
 		return;
 	}
 	e->ptr = (void *)round_page((unsigned long)addr);
 	MEMGUARD_CRIT_SECTION_ENTER;
-	STAILQ_INSERT_TAIL(&memguard_fifo_pool, e, entries);
+	STAILQ_INSERT_TAIL(mgfifo, e, entries);
 	MEMGUARD_CRIT_SECTION_EXIT;
 }
 
@@ -201,11 +227,12 @@
  * future writes to it fail).
  */
 static void
-memguard_guard(void *addr)
+memguard_guard(void *addr, int numpgs)
 {
 	void *a = (void *)round_page((unsigned long)addr);
 	(void)vm_map_protect(memguard_map, (vm_offset_t)a,
-	    (vm_offset_t)((unsigned long)a + PAGE_SIZE), VM_PROT_READ, 0);
+	    (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)),
+	    VM_PROT_READ, 0);
 }
 
 /*
@@ -213,10 +240,63 @@
  * allow full data access).
  */
 static void
-memguard_unguard(void *addr)
+memguard_unguard(void *addr, int numpgs)
 {
 	void *a = (void *)round_page((unsigned long)addr);
 	(void)vm_map_protect(memguard_map, (vm_offset_t)a,
-	    (vm_offset_t)((unsigned long)a + PAGE_SIZE),
+	    (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)),
 	    VM_PROT_READ | VM_PROT_WRITE, 0);
 }
+
+/*
+ * vtomgfifo() converts a virtual address of the first page allocated for
+ * an item to a memguard_fifo_pool reference for the corresponding item's
+ * size.
+ *
+ * vsetmgfifo() sets a reference in an underlying page for the specified
+ * virtual address to an appropriate memguard_fifo_pool.
+ *
+ * These routines are very similar to those defined by UMA in uma_int.h
+ */
+static struct memguard_fifo *
+vtomgfifo(vm_offset_t va)
+{
+	vm_page_t p;
+	struct memguard_fifo *mgfifo;
+
+	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+	mgfifo = (struct memguard_fifo *)p->object;
+
+	/*
+	 * We use PG_SLAB, just like UMA does, even though we stash
+	 * a reference to a memguard_fifo, and not a slab.
+	 */
+	if ((p->flags & PG_SLAB) == 0)
+		panic("MEMGUARD: Expected memguard_fifo reference to be set!");
+	return mgfifo;
+}
+
+static void
+vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo)
+{
+	vm_page_t p;
+
+	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+	p->object = (vm_object_t)mgfifo;
+	/*
+	 * We use PG_SLAB, just like UMA does, even though we stash
+	 * a reference to a memguard_fifo, and not a slab.
+	 */
+	p->flags |= PG_SLAB;
+}
+
+static void
+vclrmgfifo(vm_offset_t va)
+{
+	vm_page_t p;
+
+	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+	p->object = NULL;
+	p->flags &= ~PG_SLAB;
+}
+
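For reference, below is a small userland sketch (not part of the patch) of the bookkeeping idea the new vtomgfifo()/vsetmgfifo()/vclrmgfifo() routines implement: each allocation is filed under a size class i = size / PAGE_SIZE, and the allocation's first backing page remembers which per-class FIFO it belongs to, so the free path can recover the page count without being told. The sketch replaces the kernel's vm_page->object / PG_SLAB stash with a flat lookup table, and the names mg_alloc, mg_free, page_fifo and backing are made up for illustration only; it is a model of the technique, not the kernel code.

/*
 * Userland model of MemGuard's per-size-class FIFO lookup.
 * PAGE_SIZE, MAX_PAGES_PER_ITEM and page_fifo[] stand in for the real
 * page size, the patch's constant, and the vm_page->object/PG_SLAB stash.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE          4096
#define MAX_PAGES_PER_ITEM 10
#define MAX_PAGES          1024            /* pages modeled by the table */

struct fifo { int index; };                /* one FIFO head per size class */

static struct fifo fifo_pool[MAX_PAGES_PER_ITEM];
static struct fifo *page_fifo[MAX_PAGES];  /* which FIFO owns each page? */
static char backing[MAX_PAGES * PAGE_SIZE];

/* vsetmgfifo() analogue: record the owning FIFO for an address' first page. */
static void set_fifo(void *addr, struct fifo *f)
{
	page_fifo[((char *)addr - backing) / PAGE_SIZE] = f;
}

/* vtomgfifo() analogue: recover the owning FIFO (and thus the size class). */
static struct fifo *to_fifo(void *addr)
{
	struct fifo *f = page_fifo[((char *)addr - backing) / PAGE_SIZE];
	assert(f != NULL);                 /* mirrors the PG_SLAB panic */
	return f;
}

/* memguard_alloc() analogue: pick size class i = size / PAGE_SIZE, use i+1 pages. */
static void *mg_alloc(size_t size, size_t *next_page)
{
	int i = (int)(size / PAGE_SIZE);
	void *obj;

	if (i >= MAX_PAGES_PER_ITEM)       /* the patch panics here instead */
		return NULL;
	if (*next_page + (size_t)(i + 1) > MAX_PAGES)
		return NULL;
	obj = backing + *next_page * PAGE_SIZE;
	*next_page += (size_t)(i + 1);
	set_fifo(obj, &fifo_pool[i]);
	return obj;
}

/* memguard_free() analogue: the FIFO's index says how many pages to release. */
static void mg_free(void *addr)
{
	struct fifo *f = to_fifo(addr);

	printf("freeing %d page(s) for size class %d\n", f->index + 1, f->index);
	set_fifo(addr, NULL);              /* vclrmgfifo() analogue */
}

int main(void)
{
	size_t next_page = 0;

	for (int i = 0; i < MAX_PAGES_PER_ITEM; i++)
		fifo_pool[i].index = i;

	void *a = mg_alloc(100, &next_page);            /* class 0: one page   */
	void *b = mg_alloc(3 * PAGE_SIZE, &next_page);  /* class 3: four pages */
	mg_free(b);
	mg_free(a);
	return 0;
}

As in the patch, an allocation that is an exact multiple of PAGE_SIZE still lands in class size / PAGE_SIZE and therefore gets one extra page; the point of the stashed FIFO reference is simply that mg_free(), like memguard_free(), needs only the address to know both the size class and the number of pages to unguard and recycle.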