diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index aef1e4e..be225fb 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -874,7 +874,7 @@ malloc_uninit(void *data)
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
-	for (i = 0; i < MAXCPU; i++) {
+	for (i = 0; i < 1; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
diff --git a/sys/kern/subr_vmem.c b/sys/kern/subr_vmem.c
index 80940be..89d62ed 100644
--- a/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -665,7 +665,8 @@ vmem_startup(void)
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.
	 */
-	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
+	//mst look here
+	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (1 + 1) / 2);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
 #endif
 }
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index b96c421..6382437 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -98,6 +98,14 @@ __FBSDID("$FreeBSD$");
 #include <vm/memguard.h>
 #endif

+//mst: override some defines
+#undef curcpu
+#define curcpu 0
+#undef CPU_FOREACH
+#define CPU_FOREACH(i)						\
+	for ((i) = 0; (i) <= 0; (i)++)				\
+		if (!CPU_ABSENT((i)))
+
 /*
  * This is the zone and keg from which all zones are spawned.  The idea is that
  * even the zone & keg heads are allocated from the allocator, so we use the
@@ -1228,6 +1236,7 @@ keg_small_init(uma_keg_t keg)

	if (keg->uk_flags & UMA_ZONE_PCPU) {
		u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
+		ncpus = 1;

		keg->uk_slabsize = sizeof(struct pcpu);
		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
@@ -1822,7 +1831,7 @@ uma_startup(void *bootmem, int boot_pages)
 #endif
	args.name = "UMA Zones";
	args.size = sizeof(struct uma_zone) +
-	    (sizeof(struct uma_cache) * (mp_maxid + 1));
+	    (sizeof(struct uma_cache) * (0 + 1));
	args.ctor = zone_ctor;
	args.dtor = zone_dtor;
	args.uminit = zero_init;
@@ -3301,7 +3310,7 @@ uma_zero_item(void *item, uma_zone_t zone)
 {

	if (zone->uz_flags & UMA_ZONE_PCPU) {
-		for (int i = 0; i < mp_ncpus; i++)
+		for (int i = 0; i < 1; i++)
			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
	} else
		bzero(item, zone->uz_size);
@@ -3465,7 +3474,7 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
	 */
	bzero(&ush, sizeof(ush));
	ush.ush_version = UMA_STREAM_VERSION;
-	ush.ush_maxcpus = (mp_maxid + 1);
+	ush.ush_maxcpus = (0 + 1);
	ush.ush_count = count;
	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));

@@ -3509,7 +3518,7 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
			 * accept the possible race associated with bucket
			 * exchange during monitoring.
			 */
-			for (i = 0; i < (mp_maxid + 1); i++) {
+			for (i = 0; i < (0 + 1); i++) {
				bzero(&ups, sizeof(ups));
				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
					goto skip;
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index 11ab24f..b5b5a05 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -107,7 +107,7 @@
 #define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
 #define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits PAGE_MASK */

-#define UMA_BOOT_PAGES		64	/* Pages allocated for startup */
+#define UMA_BOOT_PAGES		512	/* Pages allocated for startup */

 /* Max waste percentage before going to off page slab management */
 #define UMA_MAX_WASTE	10
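
Taken together, these hunks pin the allocators to a single-CPU view: curcpu is forced to 0, the overridden CPU_FOREACH visits only CPU 0, every mp_ncpus/mp_maxid bound collapses to 1, and UMA_BOOT_PAGES grows from 64 to 512 pages of startup memory. The following minimal userspace sketch, which is not part of the patch, illustrates how the overridden CPU_FOREACH degenerates to a single iteration; the MAXCPU value and the CPU_ABSENT stub are stand-ins chosen only so the example compiles on its own.

#include <stdio.h>

#define MAXCPU		4	/* stand-in; the kernel value is arch-specific */
#define CPU_ABSENT(i)	0	/* stub: pretend every CPU id is populated */

/* Stock form: walk every possible CPU id. */
#define CPU_FOREACH_SMP(i)					\
	for ((i) = 0; (i) < MAXCPU; (i)++)			\
		if (!CPU_ABSENT((i)))

/* Patched form: the loop body runs exactly once, for CPU 0. */
#define CPU_FOREACH_UP(i)					\
	for ((i) = 0; (i) <= 0; (i)++)				\
		if (!CPU_ABSENT((i)))

int
main(void)
{
	int i;

	CPU_FOREACH_SMP(i)
		printf("SMP walk visits CPU %d\n", i);
	CPU_FOREACH_UP(i)
		printf("UP walk visits CPU %d\n", i);
	return (0);
}

Because the override keeps the trailing if (!CPU_ABSENT((i))) guard, every existing CPU_FOREACH call site in uma_core.c continues to compile unchanged; only the iteration bound shrinks.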