5.1 PAE testing results: 2 panics

From: Cagle, John (ISS-Houston) <john.cagle_at_hp.com>
Date: Thu, 22 May 2003 20:11:33 -0500
I am testing -current on a ProLiant DL560 4P with 8.25 GB of RAM.
I have enabled PAE and SMP+APIC in my kernel config.
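For completeness, the relevant lines in my kernel config look roughly
like this (quoting from memory, so treat the exact spelling as
approximate):

        options         PAE
        options         SMP
        options         APIC_IO         # I/O APIC support for the SMP kernel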

I initially had problems with the ciss and bge drivers, but Paul
Saab has added PAE support to both drivers and the fixes have been
committed to CVS (thanks, Paul!).

I am now running into a reproducible panic triggered by a single
SSH session running a "find . -print" on an NFS file share:

 panic: kmem_malloc(4096): kmem_map too small: 96624640 total allocated
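
For what it's worth, the session that triggers it is nothing more
exotic than roughly this (the NFS mount point below is just a
placeholder):

        ssh bigdaddy
        cd /nfs/share
        find . -print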

I tried lowering kern.maxvnodes from 536130 (default) to 200000,
but that did not alleviate the panic.
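For the record, I changed it on the fly with something like:

        sysctl -w kern.maxvnodes=200000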

If anyone has any ideas about what the problem might be, please let
me know.  I have attached a stack trace and some statistics captured
just before the panic that may be helpful.
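The statistics were gathered with something like the following,
right before the panic fired (going from memory on the exact
commands):

        netstat -m
        vmstat -m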

Also, I get a different panic whenever I mount /dev/fd0:

root_at_bigdaddy:~> mount /dev/fd0 /mnt
panic: isa_dmastart: bad bounce buffer
cpuid = 2; lapic.id = 02000000
Debugger("panic")
Stopped at      Debugger+0x55:  xchgl   %ebx,in_Debugger.0
db> trace
Debugger(c0429eec,2000000,c0442f3a,e44abbe0,1) at Debugger+0x55
panic(c0442f3a,200,2,dc,8) at panic+0x11f
isa_dmastart(100000,d5787000,200,2,c042ae33) at isa_dmastart+0x78
fdstate(d27b4700,7,e44abcb4,c03ef591,d27b4700) at fdstate+0x4df
fdc_intr(d27b4700,e44abce0,c028f62c,d27b4700,0) at fdc_intr+0x18
fd_pseudointr(d27b4700,0,c042ae33,bf,c51c) at fd_pseudointr+0x11
softclock(0,0,c0427d36,233,ce8391e0) at softclock+0x19c
ithread_loop(ce823200,e44abd48,c0427be7,2f8,0) at ithread_loop+0x182
fork_exit(c026ca00,ce823200,e44abd48) at fork_exit+0xc0
fork_trampoline() at fork_trampoline+0x1a
--- trap 0x1, eip = 0, esp = 0xe44abd7c, ebp = 0 ---

Thanks,
John
--------------------------------
John Cagle     john.cagle_at_hp.com
Principal Member Technical Staff
   Industry Standard Servers
    Hewlett-Packard Company


panic: kmem_malloc(4096): kmem_map too small: 96624640 total allocated
cpuid = 0; lapic.id = 00000000
Debugger("panic")
Stopped at      Debugger+0x55:  xchgl   %ebx,in_Debugger.0
db> trace
Debugger(c041b6da,0,c042cea5,e8d49774,1) at Debugger+0x55
panic(c042cea5,1000,5c26000,e8d497a0,c04886a8) at panic+0x11f
kmem_malloc(c062e0b0,1000,102,e8d497fc,c0398072) at kmem_malloc+0x124
page_alloc(c063a9c0,1000,e8d497ef,102,c042e75d) at page_alloc+0x27
slab_zalloc(c063a9c0,2,c045388c,c048e920,c063aa98) at slab_zalloc+0x112
uma_zone_slab(c063a9c0,2,163,d41fddb0,c063aa98) at uma_zone_slab+0xd8
uma_zalloc_bucket(c063a9c0,2,c042e754,586,0) at uma_zalloc_bucket+0x17d
uma_zalloc_arg(c063a9c0,0,2,c042ec5c,2d498b4) at uma_zalloc_arg+0x307
vm_object_allocate_wait(2,1,0,2,e8d498e4) at vm_object_allocate_wait+0x38
vm_object_allocate(2,1,0,88,c04867a0) at vm_object_allocate+0x28
vnode_pager_alloc(d41fddb0,200,0,0,0) at vnode_pager_alloc+0x192
vop_stdcreatevobject(e8d499cc,e8d499e0,c02d541a,e8d499cc,1) at vop_stdcreatevobject+0x175
vop_defaultop(e8d499cc,1,c04227e5,cb1,c046eca0) at vop_defaultop+0x18
vfs_object_create(d41fddb0,d346a4c0,d3826b00,a4,d346a4c0) at vfs_object_create+0x5a
namei(e8d49bd8,e8d49a74,c02a0c3b,c0487c08,d346a4c0) at namei+0x2f6
vn_open_cred(e8d49bd8,e8d49cd8,1,d3826b00,e8d49cc4) at vn_open_cred+0x258
vn_open(e8d49bd8,e8d49cd8,1,2ab,c02a0ca3) at vn_open+0x29
kern_open(d346a4c0,80559c8,0,5,1) at kern_open+0x13d
open(d346a4c0,e8d49d10,c0432e37,3fb,3) at open+0x30
syscall(805002f,bf7f002f,bf7f002f,8055980,8053040) at syscall+0x26e
Xint0x80_syscall() at Xint0x80_syscall+0x1d
--- syscall (5, FreeBSD ELF32, open), eip = 0x280bc6f3, esp = 0xbf7ff8cc, ebp = 0xbf7ffa98 ---
db>

mbuf usage:
        GEN cache:      0/0 (in use/in pool)
        CPU #0 cache:   388/448 (in use/in pool)
        CPU #1 cache:   0/64 (in use/in pool)
        CPU #2 cache:   4/64 (in use/in pool)
        CPU #3 cache:   0/64 (in use/in pool)
        CPU #4 cache:   0/288 (in use/in pool)
        CPU #5 cache:   0/64 (in use/in pool)
        CPU #6 cache:   1/64 (in use/in pool)
        CPU #7 cache:   0/64 (in use/in pool)
        Total:          393/1120 (in use/in pool)
        Mbuf cache high watermark: 512
        Mbuf cache low watermark: 128
        Maximum possible: 51200
        Allocated mbuf types:
          393 mbufs allocated to data
        2% of mbuf map consumed
mbuf cluster usage:
        GEN cache:      0/0 (in use/in pool)
        CPU #0 cache:   382/392 (in use/in pool)
        CPU #1 cache:   0/64 (in use/in pool)
        CPU #2 cache:   1/64 (in use/in pool)
        CPU #3 cache:   0/64 (in use/in pool)
        CPU #4 cache:   0/128 (in use/in pool)
        CPU #5 cache:   0/64 (in use/in pool)
        CPU #6 cache:   1/64 (in use/in pool)
        CPU #7 cache:   0/64 (in use/in pool)
        Total:          384/904 (in use/in pool)
        Cluster cache high watermark: 128
        Cluster cache low watermark: 16
        Maximum possible: 25600
        3% of cluster map consumed
2088 KBytes of wired memory reserved (41% in use)
0 requests for memory denied
0 requests for memory delayed
0 calls to protocol drain routines

        Type  InUse MemUse HighUse Requests  Size(s)
     atkbddev     2     1K      1K        2  32
MSDOSFS mount     1  1024K   1024K        1
     nexusdev     3     1K      1K        3  16
      memdesc     1     4K      4K        1  4096
        DEVFS   104    16K     16K      104  16,32,128,4096
    VM pgdata     1    64K     64K        1  65536
    UFS mount    12    25K     60K       24  64,256,1024,2048,4096,32768
    UFS ihash     1  2048K   2048K        1
  UFS dirhash    21     4K      4K       21  16,32,64,512
       dirrem     0     0K      1K       26  32
       diradd     0     0K      1K       28  32
     freefile     0     0K      1K       17  32
     freeblks     0     0K      3K       16  256
     freefrag     0     0K      1K        7  32
   allocindir     0     0K     29K      489  64
     indirdep     0     0K    867K      244  32,16384
  allocdirect     0     0K      2K       46  128
    bmsafemap     0     0K      2K       74  32
       newblk     1     1K      1K      536  64,256
     inodedep     1  2048K   2054K       66  128,256
      pagedep     1   256K    257K       12  64
     p1003.1b     1     1K      1K        1  16
   NFS daemon     1     1K      1K        1  256
     NFS hash     1  2048K   2048K        1
 NFSV3 diroff    12     6K      6K       12  512
      NFS req     0     0K      1K    38698  64
    in6_multi     6     1K      1K        6  64
     syncache     1     8K      8K        1  8192
     in_multi     2     1K      1K        2  32
         igmp     1     1K      1K        1  16
     routetbl    52     8K      8K       77  16,32,64,128,256
           lo     1     1K      1K        1  512
        clone     4    16K     16K        4  4096
  ether_multi    28     2K      2K       28  16,32,64
       ifaddr    58    11K     11K       59  32,256,512,2048
          BPF    10    10K     10K       10  128,256,4096
        mount    13     4K      4K       17  16,32,128,512
      ATA DMA     1     1K      1K        1  64
       vnodes    26     7K      7K      151  16,32,64,128,256
cluster_save buffer     0     0K      1K        2  32,64
     vfscache 10702  4766K   4766K    11121  64,128
   BIO buffer    20    40K     68K       62  2048
          pcb    12     5K      5K       54  16,32,64,2048
       soname     2     1K      1K      247  16,32,128
         ptys     2     1K      1K        2  512
         ttys  1108   142K    142K     2368  128,512
          shm     1    16K     16K        1  16384
          sem     4     7K      7K        4  1024,4096
          msg     4    25K     25K        4  512,4096,16384
     ioctlops     0     0K      1K       22  512,1024
  ATA generic     2     1K      1K        3  16,512
      acpidev    50     1K      1K       50  16
      acpisem    20     3K      3K       20  128
     acpitask     0     0K      1K        6  16,32
    taskqueue     3     1K      1K        3  128
          sbuf     0     0K     13K      138  16,32,64,128,256,512,1024,2048,4096,8192
         rman   116     8K      8K      453  16,64
      mbufmgr   169   141K    141K      169  32,64,256,8192,16384
         kobj   126   504K    504K      126  4096
 eventhandler    34     2K      2K       34  32,128
      devstat     4     9K      9K        4  16,4096
           bus   732   125K    233K     4150  16,32,64,128,256,512,1024,2048,4096,8192,16384,32768
         SWAP     2  2189K   2189K        2  64
     sysctltmp     0     0K      4K      999  16,32,64,128,256,512,2048,4096
    sysctloid   134     5K      5K      134  16,32,64
       sysctl     0     0K      1K      934  16,32,64
      uidinfo     3     2K      2K        9  32,1024
    ciss_data     4     2K      2K        5  32,128,512,1024
         cred    19     3K      3K     1148  128
      subproc    84     8K      9K      675  64,256
         proc     2     8K      8K        2  4096
      session    19     3K      3K       24  128
         pgrp    23     3K      3K      159  128
       acpica  1638    85K     86K    14047  16,32,64,128,256,512,1024
       module   176    12K     12K      176  64,128
       ip6ndp     3     1K      1K        4  64,128,512
          temp     8   117K    117K     3193  16,32,64,128,256,512,1024,2048,4096,8192,32768
        devbuf  2530   975K   1007K     3166  16,32,64,128,256,512,1024,2048,4096,16384
        lockf     1     1K      1K        5  64
       linker    34     2K      2K       54  16,32,256
   ACD driver     1     1K      1K        1  1024
       KTRACE   100    13K     13K      100  128
      ithread    44     4K      4K       44  64,128
       zombie     0     0K      1K      575  128
    proc-args    23     2K      2K      609  16,32,64,128,256
       kqueue     4     4K      6K       47  256,1024
         kenv   108     6K      6K      109  16,32,64,2048
        sigio     1     1K      1K        1  32
    file desc    70    18K     20K      645  256
        dev_t    31     8K      8K       31  256
ATAPI generic     1     1K      1K        2  32,128
  ISOFS mount     1  2048K   2048K        1
       isadev    22     2K      2K       22  64
         GEOM    47     6K      8K      175  16,32,64,128,256,512,1024
   pfs_fileno     1    32K     32K        1  32768
    pfs_nodes    20     3K      3K       20  128