Hi,

The access to sysmaps_pcpu[] should be atomic with respect to thread migration: the thread has to be pinned with sched_pin() before PCPU_GET(cpuid) is evaluated. Otherwise, the sysmaps of one CPU can be stolen by a thread that has meanwhile migrated to another CPU, and the purpose of the per-CPU sysmaps is defeated. A patch is enclosed.

Svata

Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c	(revision 246831)
+++ sys/i386/i386/pmap.c	(working copy)
@@ -4146,11 +4146,11 @@
 {
 	struct sysmaps *sysmaps;
 
+	sched_pin();
 	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 	mtx_lock(&sysmaps->lock);
 	if (*sysmaps->CMAP2)
 		panic("pmap_zero_page: CMAP2 busy");
-	sched_pin();
 	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 	    pmap_cache_bits(m->md.pat_mode, 0);
 	invlcaddr(sysmaps->CADDR2);
@@ -4171,11 +4171,11 @@
 {
 	struct sysmaps *sysmaps;
 
+	sched_pin();
 	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 	mtx_lock(&sysmaps->lock);
 	if (*sysmaps->CMAP2)
 		panic("pmap_zero_page_area: CMAP2 busy");
-	sched_pin();
 	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 	    pmap_cache_bits(m->md.pat_mode, 0);
 	invlcaddr(sysmaps->CADDR2);
@@ -4220,13 +4220,13 @@
 {
 	struct sysmaps *sysmaps;
 
+	sched_pin();
 	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 	mtx_lock(&sysmaps->lock);
 	if (*sysmaps->CMAP1)
 		panic("pmap_copy_page: CMAP1 busy");
 	if (*sysmaps->CMAP2)
 		panic("pmap_copy_page: CMAP2 busy");
-	sched_pin();
 	invlpg((u_int)sysmaps->CADDR1);
 	invlpg((u_int)sysmaps->CADDR2);
 	*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
@@ -5072,11 +5072,11 @@
 	vm_offset_t sva, eva;
 
 	if ((cpu_feature & CPUID_CLFSH) != 0) {
+		sched_pin();
 		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 		mtx_lock(&sysmaps->lock);
 		if (*sysmaps->CMAP2)
 			panic("pmap_flush_page: CMAP2 busy");
-		sched_pin();
 		*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
 		    PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
 		invlcaddr(sysmaps->CADDR2);
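For readers less familiar with the pmap code: the window the patch closes is between evaluating PCPU_GET(cpuid) and calling sched_pin(). Until the thread is pinned, the scheduler may move it to another CPU, after which it keeps using the sysmaps slot of the CPU it originally read. Below is a minimal user-space sketch of the same ordering rule, assuming Linux's sched_getcpu() and pthread_setaffinity_np() as rough stand-ins for PCPU_GET(cpuid) and sched_pin(); the names pin_to_cpu and per_cpu_slot are made up for illustration and this is not FreeBSD kernel code.

/*
 * Ordering sketch: "pin before you read the CPU id".
 * Build with: cc -pthread sketch.c
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NSLOTS	256

/* One scratch slot per CPU, like the per-CPU sysmaps in pmap.c. */
static int per_cpu_slot[NSLOTS];

/* Pin the calling thread to one CPU; the rough sched_pin() analogue. */
static int
pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	return (pthread_setaffinity_np(pthread_self(), sizeof(set), &set));
}

int
main(void)
{
	int cpu;

	/*
	 * Broken ordering (what the old pmap code did):
	 *
	 *	cpu = sched_getcpu();	// read the CPU id ...
	 *	// ... the scheduler may migrate us to another CPU here ...
	 *	pin_to_cpu(cpu);	// pinned now, but "cpu" may be stale
	 *	per_cpu_slot[cpu]++;	// touches another CPU's slot
	 *
	 * Correct ordering (what the patch does): pin first, then read
	 * the id, so it cannot go stale while the slot is in use.
	 */
	if (pin_to_cpu(0) != 0) {		/* pin before reading the id */
		perror("pthread_setaffinity_np");
		return (1);
	}
	cpu = sched_getcpu();			/* stays valid while pinned */
	per_cpu_slot[cpu]++;
	printf("used slot of CPU %d\n", cpu);
	return (0);
}

In the kernel the fix is simpler than the sketch suggests: sched_pin() just forbids migration away from whatever CPU the thread is currently on, so a cpuid read after pinning stays consistent until the matching sched_unpin() that these functions already do at the end.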