diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index c5b0b048a41..2f1422b36c4 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1776,6 +1776,9 @@ moea_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
 {
 	struct	pvo_entry *pvo, *tpvo, key;
 	struct	pte *pt;
+	struct	pte old_pte;
+	vm_page_t m;
+	int32_t refchg;
 
 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
 	    ("moea_protect: non current pmap"));
@@ -1803,12 +1806,37 @@ moea_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
 		pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
 		pvo->pvo_pte.pte.pte_lo |= PTE_BR;
 
 		/*
 		 * If the PVO is in the page table, update that pte as well.
 		 */
 		if (pt != NULL) {
+			/* Snapshot the PTE before it is overwritten. */
+			old_pte = *pt;
 			moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
+			m = PHYS_TO_VM_PAGE(old_pte.pte_lo & PTE_RPGN);
+			if (pm != kernel_pmap && m != NULL &&
+			    (m->a.flags & PGA_EXECUTABLE) == 0 &&
+			    (pvo->pvo_pte.pte.pte_lo & (PTE_I | PTE_G)) == 0) {
+				if ((m->oflags & VPO_UNMANAGED) == 0)
+					vm_page_aflag_set(m, PGA_EXECUTABLE);
+				moea_syncicache(old_pte.pte_lo & PTE_RPGN,
+				    PAGE_SIZE);
+			}
 			mtx_unlock(&moea_table_mutex);
+			/*
+			 * Let the VM know about the REF/CHG bits if the
+			 * page is managed and write access was removed.
+			 */
+			if ((pvo->pvo_vaddr & PVO_MANAGED) && m != NULL &&
+			    (old_pte.pte_lo & PTE_PP) == PTE_RW) {
+				refchg = old_pte.pte_lo & (PTE_REF | PTE_CHG);
+				refchg |= atomic_readandclear_32(
+				    &m->md.mdpg_attrs);
+				if (refchg & PTE_CHG)
+					vm_page_dirty(m);
+				if (refchg & PTE_REF)
+					vm_page_aflag_set(m, PGA_REFERENCED);
+			}
 		}
 	}
 	rw_wunlock(&pvh_global_lock);
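
The core of the change is the read-and-clear harvest of the referenced/changed
attributes: when a managed, writable mapping is downgraded to read-only, the
REF/CHG state accumulated for the page (in the old PTE and in mdpg_attrs) must
be transferred into the machine-independent page state exactly once, or a
dirty page could later be reclaimed as if it were clean. Below is a minimal
userspace sketch of that pattern; the struct, bit values, and helper names are
illustrative stand-ins, not the kernel's definitions (the kernel uses
atomic_readandclear_32() on mdpg_attrs together with vm_page_dirty() and
vm_page_aflag_set()).

/*
 * Userspace model of the REF/CHG harvesting pattern used in the patch.
 * Everything here (struct page, PTE_REF, PTE_CHG, harvest_refchg) is an
 * illustrative stand-in, not a FreeBSD kernel interface.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	PTE_REF	0x100	/* "referenced" bit, set by hardware */
#define	PTE_CHG	0x080	/* "changed" (dirty) bit, set by hardware */

struct page {
	_Atomic int32_t attrs;	/* accumulated REF/CHG, like mdpg_attrs */
	bool dirty;		/* stands in for vm_page_dirty() state */
	bool referenced;	/* stands in for PGA_REFERENCED */
};

/*
 * Transfer the accumulated attribute bits into the page state exactly once.
 * The atomic exchange plays the role of atomic_readandclear_32(): two racing
 * harvesters cannot both observe the same REF/CHG bits, so the bits are
 * neither lost nor applied twice.
 */
static void
harvest_refchg(struct page *m, int32_t pte_bits)
{
	int32_t refchg;

	/* Bits live both in the old PTE and in the per-page cache. */
	refchg = pte_bits;
	refchg |= atomic_exchange(&m->attrs, 0);

	if (refchg & PTE_CHG)
		m->dirty = true;
	if (refchg & PTE_REF)
		m->referenced = true;
}

int
main(void)
{
	struct page m = { .attrs = PTE_REF };

	/* The old PTE carried the CHG bit; the per-page cache carried REF. */
	harvest_refchg(&m, PTE_CHG);
	printf("dirty=%d referenced=%d attrs=%d\n",
	    m.dirty, m.referenced, (int)atomic_load(&m.attrs));
	return (0);
}

Combining the two sources before clearing is what keeps the harvest lossless:
the hardware-maintained bits sit in the evicted PTE's low word, while
mdpg_attrs only holds bits saved from mappings that were torn down earlier.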