21 changes: 13 additions & 8 deletions exec.c
@@ -1451,7 +1451,6 @@ void tlb_flush_masked(CPUState *env, uint32_t mmu_indexes_mask)
 
 void tlb_flush_page_masked(CPUState *env, target_ulong addr, uint32_t mmu_indexes_mask, bool from_generated_code)
 {
-    int i;
     int mmu_idx;
 
     /* Check if we need to flush due to large pages. */
@@ -1465,15 +1464,21 @@ void tlb_flush_page_masked(CPUState *env, target_ulong addr, uint32_t mmu_indexes_mask, bool from_generated_code)
         env->current_tb = NULL;
     }
 
-    addr &= TARGET_PAGE_MASK;
-    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    for(mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx += 1) {
-        if(extract32(mmu_indexes_mask, mmu_idx, 1)) {
-            tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
+    /* ARM's minimum page size is 4K, but TARGET_PAGE_BITS is 10 (1K).
+       When the guest flushes a 4K page, we must invalidate all 1K
+       sub-pages within that 4K region, otherwise stale TLB entries
+       for sibling sub-pages cause writes to hit wrong physical pages. */
+    target_ulong base_4k = addr & ~(target_ulong)0xFFF;
+    for(target_ulong subpage = base_4k; subpage < base_4k + 0x1000; subpage += TARGET_PAGE_SIZE) {
+        target_ulong sp_addr = subpage & TARGET_PAGE_MASK;
+        int i = (sp_addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        for(mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx += 1) {
+            if(extract32(mmu_indexes_mask, mmu_idx, 1)) {
+                tlb_flush_entry(&env->tlb_table[mmu_idx][i], sp_addr);
+            }
         }
+        tlb_flush_jmp_cache(env, sp_addr);
     }
-
-    tlb_flush_jmp_cache(env, addr);
 }
 
 void tlb_flush_page(CPUState *env, target_ulong addr, bool from_generated_code)
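For context, here is a minimal standalone sketch (not part of the patch) of the sub-page arithmetic the new loop performs. It assumes TARGET_PAGE_BITS = 10, i.e. a 1 KiB emulated page as the patch comment states, plus hypothetical values for CPU_TLB_SIZE and the guest flush address; it prints the four 1 KiB sub-pages that cover one 4 KiB guest page and the TLB slot each of them maps to.

/* Standalone illustration only: TARGET_PAGE_BITS = 10 per the patch comment;
   CPU_TLB_SIZE = 256 and the flush address are assumed values. */
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 10
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint32_t)(TARGET_PAGE_SIZE - 1))
#define CPU_TLB_SIZE     256

int main(void)
{
    uint32_t addr = 0x00081234;                 /* hypothetical guest flush address */
    uint32_t base_4k = addr & ~(uint32_t)0xFFF; /* align down to the 4 KiB guest page */

    /* One 4 KiB guest page spans four 1 KiB sub-pages; each lands in its own TLB slot. */
    for (uint32_t subpage = base_4k; subpage < base_4k + 0x1000; subpage += TARGET_PAGE_SIZE) {
        uint32_t sp_addr = subpage & TARGET_PAGE_MASK;
        int i = (sp_addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        printf("flush sub-page 0x%08x -> TLB index %d\n", (unsigned)sp_addr, i);
    }
    return 0;
}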