From 3c868823413d76bdd80c643603be8ab09dcb4d65 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Wed, 30 Jan 2008 13:33:52 +0100
Subject: [PATCH] x86: c_p_a() fix: reorder TLB / cache flushes to follow
 Intel recommendation

Intel recommends flushing the TLBs first and the caches second when
changing caching attributes. c_p_a() previously did it the other way
round; reorder it to match.

The procedure is still not fully compliant with the Intel
documentation, because Intel recommends an all-CPU synchronization
step between the TLB flushes and the cache flushes.

However, on all newer Intel CPUs this is meaningless anyway, because
they support Self-Snoop and can skip the cache flush step.

[ mingo@elte.hu: decoupled from clflush and ported it to x86.git ]

Signed-off-by: Andi Kleen
Acked-by: Jan Beulich
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 arch/x86/mm/pageattr_32.c | 12 ++++++------
 arch/x86/mm/pageattr_64.c |  3 ++-
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 523fd5b37df..5cb5c7101f4 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -87,6 +87,12 @@ static void flush_kernel_map(void *arg)
 	struct list_head *lh = (struct list_head *)arg;
 	struct page *p;
 
+	/*
+	 * Flush all to work around Errata in early athlons regarding
+	 * large page flushing.
+	 */
+	__flush_tlb_all();
+
 	/* High level code is not ready for clflush yet */
 	if (0 && cpu_has_clflush) {
 		list_for_each_entry(p, lh, lru)
@@ -95,12 +101,6 @@ static void flush_kernel_map(void *arg)
 		if (boot_cpu_data.x86_model >= 4)
 			wbinvd();
 	}
-
-	/*
-	 * Flush all to work around Errata in early athlons regarding
-	 * large page flushing.
-	 */
-	__flush_tlb_all();
 }
 
 static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 4d172881af7..3ccdb1401e6 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -82,6 +82,8 @@ static void flush_kernel_map(void *arg)
 	struct list_head *l = (struct list_head *)arg;
 	struct page *pg;
 
+	__flush_tlb_all();
+
 	/* When clflush is available always use it because it is
 	   much cheaper than WBINVD. */
 	/* clflush is still broken. Disable for now. */
@@ -94,7 +96,6 @@ static void flush_kernel_map(void *arg)
 			clflush_cache_range(addr, PAGE_SIZE);
 		}
 	}
-	__flush_tlb_all();
 }
 
 static inline void flush_map(struct list_head *l)
-- 
2.11.4.GIT
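
[ Editor's note, not part of the patch: the fully Intel-compliant
  sequence described in the changelog would run on each CPU roughly
  as sketched below. This is an illustration only: the function name
  cpa_flush_sequence() is hypothetical, the all-CPU synchronization
  in step 2 is exactly the part c_p_a() still omits, and the helpers
  __flush_tlb_all() and wbinvd() are the same kernel-internal
  primitives the patch already uses (in pageattr_*.c they are in
  scope via the files' existing includes). ]

/*
 * Illustrative sketch only -- NOT code added by this patch.
 * Assumes it lives in pageattr_32.c/pageattr_64.c, where
 * __flush_tlb_all() and wbinvd() are already available.
 */
static void cpa_flush_sequence(void *unused)
{
	/* Step 1: flush the TLBs first, as Intel recommends. */
	__flush_tlb_all();

	/*
	 * Step 2 (recommended by Intel, skipped by c_p_a()):
	 * synchronize here so every CPU has finished its TLB flush
	 * before any CPU starts flushing caches. Self-Snoop capable
	 * CPUs can skip the cache flush, which makes this barrier
	 * moot on them.
	 */

	/* Step 3: write back and invalidate the caches last. */
	wbinvd();
}

As with flush_kernel_map(), such a helper would be invoked on every
CPU via on_each_cpu(), which is how flush_map() already drives the
flush in both files.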