From 944916858a430a0627e483657d4cfa2cd2dfb4f7 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 2 Jun 2009 21:17:45 +0000
Subject: [PATCH] powerpc: Shield code specific to 64-bit server processors

This is a random collection of added ifdef's around portions of
code that only make sense on server processors. Using either
CONFIG_PPC_STD_MMU_64 or CONFIG_PPC_BOOK3S as seems appropriate.

This is meant to make the future merging of Book3E 64-bit support
easier.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/lppaca.h        | 6 ++++++
 arch/powerpc/include/asm/mmu.h           | 4 ++--
 arch/powerpc/include/asm/pgtable-ppc64.h | 5 +++++
 arch/powerpc/kernel/irq.c                | 2 ++
 arch/powerpc/kernel/pci_64.c             | 5 +++++
 arch/powerpc/kernel/process.c            | 2 +-
 arch/powerpc/kernel/prom.c               | 2 +-
 arch/powerpc/kernel/setup_64.c           | 5 ++++-
 arch/powerpc/mm/Makefile                 | 7 ++++---
 arch/powerpc/mm/init_64.c                | 2 ++
 10 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index d2a65e8ca6a..f78f65c38f0 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -20,6 +20,11 @@
 #define _ASM_POWERPC_LPPACA_H
 #ifdef __KERNEL__
 
+/* These definitions relate to hypervisors that only exist when using
+ * a server type processor
+ */
+#ifdef CONFIG_PPC_BOOK3S
+
 //=============================================================================
 //
 //	This control block contains the data that is shared between the
@@ -158,5 +163,6 @@ struct slb_shadow {
 
 extern struct slb_shadow slb_shadow[];
 
+#endif /* CONFIG_PPC_BOOK3S */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 325b7208a14..fb57ded592f 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -74,10 +74,10 @@ extern void early_init_mmu_secondary(void);
 
 #endif /* !__ASSEMBLY__ */
 
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC_STD_MMU_64)
 /* 64-bit classic hash table MMU */
 #  include <asm/mmu-hash64.h>
-#elif defined(CONFIG_PPC_STD_MMU)
+#elif defined(CONFIG_PPC_STD_MMU_32)
 /* 32-bit classic hash table MMU */
 #  include <asm/mmu-hash32.h>
 #elif defined(CONFIG_40x)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index c40db05f21e..8cd083c6150 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -31,9 +31,11 @@
 #error TASK_SIZE_USER64 exceeds pagetable range
 #endif
 
+#ifdef CONFIG_PPC_STD_MMU_64
 #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
+#endif
 
 /*
  * Define the address range of the vmalloc VM area.
@@ -199,8 +201,11 @@ static inline unsigned long pte_update(struct mm_struct *mm,
 	if (!huge)
 		assert_pte_locked(mm, addr);
 
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (old & _PAGE_HASHPTE)
 		hpte_need_flush(mm, addr, ptep, old, huge);
+#endif
+
 	return old;
 }
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7d46e5d5b20..8564a412e7a 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -117,6 +117,7 @@ notrace void raw_local_irq_restore(unsigned long en)
 	if (!en)
 		return;
 
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
 		/*
 		 * Do we need to disable preemption here? Not really: in the
@@ -134,6 +135,7 @@ notrace void raw_local_irq_restore(unsigned long en)
 		if (local_paca->lppaca_ptr->int_dword.any_int)
 			iseries_handle_interrupts();
 	}
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 	/*
 	 * if (get_paca()->hard_enabled) return;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index dd6c7a3bf72..461c91625a8 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -420,6 +420,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
 	 * so flushing the hash table is the only sane way to make sure
 	 * that no hash entries are covering that removed bridge area
 	 * while still allowing other busses overlapping those pages
+	 *
+	 * Note: If we ever support P2P hotplug on Book3E, we'll have
+	 * to do an appropriate TLB flush here too
 	 */
 	if (bus->self) {
 		struct resource *res = bus->resource[0];
@@ -427,8 +430,10 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
 		pr_debug("IO unmapping for PCI-PCI bridge %s\n",
 			 pci_name(bus->self));
 
+#ifdef CONFIG_PPC_STD_MMU_64
 		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
 					 res->end + _IO_BASE + 1);
+#endif
 		return 0;
 	}
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7b44a33f03c..3e7135bbe40 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -650,7 +650,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
 				_ALIGN_UP(sizeof(struct thread_info), 16);
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (cpu_has_feature(CPU_FTR_SLB)) {
 		unsigned long sp_vsid;
 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index ce01ff2474d..d4405b95bfa 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -585,7 +585,7 @@ static void __init check_cpu_pa_features(unsigned long node)
 		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
 }
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
 static void __init check_cpu_slb_size(unsigned long node)
 {
 	u32 *slb_size_ptr;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index c410c606955..42221055f0c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -417,9 +417,11 @@ void __init setup_system(void)
 	if (ppc64_caches.iline_size != 0x80)
 		printk("ppc64_caches.icache_line_size = 0x%x\n",
 		       ppc64_caches.iline_size);
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (htab_address)
 		printk("htab_address = 0x%p\n", htab_address);
 	printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif /* CONFIG_PPC_STD_MMU_64 */
 	if (PHYSICAL_START > 0)
 		printk("physical_start = 0x%lx\n",
 		       PHYSICAL_START);
@@ -511,8 +513,9 @@ void __init setup_arch(char **cmdline_p)
 	irqstack_early_init();
 	emergency_stack_init();
 
+#ifdef CONFIG_PPC_STD_MMU_64
 	stabs_alloc();
-
+#endif
 	/* set up the bootmem stuff with available memory */
 	do_init_bootmem();
 	sparse_init();
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index b746f4ca420..c4bcf072cb3 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -11,10 +11,11 @@ obj-y				:= fault.o mem.o pgtable.o gup.o \
 				   pgtable_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o \
 				   tlb_nohash_low.o
-hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
-obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
+obj-$(CONFIG_PPC64)		+= mmap_64.o
+hash64-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
+obj-$(CONFIG_PPC_STD_MMU_64)	+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o \
-				   mmap_64.o $(hash-y)
+				   mmap_64.o $(hash64-y)
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
 obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
 				   tlb_hash$(CONFIG_WORD_SIZE).o \
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3e6a6543f53..68a821add28 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -66,6 +66,7 @@
 
 #include "mmu_decl.h"
 
+#ifdef CONFIG_PPC_STD_MMU_64
 #if PGTABLE_RANGE > USER_VSID_RANGE
 #warning Limited user VSID range means pagetable space is wasted
 #endif
@@ -73,6 +74,7 @@
 #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
 #warning TASK_SIZE is smaller than it needs to be.
 #endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 phys_addr_t memstart_addr = ~0;
 phys_addr_t kernstart_addr;
-- 
2.11.4.GIT
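
The pattern this patch applies is uniform: code that touches server-class
(Book3S) facilities such as the hash page table, SLB, or iSeries hypervisor
structures is bracketed by #ifdef CONFIG_PPC_STD_MMU_64 (or CONFIG_PPC_BOOK3S
for the hypervisor definitions), so a future Book3E 64-bit kernel can compile
the same files with those sections dropped. Below is a minimal standalone
sketch of the idiom; the Kconfig symbol is real, but every function name and
the _PAGE_HASHPTE-style bit are hypothetical stand-ins, not kernel code:

	#include <stdio.h>

	/*
	 * Stand-in for the Kconfig-generated symbol; in a kernel build this
	 * comes from the generated autoconf header, never from a hand-written
	 * define. Comment it out to see the Book3E-style build.
	 */
	#define CONFIG_PPC_STD_MMU_64 1

	#ifdef CONFIG_PPC_STD_MMU_64
	/* Hash-MMU-only helper: compiled solely for server (Book3S) builds */
	static void hash_flush_example(unsigned long addr)
	{
		printf("hash MMU: flush stale HPTE for 0x%lx\n", addr);
	}
	#endif

	/* Common code path shared by all 64-bit processors */
	static unsigned long pte_update_example(unsigned long addr,
						unsigned long old_pte)
	{
		/* ...generic page-table bookkeeping happens here... */

	#ifdef CONFIG_PPC_STD_MMU_64
		/* Server processors must also invalidate the hash entry */
		if (old_pte & 0x1000)	/* illustrative hashed-PTE bit */
			hash_flush_example(addr);
	#endif
		/* On a Book3E build the guarded block compiles away */
		return old_pte;
	}

	int main(void)
	{
		pte_update_example(0x10000, 0x1000);
		return 0;
	}

The same reasoning drives the mm/Makefile hunk: hash_utils_64.o, the SLB and
stab objects, and hash_native_64.o now hang off obj-$(CONFIG_PPC_STD_MMU_64)
rather than obj-$(CONFIG_PPC64), while mmap_64.o is added to the generic
64-bit list, so the object lists split along the same server/embedded line
as the #ifdefs.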