[PATCH] ppc64: msChunks cleanups
arch/ppc64/kernel/iSeries_setup.c
/*
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *    Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
 *
 *    Module name: iSeries_setup.c
 *
 *    Description:
 *      Architecture- / platform-specific boot-time initialization code for
 *      the IBM iSeries LPAR.  Adapted from original code by Grant Erickson
 *      and code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan
 *      Malek <dan@net4x.com>.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <linux/config.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/root_dev.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include <asm/time.h>
#include "iSeries_setup.h"
#include <asm/naca.h>
#include <asm/paca.h>
#include <asm/cache.h>
#include <asm/abs_addr.h>
#include <asm/iSeries/HvCallHpt.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallEvent.h>
#include <asm/iSeries/HvCallSm.h>
#include <asm/iSeries/HvCallXm.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/IoHriMainStore.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/iSeries_irq.h>
#include <asm/iSeries/IoHriProcessorVpd.h>
#include <asm/iSeries/ItVpdAreas.h>
#include <asm/iSeries/LparMap.h>

extern void hvlog(char *fmt, ...);

#ifdef DEBUG
#define DBG(fmt...) hvlog(fmt)
#else
#define DBG(fmt...)
#endif
/* Function Prototypes */
extern void ppcdbg_initialize(void);

static void build_iSeries_Memory_Map(void);
static void setup_iSeries_cache_sizes(void);
static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
#ifdef CONFIG_PCI
extern void iSeries_pci_final_fixup(void);
#else
static void iSeries_pci_final_fixup(void) { }
#endif

/* Global Variables */
static unsigned long procFreqHz;
static unsigned long procFreqMhz;
static unsigned long procFreqMhzHundredths;

static unsigned long tbFreqHz;
static unsigned long tbFreqMhz;
static unsigned long tbFreqMhzHundredths;

int piranha_simulator;

extern int rd_size;		/* Defined in drivers/block/rd.c */
extern unsigned long klimit;
extern unsigned long embedded_sysmap_start;
extern unsigned long embedded_sysmap_end;

extern unsigned long iSeries_recal_tb;
extern unsigned long iSeries_recal_titan;

static int mf_initialized;
struct MemoryBlock {
	unsigned long absStart;
	unsigned long absEnd;
	unsigned long logicalStart;
	unsigned long logicalEnd;
};

/*
 * Process the main store VPD to determine where the holes in memory are,
 * fill in the array of block data, and return the number of physical
 * blocks found.
 */
static unsigned long iSeries_process_Condor_mainstore_vpd(
		struct MemoryBlock *mb_array, unsigned long max_entries)
{
	unsigned long holeFirstChunk, holeSizeChunks;
	unsigned long numMemoryBlocks = 1;
	struct IoHriMainStoreSegment4 *msVpd =
		(struct IoHriMainStoreSegment4 *)xMsVpd;
	unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
	unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
	unsigned long holeSize = holeEnd - holeStart;

	printk("Mainstore_VPD: Condor\n");
	/*
	 * Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
	mb_array[0].logicalStart = 0;
	mb_array[0].logicalEnd = 0x100000000;
	mb_array[0].absStart = 0;
	mb_array[0].absEnd = 0x100000000;

	if (holeSize) {
		numMemoryBlocks = 2;
		holeStart = holeStart & 0x000fffffffffffff;
		holeStart = addr_to_chunk(holeStart);
		holeFirstChunk = holeStart;
		holeSize = addr_to_chunk(holeSize);
		holeSizeChunks = holeSize;
		printk("Main store hole: start chunk = %0lx, size = %0lx chunks\n",
				holeFirstChunk, holeSizeChunks);
		mb_array[0].logicalEnd = holeFirstChunk;
		mb_array[0].absEnd = holeFirstChunk;
		mb_array[1].logicalStart = holeFirstChunk;
		mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
		mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
		mb_array[1].absEnd = 0x100000000;
	}
	return numMemoryBlocks;
}
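
/*
 * Worked example (illustrative numbers, not taken from real VPD): a hole
 * of 0x40 chunks starting at absolute chunk 0x1000 yields two blocks:
 * block 0 maps logical/absolute chunks [0, 0x1000), and block 1 maps
 * logical chunks [0x1000, 0x100000000 - 0x40) onto absolute chunks
 * [0x1040, 0x100000000).  Logical chunk space thus stays contiguous
 * even though absolute memory is not.
 */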
#define MaxSegmentAreas			32
#define MaxSegmentAdrRangeBlocks	128
#define MaxAreaRangeBlocks		4

static unsigned long iSeries_process_Regatta_mainstore_vpd(
		struct MemoryBlock *mb_array, unsigned long max_entries)
{
	struct IoHriMainStoreSegment5 *msVpdP =
		(struct IoHriMainStoreSegment5 *)xMsVpd;
	unsigned long numSegmentBlocks = 0;
	u32 existsBits = msVpdP->msAreaExists;
	unsigned long area_num;

	printk("Mainstore_VPD: Regatta\n");

	for (area_num = 0; area_num < MaxSegmentAreas; ++area_num) {
		unsigned long numAreaBlocks;
		struct IoHriMainStoreArea4 *currentArea;

		if (existsBits & 0x80000000) {
			unsigned long block_num;

			currentArea = &msVpdP->msAreaArray[area_num];
			numAreaBlocks = currentArea->numAdrRangeBlocks;
			printk("ms_vpd: processing area %2ld  blocks=%ld",
					area_num, numAreaBlocks);
			for (block_num = 0; block_num < numAreaBlocks;
					++block_num) {
				/* Process an address range block */
				struct MemoryBlock tempBlock;
				unsigned long i;

				tempBlock.absStart =
					(unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
				tempBlock.absEnd =
					(unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
				tempBlock.logicalStart = 0;
				tempBlock.logicalEnd = 0;
				printk("\n          block %ld absStart=%016lx absEnd=%016lx",
						block_num, tempBlock.absStart,
						tempBlock.absEnd);

				for (i = 0; i < numSegmentBlocks; ++i) {
					if (mb_array[i].absStart ==
							tempBlock.absStart)
						break;
				}
				if (i == numSegmentBlocks) {
					if (numSegmentBlocks == max_entries)
						panic("iSeries_process_mainstore_vpd: too many memory blocks");
					mb_array[numSegmentBlocks] = tempBlock;
					++numSegmentBlocks;
				} else
					printk(" (duplicate)");
			}
			printk("\n");
		}
		existsBits <<= 1;
	}
	/* Now sort the blocks found into ascending sequence */
	if (numSegmentBlocks > 1) {
		unsigned long m, n;

		for (m = 0; m < numSegmentBlocks - 1; ++m) {
			for (n = numSegmentBlocks - 1; m < n; --n) {
				if (mb_array[n].absStart <
						mb_array[n-1].absStart) {
					struct MemoryBlock tempBlock;

					tempBlock = mb_array[n];
					mb_array[n] = mb_array[n-1];
					mb_array[n-1] = tempBlock;
				}
			}
		}
	}
	/*
	 * Assign "logical" addresses to each block.  These
	 * addresses correspond to the hypervisor "bitmap" space.
	 * Convert all addresses into units of 256K chunks.
	 */
	{
		unsigned long i, nextBitmapAddress;

		printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
		nextBitmapAddress = 0;
		for (i = 0; i < numSegmentBlocks; ++i) {
			unsigned long length = mb_array[i].absEnd -
				mb_array[i].absStart;

			mb_array[i].logicalStart = nextBitmapAddress;
			mb_array[i].logicalEnd = nextBitmapAddress + length;
			nextBitmapAddress += length;
			printk("          Bitmap range: %016lx - %016lx\n"
					"        Absolute range: %016lx - %016lx\n",
					mb_array[i].logicalStart,
					mb_array[i].logicalEnd,
					mb_array[i].absStart, mb_array[i].absEnd);
			mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
					0x000fffffffffffff);
			mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
					0x000fffffffffffff);
			mb_array[i].logicalStart =
				addr_to_chunk(mb_array[i].logicalStart);
			mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
		}
	}

	return numSegmentBlocks;
}
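
/*
 * Note on the sorting pass above: it is a simple O(n^2) exchange sort,
 * which is fine here since at most 32 blocks (the caller's array size)
 * are ever processed.  After sorting, logical addresses are handed out
 * back-to-back from zero, so the logical (bitmap) space is always
 * contiguous regardless of where the absolute ranges sit.
 */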
static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
		unsigned long max_entries)
{
	unsigned long i;
	unsigned long mem_blocks = 0;

	if (cpu_has_feature(CPU_FTR_SLB))
		mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
				max_entries);
	else
		mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
				max_entries);

	printk("Mainstore_VPD: numMemoryBlocks = %ld\n", mem_blocks);
	for (i = 0; i < mem_blocks; ++i) {
		printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
				"                             abs chunks %016lx - %016lx\n",
				i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
				mb_array[i].absStart, mb_array[i].absEnd);
	}
	return mem_blocks;
}
static void __init iSeries_get_cmdline(void)
{
	char *p, *q;

	/* copy the command line parameter from the primary VSP */
	HvCallEvent_dmaToSp(cmd_line, 2 * 64 * 1024, 256,
			HvLpDma_Direction_RemoteToLocal);

	p = cmd_line;
	q = cmd_line + 255;
	while (p < q) {
		if (!*p || *p == '\n')
			break;
		++p;
	}
	*p = 0;
}
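
/*
 * Note: the DMA above appears to fetch up to 256 bytes of command line
 * from a fixed offset (2 * 64K) in the primary partition's service
 * processor space; the scan loop then NUL-terminates the buffer at the
 * first newline or existing NUL.
 */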
static void __init iSeries_init_early(void)
{
	extern unsigned long memory_limit;

	DBG(" -> iSeries_init_early()\n");

	ppc64_firmware_features = FW_FEATURE_ISERIES;

	ppcdbg_initialize();

#if defined(CONFIG_BLK_DEV_INITRD)
	/*
	 * If the init RAM disk has been configured and there is
	 * a non-zero starting address for it, set it up
	 */
	if (naca.xRamDisk) {
		initrd_start = (unsigned long)__va(naca.xRamDisk);
		initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE;
		initrd_below_start_ok = 1;	/* ramdisk in kernel space */
		ROOT_DEV = Root_RAM0;
		if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize)
			rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024;
	} else
#endif /* CONFIG_BLK_DEV_INITRD */
	{
		/* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
	}

	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();

	/*
	 * Cache sizes must be initialized before hpte_init_iSeries is called
	 * as the latter needs them for flush_icache_range()
	 */
	setup_iSeries_cache_sizes();

	/*
	 * Initialize the hash table management pointers
	 */
	hpte_init_iSeries();

	/*
	 * Initialize the DMA/TCE management
	 */
	iommu_init_early_iSeries();

	/*
	 * Initialize the table which translates Linux physical addresses to
	 * AS/400 absolute addresses
	 */
	build_iSeries_Memory_Map();

	iSeries_get_cmdline();

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

	/* Parse early parameters, in particular mem=x */
	parse_early_param();

	if (memory_limit) {
		if (memory_limit < systemcfg->physicalMemorySize)
			systemcfg->physicalMemorySize = memory_limit;
		else {
			printk("Ignoring mem=%lu >= ram_top.\n", memory_limit);
			memory_limit = 0;
		}
	}

	/* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
	iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);

	lmb_init();
	lmb_add(0, systemcfg->physicalMemorySize);
	lmb_analyze();
	lmb_reserve(0, __pa(klimit));

	/* Initialize machine-dependency vectors */
#ifdef CONFIG_SMP
	smp_init_iSeries();
#endif
	if (itLpNaca.xPirEnvironMode == 0)
		piranha_simulator = 1;

	/* Associate Lp Event Queue 0 with processor 0 */
	HvCallEvent_setLpEventQueueInterruptProc(0, 0);

	mf_init();
	mf_initialized = 1;
	mb();

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;
#endif /* CONFIG_BLK_DEV_INITRD */

	DBG(" <- iSeries_init_early()\n");
}
struct msChunks msChunks = {
	/* XXX We don't use these, but Piranha might need them. */
	.chunk_size  = MSCHUNKS_CHUNK_SIZE,
	.chunk_shift = MSCHUNKS_CHUNK_SHIFT,
	.chunk_mask  = MSCHUNKS_OFFSET_MASK,
};
EXPORT_SYMBOL(msChunks);

void msChunks_alloc(unsigned long num_chunks)
{
	klimit = _ALIGN(klimit, sizeof(u32));
	msChunks.abs = (u32 *)klimit;
	klimit += num_chunks * sizeof(u32);
	msChunks.num_chunks = num_chunks;
}
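
/*
 * A minimal sketch (not compiled) of how the table allocated above is
 * consumed.  This is what the phys_to_abs() helper in <asm/abs_addr.h>
 * is expected to boil down to once build_iSeries_Memory_Map() below has
 * filled in msChunks.abs[]; the helper name and exact form here are
 * illustrative only.
 */
#if 0
static unsigned long example_phys_to_abs(unsigned long pa)
{
	/* Which 256K chunk does this "physical" address live in? */
	unsigned long chunk = addr_to_chunk(pa);

	/* Look up the absolute chunk and re-attach the offset. */
	return chunk_to_addr((unsigned long)msChunks.abs[chunk]) +
		(pa & MSCHUNKS_OFFSET_MASK);
}
#endif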
/*
 * The iSeries may have very large memories ( > 128 GB ) and a partition
 * may get memory in "chunks" that may be anywhere in the 2**52 real
 * address space.  The chunks are 256K in size.  To map this to the
 * memory model Linux expects, the AS/400 specific code builds a
 * translation table to translate what Linux thinks are "physical"
 * addresses to the actual real addresses.  This allows us to make
 * it appear to Linux that we have contiguous memory starting at
 * physical address zero while in fact this could be far from the truth.
 * To avoid confusion, I'll let the words physical and/or real address
 * apply to the Linux addresses while I'll use "absolute address" to
 * refer to the actual hardware real address.
 *
 * build_iSeries_Memory_Map gets information from the Hypervisor and
 * looks at the Main Store VPD to determine the absolute addresses
 * of the memory that has been assigned to our partition and builds
 * a table used to translate Linux's physical addresses to these
 * absolute addresses.  Absolute addresses are needed when
 * communicating with the hypervisor (e.g. to build HPT entries).
 */
static void __init build_iSeries_Memory_Map(void)
{
	u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
	u32 nextPhysChunk;
	u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
	u32 num_ptegs;
	u32 totalChunks, moreChunks;
	u32 currChunk, thisChunk, absChunk;
	u32 currDword;
	u32 chunkBit;
	u64 map;
	struct MemoryBlock mb[32];
	unsigned long numMemoryBlocks, curBlock;

	/* Chunk size on iSeries is 256K bytes */
	totalChunks = (u32)HvLpConfig_getMsChunks();
	msChunks_alloc(totalChunks);

	/*
	 * Get absolute address of our load area
	 * and map it to physical address 0
	 * This guarantees that the loadarea ends up at physical 0
	 * otherwise, it might not be returned by PLIC as the first
	 * chunks
	 */
	loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
	loadAreaSize = itLpNaca.xLoadAreaChunks;

	/*
	 * Only add the pages already mapped here.
	 * Otherwise we might add the hpt pages
	 * The rest of the pages of the load area
	 * aren't in the HPT yet and can still
	 * be assigned an arbitrary physical address
	 */
	if ((loadAreaSize * 64) > HvPagesToMap)
		loadAreaSize = HvPagesToMap / 64;

	loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;

	/*
	 * TODO Do we need to do something if the HPT is in the 64MB load area?
	 * This would be required if the itLpNaca.xLoadAreaChunks includes
	 * the HPT size
	 */

	printk("Mapping load area - physical addr = 0000000000000000\n"
		"                    absolute addr = %016lx\n",
		chunk_to_addr(loadAreaFirstChunk));
	printk("Load area size %dK\n", loadAreaSize * 256);

	for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
		msChunks.abs[nextPhysChunk] =
			loadAreaFirstChunk + nextPhysChunk;

	/*
	 * Get absolute address of our HPT and remember it so
	 * we won't map it to any physical address
	 */
	hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
	hptSizePages = (u32)HvCallHpt_getHptPages();
	hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
	hptLastChunk = hptFirstChunk + hptSizeChunks - 1;

	printk("HPT absolute addr = %016lx, size = %dK\n",
			chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);

	/* Fill in the hashed page table hash mask */
	num_ptegs = hptSizePages *
		(PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
	htab_hash_mask = num_ptegs - 1;

	/*
	 * The actual hashed page table is in the hypervisor,
	 * we have no direct access
	 */
	htab_address = NULL;

	/*
	 * Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
	numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);

	/*
	 * Process the main store access map from the hypervisor
	 * to build up our physical -> absolute translation table
	 */
	curBlock = 0;
	currChunk = 0;
	currDword = 0;
	moreChunks = totalChunks;

	while (moreChunks) {
		map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
				currDword);
		thisChunk = currChunk;
		while (map) {
			chunkBit = map >> 63;
			map <<= 1;
			if (chunkBit) {
				--moreChunks;
				while (thisChunk >= mb[curBlock].logicalEnd) {
					++curBlock;
					if (curBlock >= numMemoryBlocks)
						panic("out of memory blocks");
				}
				if (thisChunk < mb[curBlock].logicalStart)
					panic("memory block error");

				absChunk = mb[curBlock].absStart +
					(thisChunk - mb[curBlock].logicalStart);
				if (((absChunk < hptFirstChunk) ||
				     (absChunk > hptLastChunk)) &&
				    ((absChunk < loadAreaFirstChunk) ||
				     (absChunk > loadAreaLastChunk))) {
					msChunks.abs[nextPhysChunk] = absChunk;
					++nextPhysChunk;
				}
			}
			++thisChunk;
		}
		++currDword;
		currChunk += 64;
	}

	/*
	 * main store size (in chunks) is
	 *   totalChunks - hptSizeChunks
	 * which should be equal to
	 *   nextPhysChunk
	 */
	systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
}
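
/*
 * Access-map arithmetic, for reference: each call to
 * HvCallSm_get64BitsOfAccessMap() returns one dword whose 64 bits each
 * describe one 256K chunk, so every dword covers 16MB of the logical
 * (bitmap) space -- hence the currChunk += 64 stride in the loop above.
 */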
/*
 * Set up the variables that describe the cache line sizes
 * for this machine.
 */
static void __init setup_iSeries_cache_sizes(void)
{
	unsigned int i, n;
	unsigned int procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;

	systemcfg->icache_size =
	ppc64_caches.isize = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
	systemcfg->icache_line_size =
	ppc64_caches.iline_size =
		xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
	systemcfg->dcache_size =
	ppc64_caches.dsize =
		xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
	systemcfg->dcache_line_size =
	ppc64_caches.dline_size =
		xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
	ppc64_caches.ilines_per_page = PAGE_SIZE / ppc64_caches.iline_size;
	ppc64_caches.dlines_per_page = PAGE_SIZE / ppc64_caches.dline_size;

	i = ppc64_caches.iline_size;
	n = 0;
	while ((i = (i / 2)))
		++n;
	ppc64_caches.log_iline_size = n;

	i = ppc64_caches.dline_size;
	n = 0;
	while ((i = (i / 2)))
		++n;
	ppc64_caches.log_dline_size = n;

	printk("D-cache line size = %d\n",
			(unsigned int)ppc64_caches.dline_size);
	printk("I-cache line size = %d\n",
			(unsigned int)ppc64_caches.iline_size);
}
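
/*
 * The halving loops above compute floor(log2(line size)); e.g. a
 * 128-byte cache line gives log_dline_size = 7.  Line sizes are
 * expected to be powers of two, so the result is exact.
 */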
/*
 * Create a pte. Used during initialization only.
 */
static void iSeries_make_pte(unsigned long va, unsigned long pa,
			     int mode)
{
	hpte_t local_hpte, rhpte;
	unsigned long hash, vpn;
	long slot;

	vpn = va >> PAGE_SHIFT;
	hash = hpt_hash(vpn, 0);

	local_hpte.r = pa | mode;
	local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
		| HPTE_V_BOLTED | HPTE_V_VALID;

	slot = HvCallHpt_findValid(&rhpte, vpn);
	if (slot < 0) {
		/* Must find space in primary group */
		panic("hash_page: hpte already exists\n");
	}
	HvCallHpt_addValidate(slot, 0, &local_hpte);
}
/*
 * Bolt the kernel addr space into the HPT
 */
static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
{
	unsigned long pa;
	unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
	hpte_t hpte;

	for (pa = saddr; pa < eaddr; pa += PAGE_SIZE) {
		unsigned long ea = (unsigned long)__va(pa);
		unsigned long vsid = get_kernel_vsid(ea);
		unsigned long va = (vsid << 28) | (pa & 0xfffffff);
		unsigned long vpn = va >> PAGE_SHIFT;
		unsigned long slot = HvCallHpt_findValid(&hpte, vpn);

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(ea))
			mode_rw |= HW_NO_EXEC;

		if (hpte.v & HPTE_V_VALID) {
			/* HPTE exists, so just bolt it */
			HvCallHpt_setSwBits(slot, 0x10, 0);
			/* And make sure the pp bits are correct */
			HvCallHpt_setPp(slot, PP_RWXX);
		} else
			/* No HPTE exists, so create a new bolted one */
			iSeries_make_pte(va, phys_to_abs(pa), mode_rw);
	}
}
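
/*
 * A note on the VA computation above: the low 28 bits select the byte
 * within a 256MB segment and the VSID supplies the rest.  Using
 * (pa & 0xfffffff) rather than (ea & 0xfffffff) works because
 * KERNELBASE is 256MB aligned, so ea = KERNELBASE + pa has the same
 * low 28 bits as pa.
 */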
/*
 * Record the systemcfg identification, set up the LP event queue, and
 * compute the processor and time base frequencies from the VPD.
 */
static void __init iSeries_setup_arch(void)
{
	unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;

	/* Add an eye catcher and the systemcfg layout version number */
	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
	systemcfg->version.major = SYSTEMCFG_MAJOR;
	systemcfg->version.minor = SYSTEMCFG_MINOR;

	/* Setup the Lp Event Queue */
	setup_hvlpevent_queue();

	/* Compute processor frequency */
	procFreqHz = ((1UL << 34) * 1000000) /
			xIoHriProcessorVpd[procIx].xProcFreq;
	procFreqMhz = procFreqHz / 1000000;
	procFreqMhzHundredths = (procFreqHz / 10000) - (procFreqMhz * 100);
	ppc_proc_freq = procFreqHz;

	/* Compute time base frequency */
	tbFreqHz = ((1UL << 32) * 1000000) /
			xIoHriProcessorVpd[procIx].xTimeBaseFreq;
	tbFreqMhz = tbFreqHz / 1000000;
	tbFreqMhzHundredths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
	ppc_tb_freq = tbFreqHz;
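
	/*
	 * The formulas above imply (an inference from the arithmetic, not
	 * from VPD documentation) that xProcFreq holds the processor cycle
	 * time in microseconds as a fixed-point value scaled by 2^34, and
	 * xTimeBaseFreq the time base period similarly scaled by 2^32;
	 * e.g. a 1 GHz processor would have xProcFreq around 1.7e7.
	 */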
719 printk("Max logical processors = %d\n",
720 itVpdAreas.xSlicMaxLogicalProcs);
721 printk("Max physical processors = %d\n",
722 itVpdAreas.xSlicMaxPhysicalProcs);
723 printk("Processor frequency = %lu.%02lu\n", procFreqMhz,
724 procFreqMhzHundreths);
725 printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
726 tbFreqMhzHundreths);
727 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
728 printk("Processor version = %x\n", systemcfg->processor);
static void iSeries_get_cpuinfo(struct seq_file *m)
{
	seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
}

/*
 * External interrupts on iSeries arrive as LP events rather than
 * through a conventional interrupt controller, so there is nothing
 * to fetch here.
 */
static int iSeries_get_irq(struct pt_regs *regs)
{
	/* -2 means ignore this interrupt */
	return -2;
}

/*
 * Restart the partition through the management facility.
 */
static void iSeries_restart(char *cmd)
{
	mf_reboot();
}

/*
 * Power the partition off through the management facility.
 */
static void iSeries_power_off(void)
{
	mf_power_off();
}

/*
 * There is no distinct halt state on iSeries; halting powers the
 * partition off through the management facility.
 */
static void iSeries_halt(void)
{
	mf_power_off();
}
/*
 * void __init iSeries_calibrate_decr()
 *
 * Description:
 *   This routine retrieves the internal processor frequency from the VPD,
 *   and sets up the kernel timer decrementer based on that value.
 */
static void __init iSeries_calibrate_decr(void)
{
	unsigned long cyclesPerUsec;
	struct div_result divres;

	/* Compute decrementer (and TB) frequency in cycles per microsecond */
	cyclesPerUsec = ppc_tb_freq / 1000000;

	/*
	 * Set the amount to refresh the decrementer by.  This
	 * is the number of decrementer ticks it takes for
	 * 1/HZ seconds.
	 */
	tb_ticks_per_jiffy = ppc_tb_freq / HZ;

#if 0
	/* TEST CODE FOR ADJTIME */
	tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
	/* END OF TEST CODE */
#endif

	/*
	 * tb_ticks_per_sec = freq; would give better accuracy
	 * but tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; assures
	 * that jiffies (and xtime) will match the time returned
	 * by do_gettimeofday.
	 */
	tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
	tb_ticks_per_usec = cyclesPerUsec;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
	tb_to_xs = divres.result_low;
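
	/*
	 * For reference: the div128_by_32() call computes
	 * (2^20 << 64) / tb_ticks_per_sec, so tb_to_xs is the scale
	 * factor by which a 64x64 -> high-64 multiply converts time base
	 * ticks into "xsec" units of 2^-20 seconds (a reading inferred
	 * from the ppc64 time code of this era).
	 */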
	setup_default_decr();
}
static void __init iSeries_progress(char *st, unsigned short code)
{
	printk("Progress: [%04x] - %s\n", (unsigned)code, st);
	if (!piranha_simulator && mf_initialized) {
		if (code != 0xffff)
			mf_display_progress(code);
		else
			mf_clear_src();
	}
}
static void __init iSeries_fixup_klimit(void)
{
	/*
	 * Change klimit to take into account any ram disk
	 * that may be included
	 */
	if (naca.xRamDisk)
		klimit = KERNELBASE + (u64)naca.xRamDisk +
			(naca.xRamDiskSize * PAGE_SIZE);
	else {
		/*
		 * No ram disk was included - check and see if there
		 * was an embedded system map.  Change klimit to take
		 * into account any embedded system map
		 */
		if (embedded_sysmap_end)
			klimit = KERNELBASE + ((embedded_sysmap_end + 4095) &
					0xfffffffffffff000);
	}
}
static int __init iSeries_src_init(void)
{
	/* clear the progress line */
	ppc_md.progress(" ", 0xffff);
	return 0;
}

late_initcall(iSeries_src_init);
static inline void process_iSeries_events(void)
{
	asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
}
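
/*
 * 0x5555 is not a regular Linux syscall number: it appears to be a
 * magic value that the iSeries low-level system call path intercepts
 * so that pending LP events are processed in a proper exception
 * context (an inference; see the ppc64 kernel entry code).
 */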
static void yield_shared_processor(void)
{
	unsigned long tb;

	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
				    HvCall_MaskLpEvent |
				    HvCall_MaskLpProd |
				    HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb + tb_ticks_per_jiffy);

	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_paca()->lppaca.int_dword.fields.decr_int = 1;
	process_iSeries_events();
}
static int iseries_shared_idle(void)
{
	while (1) {
		while (!need_resched() && !hvlpevent_is_pending()) {
			local_irq_disable();
			ppc64_runlatch_off();

			/* Recheck with irqs off */
			if (!need_resched() && !hvlpevent_is_pending())
				yield_shared_processor();

			HMT_medium();
			local_irq_enable();
		}

		ppc64_runlatch_on();

		if (hvlpevent_is_pending())
			process_iSeries_events();

		schedule();
	}

	return 0;
}
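
/*
 * On shared processors the idle loop above hands the physical CPU back
 * to the hypervisor for up to one jiffy instead of spinning, since any
 * cycles burned here would be stolen from other partitions sharing the
 * processor.
 */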
static int iseries_dedicated_idle(void)
{
	long oldval;

	while (1) {
		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			while (!need_resched()) {
				ppc64_runlatch_off();
				HMT_low();

				if (hvlpevent_is_pending()) {
					HMT_medium();
					ppc64_runlatch_on();
					process_iSeries_events();
				}
			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		ppc64_runlatch_on();
		schedule();
	}

	return 0;
}
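
/*
 * On dedicated processors we own the CPU, so the idle loop above spins
 * at low SMT priority (HMT_low) instead of yielding.  TIF_POLLING_NRFLAG
 * tells the scheduler that this idle loop polls need_resched(), so no
 * reschedule IPI is required to wake it.
 */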
#ifndef CONFIG_PCI
void __init iSeries_init_IRQ(void) { }
#endif

void __init iSeries_early_setup(void)
{
	iSeries_fixup_klimit();

	ppc_md.setup_arch = iSeries_setup_arch;
	ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
	ppc_md.init_IRQ = iSeries_init_IRQ;
	ppc_md.get_irq = iSeries_get_irq;
	ppc_md.init_early = iSeries_init_early;

	ppc_md.pcibios_fixup = iSeries_pci_final_fixup;

	ppc_md.restart = iSeries_restart;
	ppc_md.power_off = iSeries_power_off;
	ppc_md.halt = iSeries_halt;

	ppc_md.get_boot_time = iSeries_get_boot_time;
	ppc_md.set_rtc_time = iSeries_set_rtc_time;
	ppc_md.get_rtc_time = iSeries_get_rtc_time;
	ppc_md.calibrate_decr = iSeries_calibrate_decr;
	ppc_md.progress = iSeries_progress;

	if (get_paca()->lppaca.shared_proc) {
		ppc_md.idle_loop = iseries_shared_idle;
		printk(KERN_INFO "Using shared processor idle loop\n");
	} else {
		ppc_md.idle_loop = iseries_dedicated_idle;
		printk(KERN_INFO "Using dedicated idle loop\n");
	}
}