#ifndef _ASM_IA64_IO_H
#define _ASM_IA64_IO_H
/*
 * This file contains the definitions for the emulated IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated to
 * (a) handle it all in a way that makes gcc able to optimize it as
 * well as possible and (b) avoid writing the same thing over and over
 * again with slight variations and possibly making a mistake somewhere.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)
/*
 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
 * large machines may have multiple other I/O spaces so we can't place any a priori limit
 * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
 */
#define IO_SPACE_LIMIT		0xffffffffffffffffUL

#define MAX_IO_SPACES_BITS	8
#define MAX_IO_SPACES		(1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS		24
#define IO_SPACE_SIZE		(1UL << IO_SPACE_BITS)

#define IO_SPACE_NR(port)	((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))

#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
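
/*
 * For example, plugging legacy port 0x3f8 into the sparse encoding above:
 *
 *	(0x3f8 >> 2) << 12              = 0x0fe000
 *	 0x3f8 & 0xfff                  = 0x0003f8
 *	IO_SPACE_SPARSE_ENCODING(0x3f8) = 0x0fe3f8
 *
 * i.e. the port number shifted right by two selects a 4KB page in MMIO
 * space, while the low 12 bits of the port are kept as the offset within
 * that page.
 */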
struct io_space {
	unsigned long mmio_base;	/* base in MMIO space */
	int sparse;
};

extern struct io_space io_space[];
extern unsigned int num_io_spaces;
# ifdef __KERNEL__
/*
 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
 *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
 *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
 *
 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
 * code that uses bare port numbers without the prerequisite pci_iomap().
 */
#define PIO_OFFSET	(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK	(PIO_OFFSET - 1)
#define PIO_RESERVED	__IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE
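
/*
 * For example, combining the definitions above, the PIO cookie for port
 * 0x1f0 in I/O space 1 would be:
 *
 *	PIO_OFFSET | IO_SPACE_BASE(1) | 0x1f0
 *	  = (1UL << 32) | (1UL << 24) | 0x1f0
 *	  = 0x00000001010001f0
 *
 * The leading 1 (PIO_OFFSET, bit 32) is what lets ioread/writeX() reject
 * bare port numbers that never went through pci_iomap(); the space number
 * occupies bits 24-31 and the port the low 24 bits.
 */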
#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm-generic/iomap.h>
/*
 * Change virtual addresses to physical addresses and vice versa.
 */
static inline unsigned long
virt_to_phys (volatile void *address)
{
	return (unsigned long) address - PAGE_OFFSET;
}

static inline void*
phys_to_virt (unsigned long address)
{
	return (void *) (address + PAGE_OFFSET);
}
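
/*
 * Note that these conversions only make sense for addresses in the
 * kernel's direct-mapped region.  A minimal sketch of the invariant:
 *
 *	void *p = kmalloc(64, GFP_KERNEL);	// direct-mapped memory
 *	unsigned long pa = virt_to_phys(p);
 *	BUG_ON(phys_to_virt(pa) != p);		// round-trip is the identity
 */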
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
/*
 * The following macros are deprecated and scheduled for removal.
 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
 */
#define bus_to_virt	phys_to_virt
#define virt_to_bus	virt_to_phys
#define page_to_bus	page_to_phys
# endif /* __KERNEL__ */
/*
 * Memory fence w/accept.  This should never be used in code that is
 * not IA-64 specific.
 */
#define __ia64_mf_a()	ia64_mfa()
/**
 * ___ia64_mmiowb - I/O write barrier
 *
 * Ensure ordering of I/O space writes.  This will make sure that writes
 * following the barrier will arrive after all previous writes.  For most
 * ia64 platforms, this is a simple 'mf.a' instruction.
 *
 * See Documentation/DocBook/deviceiobook.tmpl for more information.
 */
static inline void ___ia64_mmiowb(void)
{
	ia64_mfa();
}
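
/*
 * Typical use, per the DocBook referenced above (a sketch; "dev->lock",
 * "dev->regs", and CTRL are hypothetical driver state): call mmiowb()
 * before releasing a spinlock that protects MMIO writes, so that writes
 * issued by the next lock holder cannot reach the device ahead of ours:
 *
 *	spin_lock(&dev->lock);
 *	writel(val, dev->regs + CTRL);
 *	mmiowb();			// order the write before the unlock
 *	spin_unlock(&dev->lock);
 */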
static inline void*
__ia64_mk_io_addr (unsigned long port)
{
	struct io_space *space;
	unsigned long offset;

	space = &io_space[IO_SPACE_NR(port)];
	port = IO_SPACE_PORT(port);
	if (space->sparse)
		offset = IO_SPACE_SPARSE_ENCODING(port);
	else
		offset = port;

	return (void *) (space->mmio_base | offset);
}
#define __ia64_inb	___ia64_inb
#define __ia64_inw	___ia64_inw
#define __ia64_inl	___ia64_inl
#define __ia64_outb	___ia64_outb
#define __ia64_outw	___ia64_outw
#define __ia64_outl	___ia64_outl
#define __ia64_readb	___ia64_readb
#define __ia64_readw	___ia64_readw
#define __ia64_readl	___ia64_readl
#define __ia64_readq	___ia64_readq
#define __ia64_readb_relaxed	___ia64_readb
#define __ia64_readw_relaxed	___ia64_readw
#define __ia64_readl_relaxed	___ia64_readl
#define __ia64_readq_relaxed	___ia64_readq
#define __ia64_writeb	___ia64_writeb
#define __ia64_writew	___ia64_writew
#define __ia64_writel	___ia64_writel
#define __ia64_writeq	___ia64_writeq
#define __ia64_mmiowb	___ia64_mmiowb
/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncacheable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */
static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}
static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		*dp++ = platform_inw(port);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		*dp++ = platform_inl(port);
}

static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		platform_outw(*sp++, port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		platform_outl(*sp++, port);
}
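
/*
 * Usage sketch for the string ops (the port is the classic ATA data port,
 * used here purely as an illustration): insw() pulls "count" 16-bit words
 * from a single port into a buffer, e.g. one 512-byte sector:
 *
 *	u16 buf[256];
 *	insw(0x1f0, buf, 256);		// 256 words = 512 bytes from the FIFO
 */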
/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform dependent...
 */
#define __inb		platform_inb
#define __inw		platform_inw
#define __inl		platform_inl
#define __outb		platform_outb
#define __outw		platform_outw
#define __outl		platform_outl
#define __mmiowb	platform_mmiowb

#define inb(p)		__inb(p)
#define inw(p)		__inw(p)
#define inl(p)		__inl(p)
#define insb(p,d,c)	__insb(p,d,c)
#define insw(p,d,c)	__insw(p,d,c)
#define insl(p,d,c)	__insl(p,d,c)
#define outb(v,p)	__outb(v,p)
#define outw(v,p)	__outw(v,p)
#define outl(v,p)	__outl(v,p)
#define outsb(p,s,c)	__outsb(p,s,c)
#define outsw(p,s,c)	__outsw(p,s,c)
#define outsl(p,s,c)	__outsl(p,s,c)
#define mmiowb()	__mmiowb()
/*
 * The addresses passed to these functions are ioremap()ped already.
 *
 * We need these to be machine vectors since some platforms don't provide
 * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
 * a good idea).  Writes are ok though for all existing ia64 platforms (and
 * hopefully it'll stay that way).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *) addr;
}

static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *) addr;
}

static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}

static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}

static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = val;
}

static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = val;
}

static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = val;
}

static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *) addr = val;
}
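
/*
 * The machine-vector indirection for reads matters to drivers that rely on
 * a PIO read to flush in-flight DMA, per the comment before ___ia64_readb().
 * A sketch of that pattern ("dev", IRQ_STATUS, and the buffer are
 * hypothetical):
 *
 *	status = readl(dev->regs + IRQ_STATUS);	// read flushes pending DMA
 *	process(dev->dma_buf);			// buffer contents now visible
 */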
#define __readb		platform_readb
#define __readw		platform_readw
#define __readl		platform_readl
#define __readq		platform_readq
#define __readb_relaxed	platform_readb_relaxed
#define __readw_relaxed	platform_readw_relaxed
#define __readl_relaxed	platform_readl_relaxed
#define __readq_relaxed	platform_readq_relaxed

#define readb(a)	__readb((a))
#define readw(a)	__readw((a))
#define readl(a)	__readl((a))
#define readq(a)	__readq((a))
#define readb_relaxed(a)	__readb_relaxed((a))
#define readw_relaxed(a)	__readw_relaxed((a))
#define readl_relaxed(a)	__readl_relaxed((a))
#define readq_relaxed(a)	__readq_relaxed((a))
#define __raw_readb	readb
#define __raw_readw	readw
#define __raw_readl	readl
#define __raw_readq	readq
#define __raw_readb_relaxed	readb_relaxed
#define __raw_readw_relaxed	readw_relaxed
#define __raw_readl_relaxed	readl_relaxed
#define __raw_readq_relaxed	readq_relaxed
#define writeb(v,a)	__writeb((v), (a))
#define writew(v,a)	__writew((v), (a))
#define writel(v,a)	__writel((v), (a))
#define writeq(v,a)	__writeq((v), (a))
#define __raw_writeb	writeb
#define __raw_writew	writew
#define __raw_writel	writel
#define __raw_writeq	writeq
#ifndef inb_p
# define inb_p		inb
#endif
#ifndef inw_p
# define inw_p		inw
#endif
#ifndef inl_p
# define inl_p		inl
#endif

#ifndef outb_p
# define outb_p		outb
#endif
#ifndef outw_p
# define outw_p		outw
#endif
#ifndef outl_p
# define outl_p		outl
#endif
# ifdef __KERNEL__

extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);

/* Use normal IO mappings for DMI */
#define dmi_ioremap ioremap
#define dmi_iounmap(x,l) iounmap(x)
#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
/*
 * String versions of IO memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
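
/*
 * Usage sketch (bar_phys, buf, and the offsets are hypothetical): map a
 * device's MMIO range, touch single registers with readl()/writel(), and
 * use the string versions above for bulk transfers:
 *
 *	void __iomem *regs = ioremap(bar_phys, 0x1000);
 *	if (regs) {
 *		u32 id = readl(regs);			// one register
 *		memcpy_fromio(buf, regs + 0x100, 64);	// bulk copy out
 *		iounmap(regs);
 *	}
 */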
# endif /* __KERNEL__ */
/*
 * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
 * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
 * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
 * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
 * over BIO-level virtual merging.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#if 1
#define BIO_VMERGE_BOUNDARY	0
#else
/*
 * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
 * replaced by dma_merge_mask() or something of that sort.  Note: the only way
 * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
 * expanded into:
 *
 *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
 *
 * which is precisely what we want.
 */
#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
#endif
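
/*
 * A worked instance of the identity in the #else branch above: if
 * ia64_max_iommu_merge_mask were 0xfff (a hypothetical 4KB merge mask),
 * BIO_VMERGE_BOUNDARY would expand to 0x1000, and
 *
 *	addr & (0x1000 - 1)  ==  addr & 0xfff
 *
 * so masking with (BIO_VMERGE_BOUNDARY - 1) is masking with the merge
 * mask itself.
 */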
#endif /* _ASM_IA64_IO_H */