/* release/src-rt-6.x/linux/linux-2.6/arch/x86_64/kernel/pci-gart.c */

/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */

/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. When true, the GART is
   flushed for every mapping. Problem is that doing the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   it has also been seen with Qlogic at least). */
int iommu_fullflush = 1;
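
/*
 * Either behaviour can be selected at boot time through
 * gart_parse_options() below, e.g. "iommu=fullflush" or
 * "iommu=nofullflush".
 */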

/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
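
/*
 * Worked example (illustrative numbers): for phys = 0x123456000,
 * GPTE_ENCODE keeps bits 12-31 (0x23456000), folds bits 32-39 into PTE
 * bits 4-11 ((0x1 << 4) = 0x10) and sets the flag bits:
 *   0x23456000 | 0x10 | GPTE_VALID | GPTE_COHERENT = 0x23456013.
 * GPTE_DECODE reverses it:
 *   (0x23456013 & 0xfffff000) | (((u64)0x23456013 & 0xff0) << 28)
 *   = 0x23456000 | 0x100000000 = 0x123456000.
 */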

#define to_pages(addr,size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
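
/*
 * Example: a 0x1800-byte buffer starting at page offset 0xe00 needs
 * round_up(0xe00 + 0x1800, 0x1000) >> 12 = 0x3000 >> 12 = 3 GART pages,
 * since the data spans three distinct 4KB pages.
 */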

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit, iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0, iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
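
/*
 * Typical lifecycle, as used by the mapping functions below: alloc_iommu()
 * reserves a run of GART pages, the caller fills the matching GATT entries
 * and calls flush_gart() before handing the bus address to the device;
 * gart_unmap_single() later rewrites the entries and frees the run.
 */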

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
	iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
	iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk("%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
		printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}

#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif
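
/*
 * Leak tracing is armed at boot time via gart_parse_options() below,
 * e.g. "iommu=leak" (or "iommu=leak=32" to dump more pages); it needs
 * CONFIG_IOMMU_LEAK to be set.
 */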

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;
	return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}
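
/*
 * Example: a device with a 32-bit dma_mask (0xffffffff) mapping a buffer
 * at physical address 0x120000000 has addr + size > mask, so it needs
 * remapping; with force_iommu set, need_iommu() remaps even low buffers.
 */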

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
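
/*
 * Illustration (made-up numbers): with iommu_bus_base = 0xe0000000,
 * iommu_page = 5 and a buffer starting 0x300 into its first page, the
 * device is handed bus address 0xe0000000 + 5*PAGE_SIZE + 0x300 =
 * 0xe0005300, while the GATT entries redirect those aperture pages to
 * the real backing memory.
 */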

static dma_addr_t gart_map_simple(struct device *dev, char *buf,
				  size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();
	return map;
}

/* Map a single area into the IOMMU */
dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);
	return bus;
}
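
#if 0
/*
 * Driver-side usage sketch (hypothetical names, never compiled): once
 * dma_ops points at gart_dma_ops, the generic DMA API below resolves to
 * gart_map_single()/gart_unmap_single().
 */
	dma_addr_t bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bus))
		return -EIO;
	/* ... point the device at 'bus' and start the transfer ... */
	dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
#endif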

/*
 * Free a DMA mapping.
 */
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
		       size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long addr = page_to_phys(s->page) + s->offset;
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();
	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	int i;

	if (iommu_start == -1)
		return -1;

	for (i = start; i < stopat; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(i > start && s->offset);
		if (i == start) {
			*sout = *s;
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}

static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(stopat - start != 1);
		*sout = sg[start];
		sout->dma_length = sg[start].length;
		return 0;
	}
	return __dma_map_cont(sg, start, stopat, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		dma_addr_t addr = page_to_phys(s->page) + s->offset;
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			struct scatterlist *ps = &sg[i-1];
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(sg, start, i, sg+out, pages,
						 need) < 0)
					goto error;
				out++;
				pages = 0;
				start = i;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
	}
	if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents)
		sg[out].dma_length = 0;
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, nents, dir);
	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for (i = 0; i < nents; i++)
		sg[i].dma_address = bad_dma_address;
	return 0;
}
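
/*
 * Merging example: two 4096-byte chunks where the first ends exactly on
 * a page boundary and the second has no offset can share one GART run,
 * so the device sees a single 8192-byte contiguous region even though
 * the backing pages are scattered (this needs iommu_merge set and both
 * chunks needing the IOMMU).
 */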

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size>>20);

	return iommu_size;
}
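
/*
 * Example: a 256MB aperture with AGP still active leaves 128MB for the
 * IOMMU, minus a small adjustment via the LARGE_PAGE_SIZE rounding
 * above; anything below 64MB triggers the warning.
 */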

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
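
/*
 * Decode example: an order field of 3 means a (32MB << 3) = 256MB
 * aperture; a base field of 0x40 gives 0x40 << 25 = 0x80000000 (2GB).
 * Apertures ending above 4GB are rejected, since the point of the GART
 * is to stay addressable by 32-bit devices.
 */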

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;
		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size>>20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
		panic("Could not set GART PTEs to uncacheable pages");
	global_flush_tlb();

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;
		u32 gatt_reg;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;	/* 4KB-aligned table address... */
		gatt_reg <<= 4;			/* ...goes into bits 4-31 of reg 0x98 */
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;			/* GARTEN: enable GART translation */
		ctl &= ~((1<<4) | (1<<5));	/* clear DisGartCpu and DisGartIo */

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk("PCI-DMA: aperture base @ %x size %u KB\n", aper_base, aper_size>>10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};
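
/*
 * The sync hooks can stay NULL here: GART PTEs are installed with
 * GPTE_COHERENT, so no extra cache maintenance is needed around DMA.
 */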

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
					"but GART IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size>>20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't cause a bus
	 * abort then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}
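
/*
 * Resulting aperture layout (sketch):
 *
 *   info.aper_base                              aper_base + aper_size
 *   |----------- AGP part -----------|--------- IOMMU part ---------|
 *                                    ^ iommu_bus_base (= bad_dma_address)
 *                                    [EMERGENCY_PAGES][mappable GART pages]
 */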

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
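
/*
 * Usage sketch: these strings arrive via the iommu= kernel parameter,
 * e.g. "iommu=memaper=2" forces a fallback aperture of order 2, i.e.
 * (32MB << 2) = 128MB (same order encoding as read_aperture() above);
 * see Documentation/x86_64/boot-options.txt for the documented set.
 */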