/*
 *      linux/arch/alpha/kernel/core_titan.c
 *
 * Code common to all TITAN core logic chips.
 */

#define __EXTERN_INLINE inline
#include <asm/core_titan.h>
#undef __EXTERN_INLINE

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/* Save Titan configuration data as the console had it set up.  */

struct
{
        unsigned long wsba[4];
        unsigned long wsm[4];
        unsigned long tba[4];
} saved_config[4] __attribute__((common));

/*
 * Is PChip 1 present? No need to query it more than once.
 */
static int titan_pchip1_present;

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif
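
/*
 * Usage note: DBG_CFG takes a doubly-parenthesized argument list so that
 * a complete printk() argument list can pass through the single macro
 * parameter, e.g. DBG_CFG(("where=0x%x\n", where)).
 */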

/*
 * Routines to access TIG registers.
 */
static inline volatile unsigned long *
mk_tig_addr(int offset)
{
        return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
}
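
/*
 * Note: TIG registers are spaced 64 bytes apart (offset << 6), so offset 1
 * corresponds to TITAN_TIG_SPACE + 0x40, offset 2 to TITAN_TIG_SPACE + 0x80,
 * and so on.
 */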

static inline u8
titan_read_tig(int offset, u8 value)
{
        volatile unsigned long *tig_addr = mk_tig_addr(offset);
        return (u8)(*tig_addr & 0xff);
}

static inline void
titan_write_tig(int offset, u8 value)
{
        volatile unsigned long *tig_addr = mk_tig_addr(offset);
        *tig_addr = (unsigned long)value;
}
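
/*
 * Only the low byte of each 64-bit TIG location carries data:
 * titan_read_tig() masks the value down to 8 bits and titan_write_tig()
 * zero-extends the byte before the store.
 */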

/*
 * Given a bus, device, and function number, compute the resulting
 * configuration space address.  It is not safe to have concurrent
 * invocations of the configuration space access routines, but there
 * really shouldn't be any need for that.
 *
 * Note that all config space accesses use Type 1 address format.
 *
 * Note also that type 1 is determined by non-zero bus number.
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *      23:16   bus number (8 bits = 256 possible buses)
 *      15:11   device number (5 bits)
 *      10:8    function number
 *       7:2    register number
 *
 * Notes:
 *      The function number selects which function of a multi-function
 *      device (e.g., SCSI and Ethernet).
 *
 *      The register selects a DWORD (32 bit) register offset.  Hence it
 *      doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *      bits.
 */
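
/*
 * Worked example (illustrative only): bus 1, device 2, function 0,
 * register offset 0x10 gives
 *
 *      (1 << 16) | (PCI_DEVFN(2, 0) << 8) | 0x10 = 0x11010,
 *
 * which mk_conf_addr() below then ORs with the hose's config_space_base.
 */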

static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
             unsigned long *pci_addr, unsigned char *type1)
{
        struct pci_controller *hose = pbus->sysdata;
        unsigned long addr;
        u8 bus = pbus->number;

        DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
                 "pci_addr=0x%p, type1=0x%p)\n",
                 bus, device_fn, where, pci_addr, type1));

        if (!pbus->parent) /* No parent means peer PCI bus. */
                bus = 0;
        *type1 = (bus != 0);

        addr = (bus << 16) | (device_fn << 8) | where;
        addr |= hose->config_space_base;

        *pci_addr = addr;
        DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
        return 0;
}

static int
titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
                  int size, u32 *value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;
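
        /*
         * vucp, vusp and vuip are the usual Alpha shorthands for volatile
         * unsigned char/short/int pointers; "size" selects a byte, word or
         * dword access accordingly.
         */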
        switch (size) {
        case 1:
                *value = __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                *value = __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *value = *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

static int
titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
                   int size, u32 value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;
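
        /*
         * The dummy read-back after each store (together with mb()) is
         * there to force the posted configuration write out to the device
         * before we return.
         */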
        switch (size) {
        case 1:
                __kernel_stb(value, *(vucp)addr);
                mb();
                __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                __kernel_stw(value, *(vusp)addr);
                mb();
                __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *(vuip)addr = value;
                mb();
                *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

struct pci_ops titan_pci_ops =
{
        .read = titan_read_config,
        .write = titan_write_config,
};

void
titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
        titan_pachip *pachip =
                (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
        titan_pachip_port *port;
        volatile unsigned long *csr;
        unsigned long value;

        /* Get the right port of the right pachip.  */
        port = &pachip->g_port;
        if (hose->index & 2)
                port = &pachip->a_port;

        /* We can invalidate up to 8 tlb entries in a go.  The flush
           matches against <31:16> in the pci address.
           Note that gtlbi* and atlbi* are in the same place in the g_port
           and a_port, respectively, so the g_port offset can be used
           even if the port is an a_port. */
        csr = &port->port_specific.g.gtlbia.csr;
        if (((start ^ end) & 0xffff0000) == 0)
                csr = &port->port_specific.g.gtlbiv.csr;
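
        /*
         * ((start ^ end) & 0xffff0000) == 0 means start and end fall in
         * the same naturally aligned 64KB block, which is exactly what a
         * single TBIV match (on <31:16>) covers; anything larger takes
         * the TBIA path.
         */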

        /* For TBIA, it doesn't matter what value we write.  For TBI,
           it's the shifted tag bits.  */
        value = (start & 0xffff0000) >> 12;

        wmb();
        *csr = value;
        mb();
        *csr;
}

static int
titan_query_agp(titan_pachip_port *port)
{
        union TPAchipPCTL pctl;

        pctl.pctl_q_whole = port->pctl.csr;

        return pctl.pctl_r_bits.apctl_v_agp_present;
}

static void __init
titan_init_one_pachip_port(titan_pachip_port *port, int index)
{
        struct pci_controller *hose;

        hose = alloc_pci_controller();
        hose->io_space = alloc_resource();
        hose->mem_space = alloc_resource();

        /*
         * This is for userland consumption.  The 40-bit PIO bias that we
         * use in the kernel through KSEG doesn't work in the page table
         * based user mappings.  (43-bit KSEG sign extends the physical
         * address from bit 40 to hit the I/O bit - mapped addresses don't).
         * So make sure we get the 43-bit PIO bias.
         */
        hose->sparse_mem_base = 0;
        hose->sparse_io_base = 0;
        hose->dense_mem_base
          = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
        hose->dense_io_base
          = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;

        hose->config_space_base = TITAN_CONF(index);
        hose->index = index;

        hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
        hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
        hose->io_space->name = pci_io_names[index];
        hose->io_space->flags = IORESOURCE_IO;

        hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
        hose->mem_space->end = hose->mem_space->start + 0xffffffff;
        hose->mem_space->name = pci_mem_names[index];
        hose->mem_space->flags = IORESOURCE_MEM;

        if (request_resource(&ioport_resource, hose->io_space) < 0)
                printk(KERN_ERR "Failed to request IO on hose %d\n", index);
        if (request_resource(&iomem_resource, hose->mem_space) < 0)
                printk(KERN_ERR "Failed to request MEM on hose %d\n", index);

        /*
         * Save the existing PCI window translations.  SRM will
         * need them when we go to reboot.
         */
        saved_config[index].wsba[0] = port->wsba[0].csr;
        saved_config[index].wsm[0] = port->wsm[0].csr;
        saved_config[index].tba[0] = port->tba[0].csr;

        saved_config[index].wsba[1] = port->wsba[1].csr;
        saved_config[index].wsm[1] = port->wsm[1].csr;
        saved_config[index].tba[1] = port->tba[1].csr;

        saved_config[index].wsba[2] = port->wsba[2].csr;
        saved_config[index].wsm[2] = port->wsm[2].csr;
        saved_config[index].tba[2] = port->tba[2].csr;

        saved_config[index].wsba[3] = port->wsba[3].csr;
        saved_config[index].wsm[3] = port->wsm[3].csr;
        saved_config[index].tba[3] = port->tba[3].csr;

        /*
         * Set up the PCI to main memory translation windows.
         *
         * Note: Window 3 on Titan is Scatter-Gather ONLY.
         *
         * Window 0 is scatter-gather 8MB at 8MB (for isa)
         * Window 1 is direct access 1GB at 2GB
         * Window 2 is scatter-gather 1GB at 3GB
         */
        hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
        hose->sg_isa->align_entry = 8;  /* 64KB for ISA */

        hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
        hose->sg_pci->align_entry = 4;  /* Titan caches 4 PTEs at a time */
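
        /*
         * In the WSBA values below, bit 0 enables the window and bit 1
         * selects scatter-gather translation, so "| 3" programs an enabled
         * SG window while "| 1" programs the enabled direct-mapped window.
         */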
        port->wsba[0].csr = hose->sg_isa->dma_base | 3;
        port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
        port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);

        port->wsba[1].csr = __direct_map_base | 1;
        port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000;
        port->tba[1].csr = 0;

        port->wsba[2].csr = hose->sg_pci->dma_base | 3;
        port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000;
        port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes);

        port->wsba[3].csr = 0;

        /* Enable the Monster Window to make DAC pci64 possible.  */
        port->pctl.csr |= pctl_m_mwin;
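
        /*
         * (With the monster window enabled, DAC-capable 64-bit devices can
         * address all of system memory directly, without going through one
         * of the translation windows above.)
         */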

        /*
         * If it's an AGP port, initialize agplastwr.
         */
        if (titan_query_agp(port))
                port->port_specific.a.agplastwr.csr = __direct_map_base;
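
        /*
         * Flush the now-stale scatter-gather TLB before the new window
         * programming gets used; (0, -1) spans more than 64KB, so this
         * forces a full invalidate.
         */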
        titan_pci_tbi(hose, 0, -1);
}

static void __init
titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
        titan_pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

        /* Init the ports in hose order... */
        titan_init_one_pachip_port(&pachip0->g_port, 0);        /* hose 0 */
        if (titan_pchip1_present)
                titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
        titan_init_one_pachip_port(&pachip0->a_port, 2);        /* hose 2 */
        if (titan_pchip1_present)
                titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
}

void __init
titan_init_arch(void)
{
        printk("%s: titan_init_arch()\n", __FUNCTION__);
        printk("%s: CChip registers:\n", __FUNCTION__);
        printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr);
        printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr);
        printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr);
        printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr);
        printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr);
        printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr);
        printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr);
        printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr);

        printk("%s: DChip registers:\n", __FUNCTION__);
        printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr);
        printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr);
        printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr);

        boot_cpuid = __hard_smp_processor_id();

        /* With multiple PCI buses, we play with I/O as physical addrs.  */
        ioport_resource.end = ~0UL;
        iomem_resource.end = ~0UL;

        /* PCI DMA Direct Mapping is 1GB at 2GB.  */
        __direct_map_base = 0x80000000;
        __direct_map_size = 0x40000000;
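
        /*
         * This matches the direct-mapped window (window 1) programmed in
         * titan_init_one_pachip_port(): PCI bus addresses 2GB-3GB map
         * straight to physical memory starting at 0 (TBA = 0).
         */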

        /* Init the PA chip(s).  */
        titan_init_pachips(TITAN_pachip0, TITAN_pachip1);

        /* Check for graphic console location (if any).  */
        find_console_vga_hose();
}

static void
titan_kill_one_pachip_port(titan_pachip_port *port, int index)
{
        port->wsba[0].csr = saved_config[index].wsba[0];
        port->wsm[0].csr = saved_config[index].wsm[0];
        port->tba[0].csr = saved_config[index].tba[0];

        port->wsba[1].csr = saved_config[index].wsba[1];
        port->wsm[1].csr = saved_config[index].wsm[1];
        port->tba[1].csr = saved_config[index].tba[1];

        port->wsba[2].csr = saved_config[index].wsba[2];
        port->wsm[2].csr = saved_config[index].wsm[2];
        port->tba[2].csr = saved_config[index].tba[2];

        port->wsba[3].csr = saved_config[index].wsba[3];
        port->wsm[3].csr = saved_config[index].wsm[3];
        port->tba[3].csr = saved_config[index].tba[3];
}

static void
titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
        if (titan_pchip1_present) {
                titan_kill_one_pachip_port(&pachip1->g_port, 1);
                titan_kill_one_pachip_port(&pachip1->a_port, 3);
        }
        titan_kill_one_pachip_port(&pachip0->g_port, 0);
        titan_kill_one_pachip_port(&pachip0->a_port, 2);
}

void
titan_kill_arch(int mode)
{
        titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
}

void __iomem *
titan_ioportmap(unsigned long addr)
{
        FIXUP_IOADDR_VGA(addr);
        return (void __iomem *)(addr + TITAN_IO_BIAS);
}

void __iomem *
titan_ioremap(unsigned long addr, unsigned long size)
{
        int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
        unsigned long baddr = addr & ~TITAN_HOSE_MASK;
        unsigned long last = baddr + size - 1;
        struct pci_controller *hose;
        struct vm_struct *area;
        unsigned long vaddr;
        unsigned long *ptes;
        unsigned long pfn;

        /*
         * Adjust the address and hose, if necessary.
         */
        if (pci_vga_hose && __is_mem_vga(addr)) {
                h = pci_vga_hose->index;
                addr += pci_vga_hose->mem_space->start;
        }

        /*
         * Find the hose.
         */
        for (hose = hose_head; hose; hose = hose->next)
                if (hose->index == h)
                        break;
        if (!hose)
                return NULL;

        /*
         * Is it direct-mapped?
         */
        if ((baddr >= __direct_map_base) &&
            ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
                vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
                return (void __iomem *) vaddr;
        }

        /*
         * Check the scatter-gather arena.
         */
        if (hose->sg_pci &&
            baddr >= (unsigned long)hose->sg_pci->dma_base &&
            last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {

                /*
                 * Adjust the limits (mappings must be page aligned).
                 */
                baddr &= PAGE_MASK;
                size = PAGE_ALIGN(last) - baddr;

                /*
                 * Map it.
                 */
                area = get_vm_area(size, VM_IOREMAP);
                if (!area) {
                        printk("ioremap failed... no vm_area...\n");
                        return NULL;
                }

                ptes = hose->sg_pci->ptes;
                for (vaddr = (unsigned long)area->addr;
                     baddr <= last;
                     baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
                        pfn = ptes[baddr >> PAGE_SHIFT];
                        if (!(pfn & 1)) {
                                printk("ioremap failed... pte not valid...\n");
                                vfree(area->addr);
                                return NULL;
                        }
                        pfn >>= 1;      /* make it a true pfn */

                        if (__alpha_remap_area_pages(vaddr,
                                                     pfn << PAGE_SHIFT,
                                                     PAGE_SIZE, 0)) {
                                printk("FAILED to remap_area_pages...\n");
                                vfree(area->addr);
                                return NULL;
                        }
                }

                flush_tlb_all();

                vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
                return (void __iomem *) vaddr;
        }

        /* Assume a legacy (read: VGA) address, and return appropriately. */
        return (void __iomem *)(addr + TITAN_MEM_BIAS);
}

void
titan_iounmap(volatile void __iomem *xaddr)
{
        unsigned long addr = (unsigned long) xaddr;
        if (addr >= VMALLOC_START)
                vfree((void *)(PAGE_MASK & addr));
}

int
titan_is_mmio(const volatile void __iomem *xaddr)
{
        unsigned long addr = (unsigned long) xaddr;

        if (addr >= VMALLOC_START)
                return 1;
        else
                return (addr & 0x100000000UL) == 0;
}

#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(titan_ioportmap);
EXPORT_SYMBOL(titan_ioremap);
EXPORT_SYMBOL(titan_iounmap);
EXPORT_SYMBOL(titan_is_mmio);
#endif

#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

struct titan_agp_aperture {
        struct pci_iommu_arena *arena;
        long pg_start;
        long pg_count;
};
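
/*
 * Note that the AGP aperture is not a separate GART: it is a range of
 * pages reserved out of the hose's PCI scatter-gather arena (sg_pci),
 * so AGP and ordinary PCI DMA share translation window 2.
 */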

static int
titan_agp_setup(alpha_agp_info *agp)
{
        struct titan_agp_aperture *aper;

        if (!alpha_agpgart_size)
                return -ENOMEM;

        aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
        if (aper == NULL)
                return -ENOMEM;

        aper->arena = agp->hose->sg_pci;
        aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
        aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
                                       aper->pg_count - 1);
        if (aper->pg_start < 0) {
                printk(KERN_ERR "Failed to reserve AGP memory\n");
                kfree(aper);
                return -ENOMEM;
        }

        agp->aperture.bus_base =
                aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
        agp->aperture.size = aper->pg_count * PAGE_SIZE;
        agp->aperture.sysdata = aper;

        return 0;
}

static void
titan_agp_cleanup(alpha_agp_info *agp)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        int status;

        status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
        if (status == -EBUSY) {
                printk(KERN_WARNING
                       "Attempted to release bound AGP memory - unbinding\n");
                iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
                status = iommu_release(aper->arena, aper->pg_start,
                                       aper->pg_count);
        }
        if (status < 0)
                printk(KERN_ERR "Failed to release AGP memory\n");

        kfree(aper);
        kfree(agp);
}

static int
titan_agp_configure(alpha_agp_info *agp)
{
        union TPAchipPCTL pctl;
        titan_pachip_port *port = agp->private;
        pctl.pctl_q_whole = port->pctl.csr;

        /* Side-Band Addressing? */
        pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

        /* AGP Rate? */
        pctl.pctl_r_bits.apctl_v_agp_rate = 0;          /* 1x */
        if (agp->mode.bits.rate & 2)
                pctl.pctl_r_bits.apctl_v_agp_rate = 1;  /* 2x */
        if (agp->mode.bits.rate & 4)
                pctl.pctl_r_bits.apctl_v_agp_rate = 2;  /* 4x */

        /* RQ Depth? */
        pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
        pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

        /*
         * AGP Enable.
         */
        pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

        /* Tell the user.  */
        printk("Enabling AGP: %dX%s\n",
               1 << pctl.pctl_r_bits.apctl_v_agp_rate,
               pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

        /* Write it.  */
        port->pctl.csr = pctl.pctl_q_whole;

        /* And wait at least 5000 66MHz cycles (per Titan spec).  */
        udelay(100);

        return 0;
}

static int
titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        return iommu_bind(aper->arena, aper->pg_start + pg_start,
                          mem->page_count, mem->memory);
}

static int
titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        return iommu_unbind(aper->arena, aper->pg_start + pg_start,
                            mem->page_count);
}

static unsigned long
titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        unsigned long baddr = addr - aper->arena->dma_base;
        unsigned long pte;

        if (addr < agp->aperture.bus_base ||
            addr >= agp->aperture.bus_base + agp->aperture.size) {
                printk("%s: addr out of range\n", __FUNCTION__);
                return -EINVAL;
        }

        pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
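        /*
         * IOMMU PTEs store the page frame number shifted left by one, with
         * bit 0 as the valid bit (compare the "make it a true pfn" step in
         * titan_ioremap() above).
         */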
        if (!(pte & 1)) {
                printk("%s: pte not valid\n", __FUNCTION__);
                return -EINVAL;
        }

        return (pte >> 1) << PAGE_SHIFT;
}

struct alpha_agp_ops titan_agp_ops =
{
        .setup = titan_agp_setup,
        .cleanup = titan_agp_cleanup,
        .configure = titan_agp_configure,
        .bind = titan_agp_bind_memory,
        .unbind = titan_agp_unbind_memory,
        .translate = titan_agp_translate
};
*hose
;
731 titan_pachip_port
*port
;
733 union TPAchipPCTL pctl
;
738 port
= &TITAN_pachip0
->a_port
;
739 if (titan_query_agp(port
))
742 titan_pchip1_present
&&
743 titan_query_agp(port
= &TITAN_pachip1
->a_port
))
747 * Find the hose the port is on.
749 for (hose
= hose_head
; hose
; hose
= hose
->next
)
750 if (hose
->index
== hosenum
)
753 if (!hose
|| !hose
->sg_pci
)
757 * Allocate the info structure.
759 agp
= kmalloc(sizeof(*agp
), GFP_KERNEL
);
766 agp
->ops
= &titan_agp_ops
;
769 * Aperture - not configured until ops.setup().
771 * FIXME - should we go ahead and allocate it here?
773 agp
->aperture
.bus_base
= 0;
774 agp
->aperture
.size
= 0;
775 agp
->aperture
.sysdata
= NULL
;
780 agp
->capability
.lw
= 0;
781 agp
->capability
.bits
.rate
= 3; /* 2x, 1x */
782 agp
->capability
.bits
.sba
= 1;
783 agp
->capability
.bits
.rq
= 7; /* 8 - 1 */
788 pctl
.pctl_q_whole
= port
->pctl
.csr
;
790 agp
->mode
.bits
.rate
= 1 << pctl
.pctl_r_bits
.apctl_v_agp_rate
;
791 agp
->mode
.bits
.sba
= pctl
.pctl_r_bits
.apctl_v_agp_sba_en
;
792 agp
->mode
.bits
.rq
= 7; /* RQ Depth? */
793 agp
->mode
.bits
.enable
= pctl
.pctl_r_bits
.apctl_v_agp_en
;