2 * AGPGART module version 0.99
3 * Copyright (C) 1999 Jeff Hartmann
4 * Copyright (C) 1999 Precision Insight, Inc.
5 * Copyright (C) 1999 Xi Graphics, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 * - Allocate more than order 0 pages to avoid too much linear map splitting.
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/init.h>
32 #include <linux/agp_backend.h>
/* One page of GATT entries: the raw page and its uncached remapping.
 * 'real' is the page as returned by __get_free_page(); 'remapped' is the
 * same physical page mapped uncached via ioremap_nocache() so GART writes
 * are not held in the CPU cache. */
struct serverworks_page_map
{
	unsigned long *real;
	unsigned long *remapped;
};
40 static struct _serverworks_private
{
41 struct pci_dev
*svrwrks_dev
; /* device one */
42 volatile u8
*registers
;
43 struct serverworks_page_map
**gatt_pages
;
45 struct serverworks_page_map scratch_dir
;
49 } serverworks_private
;
51 static int serverworks_create_page_map(struct serverworks_page_map
*page_map
)
55 page_map
->real
= (unsigned long *) __get_free_page(GFP_KERNEL
);
56 if (page_map
->real
== NULL
) {
59 SetPageReserved(virt_to_page(page_map
->real
));
61 page_map
->remapped
= ioremap_nocache(virt_to_phys(page_map
->real
),
63 if (page_map
->remapped
== NULL
) {
64 ClearPageReserved(virt_to_page(page_map
->real
));
65 free_page((unsigned long) page_map
->real
);
66 page_map
->real
= NULL
;
71 for(i
= 0; i
< PAGE_SIZE
/ sizeof(unsigned long); i
++) {
72 page_map
->remapped
[i
] = agp_bridge
.scratch_page
;
78 static void serverworks_free_page_map(struct serverworks_page_map
*page_map
)
80 iounmap(page_map
->remapped
);
81 ClearPageReserved(virt_to_page(page_map
->real
));
82 free_page((unsigned long) page_map
->real
);
85 static void serverworks_free_gatt_pages(void)
88 struct serverworks_page_map
**tables
;
89 struct serverworks_page_map
*entry
;
91 tables
= serverworks_private
.gatt_pages
;
92 for(i
= 0; i
< serverworks_private
.num_tables
; i
++) {
95 if (entry
->real
!= NULL
) {
96 serverworks_free_page_map(entry
);
104 static int serverworks_create_gatt_pages(int nr_tables
)
106 struct serverworks_page_map
**tables
;
107 struct serverworks_page_map
*entry
;
111 tables
= kmalloc((nr_tables
+ 1) * sizeof(struct serverworks_page_map
*),
113 if (tables
== NULL
) {
116 memset(tables
, 0, sizeof(struct serverworks_page_map
*) * (nr_tables
+ 1));
117 for (i
= 0; i
< nr_tables
; i
++) {
118 entry
= kmalloc(sizeof(struct serverworks_page_map
), GFP_KERNEL
);
123 memset(entry
, 0, sizeof(struct serverworks_page_map
));
125 retval
= serverworks_create_page_map(entry
);
126 if (retval
!= 0) break;
128 serverworks_private
.num_tables
= nr_tables
;
129 serverworks_private
.gatt_pages
= tables
;
131 if (retval
!= 0) serverworks_free_gatt_pages();
/* Map an aperture bus address to the remapped GATT page holding its entry.
 * Top bits (>> 22) pick the directory slot, bits 21..12 the entry within
 * the 1024-entry page (see GET_GATT_OFF). */
#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif
152 static int serverworks_create_gatt_table(void)
154 struct aper_size_info_lvl2
*value
;
155 struct serverworks_page_map page_dir
;
160 value
= A_SIZE_LVL2(agp_bridge
.current_size
);
161 retval
= serverworks_create_page_map(&page_dir
);
165 retval
= serverworks_create_page_map(&serverworks_private
.scratch_dir
);
167 serverworks_free_page_map(&page_dir
);
170 /* Create a fake scratch directory */
171 for(i
= 0; i
< 1024; i
++) {
172 serverworks_private
.scratch_dir
.remapped
[i
] = (unsigned long) agp_bridge
.scratch_page
;
173 page_dir
.remapped
[i
] =
174 virt_to_phys(serverworks_private
.scratch_dir
.real
);
175 page_dir
.remapped
[i
] |= 0x00000001;
178 retval
= serverworks_create_gatt_pages(value
->num_entries
/ 1024);
180 serverworks_free_page_map(&page_dir
);
181 serverworks_free_page_map(&serverworks_private
.scratch_dir
);
185 agp_bridge
.gatt_table_real
= page_dir
.real
;
186 agp_bridge
.gatt_table
= page_dir
.remapped
;
187 agp_bridge
.gatt_bus_addr
= virt_to_phys(page_dir
.real
);
189 /* Get the address for the gart region.
190 * This is a bus address even on the alpha, b/c its
191 * used to program the agp master not the cpu
194 pci_read_config_dword(agp_bridge
.dev
,
195 serverworks_private
.gart_addr_ofs
,
197 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
199 /* Calculate the agp offset */
201 for(i
= 0; i
< value
->num_entries
/ 1024; i
++) {
202 page_dir
.remapped
[i
] =
203 virt_to_phys(serverworks_private
.gatt_pages
[i
]->real
);
204 page_dir
.remapped
[i
] |= 0x00000001;
210 static int serverworks_free_gatt_table(void)
212 struct serverworks_page_map page_dir
;
214 page_dir
.real
= agp_bridge
.gatt_table_real
;
215 page_dir
.remapped
= agp_bridge
.gatt_table
;
217 serverworks_free_gatt_pages();
218 serverworks_free_page_map(&page_dir
);
219 serverworks_free_page_map(&serverworks_private
.scratch_dir
);
223 static int serverworks_fetch_size(void)
228 struct aper_size_info_lvl2
*values
;
230 values
= A_SIZE_LVL2(agp_bridge
.aperture_sizes
);
231 pci_read_config_dword(agp_bridge
.dev
,
232 serverworks_private
.gart_addr_ofs
,
234 pci_write_config_dword(agp_bridge
.dev
,
235 serverworks_private
.gart_addr_ofs
,
237 pci_read_config_dword(agp_bridge
.dev
,
238 serverworks_private
.gart_addr_ofs
,
240 pci_write_config_dword(agp_bridge
.dev
,
241 serverworks_private
.gart_addr_ofs
,
243 temp2
&= SVWRKS_SIZE_MASK
;
245 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
246 if (temp2
== values
[i
].size_value
) {
247 agp_bridge
.previous_size
=
248 agp_bridge
.current_size
= (void *) (values
+ i
);
250 agp_bridge
.aperture_size_idx
= i
;
251 return values
[i
].size
;
258 static int serverworks_configure(void)
260 struct aper_size_info_lvl2
*current_size
;
267 current_size
= A_SIZE_LVL2(agp_bridge
.current_size
);
269 /* Get the memory mapped registers */
270 pci_read_config_dword(agp_bridge
.dev
,
271 serverworks_private
.mm_addr_ofs
,
273 temp
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
274 serverworks_private
.registers
= (volatile u8
*) ioremap(temp
, 4096);
276 OUTREG8(serverworks_private
.registers
, SVWRKS_GART_CACHE
, 0x0a);
278 OUTREG32(serverworks_private
.registers
, SVWRKS_GATTBASE
,
279 agp_bridge
.gatt_bus_addr
);
281 cap_reg
= INREG16(serverworks_private
.registers
, SVWRKS_COMMAND
);
284 OUTREG16(serverworks_private
.registers
, SVWRKS_COMMAND
, cap_reg
);
286 pci_read_config_byte(serverworks_private
.svrwrks_dev
,
287 SVWRKS_AGP_ENABLE
, &enable_reg
);
288 enable_reg
|= 0x1; /* Agp Enable bit */
289 pci_write_config_byte(serverworks_private
.svrwrks_dev
,
290 SVWRKS_AGP_ENABLE
, enable_reg
);
291 agp_bridge
.tlb_flush(NULL
);
293 pci_read_config_byte(serverworks_private
.svrwrks_dev
, 0x34, &cap_ptr
);
294 if (cap_ptr
!= 0x00) {
296 pci_read_config_dword(serverworks_private
.svrwrks_dev
,
299 if ((cap_id
& 0xff) != 0x02)
300 cap_ptr
= (cap_id
>> 8) & 0xff;
302 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
304 agp_bridge
.capndx
= cap_ptr
;
306 /* Fill in the mode register */
307 pci_read_config_dword(serverworks_private
.svrwrks_dev
,
308 agp_bridge
.capndx
+ 4,
311 pci_read_config_byte(agp_bridge
.dev
,
315 pci_write_config_byte(agp_bridge
.dev
,
319 pci_read_config_byte(agp_bridge
.dev
,
322 enable_reg
|= (1<<6);
323 pci_write_config_byte(agp_bridge
.dev
,
330 static void serverworks_cleanup(void)
332 iounmap((void *) serverworks_private
.registers
);
336 * This routine could be implemented by taking the addresses
337 * written to the GATT, and flushing them individually. However
338 * currently it just flushes the whole table. Which is probably
339 * more efficent, since agp_memory blocks can be a large number of
343 static void serverworks_tlbflush(agp_memory
* temp
)
347 OUTREG8(serverworks_private
.registers
, SVWRKS_POSTFLUSH
, 0x01);
348 end
= jiffies
+ 3*HZ
;
349 while(INREG8(serverworks_private
.registers
,
350 SVWRKS_POSTFLUSH
) == 0x01) {
351 if((signed)(end
- jiffies
) <= 0) {
352 printk(KERN_ERR
"Posted write buffer flush took more"
356 OUTREG32(serverworks_private
.registers
, SVWRKS_DIRFLUSH
, 0x00000001);
357 end
= jiffies
+ 3*HZ
;
358 while(INREG32(serverworks_private
.registers
,
359 SVWRKS_DIRFLUSH
) == 0x00000001) {
360 if((signed)(end
- jiffies
) <= 0) {
361 printk(KERN_ERR
"TLB flush took more"
367 static unsigned long serverworks_mask_memory(unsigned long addr
, int type
)
369 /* Only type 0 is supported by the serverworks chipsets */
371 return addr
| agp_bridge
.masks
[0].mask
;
374 static int serverworks_insert_memory(agp_memory
* mem
,
375 off_t pg_start
, int type
)
377 int i
, j
, num_entries
;
378 unsigned long *cur_gatt
;
381 num_entries
= A_SIZE_LVL2(agp_bridge
.current_size
)->num_entries
;
383 if (type
!= 0 || mem
->type
!= 0) {
386 if ((pg_start
+ mem
->page_count
) > num_entries
) {
391 while (j
< (pg_start
+ mem
->page_count
)) {
392 addr
= (j
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
393 cur_gatt
= SVRWRKS_GET_GATT(addr
);
394 if (!PGE_EMPTY(cur_gatt
[GET_GATT_OFF(addr
)])) {
400 if (mem
->is_flushed
== FALSE
) {
402 mem
->is_flushed
= TRUE
;
405 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
406 addr
= (j
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
407 cur_gatt
= SVRWRKS_GET_GATT(addr
);
408 cur_gatt
[GET_GATT_OFF(addr
)] = mem
->memory
[i
];
410 agp_bridge
.tlb_flush(mem
);
414 static int serverworks_remove_memory(agp_memory
* mem
, off_t pg_start
,
418 unsigned long *cur_gatt
;
421 if (type
!= 0 || mem
->type
!= 0) {
426 agp_bridge
.tlb_flush(mem
);
428 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
429 addr
= (i
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
430 cur_gatt
= SVRWRKS_GET_GATT(addr
);
431 cur_gatt
[GET_GATT_OFF(addr
)] =
432 (unsigned long) agp_bridge
.scratch_page
;
435 agp_bridge
.tlb_flush(mem
);
439 static struct gatt_mask serverworks_masks
[] =
441 {.mask
= 0x00000001, .type
= 0}
444 static struct aper_size_info_lvl2 serverworks_sizes
[7] =
446 {2048, 524288, 0x80000000},
447 {1024, 262144, 0xc0000000},
448 {512, 131072, 0xe0000000},
449 {256, 65536, 0xf0000000},
450 {128, 32768, 0xf8000000},
451 {64, 16384, 0xfc000000},
452 {32, 8192, 0xfe000000}
455 static void serverworks_agp_enable(u32 mode
)
457 struct pci_dev
*device
= NULL
;
458 u32 command
, scratch
, cap_id
;
461 pci_read_config_dword(serverworks_private
.svrwrks_dev
,
462 agp_bridge
.capndx
+ 4,
466 * PASS1: go throu all devices that claim to be
467 * AGP devices and collect their data.
471 pci_for_each_dev(device
) {
472 cap_ptr
= pci_find_capability(device
, PCI_CAP_ID_AGP
);
473 if (cap_ptr
!= 0x00) {
475 pci_read_config_dword(device
,
478 if ((cap_id
& 0xff) != 0x02)
479 cap_ptr
= (cap_id
>> 8) & 0xff;
481 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
483 if (cap_ptr
!= 0x00) {
485 * Ok, here we have a AGP device. Disable impossible
486 * settings, and adjust the readqueue to the minimum.
489 pci_read_config_dword(device
, cap_ptr
+ 4, &scratch
);
491 /* adjust RQ depth */
493 ((command
& ~0xff000000) |
494 min_t(u32
, (mode
& 0xff000000),
495 min_t(u32
, (command
& 0xff000000),
496 (scratch
& 0xff000000))));
498 /* disable SBA if it's not supported */
499 if (!((command
& 0x00000200) &&
500 (scratch
& 0x00000200) &&
501 (mode
& 0x00000200)))
502 command
&= ~0x00000200;
505 command
&= ~0x00000010;
507 command
&= ~0x00000008;
509 if (!((command
& 4) &&
512 command
&= ~0x00000004;
514 if (!((command
& 2) &&
517 command
&= ~0x00000002;
519 if (!((command
& 1) &&
522 command
&= ~0x00000001;
526 * PASS2: Figure out the 4X/2X/1X setting and enable the
527 * target (our motherboard chipset).
531 command
&= ~3; /* 4X */
534 command
&= ~5; /* 2X */
537 command
&= ~6; /* 1X */
539 command
|= 0x00000100;
541 pci_write_config_dword(serverworks_private
.svrwrks_dev
,
542 agp_bridge
.capndx
+ 8,
546 * PASS3: Go throu all AGP devices and update the
550 pci_for_each_dev(device
) {
551 cap_ptr
= pci_find_capability(device
, PCI_CAP_ID_AGP
);
553 pci_write_config_dword(device
, cap_ptr
+ 8, command
);
557 int __init
serverworks_setup (struct pci_dev
*pdev
)
562 serverworks_private
.svrwrks_dev
= pdev
;
564 agp_bridge
.masks
= serverworks_masks
;
565 agp_bridge
.num_of_masks
= 1;
566 agp_bridge
.aperture_sizes
= (void *) serverworks_sizes
;
567 agp_bridge
.size_type
= LVL2_APER_SIZE
;
568 agp_bridge
.num_aperture_sizes
= 7;
569 agp_bridge
.dev_private_data
= (void *) &serverworks_private
;
570 agp_bridge
.needs_scratch_page
= TRUE
;
571 agp_bridge
.configure
= serverworks_configure
;
572 agp_bridge
.fetch_size
= serverworks_fetch_size
;
573 agp_bridge
.cleanup
= serverworks_cleanup
;
574 agp_bridge
.tlb_flush
= serverworks_tlbflush
;
575 agp_bridge
.mask_memory
= serverworks_mask_memory
;
576 agp_bridge
.agp_enable
= serverworks_agp_enable
;
577 agp_bridge
.cache_flush
= global_cache_flush
;
578 agp_bridge
.create_gatt_table
= serverworks_create_gatt_table
;
579 agp_bridge
.free_gatt_table
= serverworks_free_gatt_table
;
580 agp_bridge
.insert_memory
= serverworks_insert_memory
;
581 agp_bridge
.remove_memory
= serverworks_remove_memory
;
582 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
583 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
584 agp_bridge
.agp_alloc_page
= agp_generic_alloc_page
;
585 agp_bridge
.agp_destroy_page
= agp_generic_destroy_page
;
586 agp_bridge
.suspend
= agp_generic_suspend
;
587 agp_bridge
.resume
= agp_generic_resume
;
588 agp_bridge
.cant_use_aperture
= 0;
590 pci_read_config_dword(agp_bridge
.dev
,
594 serverworks_private
.gart_addr_ofs
= 0x10;
596 if(temp
& PCI_BASE_ADDRESS_MEM_TYPE_64
) {
597 pci_read_config_dword(agp_bridge
.dev
,
601 printk("Detected 64 bit aperture address, but top "
602 "bits are not zero. Disabling agp\n");
605 serverworks_private
.mm_addr_ofs
= 0x18;
607 serverworks_private
.mm_addr_ofs
= 0x14;
610 pci_read_config_dword(agp_bridge
.dev
,
611 serverworks_private
.mm_addr_ofs
,
613 if(temp
& PCI_BASE_ADDRESS_MEM_TYPE_64
) {
614 pci_read_config_dword(agp_bridge
.dev
,
615 serverworks_private
.mm_addr_ofs
+ 4,
618 printk("Detected 64 bit MMIO address, but top "
619 "bits are not zero. Disabling agp\n");