2 * Copyright (c) 2008 Juan Romero Pardines
3 * Copyright (c) 2008 Mark Kettenis
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 #include <sys/param.h>
19 #include <sys/ioctl.h>
21 #include <sys/types.h>
23 #include <machine/sysarch.h>
24 #include <machine/mtrr.h>
26 #include <dev/pci/pciio.h>
27 #include <dev/pci/pcireg.h>
28 #include <dev/pci/pcidevs.h>
38 #include "pciaccess.h"
39 #include "pciaccess_private.h"
/*
 * pci_read: read one 32-bit PCI config-space register of bus/dev/func
 * into *val, by filling a struct pciio_bdf_cfgreg and issuing the
 * PCI_IOC_BDF_CFGREAD ioctl on the global pcifd (opened on /dev/pci0
 * in pci_system_netbsd_create).
 *
 * NOTE(review): this chunk is a partial extraction -- the return type,
 * braces, the io.bus/io.device/io.function/io.cfgreg.reg assignments,
 * the declaration of `err`, and the return statement are not visible
 * here. Do not treat this fragment as the complete function.
 */
44 pci_read(int bus
, int dev
, int func
, uint32_t reg
, uint32_t *val
)
46 struct pciio_bdf_cfgreg io
;
/* Zero the request so unset fields do not carry stack garbage. */
49 bzero(&io
, sizeof(io
));
/* Kernel performs the config-space read; result is presumably
 * copied back through io and into *val by lines not visible here. */
55 err
= ioctl(pcifd
, PCI_IOC_BDF_CFGREAD
, &io
);
/*
 * pci_write: write the 32-bit value `val` to one PCI config-space
 * register of bus/dev/func via the PCI_IOC_BDF_CFGWRITE ioctl on the
 * global pcifd. Returns the ioctl(2) result directly (0 on success,
 * -1 with errno set on failure).
 *
 * NOTE(review): partial extraction -- the return type, braces, and the
 * io.bus/io.device/io.function/io.cfgreg assignments are not visible.
 */
65 pci_write(int bus
, int dev
, int func
, uint32_t reg
, uint32_t val
)
67 struct pciio_bdf_cfgreg io
;
/* Zero the request before filling it in (fill lines not visible). */
69 bzero(&io
, sizeof(io
));
76 return ioctl(pcifd
, PCI_IOC_BDF_CFGWRITE
, &io
);
/*
 * pci_nfuncs: number of PCI functions to probe on bus/dev.
 * Reads the header-type register (PCI_BHLC_REG) of function 0; if the
 * multifunction bit is set (PCI_HDRTYPE_MULTIFN) the device may expose
 * up to 8 functions, otherwise only 1.
 *
 * NOTE(review): partial extraction -- the return type, the declaration
 * of `hdr`, braces, and the error-path return are not visible here.
 */
80 pci_nfuncs(int bus
, int dev
)
84 if (pci_read(bus
, dev
, 0, PCI_BHLC_REG
, &hdr
) != 0)
87 return (PCI_HDRTYPE_MULTIFN(hdr
) ? 8 : 1);
/*
 * pci_device_netbsd_map_range: map a PCI memory region into this
 * process by mmap(2)ing /dev/mem, then optionally install an MTRR so
 * the mapping is write-back cached or write-combining instead of the
 * default (uncached) mode.
 *
 * NOTE(review): partial extraction -- the return type, braces, the
 * declaration of `mtrr`, the error-path returns, the tail of the mmap
 * call (presumably `fd, map->base` -- confirm against the full file),
 * and the arch #ifdef lines that select between i386_set_mtrr and
 * x86_64_set_mtrr are not visible here.
 */
91 pci_device_netbsd_map_range(struct pci_device
*dev
,
92 struct pci_device_mapping
*map
)
95 int fd
, error
, nmtrr
, prot
= PROT_READ
;
/* /dev/mem gives access to physical memory; opened read-write so the
 * mapping itself can be made writable below. */
97 if ((fd
= open("/dev/mem", O_RDWR
)) == -1)
/* Caller asked for a writable mapping: presumably prot |= PROT_WRITE
 * on a line not visible here -- confirm. */
100 if (map
->flags
& PCI_DEV_MAP_FLAG_WRITABLE
)
103 map
->memory
= mmap(NULL
, map
->size
, prot
, MAP_SHARED
,
105 if (map
->memory
== MAP_FAILED
)
108 /* No need to set an MTRR if it's the default mode. */
109 if ((map
->flags
& PCI_DEV_MAP_FLAG_CACHABLE
) ||
110 (map
->flags
& PCI_DEV_MAP_FLAG_WRITE_COMBINE
)) {
/* Describe the physical range being remapped. */
111 mtrr
.base
= map
->base
;
112 mtrr
.len
= map
->size
;
113 mtrr
.flags
= MTRR_VALID
;
/* WRITE_COMBINE deliberately wins over CACHABLE when both flags are
 * set: the second assignment overwrites the first. */
115 if (map
->flags
& PCI_DEV_MAP_FLAG_CACHABLE
)
116 mtrr
.type
= MTRR_TYPE_WB
;
117 if (map
->flags
& PCI_DEV_MAP_FLAG_WRITE_COMBINE
)
118 mtrr
.type
= MTRR_TYPE_WC
;
/* Arch-specific MTRR syscall; the surrounding #ifdef __i386__ /
 * __x86_64__ lines are missing from this extraction. */
120 error
= i386_set_mtrr(&mtrr
, &nmtrr
);
123 error
= x86_64_set_mtrr(&mtrr
, &nmtrr
);
/*
 * pci_device_netbsd_unmap_range: undo pci_device_netbsd_map_range.
 * If the mapping had been given a non-default MTRR (cachable or
 * write-combining), reset that physical range back to uncached
 * (MTRR_TYPE_UC, flags 0 = invalid/clear), then delegate the actual
 * munmap work to pci_device_generic_unmap_range.
 *
 * NOTE(review): partial extraction -- the return type, braces, the
 * declarations of `mtrr`, `error`, `nmtrr`, and the arch #ifdef lines
 * around the two *_set_mtrr calls are not visible here.
 */
137 pci_device_netbsd_unmap_range(struct pci_device
*dev
,
138 struct pci_device_mapping
*map
)
143 if ((map
->flags
& PCI_DEV_MAP_FLAG_CACHABLE
) ||
144 (map
->flags
& PCI_DEV_MAP_FLAG_WRITE_COMBINE
)) {
145 mtrr
.base
= map
->base
;
146 mtrr
.len
= map
->size
;
147 mtrr
.type
= MTRR_TYPE_UC
;
148 mtrr
.flags
= 0; /* clear/set MTRR */
150 error
= i386_set_mtrr(&mtrr
, &nmtrr
);
153 error
= x86_64_set_mtrr(&mtrr
, &nmtrr
);
159 return pci_device_generic_unmap_range(dev
, map
);
/*
 * pci_device_netbsd_read: read `size` bytes of config space starting
 * at `offset` into `data`, advancing *bytes_read. Config space is only
 * addressable in aligned 32-bit words, so each step reads the aligned
 * word containing `offset`, shifts the wanted bytes down, and copies
 * at most `toread` = MIN(size, bytes left in the current word).
 *
 * NOTE(review): partial extraction -- the return type, braces, the
 * enclosing while/for loop construct, the io.bus assignment, the
 * error return, and the offset/size decrement lines are not visible.
 */
163 pci_device_netbsd_read(struct pci_device
*dev
, void *data
,
164 pciaddr_t offset
, pciaddr_t size
, pciaddr_t
*bytes_read
)
166 struct pciio_bdf_cfgreg io
;
/* Address the ioctl at this device's BDF (bus assignment not visible
 * in this extraction). */
169 io
.device
= dev
->dev
;
170 io
.function
= dev
->func
;
/* Bytes obtainable from the aligned 32-bit word covering `offset`. */
174 int toread
= MIN(size
, 4 - (offset
& 0x3));
/* Round the register address down to a 4-byte boundary. */
176 io
.cfgreg
.reg
= (offset
& ~0x3);
178 if (ioctl(pcifd
, PCI_IOC_BDF_CFGREAD
, &io
) == -1)
/* Normalize to little-endian byte order, then shift so the byte at
 * `offset` sits in the low-order position before the memcpy. */
181 io
.cfgreg
.val
= htole32(io
.cfgreg
.val
);
182 io
.cfgreg
.val
>>= ((offset
& 0x3) * 8);
184 memcpy(data
, &io
.cfgreg
.val
, toread
);
/* Advance the destination cursor and the caller's progress counter. */
187 data
= (char *)data
+ toread
;
189 *bytes_read
+= toread
;
/*
 * pci_device_netbsd_write: write `size` bytes from `data` to config
 * space starting at `offset`, one aligned 32-bit word per ioctl,
 * advancing *bytes_written.
 *
 * NOTE(review): partial extraction -- the return type, braces, the
 * enclosing loop, the io.bus assignment, and the error/early returns
 * are not visible here.
 *
 * NOTE(review): the visible guard `(offset % 4) == 0 || (size % 4) == 0`
 * looks inverted for an "reject unaligned access" check (one would
 * expect `!= 0`); confirm the condition and its (missing) body against
 * the full source before relying on it.
 */
196 pci_device_netbsd_write(struct pci_device
*dev
, const void *data
,
197 pciaddr_t offset
, pciaddr_t size
, pciaddr_t
*bytes_written
)
199 struct pciio_bdf_cfgreg io
;
201 if ((offset
% 4) == 0 || (size
% 4) == 0)
/* Address the ioctl at this device's BDF. */
205 io
.device
= dev
->dev
;
206 io
.function
= dev
->func
;
/* One full 32-bit word per iteration: register address is `offset`
 * (already word-aligned per the guard above), value copied raw. */
210 io
.cfgreg
.reg
= offset
;
211 memcpy(&io
.cfgreg
.val
, data
, 4);
213 if (ioctl(pcifd
, PCI_IOC_BDF_CFGWRITE
, &io
) == -1)
217 data
= (char *)data
+ 4;
/*
 * pci_system_netbsd_destroy: teardown hook installed in
 * netbsd_pci_methods. Only the signature survives this extraction;
 * the body (presumably closing pcifd and freeing pci_sys -- confirm)
 * is not visible here.
 */
226 pci_system_netbsd_destroy(void)
/*
 * pci_device_netbsd_probe: fill in the region table for one device.
 * Reads the header type (only plain type-0 headers are handled), then
 * walks the BARs from PCI_MAPREG_START to PCI_MAPREG_END using the
 * standard sizing dance: save the BAR, write all-ones, read back the
 * size mask, restore the BAR. I/O, 32-bit memory, and 64-bit memory
 * BARs are decoded; a 64-bit BAR consumes the following 32-bit slot
 * for its upper half.
 *
 * NOTE(review): partial extraction -- the return type, braces, how
 * bus/dev/func are derived from `device`, the error checks after each
 * pci_read/pci_write, the low-half initialization of reg64/size64, and
 * the switch `break`s are not visible here. Several `&reg` arguments
 * were mis-encoded as `®` by the extraction (e.g. in the pci_read
 * calls below); restore them when reconciling with the real file.
 */
234 pci_device_netbsd_probe(struct pci_device
*device
)
236 struct pci_device_private
*priv
= (struct pci_device_private
*)device
;
237 struct pci_mem_region
*region
;
238 uint64_t reg64
, size64
;
239 uint32_t bar
, reg
, size
;
240 int bus
, dev
, func
, err
;
/* Header type decides the BAR layout; only type 0 is supported. */
246 err
= pci_read(bus
, dev
, func
, PCI_BHLC_REG
, ®
);
250 priv
->header_type
= PCI_HDRTYPE_TYPE(reg
);
251 if (priv
->header_type
!= 0)
/* Walk every BAR slot, one region per 32-bit BAR register. */
254 region
= device
->regions
;
255 for (bar
= PCI_MAPREG_START
; bar
< PCI_MAPREG_END
;
256 bar
+= sizeof(uint32_t), region
++) {
257 err
= pci_read(bus
, dev
, func
, bar
, ®
);
261 /* Probe the size of the region. */
262 err
= pci_write(bus
, dev
, func
, bar
, ~0);
265 pci_read(bus
, dev
, func
, bar
, &size
);
/* Restore the original BAR contents after sizing. */
266 pci_write(bus
, dev
, func
, bar
, reg
);
268 if (PCI_MAPREG_TYPE(reg
) == PCI_MAPREG_TYPE_IO
) {
270 region
->base_addr
= PCI_MAPREG_IO_ADDR(reg
);
271 region
->size
= PCI_MAPREG_IO_SIZE(size
);
273 if (PCI_MAPREG_MEM_PREFETCHABLE(reg
))
274 region
->is_prefetchable
= 1;
275 switch(PCI_MAPREG_MEM_TYPE(reg
)) {
276 case PCI_MAPREG_MEM_TYPE_32BIT
:
277 case PCI_MAPREG_MEM_TYPE_32BIT_1M
:
278 region
->base_addr
= PCI_MAPREG_MEM_ADDR(reg
);
279 region
->size
= PCI_MAPREG_MEM_SIZE(size
);
281 case PCI_MAPREG_MEM_TYPE_64BIT
:
/* 64-bit BAR: the next slot holds the upper 32 bits of both the
 * address and the size mask; repeat the sizing dance for it. */
287 bar
+= sizeof(uint32_t);
289 err
= pci_read(bus
, dev
, func
, bar
, ®
);
292 reg64
|= (uint64_t)reg
<< 32;
294 err
= pci_write(bus
, dev
, func
, bar
, ~0);
297 pci_read(bus
, dev
, func
, bar
, &size
);
298 pci_write(bus
, dev
, func
, bar
, reg64
>> 32);
299 size64
|= (uint64_t)size
<< 32;
301 region
->base_addr
= PCI_MAPREG_MEM64_ADDR(reg64
);
302 region
->size
= PCI_MAPREG_MEM64_SIZE(size64
);
/*
 * Method table wiring this NetBSD backend into the generic pciaccess
 * core. Capability parsing falls back to the shared generic helper.
 * NOTE(review): some initializer slots (original lines 314-315) are
 * missing from this extraction.
 */
312 static const struct pci_system_methods netbsd_pci_methods
= {
313 pci_system_netbsd_destroy
,
316 pci_device_netbsd_probe
,
317 pci_device_netbsd_map_range
,
318 pci_device_netbsd_unmap_range
,
319 pci_device_netbsd_read
,
320 pci_device_netbsd_write
,
321 pci_fill_capabilities_generic
/*
 * pci_system_netbsd_create: backend entry point. Opens /dev/pci0
 * (stored in the global pcifd used by pci_read/pci_write), allocates
 * the global pci_sys, installs netbsd_pci_methods, then enumerates
 * bus 0-255 / dev 0-31 / all functions twice: first pass counts the
 * responding devices (ndevs), second pass fills in one
 * pci_device_private record per device (BDF, vendor/device IDs,
 * class/revision, subsystem IDs).
 *
 * NOTE(review): partial extraction -- the return type, braces, error
 * returns, the declaration of `reg`, the ndevs initialization and
 * increment, the `continue`s on invalid vendor IDs, and the tail of
 * the function (it continues past the end of this chunk) are not
 * visible here.
 */
325 pci_system_netbsd_create(void)
327 struct pci_device_private
*device
;
328 int bus
, dev
, func
, ndevs
, nfuncs
;
331 pcifd
= open("/dev/pci0", O_RDWR
);
335 pci_sys
= calloc(1, sizeof(struct pci_system
));
336 if (pci_sys
== NULL
) {
341 pci_sys
->methods
= &netbsd_pci_methods
;
/* Pass 1: count devices that answer with a valid vendor ID. */
344 for (bus
= 0; bus
< 256; bus
++) {
345 for (dev
= 0; dev
< 32; dev
++) {
346 nfuncs
= pci_nfuncs(bus
, dev
);
347 for (func
= 0; func
< nfuncs
; func
++) {
348 if (pci_read(bus
, dev
, func
, PCI_ID_REG
,
351 if (PCI_VENDOR(reg
) == PCI_VENDOR_INVALID
||
352 PCI_VENDOR(reg
) == 0)
360 pci_sys
->num_devices
= ndevs
;
361 pci_sys
->devices
= calloc(ndevs
, sizeof(struct pci_device_private
));
362 if (pci_sys
->devices
== NULL
) {
/* Pass 2: re-scan and populate one record per responding device. */
368 device
= pci_sys
->devices
;
369 for (bus
= 0; bus
< 256; bus
++) {
370 for (dev
= 0; dev
< 32; dev
++) {
371 nfuncs
= pci_nfuncs(bus
, dev
);
372 for (func
= 0; func
< nfuncs
; func
++) {
373 if (pci_read(bus
, dev
, func
, PCI_ID_REG
,
376 if (PCI_VENDOR(reg
) == PCI_VENDOR_INVALID
||
377 PCI_VENDOR(reg
) == 0)
/* Single-domain system: domain is always 0 on this backend. */
380 device
->base
.domain
= 0;
381 device
->base
.bus
= bus
;
382 device
->base
.dev
= dev
;
383 device
->base
.func
= func
;
384 device
->base
.vendor_id
= PCI_VENDOR(reg
);
385 device
->base
.device_id
= PCI_PRODUCT(reg
);
/* Class register packs class/subclass/interface into one word. */
387 if (pci_read(bus
, dev
, func
, PCI_CLASS_REG
,
391 device
->base
.device_class
=
392 PCI_INTERFACE(reg
) | PCI_CLASS(reg
) << 16 |
393 PCI_SUBCLASS(reg
) << 8;
394 device
->base
.revision
= PCI_REVISION(reg
);
396 if (pci_read(bus
, dev
, func
, PCI_SUBSYS_ID_REG
,
400 device
->base
.subvendor_id
= PCI_VENDOR(reg
);
401 device
->base
.subdevice_id
= PCI_PRODUCT(reg
);