/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Robert Jennings <rcjenn@us.ibm.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/firmware.h>
#include <asm/abs_addr.h>
#include <asm/hvcall.h>
static struct bus_type vio_bus_type;

static struct vio_dev vio_bus_device = { /* fake "parent" device */
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};
#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072
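/*
 * Illustrative note (not part of the original source): with this chunk
 * size, a device desiring 1 MiB above the current balance level is topped
 * up over at most 1048576 / 131072 = 8 balance passes, assuming free
 * entitlement remains available at each pass.
 */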
/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};
/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
static struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;
/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	/*
	 * Count the number of vdevice entries with an
	 * ibm,my-dma-window OF property
	 */
	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
						NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}
/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * used.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool can not be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}
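/*
 * Worked example (illustrative, not part of the original source): a device
 * with cmo.entitled = 128 KiB and cmo.allocated = 96 KiB requesting
 * size = 64 KiB while the spare pool is full and excess.free = 256 KiB
 * sees reserve_free = 32 KiB and excess_free = 256 KiB; the request
 * succeeds, cmo.allocated becomes 160 KiB, 32 KiB comes from the device
 * reserve and the remaining 32 KiB is taken from vio_cmo.excess.free.
 */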
/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 *
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
						   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool. The amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
			   (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired -
					 vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q,
				      VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when change can not be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unused entitlement each device can
	 * sacrifice to fulfill entitlement change.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
				 max_t(size_t, viodev->cmo.allocated,
				       VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until entitlement change is served
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
					    VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}
/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
					    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
			       max(viodev->cmo.allocated, level);
			avail -= need;
		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
					      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
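/*
 * Worked example (illustrative, not part of the original source): with
 * 192 KiB available above the minimums and two devices desiring 256 KiB
 * and 64 KiB beyond VIO_CMO_MIN_ENT, the first pass gives device A
 * min(192 KiB, VIO_CMO_BALANCE_CHUNK) = 128 KiB and device B its full
 * 64 KiB, exhausting the available entitlement; device A's remaining
 * desire waits until more entitlement is freed or granted.
 */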
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);

	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	dma_addr_t ret = DMA_ERROR_CODE;

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
	if (unlikely(dma_mapping_error(dev, ret))) {
		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}
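/*
 * Sizing note (illustrative, not part of the original source): entitlement
 * is accounted in whole IOMMU pages, so with a 4 KiB IOMMU_PAGE_SIZE a
 * 5000 byte mapping charges roundup(5000, 4096) = 8192 bytes, which
 * vio_dma_iommu_unmap_page() returns to the pools on unmap.
 */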
static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct scatterlist *sgl;
	int ret, count = 0;
	size_t alloc_size = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);

	if (vio_cmo_alloc(viodev, alloc_size)) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return 0;
	}

	ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

	if (unlikely(!ret)) {
		vio_cmo_dealloc(viodev, alloc_size);
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);

	return ret;
}
static void vio_dma_iommu_unmap_sg(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);

	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

	vio_cmo_dealloc(viodev, alloc_size);
}
static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return dma_iommu_ops.dma_supported(dev, mask);
}

static u64 vio_dma_get_required_mask(struct device *dev)
{
	return dma_iommu_ops.get_required_mask(dev);
}
struct dma_map_ops vio_dma_mapping_ops = {
	.alloc_coherent    = vio_dma_iommu_alloc_coherent,
	.free_coherent     = vio_dma_iommu_free_coherent,
	.map_sg            = vio_dma_iommu_map_sg,
	.unmap_sg          = vio_dma_iommu_unmap_sg,
	.map_page          = vio_dma_iommu_map_page,
	.unmap_page        = vio_dma_iommu_unmap_page,
	.dma_supported     = vio_dma_iommu_dma_supported,
	.get_required_mask = vio_dma_get_required_mask,
};
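/*
 * Usage note (illustrative sketch, not part of the original source): these
 * ops wrap dma_iommu_ops with entitlement accounting and are installed per
 * device by vio_cmo_set_dma_ops() below, so CMO-aware drivers just use the
 * generic DMA API, e.g.:
 *
 *	dma_addr_t addr = dma_map_single(&viodev->dev, buf, len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(&viodev->dev, addr))
 *		...	(the failed mapping was counted in cmo.allocs_failed)
 */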
/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is not in the device list, then no driver is loaded
	 * for the device and it can not receive entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance*/
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement, move
		 * any reserve memory in the change region to the excess pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
						       max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Determine the devices IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;

	/*
	 * Check to see that device has a DMA window and configure
	 * entitlement for the device.
	 */
	if (of_get_property(viodev->dev.of_node,
			    "ibm,my-dma-window", NULL)) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
				__func__);
			return -EINVAL;
		}

		viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
				  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they
	 * were last set, the number of devices in the OF tree has
	 * been constant and the IO memory for this is already in
	 * the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
			    VIO_CMO_MIN_ENT)) {
		/* Updated desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
					    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
				"entitlement to add device. "
				"Need %lu, have %lu\n", __func__,
				size, (vio_cmo.spare + tmp));
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}
/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
			"allocated after remove operation.\n",
			__func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, it's
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in OF tree as determined by later
		 * balance operation
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
							 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this will guarantee that
		 * a module unload/load will result in a success.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0, will panic()
	 * later when spare is reserved.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
				 VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}
/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
					struct device_attribute *attr,  \
					char *buf)                      \
{                                                                       \
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}
static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}
static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}
static ssize_t viodev_cmo_desired_set(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	unsigned long new_desired;
	int ret;

	ret = strict_strtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);
	return count;
}
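/*
 * Usage sketch (illustrative, not part of the original source; the unit
 * address is hypothetical): together with the cmo_desired attribute below,
 * this lets user space retune a device's desired entitlement:
 *
 *	echo 1048576 > /sys/bus/vio/devices/30000002/cmo_desired
 *
 * The write is parsed here and handed to vio_cmo_set_dev_desired(), which
 * schedules a balance pass.
 */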
viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);
static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(devspec),
	__ATTR_RO(modalias),
	__ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viodev_cmo_desired_show, viodev_cmo_desired_set),
	__ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,  NULL),
	__ATTR(cmo_allocated,     S_IRUGO, viodev_cmo_allocated_show, NULL),
	__ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
	__ATTR_NULL
};
/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                        \
static ssize_t                                                          \
viobus_cmo_##name##_show(struct bus_type *bt, char *buf)                \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf)     \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}
static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
				     size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}
viobus_cmo_rd_attr(entitled);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_rd_attr(high);
static struct bus_attribute vio_cmo_bus_attrs[] = {
	__ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
	__ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
	__ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
	__ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
	__ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
	__ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
	__ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
	__ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
	__ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viobus_cmo_high_show, viobus_cmo_high_reset),
	__ATTR_NULL
};
static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
	vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const unsigned char *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in tce entries */
	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	return iommu_init_table(tbl, -1);
}
/**
 * vio_match_device: - Tell if a VIO device has a matching
 *			VIO device id structure.
 * @ids:	array of VIO device id structures to search in
 * @dev:	the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices. Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}
/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}
/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;
	int ret = 1;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		ret = viodrv->remove(viodev);

	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return ret;
}
/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv:	The vio_driver structure to be registered.
 */
int vio_register_driver(struct vio_driver *viodrv)
{
	printk(KERN_DEBUG "%s: driver %s registering\n", __func__,
		viodrv->driver.name);

	/* fill in 'struct driver' fields */
	viodrv->driver.bus = &vio_bus_type;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(vio_register_driver);
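/*
 * Registration sketch (illustrative, not part of the original source; all
 * foo_* names are hypothetical): a minimal vio driver of this vintage
 * supplies an id table and probe/remove callbacks, plus get_desired_dma
 * when CMO support is wanted:
 *
 *	static struct vio_device_id foo_device_table[] = {
 *		{ "network", "IBM,foo" },
 *		{ "", "" }
 *	};
 *	MODULE_DEVICE_TABLE(vio, foo_device_table);
 *
 *	static struct vio_driver foo_driver = {
 *		.id_table	= foo_device_table,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *		.driver		= {
 *			.name	= "foo",
 *			.owner	= THIS_MODULE,
 *		},
 *	};
 *
 *	... vio_register_driver(&foo_driver) from the module init ...
 */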
/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:	The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
/* vio_dev refcount hit 0 */
static void __devinit vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (tbl)
		iommu_free_table(tbl, dev->of_node ?
			dev->of_node->full_name : dev_name(dev));
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}
/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:	The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	const unsigned int *unit_address;

	/* we need the 'device_type' property, in order to match with drivers */
	if (of_node->type == NULL) {
		printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
				__func__,
				of_node->name ? of_node->name : "<unknown>");
		return NULL;
	}

	unit_address = of_get_property(of_node, "reg", NULL);
	if (unit_address == NULL) {
		printk(KERN_WARNING "%s: node %s missing 'reg'\n",
				__func__,
				of_node->name ? of_node->name : "<unknown>");
		return NULL;
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL)
		return NULL;

	viodev->irq = irq_of_parse_and_map(of_node, 0);

	dev_set_name(&viodev->dev, "%x", *unit_address);
	viodev->name = of_node->name;
	viodev->type = of_node->type;
	viodev->unit_address = *unit_address;
	viodev->dev.of_node = of_node_get(of_node);

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_set_dma_ops(viodev);
	else
		set_dma_ops(&viodev->dev, &dma_iommu_ops);
	set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;
	/* needed to ensure proper operation of coherent allocations
	 * later, in case the driver doesn't set it explicitly */
	dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
				__func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;
}
EXPORT_SYMBOL(vio_register_device_node);
/**
 * vio_bus_init: - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
	int err;
	struct device_node *node_vroot;

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_sysfs_init();

	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
		return err;
	}

	/*
	 * The fake parent of all vio devices, just to give us
	 * a nice directory
	 */
	err = device_register(&vio_bus_device.dev);
	if (err) {
		printk(KERN_WARNING "%s: device_register returned %i\n",
				__func__, err);
		return err;
	}

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_init();

	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;

		/*
		 * Create struct vio_devices for each virtual device in
		 * the device tree. Drivers will associate with them later.
		 */
		for (of_node = node_vroot->child; of_node != NULL;
				of_node = of_node->sibling)
			vio_register_device_node(of_node);
		of_node_put(node_vroot);
	}

	return 0;
}
__initcall(vio_bus_init);
static ssize_t name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static ssize_t devspec_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct device_node *of_node = dev->of_node;

	return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static struct device_attribute vio_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(devspec),
	__ATTR_RO(modalias),
	__ATTR_NULL
};
void __devinit vio_unregister_device(struct vio_dev *viodev)
{
	device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct vio_driver *vio_drv = to_vio_driver(drv);
	const struct vio_device_id *ids = vio_drv->id_table;

	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}
static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
	return 0;
}
static struct bus_type vio_bus_type = {
	.name = "vio",
	.dev_attrs = vio_dev_attrs,
	.uevent = vio_hotplug,
	.match = vio_bus_match,
	.probe = vio_bus_probe,
	.remove = vio_bus_remove,
};
/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev:	The vio device to get property.
 * @which:	The property/attribute to be extracted.
 * @length:	Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
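/*
 * Usage sketch (illustrative, not part of the original source): a driver
 * can fetch any OF property of its device, for example the "reg" property
 * used above as the unit address:
 *
 *	int len;
 *	const unsigned int *reg = vio_get_attribute(vdev, "reg", &len);
 *	if (reg)
 *		...	(use *reg; len holds the property size in bytes)
 */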
#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
	struct device *found;

	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
	if (!found)
		return NULL;

	return to_vio_dev(found);
}
/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	const uint32_t *unit_address;
	char kobj_name[20];

	/* construct the kobject name from the device node */
	unit_address = of_get_property(vnode, "reg", NULL);
	if (!unit_address)
		return NULL;
	snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);

	return vio_find_name(kobj_name);
}
EXPORT_SYMBOL(vio_find_node);
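/*
 * Usage sketch (illustrative, not part of the original source): code that
 * holds an OF node for a virtual device, such as a console probe path, can
 * map it back to the registered device:
 *
 *	struct vio_dev *vdev = vio_find_node(vnode);
 *	if (vdev)
 *		...	(vdev->unit_address and vdev->dev are usable here)
 */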
int vio_enable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */