/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */
#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
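/*
 * to_mmio() above is the usual container_of() idiom: given a pointer to
 * the embedded 'dev' member, it recovers the enclosing
 * kvm_coalesced_mmio_dev by subtracting the member's offset.
 */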
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;
	/* Are we able to batch it? */

	/* last is the first free entry;
	 * check that we do not run into the first used entry
	 * (there is always one unused entry in the buffer)
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail < KVM_MAX_VCPUS) {
		/* full */
		return 0;
	}
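	/*
	 * Worked example with illustrative values: if KVM_COALESCED_MMIO_MAX
	 * were 64, first == 10 and last == 9 would give
	 * avail == (10 - 9 - 1) % 64 == 0, i.e. a full ring. Keeping
	 * KVM_MAX_VCPUS entries in reserve matters because this check runs
	 * before dev->lock is taken: every vcpu that passed it concurrently
	 * can still append one entry under the lock.
	 */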
	/* is it in a batchable area? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}
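/*
 * Example of the inclusion test: a zone { .addr = 0xa0000, .size = 0x20000 }
 * batches a 4-byte write to 0xb0000 (0xa0000 <= 0xb0000 and
 * 0xb0004 <= 0xc0000), but not one to 0xbfffe, which would straddle the
 * end of the zone.
 */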
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);
	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* make the entry visible to the reader before publishing it */
	smp_wmb();

	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->lock);
	return 0;
}
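/*
 * The consumer side lives in userspace: the ring page is mapped at
 * KVM_COALESCED_MMIO_PAGE_OFFSET of the vcpu mmap area, and the VMM is
 * expected to drain entries and advance ring->first when it next exits
 * from KVM_RUN. The smp_wmb() above ensures an entry's contents are
 * visible before the updated 'last' index exposes it.
 */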
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
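/*
 * Note that no .read handler is provided: only writes can be coalesced,
 * since a read needs its result immediately and must take the normal
 * (synchronous) MMIO exit path.
 */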
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	int ret;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

	ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
	if (ret < 0) {
		/* don't leave a dangling pointer behind on failure */
		kvm->coalesced_mmio_dev = NULL;
		kfree(dev);
	}

	return ret;
}
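/*
 * kvm->coalesced_mmio_ring itself is not allocated here; this code
 * assumes common KVM code set it up at VM creation time to point at the
 * page userspace later maps for draining.
 */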
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -EINVAL;

	down_write(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		up_write(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	up_write(&kvm->slots_lock);

	return 0;
}
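/*
 * Userspace sketch (illustrative values, assuming an open VM fd):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xa0000,
 *		.size = 0x20000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */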
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;
	int i;

	if (dev == NULL)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* unregister all zones
		 * included in (zone->addr, zone->size)
		 */

		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	up_write(&kvm->slots_lock);

	return 0;
}
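/*
 * Note on the removal loop above: a matching zone is deleted in O(1) by
 * overwriting it with the last zone in the array, so registration order
 * is not preserved. Scanning downward from nb_zones is what makes this
 * safe: the swapped-in entry always comes from a higher index that was
 * already examined.
 */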