/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"

#ifndef CONFIG_ATOMIC64
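/*
 * Without host 64-bit atomics, the counter is kept as two 32-bit halves
 * guarded by a small lock word: bit 0 is set while a writer holds the
 * lock, and each reader adds 2, so any value above 1 means readers are
 * present and a writer cannot enter.
 */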
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us. */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it.  */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

static inline void stat64_rdunlock(Stat64 *s)
{
    qatomic_sub(&s->lock, 2);
}

static inline bool stat64_wrtrylock(Stat64 *s)
{
    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}

static inline void stat64_wrunlock(Stat64 *s)
{
    qatomic_dec(&s->lock);
}

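/*
 * Readers take the lock in shared mode, so the two 32-bit halves are
 * always observed as a consistent pair.
 */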
uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}

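/*
 * Writers spin until they own the lock exclusively; with readers and
 * other writers excluded, the two halves can be stored in either order.
 */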
void stat64_set(Stat64 *s, uint64_t val)
{
    while (!stat64_wrtrylock(s)) {
        cpu_relax();
    }

    qatomic_set(&s->high, val >> 32);
    qatomic_set(&s->low, val);
    stat64_wrunlock(s);
}

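/*
 * Slow path for additions that may carry from s->low into s->high.
 * Returns false without blocking if the write lock is contended, so
 * that the caller can retry.
 */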
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update.  By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}

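/*
 * Slow path for stat64_min: store the new value only if it is below the
 * current contents.  Returns false without blocking if the write lock is
 * contended, so that the caller can retry.
 */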
bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low.  The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}

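/*
 * Slow path for stat64_max, the mirror image of stat64_min_slow: store
 * the new value only if it is above the current contents.
 */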
bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low.  The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
#endif