[qemu/ar7.git] / util / stats64.c
/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"
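
/* When the host lacks native 64-bit atomics, a Stat64 is a pair of
 * 32-bit halves (s->low, s->high) guarded by the 32-bit s->lock word.
 * Bit 0 of s->lock is the writer flag; the remaining bits count the
 * active readers, which is why readers add and subtract 2 below while
 * a writer cmpxchgs the whole word from 0 to 1.
 */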
#ifndef CONFIG_ATOMIC64
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us. */
    atomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it. */
    while (atomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

static inline void stat64_rdunlock(Stat64 *s)
{
    atomic_sub(&s->lock, 2);
}

static inline bool stat64_wrtrylock(Stat64 *s)
{
    /* Succeed only when there are no readers and no other writer. */
    return atomic_cmpxchg(&s->lock, 0, 1) == 0;
}

static inline void stat64_wrunlock(Stat64 *s)
{
    /* Clear the writer bit (bit 0); reader counts are preserved. */
    atomic_dec(&s->lock);
}

uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    /* The const cast is safe: "reading" still takes and releases the
     * read lock, which only touches s->lock.
     */
    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = atomic_read(&s->high);
    low = atomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}
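
/* For contrast: when CONFIG_ATOMIC64 is defined, this whole file is
 * compiled out and the accessors in "qemu/stats64.h" act on a plain
 * 64-bit value.  A rough sketch only; the member name "value" and the
 * exact atomic helper are assumptions, see the header for the real
 * definition:
 *
 *     static inline uint64_t stat64_get(const Stat64 *s)
 *     {
 *         return atomic_read__nocheck(&s->value);
 *     }
 */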

bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update.  By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = atomic_fetch_add(&s->low, low);
    /* (old + low) < old is true exactly when the 32-bit addition
     * wrapped around, i.e. when there is a carry out of the low word.
     */
    high += (old + low) < old;
    atomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}
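
/* Callers in "qemu/stats64.h" spin on this slow path until the write
 * lock is won.  A sketch of the expected wrapper (not the verbatim
 * header code):
 *
 *     static inline void stat64_add(Stat64 *s, uint64_t value)
 *     {
 *         while (!stat64_add32_carry(s, value, value >> 32)) {
 *             continue;
 *         }
 *     }
 */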

bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = atomic_read(&s->high);
    low = atomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low.  The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        atomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        atomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
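
/* Per the comment above, the lock-free fast path in "qemu/stats64.h"
 * reads s->high before s->low, which the store order here mirrors.  A
 * simplified retry sketch of the wrapper (not the verbatim header
 * code, which also has a lock-free early-exit check):
 *
 *     static inline void stat64_min(Stat64 *s, uint64_t value)
 *     {
 *         while (!stat64_min_slow(s, value)) {
 *             continue;
 *         }
 *     }
 */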

bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = atomic_read(&s->high);
    low = atomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low.  The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        atomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        atomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
#endif /* !CONFIG_ATOMIC64 */
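
/* Illustrative usage of the public API from "qemu/stats64.h".  The
 * counter and function names are hypothetical; stat64_add and
 * stat64_get are the entry points this file backs on 32-bit hosts:
 *
 *     static Stat64 bytes_transferred;
 *
 *     void transfer_account(uint64_t len)
 *     {
 *         stat64_add(&bytes_transferred, len);
 *     }
 *
 *     uint64_t transfer_total(void)
 *     {
 *         return stat64_get(&bytes_transferred);
 *     }
 */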