/*
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 */
10 #include <linux/types.h>
11 #include <linux/parser.h>
13 #include <linux/slab.h>
14 #include <linux/res_counter.h>
15 #include <linux/uaccess.h>
18 void res_counter_init(struct res_counter
*counter
, struct res_counter
*parent
)
20 spin_lock_init(&counter
->lock
);
21 counter
->limit
= RESOURCE_MAX
;
22 counter
->soft_limit
= RESOURCE_MAX
;
23 counter
->parent
= parent
;
26 int res_counter_charge_locked(struct res_counter
*counter
, unsigned long val
)
28 if (counter
->usage
+ val
> counter
->limit
) {
33 counter
->usage
+= val
;
34 if (counter
->usage
> counter
->max_usage
)
35 counter
->max_usage
= counter
->usage
;
39 int res_counter_charge(struct res_counter
*counter
, unsigned long val
,
40 struct res_counter
**limit_fail_at
,
41 struct res_counter
**soft_limit_fail_at
)
45 struct res_counter
*c
, *u
;
47 *limit_fail_at
= NULL
;
48 if (soft_limit_fail_at
)
49 *soft_limit_fail_at
= NULL
;
50 local_irq_save(flags
);
51 for (c
= counter
; c
!= NULL
; c
= c
->parent
) {
53 ret
= res_counter_charge_locked(c
, val
);
55 * With soft limits, we return the highest ancestor
56 * that exceeds its soft limit
58 if (soft_limit_fail_at
&&
59 !res_counter_soft_limit_check_locked(c
))
60 *soft_limit_fail_at
= c
;
61 spin_unlock(&c
->lock
);
70 for (u
= counter
; u
!= c
; u
= u
->parent
) {
72 res_counter_uncharge_locked(u
, val
);
73 spin_unlock(&u
->lock
);
76 local_irq_restore(flags
);
80 void res_counter_uncharge_locked(struct res_counter
*counter
, unsigned long val
)
82 if (WARN_ON(counter
->usage
< val
))
85 counter
->usage
-= val
;
88 void res_counter_uncharge(struct res_counter
*counter
, unsigned long val
,
89 bool *was_soft_limit_excess
)
92 struct res_counter
*c
;
94 local_irq_save(flags
);
95 for (c
= counter
; c
!= NULL
; c
= c
->parent
) {
97 if (was_soft_limit_excess
)
98 *was_soft_limit_excess
=
99 !res_counter_soft_limit_check_locked(c
);
100 res_counter_uncharge_locked(c
, val
);
101 spin_unlock(&c
->lock
);
103 local_irq_restore(flags
);
107 static inline unsigned long long *
108 res_counter_member(struct res_counter
*counter
, int member
)
112 return &counter
->usage
;
114 return &counter
->max_usage
;
116 return &counter
->limit
;
118 return &counter
->failcnt
;
120 return &counter
->soft_limit
;
127 ssize_t
res_counter_read(struct res_counter
*counter
, int member
,
128 const char __user
*userbuf
, size_t nbytes
, loff_t
*pos
,
129 int (*read_strategy
)(unsigned long long val
, char *st_buf
))
131 unsigned long long *val
;
135 val
= res_counter_member(counter
, member
);
137 s
+= read_strategy(*val
, s
);
139 s
+= sprintf(s
, "%llu\n", *val
);
140 return simple_read_from_buffer((void __user
*)userbuf
, nbytes
,
144 u64
res_counter_read_u64(struct res_counter
*counter
, int member
)
146 return *res_counter_member(counter
, member
);
149 int res_counter_memparse_write_strategy(const char *buf
,
150 unsigned long long *res
)
154 /* return RESOURCE_MAX(unlimited) if "-1" is specified */
156 *res
= simple_strtoull(buf
+ 1, &end
, 10);
157 if (*res
!= 1 || *end
!= '\0')
163 /* FIXME - make memparse() take const char* args */
164 *res
= memparse((char *)buf
, &end
);
168 *res
= PAGE_ALIGN(*res
);
172 int res_counter_write(struct res_counter
*counter
, int member
,
173 const char *buf
, write_strategy_fn write_strategy
)
177 unsigned long long tmp
, *val
;
179 if (write_strategy
) {
180 if (write_strategy(buf
, &tmp
))
183 tmp
= simple_strtoull(buf
, &end
, 10);
187 spin_lock_irqsave(&counter
->lock
, flags
);
188 val
= res_counter_member(counter
, member
);
190 spin_unlock_irqrestore(&counter
->lock
, flags
);