/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - copy also limits on subvol creation
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */
/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 */
	u64 old_refcnt;
	u64 new_refcnt;
};
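
/*
 * A note on the counters above: "referenced" (rfer) counts every byte the
 * qgroup can reach, while "exclusive" (excl) counts only bytes reachable
 * from no other qgroup.  E.g. if two snapshots share a 1MiB extent, each
 * counts it in rfer but neither may count it in excl; once one snapshot is
 * deleted, the extent becomes part of the survivor's excl as well.
 */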
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
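
/*
 * The two helpers above let us stash a struct btrfs_qgroup pointer in the
 * u64 aux value of a ulist node and get it back out again.  The double cast
 * through uintptr_t keeps this safe on 32-bit kernels, where a pointer is
 * narrower than u64.
 */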
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}
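
/*
 * Each btrfs_qgroup_list node created above is linked into two lists at
 * once: member->groups (via next_group) and parent->members (via
 * next_member).  That way both directions of the relation can be walked
 * without a lookup, and del_relation_rb()/__del_qgroup_rb() must always
 * unlink both list heads before freeing the node.
 */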
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif
/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_enabled)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, "
					"marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		   ret >= 0) {
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	}
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}
/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable().
 * The first two are single-threaded paths.  For the third one, we have set
 * quota_root to null with qgroup_lock held beforehand, so it is safe to clean
 * up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * we call btrfs_free_qgroup_config() when umounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_test_is_dummy_root(quota_root))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root, u64 qgroupid,
				    u64 flags, u64 max_rfer, u64 max_excl,
				    u64 rsv_rfer, u64 rsv_excl)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_test_is_dummy_root(root))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
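
/*
 * Note that quota_enable starts the qgroups out with the INCONSISTENT
 * status flag set: extents that existed before quotas were switched on
 * have not been accounted yet, so the numbers are not to be trusted until
 * a rescan has run (see btrfs_run_qgroups(), which kicks off the rescan
 * worker once the pending quota state is committed).
 */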
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check if such a qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;
	int err;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check if such a qgroup relation exists at all */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check if there are no relations to this qgroup */
		if (!list_empty(&qgroup->groups) ||
		    !list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	ret = del_qgroup_item(trans, quota_root, qgroupid);

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}
	ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
				       limit->flags, limit->max_rfer,
				       limit->max_excl, limit->rsv_rfer,
				       limit->rsv_excl);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

	spin_lock(&fs_info->qgroup_lock);
	qgroup->lim_flags = limit->flags;
	qgroup->max_rfer = limit->max_rfer;
	qgroup->max_excl = limit->max_excl;
	qgroup->rsv_rfer = limit->rsv_rfer;
	qgroup->rsv_excl = limit->rsv_excl;
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
			   struct btrfs_qgroup_operation *oper2)
{
	/*
	 * Ignore seq and type here, we're looking for any operation
	 * at all related to this extent on that root.
	 */
	if (oper1->bytenr < oper2->bytenr)
		return -1;
	if (oper1->bytenr > oper2->bytenr)
		return 1;
	if (oper1->ref_root < oper2->ref_root)
		return -1;
	if (oper1->ref_root > oper2->ref_root)
		return 1;
	return 0;
}

static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
{
	struct rb_node *n;
	struct btrfs_qgroup_operation *cur;
	int cmp;

	spin_lock(&fs_info->qgroup_op_lock);
	n = fs_info->qgroup_op_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct btrfs_qgroup_operation, n);
		cmp = comp_oper_exist(cur, oper);
		if (cmp < 0) {
			n = n->rb_right;
		} else if (cmp) {
			n = n->rb_left;
		} else {
			spin_unlock(&fs_info->qgroup_op_lock);
			return -EEXIST;
		}
	}
	spin_unlock(&fs_info->qgroup_op_lock);
	return 0;
}
static int comp_oper(struct btrfs_qgroup_operation *oper1,
		     struct btrfs_qgroup_operation *oper2)
{
	if (oper1->bytenr < oper2->bytenr)
		return -1;
	if (oper1->bytenr > oper2->bytenr)
		return 1;
	if (oper1->ref_root < oper2->ref_root)
		return -1;
	if (oper1->ref_root > oper2->ref_root)
		return 1;
	if (oper1->seq < oper2->seq)
		return -1;
	if (oper1->seq > oper2->seq)
		return 1;
	if (oper1->type < oper2->type)
		return -1;
	if (oper1->type > oper2->type)
		return 1;
	return 0;
}
static int insert_qgroup_oper(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup_operation *cur;
	int cmp;

	spin_lock(&fs_info->qgroup_op_lock);
	p = &fs_info->qgroup_op_tree.rb_node;
	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct btrfs_qgroup_operation, n);
		cmp = comp_oper(cur, oper);
		if (cmp < 0) {
			p = &(*p)->rb_right;
		} else if (cmp) {
			p = &(*p)->rb_left;
		} else {
			spin_unlock(&fs_info->qgroup_op_lock);
			return -EEXIST;
		}
	}
	rb_link_node(&oper->n, parent, p);
	rb_insert_color(&oper->n, &fs_info->qgroup_op_tree);
	spin_unlock(&fs_info->qgroup_op_lock);
	return 0;
}
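
/*
 * Operations are keyed by (bytenr, ref_root, seq, type), in that order (see
 * comp_oper() above).  Because bytenr is the most significant part of the
 * key, all operations against one extent are adjacent in the rb-tree, which
 * is what lets qgroup_account_deleted_refs() below simply rb_next() forward
 * from an operation to find later removals of the same bytenr.
 */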
/*
 * Record a quota operation for processing later on.
 * @trans: the transaction we are adding the delayed op to.
 * @fs_info: the fs_info for this fs.
 * @ref_root: the root of the reference we are acting on,
 * @bytenr: the bytenr we are acting on.
 * @num_bytes: the number of bytes in the reference.
 * @type: the type of operation this is.
 * @mod_seq: do we need to get a sequence number for looking up roots.
 *
 * We just add it to our trans qgroup_ref_list and carry on and process these
 * operations in order at some later point.  If the reference root isn't a fs
 * root then we don't bother with doing anything.
 *
 * MUST BE HOLDING THE REF LOCK.
 */
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info, u64 ref_root,
			    u64 bytenr, u64 num_bytes,
			    enum btrfs_qgroup_operation_type type, int mod_seq)
{
	struct btrfs_qgroup_operation *oper;
	int ret;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		return 0;

	oper = kmalloc(sizeof(*oper), GFP_NOFS);
	if (!oper)
		return -ENOMEM;

	oper->ref_root = ref_root;
	oper->bytenr = bytenr;
	oper->num_bytes = num_bytes;
	oper->type = type;
	oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
	INIT_LIST_HEAD(&oper->elem.list);
	oper->elem.seq = 0;

	trace_btrfs_qgroup_record_ref(oper);

	if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
		/*
		 * If any operation for this bytenr/ref_root combo
		 * exists, then we know it's not exclusively owned and
		 * shouldn't be queued up.
		 *
		 * This also catches the case where we have a cloned
		 * extent that gets queued up multiple times during
		 * drop snapshot.
		 */
		if (qgroup_oper_exists(fs_info, oper)) {
			kfree(oper);
			return 0;
		}
	}

	ret = insert_qgroup_oper(fs_info, oper);
	if (ret) {
		/* Shouldn't happen so have an assert for developers */
		ASSERT(0);
		kfree(oper);
		return ret;
	}
	list_add_tail(&oper->list, &trans->qgroup_ref_list);

	if (mod_seq)
		btrfs_get_tree_mod_seq(fs_info, &oper->elem);

	return 0;
}
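
/*
 * As an illustration (values made up): when the delayed-ref code drops the
 * last reference that subvolume 257 holds on a 16KiB extent, it would queue
 * something like
 *
 *	btrfs_qgroup_record_ref(trans, fs_info, 257, bytenr, 16384,
 *				BTRFS_QGROUP_OPER_SUB_EXCL, 0);
 *
 * and the operation is replayed later by btrfs_delayed_qgroup_accounting()
 * when the delayed refs are run.
 */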
/*
 * The easy accounting, if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 */
static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_qgroup_operation *oper)
{
	struct btrfs_qgroup *qgroup;
	struct ulist *tmp;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int sign = 0;
	int ret = 0;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
		goto out;
	qgroup = find_qgroup_rb(fs_info, oper->ref_root);
	if (!qgroup)
		goto out;
	switch (oper->type) {
	case BTRFS_QGROUP_OPER_ADD_EXCL:
		sign = 1;
		break;
	case BTRFS_QGROUP_OPER_SUB_EXCL:
		sign = -1;
		break;
	default:
		ASSERT(0);
	}
	qgroup->rfer += sign * oper->num_bytes;
	qgroup->rfer_cmpr += sign * oper->num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
	qgroup->excl += sign * oper->num_bytes;
	qgroup->excl_cmpr += sign * oper->num_bytes;

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = u64_to_ptr(unode->aux);
		qgroup->rfer += sign * oper->num_bytes;
		qgroup->rfer_cmpr += sign * oper->num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
		qgroup->excl += sign * oper->num_bytes;
		qgroup->excl_cmpr += sign * oper->num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(tmp);
	return ret;
}
/*
 * Walk all of the roots that pointed to our bytenr and adjust their refcnts
 * properly.
 */
static int qgroup_calc_old_refcnt(struct btrfs_fs_info *fs_info,
				  u64 root_to_skip, struct ulist *tmp,
				  struct ulist *roots, struct ulist *qgroups,
				  u64 seq, int *old_roots, int rescan)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		/* We don't count our current root here */
		if (unode->val == root_to_skip)
			continue;
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;
		/*
		 * We could have a pending removal of this same ref so we may
		 * not have actually found our ref root when doing
		 * btrfs_find_all_roots, so we need to keep track of how many
		 * old roots we find in case we removed ours and added a
		 * different one at the same time.  I don't think this could
		 * happen in practice but that sort of thinking leads to pain
		 * and suffering and to the dark side.
		 */
		(*old_roots)++;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = u64_to_ptr(tmp_unode->aux);
			/*
			 * We use this sequence number to keep from having to
			 * run the whole list and 0 out the refcnt every time.
			 * We basically use sequence as the known 0 count and
			 * then add 1 every time we see a qgroup.  This is how
			 * we get how many of the roots actually point up to
			 * the upper level qgroups in order to determine
			 * exclusive counts.
			 *
			 * For rescan we want to set old_refcnt to seq so our
			 * exclusive calculations end up correct.
			 */
			if (rescan)
				qg->old_refcnt = seq;
			else if (qg->old_refcnt < seq)
				qg->old_refcnt = seq + 1;
			else
				qg->old_refcnt++;

			if (qg->new_refcnt < seq)
				qg->new_refcnt = seq + 1;
			else
				qg->new_refcnt++;
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}
/*
 * We need to walk forward in our operation tree and account for any roots
 * that were deleted after we made this operation.
 */
static int qgroup_account_deleted_refs(struct btrfs_fs_info *fs_info,
				       struct btrfs_qgroup_operation *oper,
				       struct ulist *tmp,
				       struct ulist *qgroups, u64 seq,
				       int *old_roots)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct btrfs_qgroup_operation *tmp_oper;
	struct rb_node *n;
	int ret;

	ulist_reinit(tmp);

	/*
	 * We only walk forward in the tree since we're only interested in
	 * removals that happened _after_ our operation.
	 */
	spin_lock(&fs_info->qgroup_op_lock);
	n = rb_next(&oper->n);
	spin_unlock(&fs_info->qgroup_op_lock);
	if (!n)
		return 0;
	tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
	while (tmp_oper->bytenr == oper->bytenr) {
		/*
		 * If it's not a removal we don't care, additions work out
		 * properly with our refcnt tracking.
		 */
		if (tmp_oper->type != BTRFS_QGROUP_OPER_SUB_SHARED &&
		    tmp_oper->type != BTRFS_QGROUP_OPER_SUB_EXCL)
			goto next;
		qg = find_qgroup_rb(fs_info, tmp_oper->ref_root);
		if (!qg)
			goto next;
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		if (ret) {
			if (ret < 0)
				return ret;
			/*
			 * We only want to increase old_roots if this qgroup is
			 * not already in the list of qgroups.  If it is already
			 * there then that means it must have been re-added or
			 * the delete will be discarded because we had an
			 * existing ref that we haven't looked up yet.  In this
			 * case we don't want to increase old_roots.  So if ret
			 * == 1 then we know that this is the first time we've
			 * seen this qgroup and we can bump the old_roots.
			 */
			(*old_roots)++;
			ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg),
					GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
next:
		spin_lock(&fs_info->qgroup_op_lock);
		n = rb_next(&tmp_oper->n);
		spin_unlock(&fs_info->qgroup_op_lock);
		if (!n)
			break;
		tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
	}

	/* Ok now process the qgroups we found */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);
		if (qg->old_refcnt < seq)
			qg->old_refcnt = seq + 1;
		else
			qg->old_refcnt++;
		if (qg->new_refcnt < seq)
			qg->new_refcnt = seq + 1;
		else
			qg->new_refcnt++;
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(qgroups, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				return ret;
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
/* Add refcnt for the newly added reference. */
static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
				  struct btrfs_qgroup_operation *oper,
				  struct btrfs_qgroup *qgroup,
				  struct ulist *tmp, struct ulist *qgroups,
				  u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	int ret;

	ulist_reinit(tmp);
	ret = ulist_add(qgroups, qgroup->qgroupid, ptr_to_u64(qgroup),
			GFP_ATOMIC);
	if (ret < 0)
		return ret;
	ret = ulist_add(tmp, qgroup->qgroupid, ptr_to_u64(qgroup),
			GFP_ATOMIC);
	if (ret < 0)
		return ret;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);
		if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
			if (qg->new_refcnt < seq)
				qg->new_refcnt = seq + 1;
			else
				qg->new_refcnt++;
		} else {
			if (qg->old_refcnt < seq)
				qg->old_refcnt = seq + 1;
			else
				qg->old_refcnt++;
		}
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				return ret;
			ret = ulist_add(qgroups, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
/*
 * This adjusts the counters for all referenced qgroups if need be.
 */
static int qgroup_adjust_counters(struct btrfs_fs_info *fs_info,
				  u64 root_to_skip, u64 num_bytes,
				  struct ulist *qgroups, u64 seq,
				  int old_roots, int new_roots, int rescan)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = u64_to_ptr(unode->aux);
		/*
		 * Wasn't referenced before but is now, add to the reference
		 * counters.
		 */
		if (qg->old_refcnt <= seq && qg->new_refcnt > seq) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}

		/*
		 * Was referenced before but isn't now, subtract from the
		 * reference counters.
		 */
		if (qg->old_refcnt > seq && qg->new_refcnt <= seq) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		if (qg->old_refcnt < seq)
			cur_old_count = 0;
		else
			cur_old_count = qg->old_refcnt - seq;
		if (qg->new_refcnt < seq)
			cur_new_count = 0;
		else
			cur_new_count = qg->new_refcnt - seq;

		/*
		 * If our refcount was the same as the roots previously but our
		 * new count isn't the same as the number of roots now then we
		 * went from having an exclusive reference on this range to not.
		 */
		if (old_roots && cur_old_count == old_roots &&
		    (cur_new_count != new_roots || new_roots == 0)) {
			WARN_ON(cur_new_count != new_roots && new_roots == 0);
			qg->excl -= num_bytes;
			qg->excl_cmpr -= num_bytes;
			dirty = true;
		}

		/*
		 * If we didn't reference all the roots before but now we do we
		 * have an exclusive reference to this range.
		 */
		if ((!old_roots || (old_roots && cur_old_count != old_roots))
		    && cur_new_count == new_roots) {
			qg->excl += num_bytes;
			qg->excl_cmpr += num_bytes;
			dirty = true;
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}
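
/*
 * In terms of the seq = 100 example from above: old_refcnt 100 and
 * new_refcnt 101 mean cur_old_count 0 and cur_new_count 1, i.e. the qgroup
 * gained a reference and num_bytes is added to rfer.  Exclusivity follows
 * the root counts: a qgroup that used to cover all old_roots but no longer
 * covers all new_roots loses num_bytes from excl, and one that now covers
 * every root gains it.
 */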
/*
 * If we removed a data extent and there were other references for that bytenr
 * then we need to lookup all referenced roots to make sure we still don't
 * reference this bytenr.  If we do then we can just discard this operation.
 */
static int check_existing_refs(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup_operation *oper)
{
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
				   oper->elem.seq, &roots);
	if (ret < 0)
		return ret;
	ret = 0;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		if (unode->val == oper->ref_root) {
			ret = 1;
			break;
		}
	}
	ulist_free(roots);
	btrfs_put_tree_mod_seq(fs_info, &oper->elem);

	return ret;
}
/*
 * If we share a reference across multiple roots then we may need to adjust
 * various qgroups referenced and exclusive counters.  The basic premise is
 * this
 *
 * 1) We have seq to represent a 0 count.  Instead of looping through all of
 * the qgroups and resetting their refcount to 0 we just constantly bump this
 * sequence number to act as the base reference count.  This means that if
 * anybody is equal to or below this sequence they were never referenced.  We
 * jack this sequence up by the number of roots we found each time in order to
 * make sure we don't have any overlap.
 *
 * 2) We first search all the roots that reference the area _except_ the root
 * we're acting on currently.  This makes up the old_refcnt of all the qgroups
 * that reference the area.
 *
 * 3) We walk all of the qgroups referenced by the root we are currently
 * acting on, and will either adjust old_refcnt in the case of a removal or
 * the new_refcnt in the case of an addition.
 *
 * 4) Finally we walk all the qgroups that are referenced by this range
 * including the root we are acting on currently.  We will adjust the counters
 * based on the number of roots we had and will have after this operation.
 *
 * Take this example as an illustration
 *
 *			[qgroup 1/0]
 *		     /         |          \
 *		[qg 0/0]   [qg 0/1]	[qg 0/2]
 *		   \          |            /
 *		  [	   extent	    ]
 *
 * Say we are adding a reference that is covered by qg 0/0.  The first step
 * would give a refcnt of 1 to qg 0/1 and 0/2 and a refcnt of 2 to qg 1/0 with
 * old_roots being 2.  Because it is adding new_roots will be 1.  We then go
 * through qg 0/0 which will get the new_refcnt set to 1 and add 1 to qg 1/0's
 * new_refcnt, bringing it to 3.  We then walk through all of the qgroups, we
 * notice that the old refcnt for qg 0/0 < the new refcnt, so we added a
 * reference and thus must add the size to the referenced bytes.  Everything
 * else is the same so nothing else changes.
 */
static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_qgroup_operation *oper)
{
	struct ulist *roots = NULL;
	struct ulist *qgroups, *tmp;
	struct btrfs_qgroup *qgroup;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	u64 seq;
	int old_roots = 0;
	int new_roots = 0;
	int ret = 0;

	if (oper->elem.seq) {
		ret = check_existing_refs(trans, fs_info, oper);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;
	}

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		return -ENOMEM;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ulist_free(qgroups);
		return -ENOMEM;
	}

	btrfs_get_tree_mod_seq(fs_info, &elem);
	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
				   &roots);
	btrfs_put_tree_mod_seq(fs_info, &elem);
	if (ret < 0) {
		ulist_free(qgroups);
		ulist_free(tmp);
		return ret;
	}
	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, oper->ref_root);
	if (!qgroup)
		goto out;
	seq = fs_info->qgroup_seq;

	/*
	 * So roots is the list of all the roots currently pointing at the
	 * bytenr, including the ref we are adding if we are adding, or not if
	 * we are removing a ref.  So we pass in the ref_root to skip that root
	 * in our calculations.  We set old_refcnt and new_refcnt cause who the
	 * hell knows what everything looked like before, and it doesn't matter
	 * except...
	 */
	ret = qgroup_calc_old_refcnt(fs_info, oper->ref_root, tmp, roots,
				     qgroups, seq, &old_roots, 0);
	if (ret < 0)
		goto out;

	/*
	 * Now adjust the refcounts of the qgroups that care about this
	 * reference, either the old_count in the case of removal or new_count
	 * in the case of an addition.
	 */
	ret = qgroup_calc_new_refcnt(fs_info, oper, qgroup, tmp, qgroups,
				     seq);
	if (ret < 0)
		goto out;

	/*
	 * ...in the case of removals.  If we had a removal before we got around
	 * to processing this operation then we need to find that guy and count
	 * his references as if they really existed so we don't end up screwing
	 * up the exclusive counts.  Then whenever we go to process the delete
	 * everything will be grand and we can account for whatever exclusive
	 * changes need to be made there.  We also have to pass in old_roots so
	 * we have an accurate count of the roots as it pertains to this
	 * operations view of the world.
	 */
	ret = qgroup_account_deleted_refs(fs_info, oper, tmp, qgroups, seq,
					  &old_roots);
	if (ret < 0)
		goto out;

	/*
	 * We are adding our root, need to adjust up the number of roots,
	 * otherwise old_roots is the number of roots we want.
	 */
	if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
		new_roots = old_roots + 1;
	} else {
		new_roots = old_roots;
		old_roots++;
	}
	fs_info->qgroup_seq += old_roots + 1;

	/*
	 * And now the magic happens, bless Arne for having a pretty elegant
	 * solution for this.
	 */
	qgroup_adjust_counters(fs_info, oper->ref_root, oper->num_bytes,
			       qgroups, seq, old_roots, new_roots, 0);
out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(qgroups);
	ulist_free(roots);
	ulist_free(tmp);
	return ret;
}
/*
 * Process a reference to a shared subtree. This type of operation is
 * queued during snapshot removal when we encounter extents which are
 * shared between more than one root.
 */
static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup_operation *oper)
{
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup_list *glist;
	struct ulist *parents;
	int ret = 0;
	int err;
	struct btrfs_qgroup *qg;
	u64 root_obj = 0;
	struct seq_list elem = SEQ_LIST_INIT(elem);

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	btrfs_get_tree_mod_seq(fs_info, &elem);
	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
				   elem.seq, &roots);
	btrfs_put_tree_mod_seq(fs_info, &elem);
	if (ret < 0)
		goto out;

	if (roots->nnodes != 1)
		goto out;

	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
	/*
	 * If we find our ref root then that means all refs
	 * this extent has to the root have not yet been
	 * deleted. In that case, we do nothing and let the
	 * last ref for this bytenr drive our update.
	 *
	 * This can happen for example if an extent is
	 * referenced multiple times in a snapshot (clone,
	 * etc). If we are in the middle of snapshot removal,
	 * queued updates for such an extent will find the
	 * root if we have not yet finished removing the
	 * snapshot.
	 */
	if (unode->val == oper->ref_root)
		goto out;

	root_obj = unode->val;
	BUG_ON(!root_obj);

	spin_lock(&fs_info->qgroup_lock);
	qg = find_qgroup_rb(fs_info, root_obj);
	if (!qg)
		goto out_unlock;

	qg->excl += oper->num_bytes;
	qg->excl_cmpr += oper->num_bytes;
	qgroup_dirty(fs_info, qg);

	/*
	 * Adjust counts for parent groups. First we find all
	 * parents, then in the 2nd loop we do the adjustment
	 * while adding parents of the parents to our ulist.
	 */
	list_for_each_entry(glist, &qg->groups, next_group) {
		err = ulist_add(parents, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
		if (err < 0) {
			ret = err;
			goto out_unlock;
		}
	}

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(parents, &uiter))) {
		qg = u64_to_ptr(unode->aux);
		qg->excl += oper->num_bytes;
		qg->excl_cmpr += oper->num_bytes;
		qgroup_dirty(fs_info, qg);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qg->groups, next_group) {
			err = ulist_add(parents, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (err < 0) {
				ret = err;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock(&fs_info->qgroup_lock);

out:
	ulist_free(roots);
	ulist_free(parents);
	return ret;
}
/*
 * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
 * from the fs. First, all roots referencing the extent are searched, and
 * then the space is accounted accordingly to the different roots. The
 * accounting algorithm works in 3 steps documented inline.
 */
static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				struct btrfs_qgroup_operation *oper)
{
	int ret = 0;

	if (!fs_info->quota_enabled)
		return 0;

	BUG_ON(!fs_info->quota_root);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= oper->bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return 0;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	ASSERT(is_fstree(oper->ref_root));

	trace_btrfs_qgroup_account(oper);

	switch (oper->type) {
	case BTRFS_QGROUP_OPER_ADD_EXCL:
	case BTRFS_QGROUP_OPER_SUB_EXCL:
		ret = qgroup_excl_accounting(fs_info, oper);
		break;
	case BTRFS_QGROUP_OPER_ADD_SHARED:
	case BTRFS_QGROUP_OPER_SUB_SHARED:
		ret = qgroup_shared_accounting(trans, fs_info, oper);
		break;
	case BTRFS_QGROUP_OPER_SUB_SUBTREE:
		ret = qgroup_subtree_accounting(trans, fs_info, oper);
		break;
	default:
		ASSERT(0);
	}
	return ret;
}
/*
 * Needs to be called every time we run delayed refs, even if there is an
 * error, in order to clean up outstanding operations.
 */
int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info)
{
	struct btrfs_qgroup_operation *oper;
	int ret = 0;

	while (!list_empty(&trans->qgroup_ref_list)) {
		oper = list_first_entry(&trans->qgroup_ref_list,
					struct btrfs_qgroup_operation, list);
		list_del_init(&oper->list);
		if (!ret || !trans->aborted)
			ret = btrfs_qgroup_account(trans, fs_info, oper);
		spin_lock(&fs_info->qgroup_op_lock);
		rb_erase(&oper->n, &fs_info->qgroup_op_tree);
		spin_unlock(&fs_info->qgroup_op_lock);
		btrfs_put_tree_mod_seq(fs_info, &oper->elem);
		kfree(oper);
	}
	return ret;
}
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;
	int start_rescan_worker = 0;

	if (!quota_root)
		goto out;

	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	if (!ret && start_rescan_worker) {
		ret = qgroup_rescan_init(fs_info, 0, 1);
		if (!ret) {
			qgroup_rescan_zero_tracking(fs_info);
			btrfs_queue_work(fs_info->qgroup_rescan_workers,
					 &fs_info->qgroup_rescan_work);
		}
		ret = 0;
	}

out:
	return ret;
}
/*
 * copy the accounting information between qgroups. This is necessary when a
 * snapshot or a subvolume is created
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
		goto out;

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
			if (!srcgroup) {
				ret = -EINVAL;
				goto out;
			}
			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		ret = update_qgroup_limit_item(trans, quota_root, objectid,
					       inherit->lim.flags,
					       inherit->lim.max_rfer,
					       inherit->lim.max_excl,
					       inherit->lim.rsv_rfer,
					       inherit->lim.rsv_excl);
		if (ret)
			goto out;
	}

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		rcu_read_lock();
		level_size = srcroot->nodesize;
		rcu_read_unlock();
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i) {
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret)
				goto out;
			++i_qgroups;
		}
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make
		 * sure our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
				      *i_qgroups);
		if (ret)
			goto unlock;
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
		i_qgroups += 2;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		i_qgroups += 2;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
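
/*
 * The level_size adjustments above all come from one fact: inheriting
 * happens right after the root was cloned, so the source and the new
 * snapshot differ by exactly one tree block, the root node.  Both end up
 * with excl == level_size, and any copied counters are corrected by a
 * single level_size in the appropriate direction.
 */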
/*
 * reserve some space for a qgroup and all its parents. The reservation takes
 * place with start_transaction or dealloc_reserve, similar to ENOSPC
 * accounting. If not enough space is available, EDQUOT is returned.
 * We assume that the requested space is new for all qgroups.
 */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		    qg->max_rfer) {
			ret = -EDQUOT;
			goto out;
		}

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		    qg->max_excl) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = u64_to_ptr(unode->aux);

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
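
/*
 * The reserve path is deliberately two-phase under one qgroup_lock hold:
 * first every affected qgroup (the owner and all ancestors reached through
 * ->groups) is checked against its limits, and only if none would be
 * exceeded is "reserved" bumped everywhere.  Either all qgroups take the
 * reservation or none do, so a later btrfs_qgroup_free() can blindly
 * subtract from the same set.
 */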
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
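/*
 * Unlike the reserve path, freeing checks no limits: qg->reserved only
 * shrinks here. Callers are expected to free exactly what they reserved;
 * an unmatched free would underflow the reservation counters, since
 * nothing above clamps them.
 */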
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	btrfs_err(trans->root->fs_info,
		"qgroups not uptodate in trans handle %p: list is%s empty, "
		"seq is %#x.%x",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
}
/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *qgroups,
		   struct ulist *tmp, struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	u64 num_bytes;
	u64 seq;
	int new_roots;
	int slot;
	int ret;

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);
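	/*
	 * From this point on we only look at the private copy in
	 * scratch_leaf: the path and the rescan lock were dropped above, so
	 * the items can be processed without holding up commits or the live
	 * accounting.
	 */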
	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->extent_root->nodesize;
		else
			num_bytes = found.offset;

		ulist_reinit(qgroups);
		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		new_roots = 0;
		ret = qgroup_calc_old_refcnt(fs_info, 0, tmp, roots, qgroups,
					     seq, &new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}

		ret = qgroup_adjust_counters(fs_info, 0, num_bytes, qgroups,
					     seq, 0, new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}
		spin_unlock(&fs_info->qgroup_lock);
		ulist_free(roots);
	}
out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}
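/*
 * Summary of the loop above: every EXTENT/METADATA item is re-accounted in
 * three steps: btrfs_find_all_roots() collects the roots referencing the
 * extent, qgroup_calc_old_refcnt() replays the refcounts under a reserved
 * seq window (the "qgroup_seq += nnodes + 1" above), and
 * qgroup_adjust_counters() folds num_bytes into the affected rfer/excl
 * counters.
 */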
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL, *qgroups = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 qgroups, tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(qgroups);
	ulist_free(tmp);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err == 2 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err == 2 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

	complete_all(&fs_info->qgroup_rescan_completion);
}
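/*
 * A note on the loop above: the worker opens a fresh transaction for every
 * qgroup_rescan_leaf() round instead of pinning one across the whole scan,
 * and the final round (err > 0) is committed rather than simply ended,
 * presumably so the finished rescan state reaches disk together with the
 * last batch of counters.
 */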
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}

		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);

	if (ret) {
err:
		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
		return ret;
	}

	return 0;
}
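/*
 * btrfs_qgroup_rescan() below calls this with init_flags == 1 to start a
 * fresh scan; a zero init_flags only re-validates an already recorded
 * RESCAN state (the mount-time resume path, by the look of the checks
 * above) without setting the flag again.
 */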
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}
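/*
 * Note on the function above: btrfs_join_transaction() attaches to the
 * currently running transaction, so the commit that follows is guaranteed
 * to include every delayed ref recorded before rescan_progress was reset;
 * only after that is it safe to zero the tracking information.
 */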
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
			&fs_info->qgroup_rescan_completion);

	return ret;
}
/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}