/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions, they provide some generic
 * infrastructure for managing object devices
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include "../../include/linux/libcfs/libcfs.h"

#include <linux/module.h>

/* hash_long() */
#include "../../include/linux/libcfs/libcfs_hash.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_disk.h"
#include "../include/lustre_fid.h"
#include "../include/lu_object.h"
#include "../include/cl_object.h"
#include "../include/lu_ref.h"
#include <linux/list.h>
enum {
	LU_CACHE_PERCENT_MAX	 = 50,
	LU_CACHE_PERCENT_DEFAULT = 20
};

#define LU_CACHE_NR_MAX_ADJUST		128
#define LU_CACHE_NR_UNLIMITED		-1
#define LU_CACHE_NR_DEFAULT		LU_CACHE_NR_UNLIMITED
#define LU_CACHE_NR_LDISKFS_LIMIT	LU_CACHE_NR_UNLIMITED
#define LU_CACHE_NR_ZFS_LIMIT		256

#define LU_SITE_BITS_MIN	12
#define LU_SITE_BITS_MAX	24
/**
 * total 256 buckets, we don't want too many buckets because:
 * - consume too much memory
 * - avoid unbalanced LRU list
 */
#define LU_SITE_BKT_BITS	8
static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
module_param(lu_cache_percent, int, 0644);
MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");

static long lu_cache_nr = LU_CACHE_NR_DEFAULT;
module_param(lu_cache_nr, long, 0644);
MODULE_PARM_DESC(lu_cache_nr, "Maximum number of objects in lu_object cache");
static void lu_object_free(const struct lu_env *env, struct lu_object *o);
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *top;
	struct lu_site *site;
	struct lu_object *orig;
	struct cfs_hash_bd bd;
	const struct lu_fid *fid;

	top = o->lo_header;
	site = o->lo_dev->ld_site;
	orig = o;

	/*
	 * Till we have full fids-on-OST implemented, anonymous objects
	 * are possible in OSP. Such an object isn't listed in the site,
	 * so we should not remove it from the site.
	 */
	fid = lu_object_fid(o);
	if (fid_is_zero(fid)) {
		LASSERT(!top->loh_hash.next && !top->loh_hash.pprev);
		LASSERT(list_empty(&top->loh_lru));
		if (!atomic_dec_and_test(&top->loh_ref))
			return;
		list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
			if (o->lo_ops->loo_object_release)
				o->lo_ops->loo_object_release(env, o);
		}
		lu_object_free(env, orig);
		return;
	}

	cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
	bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

	if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
		if (lu_object_is_dying(top)) {
			/*
			 * somebody may be waiting for this, currently only
			 * used for cl_object, see cl_object_put_last().
			 */
			wake_up_all(&bkt->lsb_marche_funebre);
		}
		return;
	}

	/*
	 * When last reference is released, iterate over object
	 * layers, and notify them that object is no longer busy.
	 */
	list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_release)
			o->lo_ops->loo_object_release(env, o);
	}

	if (!lu_object_is_dying(top)) {
		LASSERT(list_empty(&top->loh_lru));
		list_add_tail(&top->loh_lru, &bkt->lsb_lru);
		bkt->lsb_lru_len++;
		lprocfs_counter_incr(site->ls_stats, LU_SS_LRU_LEN);
		CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p, lru_len: %ld\n",
		       o, site->ls_obj_hash, bkt, bkt->lsb_lru_len);
		cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
		return;
	}

	/*
	 * If object is dying (will not be cached), then remove it
	 * from hash table and LRU.
	 *
	 * This is done with hash table and LRU lists locked. As the only
	 * way to acquire first reference to previously unreferenced
	 * object is through hash-table lookup (lu_object_find()),
	 * or LRU scanning (lu_site_purge()), that are done under hash-table
	 * and LRU lock, no race with concurrent object lookup is possible
	 * and we can safely destroy object below.
	 */
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
		cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
	cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
	/*
	 * Object was already removed from hash and lru above, can
	 * kill it.
	 */
	lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
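/*
 * A minimal lookup/release sketch pairing lu_object_find_at() (defined
 * below) with lu_object_put(); "dev" and "fid" stand in for caller-supplied
 * values:
 *
 *	struct lu_object *o;
 *
 *	o = lu_object_find_at(env, dev, fid, NULL);
 *	if (IS_ERR(o))
 *		return PTR_ERR(o);
 *	...use the object...
 *	lu_object_put(env, o);
 */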
/**
 * Kill the object and take it out of LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
		struct lu_site *site = o->lo_dev->ld_site;
		struct cfs_hash *obj_hash = site->ls_obj_hash;
		struct cfs_hash_bd bd;

		cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
		if (!list_empty(&top->loh_lru)) {
			struct lu_site_bkt_data *bkt;

			list_del_init(&top->loh_lru);
			bkt = cfs_hash_bd_extra_get(obj_hash, &bd);
			bkt->lsb_lru_len--;
			lprocfs_counter_decr(site->ls_stats, LU_SS_LRU_LEN);
		}
		cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
		cfs_hash_bd_unlock(obj_hash, &bd, 1);
	}
}
EXPORT_SYMBOL(lu_object_unhash);
/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
					 struct lu_device *dev,
					 const struct lu_fid *f,
					 const struct lu_object_conf *conf)
{
	struct lu_object *scan;
	struct lu_object *top;
	struct list_head *layers;
	unsigned int init_mask = 0;
	unsigned int init_flag;
	int clean;
	int result;

	/*
	 * Create top-level object slice. This will also create
	 * lu_object_header.
	 */
	top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
	if (!top)
		return ERR_PTR(-ENOMEM);
	if (IS_ERR(top))
		return top;
	/*
	 * This is the only place where object fid is assigned. It's constant
	 * after this point.
	 */
	top->lo_header->loh_fid = *f;
	layers = &top->lo_header->loh_layers;
	do {
		/*
		 * Call ->loo_object_init() repeatedly, until no more new
		 * object slices are created.
		 */
		clean = 1;
		init_flag = 1;
		list_for_each_entry(scan, layers, lo_linkage) {
			if (init_mask & init_flag)
				goto next;
			clean = 0;
			scan->lo_header = top->lo_header;
			result = scan->lo_ops->loo_object_init(env, scan, conf);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
			init_mask |= init_flag;
next:
			init_flag <<= 1;
		}
	} while (!clean);

	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_start) {
			result = scan->lo_ops->loo_object_start(env, scan);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
		}
	}

	lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
	return top;
}
/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_site *site;
	struct lu_object *scan;
	struct list_head *layers;
	struct list_head splice;

	site = o->lo_dev->ld_site;
	layers = &o->lo_header->loh_layers;
	bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);

	/*
	 * First call ->loo_object_delete() method to release all resources.
	 */
	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_delete)
			scan->lo_ops->loo_object_delete(env, scan);
	}

	/*
	 * Then, splice object layers into stand-alone list, and call
	 * ->loo_object_free() on all layers to free memory. Splice is
	 * necessary, because lu_object_header is freed together with the
	 * top-level slice.
	 */
	INIT_LIST_HEAD(&splice);
	list_splice_init(layers, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Free layers in bottom-to-top order, so that object header
		 * lives as long as possible and ->loo_object_free() methods
		 * can look at its contents.
		 */
		o = container_of0(splice.prev, struct lu_object, lo_linkage);
		list_del_init(&o->lo_linkage);
		o->lo_ops->loo_object_free(env, o);
	}

	if (waitqueue_active(&bkt->lsb_marche_funebre))
		wake_up_all(&bkt->lsb_marche_funebre);
}
/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
	struct lu_object_header *h;
	struct lu_object_header *temp;
	struct lu_site_bkt_data *bkt;
	struct cfs_hash_bd bd;
	struct cfs_hash_bd bd2;
	struct list_head dispose;
	int did_sth;
	unsigned int start = 0;
	int count;
	int bnr;
	unsigned int i;

	if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
		return 0;

	INIT_LIST_HEAD(&dispose);
	/*
	 * Under LRU list lock, scan LRU list and move unreferenced objects to
	 * the dispose list, removing them from LRU and hash table.
	 */
	if (nr != ~0)
		start = s->ls_purge_start;
	bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
	/*
	 * It doesn't make any sense to make purge threads parallel, that can
	 * only bring troubles to us. See LU-5331.
	 */
	mutex_lock(&s->ls_purge_mutex);
	did_sth = 0;
	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		if (i < start)
			continue;
		count = bnr;
		cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

		list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
			LASSERT(atomic_read(&h->loh_ref) == 0);

			cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
			LASSERT(bd.bd_bucket == bd2.bd_bucket);

			cfs_hash_bd_del_locked(s->ls_obj_hash,
					       &bd2, &h->loh_hash);
			list_move(&h->loh_lru, &dispose);
			bkt->lsb_lru_len--;
			lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
			if (did_sth == 0)
				did_sth = 1;

			if (nr != ~0 && --nr == 0)
				break;

			if (count > 0 && --count == 0)
				break;
		}
		cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
		cond_resched();
		/*
		 * Free everything on the dispose list. This is safe against
		 * races due to the reasons described in lu_object_put().
		 */
		while (!list_empty(&dispose)) {
			h = container_of0(dispose.next,
					  struct lu_object_header, loh_lru);
			list_del_init(&h->loh_lru);
			lu_object_free(env, lu_object_top(h));
			lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
		}

		if (nr == 0)
			break;
	}
	mutex_unlock(&s->ls_purge_mutex);

	if (nr != 0 && did_sth && start != 0) {
		start = 0; /* restart from the first bucket */
		goto again;
	}
	/* race on s->ls_purge_start, but nobody cares */
	s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

	return nr;
}
EXPORT_SYMBOL(lu_site_purge);
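/*
 * Passing ~0 as \a nr asks for a full purge; lu_stack_fini() below relies
 * on this to empty the cache before the device stack is torn down:
 *
 *	lu_site_purge(env, site, ~0);
 */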
/*
 * Object printing.
 *
 * Code below has to jump through certain loops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into libcfs_debug_msg() interface that assumes that each message
 * supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until terminating newline
 * character is detected.
 */

enum {
	/**
	 * Maximal line size.
	 *
	 * XXX overflow is not handled correctly.
	 */
	LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
	/**
	 * Temporary buffer.
	 */
	char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
static struct lu_context_key lu_global_key = {
	.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
		    LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
	.lct_init = lu_global_key_init,
	.lct_fini = lu_global_key_fini
};
/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
		      void *cookie, const char *format, ...)
{
	struct libcfs_debug_msg_data *msgdata = cookie;
	struct lu_cdebug_data *key;
	int used;
	int complete;
	va_list args;

	va_start(args, format);

	key = lu_context_key_get(&env->le_ctx, &lu_global_key);

	used = strlen(key->lck_area);
	complete = format[strlen(format) - 1] == '\n';
	/*
	 * Append new chunk to the buffer.
	 */
	vsnprintf(key->lck_area + used,
		  ARRAY_SIZE(key->lck_area) - used, format, args);
	if (complete) {
		if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
			libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
		key->lck_area[0] = 0;
	}
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
			    lu_printer_t printer,
			    const struct lu_object_header *hdr)
{
	(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
		   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
		   PFID(&hdr->loh_fid),
		   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
		   list_empty((struct list_head *)&hdr->loh_lru) ? "" : " lru",
		   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t printer, const struct lu_object *o)
{
	static const char ruler[] = "........................................";
	struct lu_object_header *top;
	int depth = 4;

	top = o->lo_header;
	lu_object_header_print(env, cookie, printer, top);
	(*printer)(env, cookie, "{\n");

	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		/*
		 * print `.' \a depth times followed by type name and address
		 */
		(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
			   o->lo_dev->ld_type->ldt_name, o);

		if (o->lo_ops->loo_object_print)
			(*o->lo_ops->loo_object_print)(env, cookie, printer, o);

		(*printer)(env, cookie, "\n");
	}

	(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
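/*
 * Sketch of how the two printers above combine to emit an object into the
 * debug log (this mirrors the LU_OBJECT_DEBUG() pattern; "obj" is assumed
 * to be a valid object pointer):
 *
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_OTHER, NULL);
 *
 *	lu_object_print(env, &msgdata, lu_cdebug_printer, obj);
 */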
static struct lu_object *htable_lookup(struct lu_site *s,
				       struct cfs_hash_bd *bd,
				       const struct lu_fid *f,
				       wait_queue_t *waiter,
				       __u64 *version)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *h;
	struct hlist_node *hnode;
	__u64 ver = cfs_hash_bd_version_get(bd);

	if (*version == ver)
		return ERR_PTR(-ENOENT);

	*version = ver;
	bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
	/* cfs_hash_bd_peek_locked is a somehow "internal" function
	 * of cfs_hash, it doesn't add refcount on object.
	 */
	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
	if (!hnode) {
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
		return ERR_PTR(-ENOENT);
	}

	h = container_of0(hnode, struct lu_object_header, loh_hash);
	if (likely(!lu_object_is_dying(h))) {
		cfs_hash_get(s->ls_obj_hash, hnode);
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
		if (!list_empty(&h->loh_lru)) {
			list_del_init(&h->loh_lru);
			bkt->lsb_lru_len--;
			lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
		}
		return lu_object_top(h);
	}

	/*
	 * Lookup found an object being destroyed; this object cannot be
	 * returned (to assure that references to dying objects are eventually
	 * drained), and moreover, lookup has to wait until object is freed.
	 */
	init_waitqueue_entry(waiter, current);
	add_wait_queue(&bkt->lsb_marche_funebre, waiter);
	set_current_state(TASK_UNINTERRUPTIBLE);
	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
	return ERR_PTR(-EAGAIN);
}
/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
static struct lu_object *lu_object_find(const struct lu_env *env,
					struct lu_device *dev,
					const struct lu_fid *f,
					const struct lu_object_conf *conf)
{
	return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
/**
 * Limit the lu_object cache to a maximum of lu_cache_nr objects. Because
 * the calculation for the number of objects to reclaim is not covered by
 * a lock the maximum number of objects is capped by LU_CACHE_MAX_ADJUST.
 * This ensures that many concurrent threads will not accidentally purge
 * the entire cache.
 */
static void lu_object_limit(const struct lu_env *env, struct lu_device *dev)
{
	__u64 size, nr;

	if (lu_cache_nr == LU_CACHE_NR_UNLIMITED)
		return;

	size = cfs_hash_size_get(dev->ld_site->ls_obj_hash);
	nr = (__u64)lu_cache_nr;
	if (size > nr)
		lu_site_purge(env, dev->ld_site,
			      min_t(__u64, size - nr, LU_CACHE_NR_MAX_ADJUST));
}
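/*
 * Worked example (assumed numbers): with lu_cache_nr = 1000 and 1200 objects
 * hashed, size - nr = 200, so one call purges min(200, LU_CACHE_NR_MAX_ADJUST)
 * = 128 objects and leaves the rest to later callers, preventing concurrent
 * threads from collectively draining the whole cache.
 */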
static struct lu_object *lu_object_new(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *o;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;

	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	hs = dev->ld_site->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	cfs_hash_bd_unlock(hs, &bd, 1);

	lu_object_limit(env, dev);

	return o;
}
/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
					    struct lu_device *dev,
					    const struct lu_fid *f,
					    const struct lu_object_conf *conf,
					    wait_queue_t *waiter)
{
	struct lu_object *o;
	struct lu_object *shadow;
	struct lu_site *s;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	__u64 version = 0;

	/*
	 * This uses standard index maintenance protocol:
	 *
	 *     - search index under lock, and return object if found;
	 *     - otherwise, unlock index, allocate new object;
	 *     - lock index and search again;
	 *     - if nothing is found (usual case), insert newly created
	 *       object into index;
	 *     - otherwise (race: other thread inserted object), free
	 *       object just allocated.
	 *     - unlock index;
	 *     - return object.
	 *
	 * For "LOC_F_NEW" case, we are sure the object is newly established.
	 * It is unnecessary to perform lookup-alloc-lookup-insert, instead,
	 * just alloc and insert directly.
	 *
	 * If dying object is found during index search, add @waiter to the
	 * site wait-queue and return ERR_PTR(-EAGAIN).
	 */
	if (conf && conf->loc_flags & LOC_F_NEW)
		return lu_object_new(env, dev, f, conf);

	s = dev->ld_site;
	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	o = htable_lookup(s, &bd, f, waiter, &version);
	cfs_hash_bd_unlock(hs, &bd, 1);
	if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
		return o;

	/*
	 * Allocate new object. This may result in rather complicated
	 * operations, including fld queries, inode loading, etc.
	 */
	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	LASSERT(lu_fid_eq(lu_object_fid(o), f));

	cfs_hash_bd_lock(hs, &bd, 1);

	shadow = htable_lookup(s, &bd, f, waiter, &version);
	if (likely(PTR_ERR(shadow) == -ENOENT)) {
		cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
		cfs_hash_bd_unlock(hs, &bd, 1);

		lu_object_limit(env, dev);

		return o;
	}

	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
	cfs_hash_bd_unlock(hs, &bd, 1);
	lu_object_free(env, o);
	return shadow;
}
/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
				    struct lu_device *dev,
				    const struct lu_fid *f,
				    const struct lu_object_conf *conf)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object *obj;
	wait_queue_t wait;

	while (1) {
		obj = lu_object_find_try(env, dev, f, conf, &wait);
		if (obj != ERR_PTR(-EAGAIN))
			return obj;
		/*
		 * lu_object_find_try() already added waiter into the
		 * wait queue.
		 */
		schedule();
		bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
		remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
	}
}
EXPORT_SYMBOL(lu_object_find_at);
/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *top;
	struct lu_object *obj;

	top = lu_object_find(env, dev, f, conf);
	if (IS_ERR(top))
		return top;

	obj = lu_object_locate(top->lo_header, dev->ld_type);
	if (unlikely(!obj)) {
		lu_object_put(env, top);
		obj = ERR_PTR(-ENOENT);
	}

	return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
/**
 * Global list of all device types.
 */
static LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
	int result = 0;

	atomic_set(&ldt->ldt_device_nr, 0);
	INIT_LIST_HEAD(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_init)
		result = ldt->ldt_ops->ldto_init(ldt);

	if (!result) {
		spin_lock(&obd_types_lock);
		list_add(&ldt->ldt_linkage, &lu_device_types);
		spin_unlock(&obd_types_lock);
	}

	return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
	spin_lock(&obd_types_lock);
	list_del_init(&ldt->ldt_linkage);
	spin_unlock(&obd_types_lock);
	if (ldt->ldt_ops->ldto_fini)
		ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);
/**
 * Global list of all sites on this node
 */
static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
	struct lu_env *lsp_env;
	void *lsp_cookie;
	lu_printer_t lsp_printer;
};
static int
lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		  struct hlist_node *hnode, void *data)
{
	struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (!list_empty(&h->loh_layers)) {
		const struct lu_object *o;

		o = lu_object_top(h);
		lu_object_print(arg->lsp_env, arg->lsp_cookie,
				arg->lsp_printer, o);
	} else {
		lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
				       arg->lsp_printer, h);
	}
	return 0;
}
/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
		   lu_printer_t printer)
{
	struct lu_site_print_arg arg = {
		.lsp_env     = (struct lu_env *)env,
		.lsp_cookie  = cookie,
		.lsp_printer = printer,
	};

	cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);
/**
 * Return desired hash table order.
 */
static unsigned long lu_htable_order(struct lu_device *top)
{
	unsigned long bits_max = LU_SITE_BITS_MAX;
	unsigned long cache_size;
	unsigned long bits;

	/*
	 * Calculate hash table size, assuming that we want reasonable
	 * performance when 20% of total memory is occupied by cache of
	 * lu_objects.
	 *
	 * Size of lu_object is (arbitrary) taken as 1K (together with inode).
	 */
	cache_size = totalram_pages;

#if BITS_PER_LONG == 32
	/* limit hashtable size for lowmem systems to low RAM */
	if (cache_size > 1 << (30 - PAGE_SHIFT))
		cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
#endif

	/* clear off unreasonable cache setting. */
	if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
		CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
		      lu_cache_percent, LU_CACHE_PERCENT_MAX,
		      LU_CACHE_PERCENT_DEFAULT);

		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
	}
	cache_size = cache_size / 100 * lu_cache_percent *
		(PAGE_SIZE / 1024);

	for (bits = 1; (1 << bits) < cache_size; ++bits) {
		;
	}
	return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
}
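/*
 * Worked example (assumed numbers): a 64-bit node with 4 GiB of RAM and
 * 4 KiB pages has totalram_pages ~= 2^20; with the default lu_cache_percent
 * of 20, cache_size = 2^20 / 100 * 20 * (4096 / 1024) ~= 838860, the loop
 * above stops at bits = 20, and the clamp keeps the result within
 * [LU_SITE_BITS_MIN, LU_SITE_BITS_MAX] = [12, 24].
 */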
static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
				const void *key, unsigned mask)
{
	struct lu_fid *fid = (struct lu_fid *)key;
	__u32 hash;

	hash = fid_flatten32(fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	hash = hash_long(hash, hs->hs_bkt_bits);

	/* give me another random factor */
	hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}
static void *lu_obj_hop_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return &h->loh_fid;
}
static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	atomic_inc(&h->loh_ref);
}

static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
	LBUG(); /* we should never call it */
}
static struct cfs_hash_ops lu_site_hash_ops = {
	.hs_hash	= lu_obj_hop_hash,
	.hs_key		= lu_obj_hop_key,
	.hs_keycmp	= lu_obj_hop_keycmp,
	.hs_object	= lu_obj_hop_object,
	.hs_get		= lu_obj_hop_get,
	.hs_put_locked	= lu_obj_hop_put_locked,
};
static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	if (list_empty(&d->ld_linkage))
		list_add(&d->ld_linkage, &s->ls_ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
/**
 * Initialize site \a s, with \a d as the top level device.
 */
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
	struct lu_site_bkt_data *bkt;
	struct cfs_hash_bd bd;
	unsigned long bits;
	unsigned long i;
	char name[16];

	memset(s, 0, sizeof(*s));
	mutex_init(&s->ls_purge_mutex);
	snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
	for (bits = lu_htable_order(top); bits >= LU_SITE_BITS_MIN; bits--) {
		s->ls_obj_hash = cfs_hash_create(name, bits, bits,
						 bits - LU_SITE_BKT_BITS,
						 sizeof(*bkt), 0, 0,
						 &lu_site_hash_ops,
						 CFS_HASH_SPIN_BKTLOCK |
						 CFS_HASH_NO_ITEMREF |
						 CFS_HASH_DEPTH |
						 CFS_HASH_ASSERT_EMPTY |
						 CFS_HASH_COUNTER);
		if (s->ls_obj_hash)
			break;
	}

	if (!s->ls_obj_hash) {
		CERROR("failed to create lu_site hash with bits: %lu\n", bits);
		return -ENOMEM;
	}

	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
		INIT_LIST_HEAD(&bkt->lsb_lru);
		init_waitqueue_head(&bkt->lsb_marche_funebre);
	}

	s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
	if (!s->ls_stats) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
		return -ENOMEM;
	}

	lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
			     0, "created", "created");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
			     0, "cache_hit", "cache_hit");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
			     0, "cache_miss", "cache_miss");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
			     0, "cache_race", "cache_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
			     0, "cache_death_race", "cache_death_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
			     0, "lru_purged", "lru_purged");
	/*
	 * Unlike other counters, lru_len can be decremented so
	 * need lc_sum instead of just lc_count
	 */
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_LEN,
			     LPROCFS_CNTR_AVGMINMAX, "lru_len", "lru_len");

	INIT_LIST_HEAD(&s->ls_linkage);
	s->ls_top_dev = top;
	top->ld_site = s;
	lu_device_get(top);
	lu_ref_add(&top->ld_reference, "site-top", s);

	INIT_LIST_HEAD(&s->ls_ld_linkage);
	spin_lock_init(&s->ls_ld_lock);

	lu_dev_add_linkage(s, top);

	return 0;
}
EXPORT_SYMBOL(lu_site_init);
/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
	mutex_lock(&lu_sites_guard);
	list_del_init(&s->ls_linkage);
	mutex_unlock(&lu_sites_guard);

	if (s->ls_obj_hash) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
	}

	if (s->ls_top_dev) {
		s->ls_top_dev->ld_site = NULL;
		lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
		lu_device_put(s->ls_top_dev);
		s->ls_top_dev = NULL;
	}

	if (s->ls_stats)
		lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);
/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
	int result;

	mutex_lock(&lu_sites_guard);
	result = lu_context_refill(&lu_shrink_env.le_ctx);
	if (result == 0)
		list_add(&s->ls_linkage, &lu_sites);
	mutex_unlock(&lu_sites_guard);
	return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
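/*
 * The expected call order, sketched from the functions above ("site" and
 * "top" are caller-allocated; error handling omitted):
 *
 *	rc = lu_site_init(site, top);
 *	rc = lu_site_init_finish(site);
 *	...
 *	lu_site_fini(site);
 */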
/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
	atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
	LASSERT(atomic_read(&d->ld_ref) > 0);
	atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);
/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
	if (atomic_inc_return(&t->ldt_device_nr) == 1 &&
	    t->ldt_ops->ldto_start)
		t->ldt_ops->ldto_start(t);

	memset(d, 0, sizeof(*d));
	atomic_set(&d->ld_ref, 0);
	d->ld_type = t;
	lu_ref_init(&d->ld_reference);
	INIT_LIST_HEAD(&d->ld_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_device_init);
/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
	struct lu_device_type *t = d->ld_type;

	if (d->ld_obd) {
		d->ld_obd->obd_lu_dev = NULL;
		d->ld_obd = NULL;
	}

	lu_ref_fini(&d->ld_reference);
	LASSERTF(atomic_read(&d->ld_ref) == 0,
		 "Refcount is %u\n", atomic_read(&d->ld_ref));
	LASSERT(atomic_read(&t->ldt_device_nr) > 0);

	if (atomic_dec_and_test(&t->ldt_device_nr) &&
	    t->ldt_ops->ldto_stop)
		t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o, struct lu_object_header *h,
		   struct lu_device *d)
{
	memset(o, 0, sizeof(*o));
	o->lo_header = h;
	o->lo_dev = d;
	lu_device_get(d);
	lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
	INIT_LIST_HEAD(&o->lo_linkage);

	return 0;
}
EXPORT_SYMBOL(lu_object_init);
/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
	struct lu_device *dev = o->lo_dev;

	LASSERT(list_empty(&o->lo_linkage));

	if (dev) {
		lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
			      "lu_object", o);
		lu_device_put(dev);
		o->lo_dev = NULL;
	}
}
EXPORT_SYMBOL(lu_object_fini);
/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
	list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
	list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
	memset(h, 0, sizeof(*h));
	atomic_set(&h->loh_ref, 1);
	INIT_HLIST_NODE(&h->loh_hash);
	INIT_LIST_HEAD(&h->loh_lru);
	INIT_LIST_HEAD(&h->loh_layers);
	lu_ref_init(&h->loh_reference);
	return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
	LASSERT(list_empty(&h->loh_layers));
	LASSERT(list_empty(&h->loh_lru));
	LASSERT(hlist_unhashed(&h->loh_hash));
	lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
				   const struct lu_device_type *dtype)
{
	struct lu_object *o;

	list_for_each_entry(o, &h->loh_layers, lo_linkage) {
		if (o->lo_dev->ld_type == dtype)
			return o;
	}
	return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
	struct lu_site *site = top->ld_site;
	struct lu_device *scan;
	struct lu_device *next;

	lu_site_purge(env, site, ~0);
	for (scan = top; scan; scan = next) {
		next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
		lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
		lu_device_put(scan);
	}

	/* purge again. */
	lu_site_purge(env, site, ~0);

	for (scan = top; scan; scan = next) {
		const struct lu_device_type *ldt = scan->ld_type;
		struct obd_type *type;

		next = ldt->ldt_ops->ldto_device_free(env, scan);
		type = ldt->ldt_obd_type;
		if (type) {
			type->typ_refcnt--;
			class_put_type(type);
		}
	}
}
enum {
	/**
	 * Maximal number of tld slots.
	 */
	LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);
static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version;
/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
	int result;
	unsigned int i;

	LASSERT(key->lct_init);
	LASSERT(key->lct_fini);
	LASSERT(key->lct_tags != 0);

	result = -ENFILE;
	spin_lock(&lu_keys_guard);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		if (!lu_keys[i]) {
			key->lct_index = i;
			atomic_set(&key->lct_used, 1);
			lu_keys[i] = key;
			lu_ref_init(&key->lct_reference);
			result = 0;
			++key_set_version;
			break;
		}
	}
	spin_unlock(&lu_keys_guard);
	return result;
}
EXPORT_SYMBOL(lu_context_key_register);
static void key_fini(struct lu_context *ctx, int index)
{
	if (ctx->lc_value && ctx->lc_value[index]) {
		struct lu_context_key *key;

		key = lu_keys[index];
		LASSERT(atomic_read(&key->lct_used) > 1);

		key->lct_fini(ctx, key, ctx->lc_value[index]);
		lu_ref_del(&key->lct_reference, "ctx", ctx);
		atomic_dec(&key->lct_used);

		if ((ctx->lc_tags & LCT_NOREF) == 0) {
#ifdef CONFIG_MODULE_UNLOAD
			LINVRNT(module_refcount(key->lct_owner) > 0);
#endif
			module_put(key->lct_owner);
		}
		ctx->lc_value[index] = NULL;
	}
}
/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
	LASSERT(atomic_read(&key->lct_used) >= 1);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

	lu_context_key_quiesce(key);

	++key_set_version;
	spin_lock(&lu_keys_guard);
	key_fini(&lu_shrink_env.le_ctx, key->lct_index);

	/**
	 * Wait until all transient contexts referencing this key have
	 * run lu_context_key::lct_fini() method.
	 */
	while (atomic_read(&key->lct_used) > 1) {
		spin_unlock(&lu_keys_guard);
		CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n",
		       key->lct_owner ? key->lct_owner->name : "", key,
		       atomic_read(&key->lct_used));
		schedule();
		spin_lock(&lu_keys_guard);
	}
	if (lu_keys[key->lct_index]) {
		lu_keys[key->lct_index] = NULL;
		lu_ref_fini(&key->lct_reference);
	}
	spin_unlock(&lu_keys_guard);

	LASSERTF(atomic_read(&key->lct_used) == 1,
		 "key has instances: %d\n",
		 atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
	struct lu_context_key *key = k;
	va_list args;
	int result;

	va_start(args, k);
	do {
		result = lu_context_key_register(key);
		if (result)
			break;
		key = va_arg(args, struct lu_context_key *);
	} while (key);
	va_end(args);

	if (result) {
		va_start(args, k);
		while (k != key) {
			lu_context_key_degister(k);
			k = va_arg(args, struct lu_context_key *);
		}
		va_end(args);
	}

	return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
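/*
 * The usual key definition pattern, mirroring lu_global_key above ("foo"
 * and struct foo_thread_info are illustrative names only):
 *
 *	LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *
 *	static struct lu_context_key foo_thread_key = {
 *		.lct_tags = LCT_MD_THREAD,
 *		.lct_init = foo_key_init,
 *		.lct_fini = foo_key_fini
 *	};
 *
 * Once registered, per-context values are fetched with
 * lu_context_key_get(&env->le_ctx, &foo_thread_key).
 */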
/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_degister(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_revive(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiescent a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_quiesce(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
			 const struct lu_context_key *key)
{
	LINVRNT(ctx->lc_state == LCS_ENTERED);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
	LASSERT(lu_keys[key->lct_index] == key);
	return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);
/**
 * List of remembered contexts. XXX document me.
 */
static LIST_HEAD(lu_context_remembered);
/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
	struct lu_context *ctx;

	if (!(key->lct_tags & LCT_QUIESCENT)) {
		/*
		 * XXX memory barrier has to go here.
		 */
		spin_lock(&lu_keys_guard);
		key->lct_tags |= LCT_QUIESCENT;

		/**
		 * Wait until all lu_context_key::lct_init() methods
		 * have completed.
		 */
		while (atomic_read(&lu_key_initing_cnt) > 0) {
			spin_unlock(&lu_keys_guard);
			CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\" %p, %d (%d)\n",
			       key->lct_owner ? key->lct_owner->name : "",
			       key, atomic_read(&key->lct_used),
			       atomic_read(&lu_key_initing_cnt));
			schedule();
			spin_lock(&lu_keys_guard);
		}

		list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
			key_fini(ctx, key->lct_index);
		spin_unlock(&lu_keys_guard);
		++key_set_version;
	}
}
void lu_context_key_revive(struct lu_context_key *key)
{
	key->lct_tags &= ~LCT_QUIESCENT;
	++key_set_version;
}
static void keys_fini(struct lu_context *ctx)
{
	unsigned int i;

	if (!ctx->lc_value)
		return;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
		key_fini(ctx, i);

	kfree(ctx->lc_value);
	ctx->lc_value = NULL;
}
static int keys_fill(struct lu_context *ctx)
{
	unsigned int i;

	/*
	 * A serialisation with lu_context_key_quiesce() is needed, but some
	 * "key->lct_init()" are calling kernel memory allocation routine and
	 * can't be called while holding a spin_lock.
	 * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
	 * to ensure the start of the serialisation.
	 * An atomic_t variable is still used, in order not to reacquire the
	 * lock when decrementing the counter.
	 */
	spin_lock(&lu_keys_guard);
	atomic_inc(&lu_key_initing_cnt);
	spin_unlock(&lu_keys_guard);

	LINVRNT(ctx->lc_value);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (!ctx->lc_value[i] && key &&
		    (key->lct_tags & ctx->lc_tags) &&
		    /*
		     * Don't create values for a LCT_QUIESCENT key, as this
		     * will pin module owning a key.
		     */
		    !(key->lct_tags & LCT_QUIESCENT)) {
			void *value;

			LINVRNT(key->lct_init);
			LINVRNT(key->lct_index == i);

			LASSERT(key->lct_owner);
			if (!(ctx->lc_tags & LCT_NOREF) &&
			    !try_module_get(key->lct_owner)) {
				/* module is unloading, skip this key */
				continue;
			}

			value = key->lct_init(ctx, key);
			if (unlikely(IS_ERR(value))) {
				atomic_dec(&lu_key_initing_cnt);
				return PTR_ERR(value);
			}

			lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
			atomic_inc(&key->lct_used);
			/*
			 * This is the only place in the code, where an
			 * element of ctx->lc_value[] array is set to non-NULL
			 * value.
			 */
			ctx->lc_value[i] = value;
			if (key->lct_exit)
				ctx->lc_tags |= LCT_HAS_EXIT;
		}
		ctx->lc_version = key_set_version;
	}
	atomic_dec(&lu_key_initing_cnt);
	return 0;
}
static int keys_init(struct lu_context *ctx)
{
	ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]),
				GFP_NOFS);
	if (likely(ctx->lc_value))
		return keys_fill(ctx);

	return -ENOMEM;
}
/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
	int rc;

	memset(ctx, 0, sizeof(*ctx));
	ctx->lc_state = LCS_INITIALIZED;
	ctx->lc_tags = tags;
	if (tags & LCT_REMEMBER) {
		spin_lock(&lu_keys_guard);
		list_add(&ctx->lc_remember, &lu_context_remembered);
		spin_unlock(&lu_keys_guard);
	} else {
		INIT_LIST_HEAD(&ctx->lc_remember);
	}

	rc = keys_init(ctx);
	if (rc != 0)
		lu_context_fini(ctx);

	return rc;
}
EXPORT_SYMBOL(lu_context_init);
/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_FINALIZED;

	if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
		LASSERT(list_empty(&ctx->lc_remember));
		keys_fini(ctx);
	} else { /* could race with key degister */
		spin_lock(&lu_keys_guard);
		keys_fini(ctx);
		list_del_init(&ctx->lc_remember);
		spin_unlock(&lu_keys_guard);
	}
}
EXPORT_SYMBOL(lu_context_fini);
/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);
/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
	unsigned int i;

	LINVRNT(ctx->lc_state == LCS_ENTERED);
	ctx->lc_state = LCS_LEFT;
	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
			/* could race with key quiescency */
			if (ctx->lc_tags & LCT_REMEMBER)
				spin_lock(&lu_keys_guard);
			if (ctx->lc_value[i]) {
				struct lu_context_key *key;

				key = lu_keys[i];
				if (key->lct_exit)
					key->lct_exit(ctx,
						      key, ctx->lc_value[i]);
			}
			if (ctx->lc_tags & LCT_REMEMBER)
				spin_unlock(&lu_keys_guard);
		}
	}
}
EXPORT_SYMBOL(lu_context_exit);
/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded and removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
	return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
/**
 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
 * obd being added. Currently, this is only used on client side, specifically
 * for echo device client, for other stack (like ptlrpc threads), context are
 * predefined when the lu_device type are registered, during the module probe
 * phase.
 */
__u32 lu_context_tags_default;
__u32 lu_session_tags_default;
int lu_env_init(struct lu_env *env, __u32 tags)
{
	int result;

	env->le_ses = NULL;
	result = lu_context_init(&env->le_ctx, tags);
	if (likely(result == 0))
		lu_context_enter(&env->le_ctx);
	return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
	lu_context_exit(&env->le_ctx);
	lu_context_fini(&env->le_ctx);
	env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);
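/*
 * Typical environment lifecycle as used throughout this file (a sketch;
 * the tag set depends on the calling thread type):
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_LOCAL);
 *	if (rc)
 *		return rc;
 *	...operate on objects with &env...
 *	lu_env_fini(&env);
 */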
int lu_env_refill(struct lu_env *env)
{
	int result;

	result = lu_context_refill(&env->le_ctx);
	if (result == 0 && env->le_ses)
		result = lu_context_refill(env->le_ses);
	return result;
}
EXPORT_SYMBOL(lu_env_refill);
struct lu_site_stats {
	unsigned	lss_populated;
	unsigned	lss_max_search;
	unsigned	lss_total;
	unsigned	lss_busy;
};
static void lu_site_stats_get(struct cfs_hash *hs,
			      struct lu_site_stats *stats, int populated)
{
	struct cfs_hash_bd bd;
	unsigned int i;

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, 1);
		stats->lss_busy +=
			cfs_hash_bd_count_get(&bd) - bkt->lsb_lru_len;
		stats->lss_total += cfs_hash_bd_count_get(&bd);
		stats->lss_max_search = max((int)stats->lss_max_search,
					    cfs_hash_bd_depmax_get(&bd));
		if (!populated) {
			cfs_hash_bd_unlock(hs, &bd, 1);
			continue;
		}

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			if (!hlist_empty(hhead))
				stats->lss_populated++;
		}
		cfs_hash_bd_unlock(hs, &bd, 1);
	}
}
/**
 * lu_cache_shrink_count returns the number of cached objects that are
 * candidates to be freed by shrink_slab(). A counter, which tracks
 * the number of items in the site's lru, is maintained in the per cpu
 * stats of each site. The counter is incremented when an object is added
 * to a site's lru and decremented when one is removed. The number of
 * free-able objects is the sum of all per cpu counters for all sites.
 *
 * Using a per cpu counter is a compromise solution to concurrent access:
 * lu_object_put() can update the counter without locking the site and
 * lu_cache_shrink_count can sum the counters without locking each
 * ls_obj_hash bucket.
 */
static unsigned long lu_cache_shrink_count(struct shrinker *sk,
					   struct shrink_control *sc)
{
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long cached = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return 0;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		cached += ls_stats_read(s->ls_stats, LU_SS_LRU_LEN);
	}
	mutex_unlock(&lu_sites_guard);

	cached = (cached / 100) * sysctl_vfs_cache_pressure;
	CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
	       cached, sysctl_vfs_cache_pressure);

	return cached;
}

static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
					  struct shrink_control *sc)
{
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long remain = sc->nr_to_scan, freed = 0;
	LIST_HEAD(splice);

	if (!(sc->gfp_mask & __GFP_FS))
		/* We must not take the lu_sites_guard lock when
		 * __GFP_FS is *not* set because of the deadlock
		 * possibility detailed above. Additionally,
		 * since we cannot determine the number of
		 * objects in the cache without taking this
		 * lock, we're in a particularly tough spot. As
		 * a result, we'll just lie and say our cache is
		 * empty. This _should_ be ok, as we can't
		 * reclaim objects when __GFP_FS is *not* set
		 * anyway.
		 */
		return SHRINK_STOP;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		freed = lu_site_purge(&lu_shrink_env, s, remain);
		remain -= freed;
		/*
		 * Move just shrunk site to the tail of site list to
		 * assure shrinking fairness.
		 */
		list_move_tail(&s->ls_linkage, &splice);
	}
	list_splice(&splice, lu_sites.prev);
	mutex_unlock(&lu_sites_guard);

	return sc->nr_to_scan - remain;
}

static struct shrinker lu_site_shrinker = {
	.count_objects	= lu_cache_shrink_count,
	.scan_objects	= lu_cache_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};
/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
	int result;

	CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

	result = lu_ref_global_init();
	if (result != 0)
		return result;

	LU_CONTEXT_KEY_INIT(&lu_global_key);
	result = lu_context_key_register(&lu_global_key);
	if (result != 0)
		return result;

	/*
	 * At this level, we don't know what tags are needed, so allocate them
	 * conservatively. This should not be too bad, because this
	 * environment is global.
	 */
	mutex_lock(&lu_sites_guard);
	result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
	mutex_unlock(&lu_sites_guard);
	if (result != 0)
		return result;

	/*
	 * seeks estimation: 3 seeks to read a record from oi, one to read
	 * inode, one for ea. Unfortunately setting this high value results in
	 * lu_object/inode cache consuming all the memory.
	 */
	register_shrinker(&lu_site_shrinker);

	return result;
}
/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
	unregister_shrinker(&lu_site_shrinker);
	lu_context_key_degister(&lu_global_key);

	/*
	 * Tear shrinker environment down _after_ de-registering
	 * lu_global_key, because the latter has a value in the former.
	 */
	mutex_lock(&lu_sites_guard);
	lu_env_fini(&lu_shrink_env);
	mutex_unlock(&lu_sites_guard);

	lu_ref_global_fini();
}
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
	struct lprocfs_counter ret;

	lprocfs_stats_collect(stats, idx, &ret);
	if (idx == LU_SS_LRU_LEN)
		/*
		 * protect against counter on cpu A being decremented
		 * before counter is incremented on cpu B; unlikely
		 */
		return (__u32)((ret.lc_sum > 0) ? ret.lc_sum : 0);

	return (__u32)ret.lc_count;
}
/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
{
	struct lu_site_stats stats;

	memset(&stats, 0, sizeof(stats));
	lu_site_stats_get(s->ls_obj_hash, &stats, 1);

	seq_printf(m, "%d/%d %d/%ld %d %d %d %d %d %d %d %d\n",
		   stats.lss_busy,
		   stats.lss_total,
		   stats.lss_populated,
		   CFS_HASH_NHLIST(s->ls_obj_hash),
		   stats.lss_max_search,
		   ls_stats_read(s->ls_stats, LU_SS_CREATED),
		   ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
		   ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
		   ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
		   ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
		   ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED),
		   ls_stats_read(s->ls_stats, LU_SS_LRU_LEN));
	return 0;
}
EXPORT_SYMBOL(lu_site_stats_print);
/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
	int result;
	struct lu_kmem_descr *iter = caches;

	for (result = 0; iter->ckd_cache; ++iter) {
		*iter->ckd_cache = kmem_cache_create(iter->ckd_name,
						     iter->ckd_size,
						     0, 0, NULL);
		if (!*iter->ckd_cache) {
			result = -ENOMEM;
			/* free all previously allocated caches */
			lu_kmem_fini(caches);
			break;
		}
	}
	return result;
}
EXPORT_SYMBOL(lu_kmem_init);
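/*
 * Typical use, sketched (cache pointer, name and payload struct are
 * illustrative):
 *
 *	static struct kmem_cache *foo_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_kmem,
 *			.ckd_name  = "foo_kmem",
 *			.ckd_size  = sizeof(struct foo)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(foo_caches);
 */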
/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
	for (; caches->ckd_cache; ++caches) {
		kmem_cache_destroy(*caches->ckd_cache);
		*caches->ckd_cache = NULL;
	}
}
EXPORT_SYMBOL(lu_kmem_fini);
void lu_buf_free(struct lu_buf *buf)
{
	LASSERT(buf);
	if (buf->lb_buf) {
		LASSERT(buf->lb_len > 0);
		kvfree(buf->lb_buf);
		buf->lb_buf = NULL;
		buf->lb_len = 0;
	}
}
EXPORT_SYMBOL(lu_buf_free);
void lu_buf_alloc(struct lu_buf *buf, size_t size)
{
	LASSERT(buf);
	LASSERT(!buf->lb_buf);
	LASSERT(!buf->lb_len);
	buf->lb_buf = libcfs_kvzalloc(size, GFP_NOFS);
	if (likely(buf->lb_buf))
		buf->lb_len = size;
}
EXPORT_SYMBOL(lu_buf_alloc);
void lu_buf_realloc(struct lu_buf *buf, size_t size)
{
	lu_buf_free(buf);
	lu_buf_alloc(buf, size);
}
EXPORT_SYMBOL(lu_buf_realloc);
struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len)
{
	if (!buf->lb_buf && !buf->lb_len)
		lu_buf_alloc(buf, len);

	if ((len > buf->lb_len) && buf->lb_buf)
		lu_buf_realloc(buf, len);

	return buf;
}
EXPORT_SYMBOL(lu_buf_check_and_alloc);
/**
 * Increase the size of the \a buf.
 * preserves old data in buffer
 * old buffer remains unchanged on error
 * \retval 0 or -ENOMEM
 */
int lu_buf_check_and_grow(struct lu_buf *buf, size_t len)
{
	char *ptr;

	if (len <= buf->lb_len)
		return 0;

	ptr = libcfs_kvzalloc(len, GFP_NOFS);
	if (!ptr)
		return -ENOMEM;

	/* Free the old buf */
	if (buf->lb_buf) {
		memcpy(ptr, buf->lb_buf, buf->lb_len);
		kvfree(buf->lb_buf);
	}

	buf->lb_buf = ptr;
	buf->lb_len = len;
	return 0;
}
EXPORT_SYMBOL(lu_buf_check_and_grow);