#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "internal.h"
#include "mount.h"

static DEFINE_SPINLOCK(pin_lock);
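
/*
 * pin_remove - detach a pin from its mount and superblock lists and wake
 * anyone sleeping in pin_kill() on it.  Typically called from the pin's
 * ->kill() callback once the pinned object has been dealt with.
 */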
void pin_remove(struct fs_pin *pin)
{
	spin_lock(&pin_lock);
	hlist_del_init(&pin->m_list);
	hlist_del_init(&pin->s_list);
	spin_unlock(&pin_lock);
	spin_lock_irq(&pin->wait.lock);
	pin->done = 1;
	wake_up_locked(&pin->wait);
	spin_unlock_irq(&pin->wait.lock);
}
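
/*
 * Attach a pin to a mount and, if a group list is given, to that group
 * as well.  Both insertions happen under pin_lock.
 */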
void pin_insert_group(struct fs_pin *pin, struct vfsmount *m, struct hlist_head *p)
{
	spin_lock(&pin_lock);
	if (p)
		hlist_add_head(&pin->s_list, p);
	hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins);
	spin_unlock(&pin_lock);
}
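
/* Convenience wrapper: pin to the mount and to its superblock's s_pins list. */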
void pin_insert(struct fs_pin *pin, struct vfsmount *m)
{
	pin_insert_group(pin, m, &m->mnt_sb->s_pins);
}
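
/*
 * Kill a pin.  Called with rcu_read_lock() held; it is dropped on every
 * return path.  If nobody has started tearing the pin down yet, invoke its
 * ->kill() callback; if teardown is already in progress, sleep on p->wait
 * until the owner finishes and pin_remove() wakes us.
 */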
void pin_kill(struct fs_pin *p)
{
	wait_queue_entry_t wait;

	if (!p) {
		rcu_read_unlock();
		return;
	}
	init_wait(&wait);
	spin_lock_irq(&p->wait.lock);
	if (likely(!p->done)) {
		p->done = -1;
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		p->kill(p);
		return;
	}
	if (p->done > 0) {
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		return;
	}
	__add_wait_queue(&p->wait, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		schedule();
		rcu_read_lock();
		if (likely(list_empty(&wait.entry)))
			break;
		/* OK, we know p couldn't have been freed yet */
		spin_lock_irq(&p->wait.lock);
		if (p->done > 0) {
			spin_unlock_irq(&p->wait.lock);
			break;
		}
	}
	rcu_read_unlock();
}
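
/* Kill every fs_pin hanging off a mount's ->mnt_pins list. */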
void mnt_pin_kill(struct mount *m)
{
	while (1) {
		struct hlist_node *p;
		rcu_read_lock();
		p = ACCESS_ONCE(m->mnt_pins.first);
		if (!p) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(p, struct fs_pin, m_list));
	}
}
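
/* Kill every fs_pin on a group list (e.g. a superblock's ->s_pins). */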
void group_pin_kill(struct hlist_head *p)
{
	while (1) {
		struct hlist_node *q;
		rcu_read_lock();
		q = ACCESS_ONCE(p->first);
		if (!q) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(q, struct fs_pin, s_list));
	}
}