/*
 * kernel/rcu/rcu_segcblist.h
 * (scrape residue removed: gitweb page header "nvme_fc: Support ctrl_loss_tmo",
 *  blob 6e36e36478cd3f57338260904eb9335d3e0b85a5)
 */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/rcu_segcblist.h>
26 * Account for the fact that a previously dequeued callback turned out
27 * to be marked as lazy.
29 static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
31 rclp->len_lazy--;
35 * Interim function to return rcu_cblist head pointer. Longer term, the
36 * rcu_cblist will be used more pervasively, removing the need for this
37 * function.
39 static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp)
41 return rclp->head;
45 * Interim function to return rcu_cblist head pointer. Longer term, the
46 * rcu_cblist will be used more pervasively, removing the need for this
47 * function.
49 static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp)
51 WARN_ON_ONCE(!rclp->head);
52 return rclp->tail;
void rcu_cblist_init(struct rcu_cblist *rclp);
long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
60 * Is the specified rcu_segcblist structure empty?
62 * But careful! The fact that the ->head field is NULL does not
63 * necessarily imply that there are no callbacks associated with
64 * this structure. When callbacks are being invoked, they are
65 * removed as a group. If callback invocation must be preempted,
66 * the remaining callbacks will be added back to the list. Either
67 * way, the counts are updated later.
69 * So it is often the case that rcu_segcblist_n_cbs() should be used
70 * instead.
72 static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
74 return !rsclp->head;
77 /* Return number of callbacks in segmented callback list. */
78 static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
80 return READ_ONCE(rsclp->len);
83 /* Return number of lazy callbacks in segmented callback list. */
84 static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
86 return rsclp->len_lazy;
89 /* Return number of lazy callbacks in segmented callback list. */
90 static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
92 return rsclp->len - rsclp->len_lazy;
96 * Is the specified rcu_segcblist enabled, for example, not corresponding
97 * to an offline or callback-offloaded CPU?
99 static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
101 return !!rsclp->tails[RCU_NEXT_TAIL];
105 * Are all segments following the specified segment of the specified
106 * rcu_segcblist structure empty of callbacks? (The specified
107 * segment might well contain callbacks.)
109 static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
111 return !*rsclp->tails[seg];
115 * Interim function to return rcu_segcblist head pointer. Longer term, the
116 * rcu_segcblist will be used more pervasively, removing the need for this
117 * function.
119 static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp)
121 return rsclp->head;
125 * Interim function to return rcu_segcblist head pointer. Longer term, the
126 * rcu_segcblist will be used more pervasively, removing the need for this
127 * function.
129 static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp)
131 WARN_ON_ONCE(rcu_segcblist_empty(rsclp));
132 return rsclp->tails[RCU_NEXT_TAIL];
135 void rcu_segcblist_init(struct rcu_segcblist *rsclp);
136 void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
137 bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg);
138 bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
139 bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
140 struct rcu_head *rcu_segcblist_dequeue(struct rcu_segcblist *rsclp);
141 void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp);
142 struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
143 struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
144 bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp);
145 void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
146 struct rcu_head *rhp, bool lazy);
147 bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
148 struct rcu_head *rhp, bool lazy);
149 void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
150 struct rcu_cblist *rclp);
151 void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
152 struct rcu_cblist *rclp);
153 void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
154 struct rcu_cblist *rclp);
155 void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
156 struct rcu_cblist *rclp);
157 void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
158 struct rcu_cblist *rclp);
159 void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
160 struct rcu_cblist *rclp);
161 void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
162 bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
163 bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp,
164 unsigned long seq);