/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */
/*
 * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 *  It will be created.
 *  It will be marked with the intention to be
 *    submitted to local disk and/or
 *    sent via the network.
 *
 *  It has to be placed on the transfer log and other housekeeping lists,
 *  in case we have a network connection.
 *
 *  It may be identified as a concurrent (write) request
 *    and be handled accordingly.
 *
 *  It may be handed over to the local disk subsystem.
 *  It may be completed by the local disk subsystem,
 *    either successfully or with io-error.
 *  In case it is a READ request, and it failed locally,
 *    it may be retried remotely.
 *
 *  It may be queued for sending.
 *  It may be handed over to the network stack,
 *    which may fail.
 *  It may be acknowledged by the "peer" according to the wire_protocol in use;
 *    this may be a negative ack.
 *  It may receive a faked ack when the network connection is lost and the
 *  transfer log is cleaned up.
 *  Sending may be canceled due to network connection loss.
 *  When it finally has outlived its time,
 *    corresponding dirty bits in the resync-bitmap may be cleared or set,
 *  it will be destroyed,
 *  and completion will be signalled to the originator,
 *    with or without "success".
 */
enum drbd_req_event {
	created,
	to_be_send,
	to_be_submitted,

	queue_for_net_write,
	queue_for_net_read,

	send_canceled,
	send_failed,
	handed_over_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	completed_ok,
	nothing, /* for tracing only */
};
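
/* Illustrative only, not part of the original header: pieced together from
 * the lifetime description above, a successful write with wire protocol C
 * would see roughly this event sequence:
 *
 *   created
 *   -> to_be_send, to_be_submitted
 *   -> queue_for_net_write          (placed on the transfer log)
 *   -> handed_over_to_network       (given to the network stack)
 *   -> completed_ok                 (local disk finished)
 *   -> write_acked_by_peer          (write_ack, protocol C)
 */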
/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyways.
 */
enum drbd_req_state_bits {
	/* 210
	 * 000: no local possible
	 * 001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 110: completed ok
	 * 010: completed with error
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,
	/* 76543
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transaction log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,
	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * the worker canceling the request and tl_clear_barrier killing it
	 * from the transfer log. We should restructure the code so this
	 * conflict no longer occurs. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with a network part
	 * from a request without one, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,
};
#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

/* 0x1f8 */
#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)
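
/* A minimal sketch, not part of the original header, of how the bit
 * encodings above combine in practice.  The helper name is hypothetical;
 * the real decision logic lives in _req_may_be_done(), declared below and
 * defined in drbd_req.c. */
static inline int example_net_part_done(const struct drbd_request *req)
{
	/* per the encoding comments above: RQ_NET_DONE set while
	 * RQ_NET_QUEUED is clear means the network part is finished
	 * (barrier ack received, or killed from the transfer log) */
	return (req->rq_state & RQ_NET_DONE) &&
	       !(req->rq_state & RQ_NET_QUEUED);
}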
/* epoch entries */
static inline
struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->ee_hash_s == 0);
	return mdev->ee_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
}

/* transfer log (drbd_request objects) */
static inline
struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->tl_hash_s == 0);
	return mdev->tl_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}

/* application reads (drbd_request objects) */
static inline
struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	return mdev->app_reads_hash
		+ ((unsigned int)(sector) % APP_R_HSIZE);
}
/* when we receive the answer for a read request,
 * verify that we actually know about it */
static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = ar_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			D_ASSERT(req->sector == sector);
			return req;
		}
	}
	return NULL;
}
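
/* Illustrative usage (hypothetical wrapper, not from this file): the id on
 * the wire is simply the request's pointer value, as the cast-compare above
 * shows.  Following the leading-underscore convention used elsewhere in this
 * header, _ar_id_to_req() expects req_lock to be held: */
static inline struct drbd_request *example_find_read_req(
	struct drbd_conf *mdev, u64 id, sector_t sector)
{
	struct drbd_request *req;

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, id, sector); /* NULL if we don't know it */
	spin_unlock_irq(&mdev->req_lock);
	return req;
}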
static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
	struct bio *bio_src)
{
	struct bio *bio;
	struct drbd_request *req =
		mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (likely(req)) {
		bio = bio_clone(bio_src, GFP_NOIO);

		req->rq_state = 0;
		req->mdev = mdev;
		req->master_bio = bio_src;
		req->private_bio = bio;
		req->epoch = 0;
		req->sector = bio->bi_sector;
		req->size = bio->bi_size;
		req->start_time = jiffies;
		INIT_HLIST_NODE(&req->colision);
		INIT_LIST_HEAD(&req->tl_requests);
		INIT_LIST_HEAD(&req->w.list);

		bio->bi_private = req;
		bio->bi_end_io = drbd_endio_pri;
		bio->bi_next = NULL;
	}
	return req;
}
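
/* Illustrative usage (hypothetical caller, not from this file): the entry
 * point that accepts bios from the block layer would allocate one
 * drbd_request per incoming bio, and fail the bio if allocation fails: */
static inline int example_accept_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct drbd_request *req = drbd_req_new(mdev, bio);

	if (!req) {
		/* complete the original bio with an error */
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	/* ... mark intentions, grab req_lock, queue for disk and/or net ... */
	return 0;
}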
static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}
static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	/* l1 and l2 are byte lengths; >>9 converts them to 512-byte sectors */
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}
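
/* Worked example (illustrative): a 4 KiB write at sector 0 covers sectors
 * [0, 8), a 512 byte write at sector 4 covers [4, 5), so
 * overlaps(0, 4096, 4, 512) is non-zero.
 *
 * A minimal sketch, not part of the original header, of how the hash slots
 * and overlaps() might combine for concurrent-write detection.  The helper
 * name is hypothetical and the caller is assumed to hold mdev->req_lock: */
static inline int example_write_conflicts(struct drbd_conf *mdev,
	sector_t sector, int size)
{
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, tl_hash_slot(mdev, sector), colision) {
		if (overlaps(req->sector, req->size, sector, size))
			return 1; /* conflicting write still in flight */
	}
	return 0;
}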
/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};

extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
extern void __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);
/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline void _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;

	/* __req_mod possibly frees req, do not touch req after that! */
	__req_mod(req, what, &m);
	if (m.bio)
		complete_master_bio(mdev, &m);
}
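
/* A minimal sketch, not part of the original header, of the cleanup use case
 * named above (hypothetical helper; caller is assumed to hold req_lock): */
static inline void example_fail_pending(struct list_head *requests)
{
	struct drbd_request *req, *tmp;

	/* _req_mod() may free req, so use the _safe iterator */
	list_for_each_entry_safe(req, tmp, requests, tl_requests)
		_req_mod(req, connection_lost_while_pending);
}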
/* completion of master bio is outside of spinlock.
 * If you need it irqsave, do it yourself! */
static inline void req_mod(struct drbd_request *req,
		enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
}
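
/* Illustrative usage (hypothetical): a worker callback runs in kernel-thread
 * context (see the comment at the top of this file) and does not hold
 * req_lock, so it uses the locking variant: */
static inline void example_send_failed(struct drbd_request *req)
{
	req_mod(req, send_failed); /* takes and drops mdev->req_lock itself */
}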
#endif