/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <[email protected]>.
   Copyright (C) 2006-2008, Philipp Reisner <[email protected]>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING. If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */

/*
 * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 * It will be created.
 * It will be marked with the intention to be
 * submitted to local disk and/or
 * sent via the network.
 *
 * It has to be placed on the transfer log and other housekeeping lists,
 * in case we have a network connection.
 *
 * It may be identified as a concurrent (write) request
 * and be handled accordingly.
 *
 * It may be handed over to the local disk subsystem.
 * It may be completed by the local disk subsystem,
 * either successfully or with io-error.
 * In case it is a READ request, and it failed locally,
 * it may be retried remotely.
 *
 * It may be queued for sending.
 * It may be handed over to the network stack,
 * which may fail.
 * It may be acknowledged by the "peer" according to the wire_protocol in use.
 * This may be a negative ack.
 * It may receive a faked ack when the network connection is lost and the
 * transfer log is cleaned up.
 * Sending may be canceled due to network connection loss.
 * When it finally has outlived its time,
 * corresponding dirty bits in the resync-bitmap may be cleared or set,
 * it will be destroyed,
 * and completion will be signalled to the originator,
 * with or without "success".
 */

enum drbd_req_event {
	created,
	to_be_send,
	to_be_submitted,

	/* XXX yes, now I am inconsistent...
	 * these are not "events" but "actions"
	 * oh, well... */
	queue_for_net_write,
	queue_for_net_read,
	queue_for_send_oos,

	send_canceled,
	send_failed,
	handed_over_to_network,
	oos_handed_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	abort_disk_io,
	completed_ok,
	resend,
	fail_frozen_disk_io,
	restart_frozen_disk_io,
	nothing, /* for tracing only */
};
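
/*
 * Editor's illustrative sketch, not part of the original header: how a
 * completion path typically feeds one of the events above into the request
 * state machine. The function name and the simplified error handling are
 * assumptions; the real logic lives in the endio callbacks and in
 * __req_mod() in drbd_req.c.
 */
#if 0
static void example_report_local_write_completion(struct drbd_request *req,
						  int error)
{
	/* map the local disk outcome to a state machine event ... */
	enum drbd_req_event what = error ? write_completed_with_error
					 : completed_ok;
	/* ... and let the state machine do the list handling, ack
	 * bookkeeping and (possibly) completion of the master bio */
	req_mod(req, what);
}
#endif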

/* encoding of request states for now. we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyways.
 */
enum drbd_req_state_bits {
	/* 3210
	 * 0000: no local possible
	 * 0001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 0110: completed ok
	 * 0010: completed with error
	 * 1001: Aborted (before completion)
	 * 1x10: Aborted and completed -> free
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,
	__RQ_LOCAL_ABORTED,

	/* 87654
	 * 00000: no network possible
	 * 00001: to be send
	 * 00011: to be send, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set, yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transaction log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * the worker canceling the request and tl_clear_barrier killing it from
	 * the transfer log. We should restructure the code so this conflict
	 * no longer occurs. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,

	/* Set when this is a write, clear for a read */
	__RQ_WRITE,

	/* Should call drbd_al_complete_io() for this request... */
	__RQ_IN_ACT_LOG,
};

#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK (1UL << __RQ_LOCAL_OK)
#define RQ_LOCAL_ABORTED (1UL << __RQ_LOCAL_ABORTED)

#define RQ_LOCAL_MASK ((RQ_LOCAL_ABORTED << 1)-1)

#define RQ_NET_PENDING (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE (1UL << __RQ_NET_DONE)
#define RQ_NET_OK (1UL << __RQ_NET_OK)
#define RQ_NET_SIS (1UL << __RQ_NET_SIS)

/* 0x3f0 */
#define RQ_NET_MASK (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)

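/*
 * Editor's illustrative sketch, not part of the original header: with the
 * bit layout above, RQ_LOCAL_MASK evaluates to 0xf (bits 0..3) and
 * RQ_NET_MASK to 0x3f0 (bits 4..9), so the local and network parts of
 * req->rq_state can be inspected independently while holding req_lock.
 * The function name is an assumption for illustration only.
 */
#if 0
static void example_rq_state_checks(struct drbd_request *req)
{
	unsigned long s = req->rq_state;

	if (s & RQ_LOCAL_MASK)
		/* request has (or had) a local disk part */;
	if (s & RQ_NET_MASK)
		/* request has (or had) a network part */;
	if ((s & RQ_NET_SENT) && !(s & RQ_NET_PENDING))
		/* handed to the network stack, no more acks outstanding */;
}
#endif
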
/* For waking up the frozen transfer log, __req_mod() has to return whether
   the request should be counted in the epoch object. */
#define MR_WRITE_SHIFT 0
#define MR_WRITE (1 << MR_WRITE_SHIFT)
#define MR_READ_SHIFT 1
#define MR_READ (1 << MR_READ_SHIFT)

/* epoch entries */
static inline
struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->ee_hash_s == 0);
	return mdev->ee_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
}

/* transfer log (drbd_request objects) */
static inline
struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->tl_hash_s == 0);
	return mdev->tl_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}

/* application reads (drbd_request objects) */
static inline
struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	return mdev->app_reads_hash
		+ ((unsigned int)(sector) % APP_R_HSIZE);
}
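
/*
 * Editor's illustrative sketch (assumption, modeled on the insertions done
 * in drbd_req.c; the function name is invented): a request is hooked into
 * its bucket by start sector, so all requests touching the same
 * HT_SHIFT-sized region share a bucket and concurrent-write detection is a
 * short bucket walk instead of a full search.
 */
#if 0
static void example_tl_hash_insert(struct drbd_conf *mdev,
				   struct drbd_request *req)
{
	/* caller must hold mdev->req_lock */
	hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector));
}
#endif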

/* when we receive the answer for a read request,
 * verify that we actually know about it */
static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = ar_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, collision) {
		if ((unsigned long)req == (unsigned long)id) {
			D_ASSERT(req->sector == sector);
			return req;
		}
	}
	return NULL;
}
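
/*
 * Editor's note (assumption, simplified from the read path in drbd_req.c):
 * the "id" compared above is simply the pointer value of the request, which
 * the sending side puts on the wire as the block_id; the peer echoes it
 * back in its reply, and _ar_id_to_req() validates it against the hash
 * before trusting it.
 */
#if 0
	/* send side, sketched: */
	drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
			   (unsigned long)req);
#endif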

static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
	struct bio *bio;
	bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */

	req->private_bio = bio;

	bio->bi_private = req;
	bio->bi_end_io = drbd_endio_pri;
	bio->bi_next = NULL;
}

static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
	struct bio *bio_src)
{
	struct drbd_request *req =
		mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (likely(req)) {
		drbd_req_make_private_bio(req, bio_src);

		req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
		req->mdev = mdev;
		req->master_bio = bio_src;
		req->epoch = 0;
		req->sector = bio_src->bi_sector;
		req->size = bio_src->bi_size;
		INIT_HLIST_NODE(&req->collision);
		INIT_LIST_HEAD(&req->tl_requests);
		INIT_LIST_HEAD(&req->w.list);
	}
	return req;
}
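
/*
 * Editor's usage sketch (assumption, simplified from the request entry path
 * in drbd_req.c): an allocation failure is reported to the submitter on the
 * original bio. The surrounding function is invented for illustration.
 */
#if 0
static int example_submit(struct drbd_conf *mdev, struct bio *bio)
{
	struct drbd_request *req = drbd_req_new(mdev, bio);
	if (!req) {
		/* report the failure on the original bio */
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	/* ... continue with normal request processing ... */
	return 0;
}
#endif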

static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}
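
/*
 * Editor's worked example: l1 and l2 are byte counts, so l>>9 converts them
 * to 512-byte sectors. Two 4 KiB requests starting at sectors 0 and 8
 * (bytes 0..4095 and 4096..8191) do not overlap, since 0 + (4096>>9) <= 8;
 * move the second one to sector 7 and overlaps() returns true.
 */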

/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};

extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);

/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	/* __req_mod possibly frees req, do not touch req after that! */
	rv = __req_mod(req, what, &m);
	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}

/* completion of master bio is outside of our spinlock.
 * We still may or may not be inside some irqs disabled section
 * of the lower level driver completion callback, so we need to
 * spin_lock_irqsave here. */
static inline int req_mod(struct drbd_request *req,
		enum drbd_req_event what)
{
	unsigned long flags;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	spin_lock_irqsave(&mdev->req_lock, flags);
	rv = __req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}

static inline bool drbd_should_do_remote(union drbd_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
	   states. */
}
static inline bool drbd_should_send_oos(union drbd_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
	   since we enter state C_AHEAD only if proto >= 96 */
}
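
/*
 * Editor's illustrative sketch (assumption, modeled on the write entry path
 * in drbd_req.c; the function name is invented): the two predicates above
 * decide, per request, whether to replicate the data itself or merely mark
 * the blocks out-of-sync while the peer is behind (C_AHEAD).
 */
#if 0
static void example_route_write(struct drbd_conf *mdev,
				struct drbd_request *req)
{
	union drbd_state s = mdev->state;

	if (drbd_should_do_remote(s))
		_req_mod(req, queue_for_net_write);	/* replicate data */
	else if (drbd_should_send_oos(s))
		_req_mod(req, queue_for_send_oos);	/* only mark out-of-sync */
}
#endif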

#endif