/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
	 bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)
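
/*
 * Illustrative sketch (not part of the API above): mapping the current
 * segment of a bio with the atomic kmap helpers and copying it out.
 * "buf" is a hypothetical caller-provided buffer; error handling is omitted.
 *
 *	char *p = __bio_kmap_atomic(bio, bio->bi_iter);
 *
 *	memcpy(buf, p, bio_iter_len(bio, bio->bi_iter));
 *	__bio_kunmap_atomic(p);
 */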

/*
 * merge helpers etc
 */

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
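
/*
 * Illustrative sketch (not part of the API above): walking every segment of
 * a bio with bio_for_each_segment().  Purely an example of how the iterator
 * macros compose; nothing here is required by this header.
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter) {
 *		pr_debug("segment: page %p offset %u len %u%s\n",
 *			 bvec.bv_page, bvec.bv_offset, bvec.bv_len,
 *			 bio_iter_last(bvec, iter) ? " (last)" : "");
 *	}
 */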

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same, because they interpret bi_size
	 * differently:
	 */

	if (bio_op(bio) == REQ_OP_DISCARD)
		return 1;

	if (bio_op(bio) == REQ_OP_SECURE_ERASE)
		return 1;

	if (bio_op(bio) == REQ_OP_WRITE_SAME)
		return 1;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the bio could complete before submit_bio() returns,
 * and it would then already have been freed by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
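
/*
 * Illustrative sketch (not part of the API above): carving a large bio into
 * "max_sectors"-sized pieces with bio_next_split() and chaining each piece
 * to the parent so the parent only completes after every piece does.
 * "max_sectors" and "bs" are hypothetical caller-provided values.
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		generic_make_request(split);
 *	} while (split != bio);
 */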

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_error = -EIO;
	bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
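
/*
 * Illustrative sketch (not part of the API above): allocating a one-segment
 * bio, pointing it at a block device and submitting it synchronously.
 * "bdev", "sector" and "page" are hypothetical caller-provided values and
 * error handling is omitted.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *	int ret;
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */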

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part);
void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     const struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
			struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
static inline void bio_clone_blkcg_association(struct bio *dst,
			struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)
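
/*
 * Illustrative sketch (not part of the API above): zero-filling the current
 * segment of a bio with the irq-safe mapping helpers.  Interrupts must stay
 * disabled between bio_kmap_irq() and bio_kunmap_irq().
 *
 *	unsigned long flags;
 *	char *data = bio_kmap_irq(bio, &flags);
 *
 *	memset(data, 0, bio_cur_bytes(bio));
 *	bio_kunmap_irq(data, &flags);
 */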

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
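
/*
 * Illustrative sketch (not part of the API above): a typical pattern for a
 * remapping driver that queues bios on a private list and drains it later.
 * "my_handle_bio" and "some_bio" are hypothetical placeholders.
 *
 *	struct bio_list list = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	bio_list_add(&list, some_bio);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		my_handle_bio(bio);
 */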

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline bool bio_integrity_enabled(struct bio *bio)
{
	return false;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */