/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/block-backend.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* This function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

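/*
 * Illustrative usage sketch (not part of the original file): enabling
 * throttling on a BlockDriverState and installing a limit of roughly
 * 10 MB/s total bandwidth. The helper name and the numbers are hypothetical;
 * the ThrottleConfig/LeakyBucket layout is assumed from qemu/throttle.h of
 * this period.
 */
static void example_enable_io_limits(BlockDriverState *bs)
{
    ThrottleConfig cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.buckets[THROTTLE_BPS_TOTAL].avg = 10 * 1024 * 1024;

    /* enable first, then apply the limits (see the comment above) */
    bdrv_io_limits_enable(bs);
    bdrv_set_io_limits(bs, &cfg);
}
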
/* This function makes an I/O request wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue this I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue the next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}

size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}

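/*
 * Illustrative sketch (not part of the original file): allocating a bounce
 * buffer that respects the optimal memory alignment reported above.
 * qemu_memalign() is the usual aligned allocator; the helper name is
 * hypothetical.
 */
static void *example_alloc_aligned_buf(BlockDriverState *bs, size_t len)
{
    /* bdrv_opt_mem_align() falls back to 4096 when bs or bs->drv is NULL */
    return qemu_memalign(bdrv_opt_mem_align(bs), len);
}
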
/* check if the path starts with "<protocol>:" */
int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

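/*
 * Illustrative sketch (not part of the original file): expected results of
 * path_has_protocol() for a few typical inputs. The file names are examples.
 */
static void example_path_has_protocol(void)
{
    assert(path_has_protocol("nbd://localhost:10809/export"));  /* "nbd:" prefix */
    assert(!path_has_protocol("/var/lib/images/disk.qcow2"));   /* plain path */
#ifdef _WIN32
    assert(!path_has_protocol("c:\\images\\disk.qcow2"));       /* drive letter, not a protocol */
#endif
}
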
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* If filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

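/*
 * Illustrative sketch (not part of the original file): how path_combine()
 * resolves a backing file name relative to the overlay's path. File names
 * are hypothetical.
 */
static void example_path_combine(void)
{
    char dest[PATH_MAX];

    /* relative name: replaces the last path component of the base */
    path_combine(dest, sizeof(dest), "/images/overlay.qcow2", "base.qcow2");
    /* dest is now "/images/base.qcow2" */

    /* absolute name: copied through unchanged */
    path_combine(dest, sizeof(dest), "/images/overlay.qcow2", "/mnt/base.raw");
    /* dest is now "/mnt/base.raw" */
}
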
void bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                  const char *backing,
                                                  char *dest, size_t sz,
                                                  Error **errp)
{
    if (backing[0] == '\0' || path_has_protocol(backing) ||
        path_is_absolute(backing))
    {
        pstrcpy(dest, sz, backing);
    } else if (backed[0] == '\0' || strstart(backed, "json:", NULL)) {
        error_setg(errp, "Cannot use relative backing file names for '%s'",
                   backed);
    } else {
        path_combine(dest, sz, backed, backing);
    }
}

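/*
 * Illustrative sketch (not part of the original file): resolving a relative
 * backing file name against the image that refers to it. Paths are
 * hypothetical; error reporting is ignored (NULL errp) for brevity.
 */
static void example_full_backing_filename(void)
{
    char full[PATH_MAX];

    bdrv_get_full_backing_filename_from_filename("/images/overlay.qcow2",
                                                 "base.qcow2",
                                                 full, sizeof(full), NULL);
    /* full is now "/images/base.qcow2" */
}
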
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz,
                                    Error **errp)
{
    char *backed = bs->exact_filename[0] ? bs->exact_filename : bs->filename;

    bdrv_get_full_backing_filename_from_filename(backed, bs->backing_file,
                                                 dest, sz, errp);
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

BlockDriverState *bdrv_new_root(void)
{
    BlockDriverState *bs = bdrv_new();

    QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    return bs;
}

BlockDriverState *bdrv_new(void)
{
    BlockDriverState *bs;
    int i;

    bs = g_new0(BlockDriverState, 1);
    QLIST_INIT(&bs->dirty_bitmaps);
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}

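/*
 * Illustrative sketch (not part of the original file): looking up a format
 * driver by name while honouring the read-only whitelist. The format name
 * "qcow2" is just an example.
 */
static BlockDriver *example_find_qcow2(bool read_only)
{
    BlockDriver *drv = bdrv_find_whitelisted_format("qcow2", read_only);

    if (!drv) {
        /* either the driver is not compiled in or it is not whitelisted */
        return NULL;
    }
    return drv;
}
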
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QemuOpts *opts;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            aio_poll(qemu_get_aio_context(), true);
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}

int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}

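/*
 * Illustrative sketch (not part of the original file): creating a temporary
 * file and removing it again. The helper name is hypothetical; the buffer
 * size follows the PATH_MAX usage elsewhere in this file.
 */
static int example_tmp_file(void)
{
    char tmp[PATH_MAX];
    int ret = get_tmp_filename(tmp, sizeof(tmp));

    if (ret < 0) {
        return ret;        /* negative errno */
    }
    unlink(tmp);           /* the caller is responsible for removing the file */
    return 0;
}
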
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return &bdrv_file;
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

/*
 * Guess image format by probing its contents.
 * This is not a good idea when your image is raw (CVE-2008-2004), but
 * we do it anyway for backward compatibility.
 *
 * @buf contains the image's first @buf_size bytes.
 * @buf_size is the buffer size in bytes (generally BLOCK_PROBE_BUF_SIZE,
 * but can be smaller if the image file is smaller)
 * @filename is its filename.
 *
 * For all block drivers, call the bdrv_probe() method to get its
 * probing score.
 * Return the first block driver with the highest probing score.
 */
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe) {
            score = d->bdrv_probe(buf, buf_size, filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    BlockDriver *drv;
    uint8_t buf[BLOCK_PROBE_BUF_SIZE];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        *pdrv = &bdrv_raw;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    drv = bdrv_probe_all(buf, ret, filename);
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 * Return 0 on success, -errno on error.
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}

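/*
 * Illustrative sketch (not part of the original file): translating typical
 * command-line cache/discard mode strings into open flags, as a caller of
 * the two parsers above would. The mode strings are examples.
 */
static int example_parse_modes(int flags)
{
    if (bdrv_parse_cache_flags("writeback", &flags) < 0) {
        return -1;                      /* unknown cache mode */
    }
    if (bdrv_parse_discard_flags("unmap", &flags) < 0) {
        return -1;                      /* unknown discard mode */
    }
    /* flags now contain BDRV_O_CACHE_WB and BDRV_O_UNMAP */
    return flags;
}
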
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

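/*
 * Illustrative sketch (not part of the original file): because copy-on-read
 * is reference counted, every enable needs a matching disable once the user
 * (for example a block job) is done with it.
 */
static void example_copy_on_read_user(BlockDriverState *bs)
{
    bdrv_enable_copy_on_read(bs);
    /* ... issue guest reads that should populate the top image ... */
    bdrv_disable_copy_on_read(bs);
}
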
/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}

static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* Check for empty string or invalid characters */
    if (!id_wellformed(node_name)) {
        error_setg(errp, "Invalid node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (blk_by_name(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}

/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() was called directly with a protocol driver as drv. This
     * layer is already opened, so assign it to bs (while file becomes a
     * closed BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);
    bs->growable = !!(flags & BDRV_O_PROTOCOL);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                   ? "Driver '%s' can only be used for read-only devices"
                   : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename);

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto free_and_fail;
    }

    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}

/*
 * Fills in default options for opening images and converts the legacy
 * filename/flags pair to option QDict entries.
 */
static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
                             BlockDriver *drv, Error **errp)
{
    const char *filename = *pfilename;
    const char *drvname;
    bool protocol = flags & BDRV_O_PROTOCOL;
    bool parse_filename = false;
    Error *local_err = NULL;

    /* Parse json: pseudo-protocol */
    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(*options, json_options, false);
        QDECREF(json_options);
        *pfilename = filename = NULL;
    }

    /* Fetch the file name from the options QDict if necessary */
    if (protocol && filename) {
        if (!qdict_haskey(*options, "filename")) {
            qdict_put(*options, "filename", qstring_from_str(filename));
            parse_filename = true;
        } else {
            error_setg(errp, "Can't specify 'file' and 'filename' options at "
                       "the same time");
            return -EINVAL;
        }
    }

    /* Find the right block driver */
    filename = qdict_get_try_str(*options, "filename");
    drvname = qdict_get_try_str(*options, "driver");

    if (drv) {
        if (drvname) {
            error_setg(errp, "Driver specified twice");
            return -EINVAL;
        }
        drvname = drv->format_name;
        qdict_put(*options, "driver", qstring_from_str(drvname));
    } else {
        if (!drvname && protocol) {
            if (filename) {
                drv = bdrv_find_protocol(filename, parse_filename);
                if (!drv) {
                    error_setg(errp, "Unknown protocol");
                    return -EINVAL;
                }

                drvname = drv->format_name;
                qdict_put(*options, "driver", qstring_from_str(drvname));
            } else {
                error_setg(errp, "Must specify either driver or file");
                return -EINVAL;
            }
        } else if (drvname) {
            drv = bdrv_find_format(drvname);
            if (!drv) {
                error_setg(errp, "Unknown driver '%s'", drvname);
                return -ENOENT;
            }
        }
    }

    assert(drv || !protocol);

    /* Driver-specific filename parsing */
    if (drv && drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        }
    }

    return 0;
}

void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{

    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bdrv_get_device_name(bs));
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs, NULL);
}

/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX,
                                       &local_err);
        if (local_err) {
            ret = -EINVAL;
            error_propagate(errp, local_err);
            QDECREF(options);
            goto free_exit;
        }
    }

    if (!bs->drv || !bs->drv->supports_backing) {
        ret = -EINVAL;
        error_setg(errp, "Driver doesn't support backing files");
        QDECREF(options);
        goto free_exit;
    }

    backing_hd = bdrv_new();

    if (bs->backing_format[0] != '\0' && !qdict_haskey(options, "driver")) {
        qdict_put(options, "driver", qstring_from_str(bs->backing_format));
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), NULL, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}

Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001280/*
Max Reitzda557aa2013-12-20 19:28:11 +01001281 * Opens a disk image whose options are given as BlockdevRef in another block
1282 * device's options.
1283 *
Max Reitzda557aa2013-12-20 19:28:11 +01001284 * If allow_none is true, no image will be opened if filename is NULL and no
1285 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
1286 *
 1287 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
1288 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
1289 * itself, all options starting with "${bdref_key}." are considered part of the
1290 * BlockdevRef.
1291 *
1292 * The BlockdevRef will be removed from the options QDict.
Max Reitzf67503e2014-02-18 18:33:05 +01001293 *
1294 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
Max Reitzda557aa2013-12-20 19:28:11 +01001295 */
1296int bdrv_open_image(BlockDriverState **pbs, const char *filename,
1297 QDict *options, const char *bdref_key, int flags,
Max Reitzf7d9fd82014-02-18 18:33:12 +01001298 bool allow_none, Error **errp)
Max Reitzda557aa2013-12-20 19:28:11 +01001299{
1300 QDict *image_options;
1301 int ret;
1302 char *bdref_key_dot;
1303 const char *reference;
1304
Max Reitzf67503e2014-02-18 18:33:05 +01001305 assert(pbs);
1306 assert(*pbs == NULL);
1307
Max Reitzda557aa2013-12-20 19:28:11 +01001308 bdref_key_dot = g_strdup_printf("%s.", bdref_key);
1309 qdict_extract_subqdict(options, &image_options, bdref_key_dot);
1310 g_free(bdref_key_dot);
1311
1312 reference = qdict_get_try_str(options, bdref_key);
1313 if (!filename && !reference && !qdict_size(image_options)) {
1314 if (allow_none) {
1315 ret = 0;
1316 } else {
1317 error_setg(errp, "A block device must be specified for \"%s\"",
1318 bdref_key);
1319 ret = -EINVAL;
1320 }
Markus Armbrusterb20e61e2014-05-28 11:16:57 +02001321 QDECREF(image_options);
Max Reitzda557aa2013-12-20 19:28:11 +01001322 goto done;
1323 }
1324
Max Reitzf7d9fd82014-02-18 18:33:12 +01001325 ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);
Max Reitzda557aa2013-12-20 19:28:11 +01001326
1327done:
1328 qdict_del(options, bdref_key);
1329 return ret;
1330}
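
/*
 * Usage sketch: a flattened BlockdevRef passed through the options QDict,
 * mirroring the "file" bdref_key that bdrv_open() uses below for the protocol
 * layer.  The filename, flags and error variables are placeholders.
 *
 *     QDict *options = qdict_new();
 *     qdict_put(options, "file.driver", qstring_from_str("file"));
 *     qdict_put(options, "file.filename", qstring_from_str("disk.img"));
 *
 *     BlockDriverState *file = NULL;
 *     ret = bdrv_open_image(&file, NULL, options, "file",
 *                           bdrv_inherited_flags(flags), true, &local_err);
 *     // on return, every "file.*" key has been consumed from 'options'
 */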
1331
Chen Gang6b8aeca2014-06-23 23:28:23 +08001332int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
Kevin Wolfb9988752014-04-03 12:09:34 +02001333{
1334 /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001335 char *tmp_filename = g_malloc0(PATH_MAX + 1);
Kevin Wolfb9988752014-04-03 12:09:34 +02001336 int64_t total_size;
Chunyan Liu83d05212014-06-05 17:20:51 +08001337 QemuOpts *opts = NULL;
Kevin Wolfb9988752014-04-03 12:09:34 +02001338 QDict *snapshot_options;
1339 BlockDriverState *bs_snapshot;
 1340 Error *local_err = NULL;
1341 int ret;
1342
1343 /* if snapshot, we create a temporary backing file and open it
1344 instead of opening 'filename' directly */
1345
1346 /* Get the required size from the image */
Kevin Wolff1877432014-04-04 17:07:19 +02001347 total_size = bdrv_getlength(bs);
1348 if (total_size < 0) {
Chen Gang6b8aeca2014-06-23 23:28:23 +08001349 ret = total_size;
Kevin Wolff1877432014-04-04 17:07:19 +02001350 error_setg_errno(errp, -total_size, "Could not get image size");
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001351 goto out;
Kevin Wolff1877432014-04-04 17:07:19 +02001352 }
Kevin Wolfb9988752014-04-03 12:09:34 +02001353
1354 /* Create the temporary image */
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001355 ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
Kevin Wolfb9988752014-04-03 12:09:34 +02001356 if (ret < 0) {
1357 error_setg_errno(errp, -ret, "Could not get temporary filename");
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001358 goto out;
Kevin Wolfb9988752014-04-03 12:09:34 +02001359 }
1360
Max Reitzef810432014-12-02 18:32:42 +01001361 opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
Chunyan Liuc282e1f2014-06-05 17:21:11 +08001362 &error_abort);
Chunyan Liu83d05212014-06-05 17:20:51 +08001363 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
Max Reitzef810432014-12-02 18:32:42 +01001364 ret = bdrv_create(&bdrv_qcow2, tmp_filename, opts, &local_err);
Chunyan Liu83d05212014-06-05 17:20:51 +08001365 qemu_opts_del(opts);
Kevin Wolfb9988752014-04-03 12:09:34 +02001366 if (ret < 0) {
1367 error_setg_errno(errp, -ret, "Could not create temporary overlay "
1368 "'%s': %s", tmp_filename,
1369 error_get_pretty(local_err));
1370 error_free(local_err);
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001371 goto out;
Kevin Wolfb9988752014-04-03 12:09:34 +02001372 }
1373
1374 /* Prepare a new options QDict for the temporary file */
1375 snapshot_options = qdict_new();
1376 qdict_put(snapshot_options, "file.driver",
1377 qstring_from_str("file"));
1378 qdict_put(snapshot_options, "file.filename",
1379 qstring_from_str(tmp_filename));
1380
Markus Armbrustere4e99862014-10-07 13:59:03 +02001381 bs_snapshot = bdrv_new();
Kevin Wolfb9988752014-04-03 12:09:34 +02001382
1383 ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
Max Reitzef810432014-12-02 18:32:42 +01001384 flags, &bdrv_qcow2, &local_err);
Kevin Wolfb9988752014-04-03 12:09:34 +02001385 if (ret < 0) {
1386 error_propagate(errp, local_err);
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001387 goto out;
Kevin Wolfb9988752014-04-03 12:09:34 +02001388 }
1389
1390 bdrv_append(bs_snapshot, bs);
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001391
1392out:
1393 g_free(tmp_filename);
Chen Gang6b8aeca2014-06-23 23:28:23 +08001394 return ret;
Kevin Wolfb9988752014-04-03 12:09:34 +02001395}
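
/*
 * Resulting chain, as a rough sketch: the original image stays read-only and
 * becomes the backing file of a temporary qcow2 overlay created in the
 * temporary directory, and that overlay absorbs all guest writes.  'bs' keeps
 * pointing at the device-attached BlockDriverState because bdrv_append()
 * swaps the contents of the two nodes.
 *
 *     original image (read-only) <- temporary qcow2 overlay (== *bs)
 */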
1396
Max Reitzda557aa2013-12-20 19:28:11 +01001397/*
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001398 * Opens a disk image (raw, qcow2, vmdk, ...)
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001399 *
1400 * options is a QDict of options to pass to the block drivers, or NULL for an
1401 * empty set of options. The reference to the QDict belongs to the block layer
1402 * after the call (even on failure), so if the caller intends to reuse the
1403 * dictionary, it needs to use QINCREF() before calling bdrv_open.
Max Reitzf67503e2014-02-18 18:33:05 +01001404 *
1405 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
1406 * If it is not NULL, the referenced BDS will be reused.
Max Reitzddf56362014-02-18 18:33:06 +01001407 *
1408 * The reference parameter may be used to specify an existing block device which
1409 * should be opened. If specified, neither options nor a filename may be given,
1410 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001411 */
Max Reitzddf56362014-02-18 18:33:06 +01001412int bdrv_open(BlockDriverState **pbs, const char *filename,
1413 const char *reference, QDict *options, int flags,
1414 BlockDriver *drv, Error **errp)
bellardea2384d2004-08-01 21:59:26 +00001415{
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001416 int ret;
Max Reitzf67503e2014-02-18 18:33:05 +01001417 BlockDriverState *file = NULL, *bs;
Kevin Wolf74fe54f2013-07-09 11:09:02 +02001418 const char *drvname;
Max Reitz34b5d2c2013-09-05 14:45:29 +02001419 Error *local_err = NULL;
Kevin Wolfb1e6fc02014-05-06 12:11:42 +02001420 int snapshot_flags = 0;
bellard712e7872005-04-28 21:09:32 +00001421
Max Reitzf67503e2014-02-18 18:33:05 +01001422 assert(pbs);
1423
Max Reitzddf56362014-02-18 18:33:06 +01001424 if (reference) {
1425 bool options_non_empty = options ? qdict_size(options) : false;
1426 QDECREF(options);
1427
1428 if (*pbs) {
1429 error_setg(errp, "Cannot reuse an existing BDS when referencing "
1430 "another block device");
1431 return -EINVAL;
1432 }
1433
1434 if (filename || options_non_empty) {
1435 error_setg(errp, "Cannot reference an existing block device with "
1436 "additional options or a new filename");
1437 return -EINVAL;
1438 }
1439
1440 bs = bdrv_lookup_bs(reference, reference, errp);
1441 if (!bs) {
1442 return -ENODEV;
1443 }
1444 bdrv_ref(bs);
1445 *pbs = bs;
1446 return 0;
1447 }
1448
Max Reitzf67503e2014-02-18 18:33:05 +01001449 if (*pbs) {
1450 bs = *pbs;
1451 } else {
Markus Armbrustere4e99862014-10-07 13:59:03 +02001452 bs = bdrv_new();
Max Reitzf67503e2014-02-18 18:33:05 +01001453 }
1454
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001455 /* NULL means an empty set of options */
1456 if (options == NULL) {
1457 options = qdict_new();
1458 }
1459
Kevin Wolf17b005f2014-05-27 10:50:29 +02001460 ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
Kevin Wolf462f5bc2014-05-26 11:39:55 +02001461 if (local_err) {
1462 goto fail;
1463 }
1464
Kevin Wolf76c591b2014-06-04 14:19:44 +02001465 /* Find the right image format driver */
1466 drv = NULL;
1467 drvname = qdict_get_try_str(options, "driver");
1468 if (drvname) {
1469 drv = bdrv_find_format(drvname);
1470 qdict_del(options, "driver");
1471 if (!drv) {
1472 error_setg(errp, "Unknown driver: '%s'", drvname);
1473 ret = -EINVAL;
1474 goto fail;
1475 }
1476 }
1477
1478 assert(drvname || !(flags & BDRV_O_PROTOCOL));
1479 if (drv && !drv->bdrv_file_open) {
1480 /* If the user explicitly wants a format driver here, we'll need to add
1481 * another layer for the protocol in bs->file */
1482 flags &= ~BDRV_O_PROTOCOL;
1483 }
1484
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001485 bs->options = options;
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001486 options = qdict_clone_shallow(options);
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001487
Kevin Wolff4788ad2014-06-03 16:44:19 +02001488 /* Open image file without format layer */
1489 if ((flags & BDRV_O_PROTOCOL) == 0) {
1490 if (flags & BDRV_O_RDWR) {
1491 flags |= BDRV_O_ALLOW_RDWR;
1492 }
1493 if (flags & BDRV_O_SNAPSHOT) {
1494 snapshot_flags = bdrv_temp_snapshot_flags(flags);
1495 flags = bdrv_backing_flags(flags);
1496 }
1497
1498 assert(file == NULL);
1499 ret = bdrv_open_image(&file, filename, options, "file",
1500 bdrv_inherited_flags(flags),
1501 true, &local_err);
1502 if (ret < 0) {
Max Reitz5469a2a2014-02-18 18:33:10 +01001503 goto fail;
1504 }
1505 }
1506
Kevin Wolf76c591b2014-06-04 14:19:44 +02001507 /* Image format probing */
Kevin Wolf38f3ef52014-11-20 16:27:12 +01001508 bs->probed = !drv;
Kevin Wolf76c591b2014-06-04 14:19:44 +02001509 if (!drv && file) {
Kevin Wolf17b005f2014-05-27 10:50:29 +02001510 ret = find_image_format(file, filename, &drv, &local_err);
1511 if (ret < 0) {
Kevin Wolf8bfea152014-04-11 19:16:36 +02001512 goto fail;
Max Reitz2a05cbe2013-12-20 19:28:10 +01001513 }
Kevin Wolf76c591b2014-06-04 14:19:44 +02001514 } else if (!drv) {
Kevin Wolf17b005f2014-05-27 10:50:29 +02001515 error_setg(errp, "Must specify either driver or file");
1516 ret = -EINVAL;
Kevin Wolf8bfea152014-04-11 19:16:36 +02001517 goto fail;
Kevin Wolff500a6d2012-11-12 17:35:27 +01001518 }
1519
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001520 /* Open the image */
Max Reitz34b5d2c2013-09-05 14:45:29 +02001521 ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001522 if (ret < 0) {
Kevin Wolf8bfea152014-04-11 19:16:36 +02001523 goto fail;
Christoph Hellwig69873072010-01-20 18:13:25 +01001524 }
1525
Max Reitz2a05cbe2013-12-20 19:28:10 +01001526 if (file && (bs->file != file)) {
Fam Zheng4f6fd342013-08-23 09:14:47 +08001527 bdrv_unref(file);
Kevin Wolff500a6d2012-11-12 17:35:27 +01001528 file = NULL;
1529 }
1530
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001531 /* If there is a backing file, use it */
Paolo Bonzini9156df12012-10-18 16:49:17 +02001532 if ((flags & BDRV_O_NO_BACKING) == 0) {
Kevin Wolf31ca6d02013-03-28 15:29:24 +01001533 QDict *backing_options;
1534
Benoît Canet5726d872013-09-25 13:30:01 +02001535 qdict_extract_subqdict(options, &backing_options, "backing.");
Max Reitz34b5d2c2013-09-05 14:45:29 +02001536 ret = bdrv_open_backing_file(bs, backing_options, &local_err);
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001537 if (ret < 0) {
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001538 goto close_and_fail;
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001539 }
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001540 }
1541
Max Reitz91af7012014-07-18 20:24:56 +02001542 bdrv_refresh_filename(bs);
1543
Kevin Wolfb9988752014-04-03 12:09:34 +02001544 /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
1545 * temporary snapshot afterwards. */
Kevin Wolfb1e6fc02014-05-06 12:11:42 +02001546 if (snapshot_flags) {
Chen Gang6b8aeca2014-06-23 23:28:23 +08001547 ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
Kevin Wolfb9988752014-04-03 12:09:34 +02001548 if (local_err) {
Kevin Wolfb9988752014-04-03 12:09:34 +02001549 goto close_and_fail;
1550 }
1551 }
1552
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001553 /* Check if any unknown options were used */
Max Reitz5acd9d82014-02-18 18:33:11 +01001554 if (options && (qdict_size(options) != 0)) {
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001555 const QDictEntry *entry = qdict_first(options);
Max Reitz5acd9d82014-02-18 18:33:11 +01001556 if (flags & BDRV_O_PROTOCOL) {
1557 error_setg(errp, "Block protocol '%s' doesn't support the option "
1558 "'%s'", drv->format_name, entry->key);
1559 } else {
1560 error_setg(errp, "Block format '%s' used by device '%s' doesn't "
1561 "support the option '%s'", drv->format_name,
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02001562 bdrv_get_device_name(bs), entry->key);
Max Reitz5acd9d82014-02-18 18:33:11 +01001563 }
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001564
1565 ret = -EINVAL;
1566 goto close_and_fail;
1567 }
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001568
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001569 if (!bdrv_key_required(bs)) {
Markus Armbrustera7f53e22014-10-07 13:59:25 +02001570 if (bs->blk) {
1571 blk_dev_change_media_cb(bs->blk, true);
1572 }
Markus Armbrusterc3adb582014-03-14 09:22:48 +01001573 } else if (!runstate_check(RUN_STATE_PRELAUNCH)
1574 && !runstate_check(RUN_STATE_INMIGRATE)
1575 && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
1576 error_setg(errp,
1577 "Guest must be stopped for opening of encrypted image");
1578 ret = -EBUSY;
1579 goto close_and_fail;
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001580 }
1581
Markus Armbrusterc3adb582014-03-14 09:22:48 +01001582 QDECREF(options);
Max Reitzf67503e2014-02-18 18:33:05 +01001583 *pbs = bs;
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001584 return 0;
1585
Kevin Wolf8bfea152014-04-11 19:16:36 +02001586fail:
Kevin Wolff500a6d2012-11-12 17:35:27 +01001587 if (file != NULL) {
Fam Zheng4f6fd342013-08-23 09:14:47 +08001588 bdrv_unref(file);
Kevin Wolff500a6d2012-11-12 17:35:27 +01001589 }
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001590 QDECREF(bs->options);
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001591 QDECREF(options);
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001592 bs->options = NULL;
Max Reitzf67503e2014-02-18 18:33:05 +01001593 if (!*pbs) {
1594 /* If *pbs is NULL, a new BDS has been created in this function and
1595 needs to be freed now. Otherwise, it does not need to be closed,
1596 since it has not really been opened yet. */
1597 bdrv_unref(bs);
1598 }
Markus Armbruster84d18f02014-01-30 15:07:28 +01001599 if (local_err) {
Max Reitz34b5d2c2013-09-05 14:45:29 +02001600 error_propagate(errp, local_err);
1601 }
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001602 return ret;
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001603
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001604close_and_fail:
Max Reitzf67503e2014-02-18 18:33:05 +01001605 /* See fail path, but now the BDS has to be always closed */
1606 if (*pbs) {
1607 bdrv_close(bs);
1608 } else {
1609 bdrv_unref(bs);
1610 }
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001611 QDECREF(options);
Markus Armbruster84d18f02014-01-30 15:07:28 +01001612 if (local_err) {
Max Reitz34b5d2c2013-09-05 14:45:29 +02001613 error_propagate(errp, local_err);
1614 }
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001615 return ret;
1616}
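
/*
 * Usage sketch for the common case of opening a new image from a filename and
 * an options QDict (the filename and driver below are placeholders):
 *
 *     BlockDriverState *bs = NULL;
 *     QDict *opts = qdict_new();
 *     qdict_put(opts, "driver", qstring_from_str("qcow2"));
 *
 *     ret = bdrv_open(&bs, "disk.qcow2", NULL, opts, BDRV_O_RDWR, NULL, &err);
 *     // 'opts' belongs to the block layer now, even if the call failed; on
 *     // success the caller owns one reference, released with bdrv_unref(bs)
 */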
1617
Jeff Codye971aa12012-09-20 15:13:19 -04001618typedef struct BlockReopenQueueEntry {
1619 bool prepared;
1620 BDRVReopenState state;
1621 QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
1622} BlockReopenQueueEntry;
1623
1624/*
1625 * Adds a BlockDriverState to a simple queue for an atomic, transactional
1626 * reopen of multiple devices.
1627 *
 1628 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLEQ_INIT
 1629 * already performed, or alternatively may be NULL, in which case a new BlockReopenQueue will be
1630 * be created and initialized. This newly created BlockReopenQueue should be
1631 * passed back in for subsequent calls that are intended to be of the same
1632 * atomic 'set'.
1633 *
1634 * bs is the BlockDriverState to add to the reopen queue.
1635 *
1636 * flags contains the open flags for the associated bs
1637 *
1638 * returns a pointer to bs_queue, which is either the newly allocated
1639 * bs_queue, or the existing bs_queue being used.
1640 *
1641 */
1642BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
1643 BlockDriverState *bs, int flags)
1644{
1645 assert(bs != NULL);
1646
1647 BlockReopenQueueEntry *bs_entry;
1648 if (bs_queue == NULL) {
1649 bs_queue = g_new0(BlockReopenQueue, 1);
1650 QSIMPLEQ_INIT(bs_queue);
1651 }
1652
Kevin Wolff1f25a22014-04-25 19:04:55 +02001653 /* bdrv_open() masks this flag out */
1654 flags &= ~BDRV_O_PROTOCOL;
1655
Jeff Codye971aa12012-09-20 15:13:19 -04001656 if (bs->file) {
Kevin Wolff1f25a22014-04-25 19:04:55 +02001657 bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
Jeff Codye971aa12012-09-20 15:13:19 -04001658 }
1659
1660 bs_entry = g_new0(BlockReopenQueueEntry, 1);
1661 QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);
1662
1663 bs_entry->state.bs = bs;
1664 bs_entry->state.flags = flags;
1665
1666 return bs_queue;
1667}
1668
1669/*
1670 * Reopen multiple BlockDriverStates atomically & transactionally.
1671 *
 1672 * The queue passed in (bs_queue) must have been built up previously
1673 * via bdrv_reopen_queue().
1674 *
1675 * Reopens all BDS specified in the queue, with the appropriate
1676 * flags. All devices are prepared for reopen, and failure of any
 1677 * device will cause all device changes to be abandoned, and intermediate
1678 * data cleaned up.
1679 *
1680 * If all devices prepare successfully, then the changes are committed
1681 * to all devices.
1682 *
1683 */
1684int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
1685{
1686 int ret = -1;
1687 BlockReopenQueueEntry *bs_entry, *next;
1688 Error *local_err = NULL;
1689
1690 assert(bs_queue != NULL);
1691
1692 bdrv_drain_all();
1693
1694 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1695 if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
1696 error_propagate(errp, local_err);
1697 goto cleanup;
1698 }
1699 bs_entry->prepared = true;
1700 }
1701
1702 /* If we reach this point, we have success and just need to apply the
1703 * changes
1704 */
1705 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1706 bdrv_reopen_commit(&bs_entry->state);
1707 }
1708
1709 ret = 0;
1710
1711cleanup:
1712 QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
1713 if (ret && bs_entry->prepared) {
1714 bdrv_reopen_abort(&bs_entry->state);
1715 }
1716 g_free(bs_entry);
1717 }
1718 g_free(bs_queue);
1719 return ret;
1720}
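
/*
 * Usage sketch: switching two devices to read-only in one transaction.  The
 * BDS pointers are placeholders; the queue itself is freed by
 * bdrv_reopen_multiple() in both the success and the error case.
 *
 *     BlockReopenQueue *queue = NULL;
 *     queue = bdrv_reopen_queue(queue, bs_a, bs_a->open_flags & ~BDRV_O_RDWR);
 *     queue = bdrv_reopen_queue(queue, bs_b, bs_b->open_flags & ~BDRV_O_RDWR);
 *     ret = bdrv_reopen_multiple(queue, &local_err);
 */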
1721
1722
1723/* Reopen a single BlockDriverState with the specified flags. */
1724int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1725{
1726 int ret = -1;
1727 Error *local_err = NULL;
1728 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1729
1730 ret = bdrv_reopen_multiple(queue, &local_err);
1731 if (local_err != NULL) {
1732 error_propagate(errp, local_err);
1733 }
1734 return ret;
1735}
1736
1737
1738/*
1739 * Prepares a BlockDriverState for reopen. All changes are staged in the
1740 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 1741 * the block driver's .bdrv_reopen_prepare() callback.
1742 *
1743 * bs is the BlockDriverState to reopen
1744 * flags are the new open flags
1745 * queue is the reopen queue
1746 *
1747 * Returns 0 on success, non-zero on error. On error errp will be set
1748 * as well.
1749 *
1750 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 1751 * It is the responsibility of the caller to then call bdrv_reopen_abort() or
 1752 * bdrv_reopen_commit() for any other BDS that has been left in a prepared state.
1753 *
1754 */
1755int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
1756 Error **errp)
1757{
1758 int ret = -1;
1759 Error *local_err = NULL;
1760 BlockDriver *drv;
1761
1762 assert(reopen_state != NULL);
1763 assert(reopen_state->bs->drv != NULL);
1764 drv = reopen_state->bs->drv;
1765
1766 /* if we are to stay read-only, do not allow permission change
1767 * to r/w */
1768 if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
1769 reopen_state->flags & BDRV_O_RDWR) {
1770 error_set(errp, QERR_DEVICE_IS_READ_ONLY,
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02001771 bdrv_get_device_name(reopen_state->bs));
Jeff Codye971aa12012-09-20 15:13:19 -04001772 goto error;
1773 }
1774
1775
1776 ret = bdrv_flush(reopen_state->bs);
1777 if (ret) {
1778 error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
1779 strerror(-ret));
1780 goto error;
1781 }
1782
1783 if (drv->bdrv_reopen_prepare) {
1784 ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
1785 if (ret) {
1786 if (local_err != NULL) {
1787 error_propagate(errp, local_err);
1788 } else {
Luiz Capitulinod8b68952013-06-10 11:29:27 -04001789 error_setg(errp, "failed while preparing to reopen image '%s'",
1790 reopen_state->bs->filename);
Jeff Codye971aa12012-09-20 15:13:19 -04001791 }
1792 goto error;
1793 }
1794 } else {
1795 /* It is currently mandatory to have a bdrv_reopen_prepare()
1796 * handler for each supported drv. */
1797 error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02001798 drv->format_name, bdrv_get_device_name(reopen_state->bs),
Jeff Codye971aa12012-09-20 15:13:19 -04001799 "reopening of file");
1800 ret = -1;
1801 goto error;
1802 }
1803
1804 ret = 0;
1805
1806error:
1807 return ret;
1808}
1809
1810/*
1811 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1812 * makes them final by swapping the staging BlockDriverState contents into
1813 * the active BlockDriverState contents.
1814 */
1815void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1816{
1817 BlockDriver *drv;
1818
1819 assert(reopen_state != NULL);
1820 drv = reopen_state->bs->drv;
1821 assert(drv != NULL);
1822
1823 /* If there are any driver level actions to take */
1824 if (drv->bdrv_reopen_commit) {
1825 drv->bdrv_reopen_commit(reopen_state);
1826 }
1827
1828 /* set BDS specific flags now */
1829 reopen_state->bs->open_flags = reopen_state->flags;
1830 reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1831 BDRV_O_CACHE_WB);
1832 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
Kevin Wolf355ef4a2013-12-11 20:14:09 +01001833
Kevin Wolf3baca892014-07-16 17:48:16 +02001834 bdrv_refresh_limits(reopen_state->bs, NULL);
Jeff Codye971aa12012-09-20 15:13:19 -04001835}
1836
1837/*
1838 * Abort the reopen, and delete and free the staged changes in
1839 * reopen_state
1840 */
1841void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1842{
1843 BlockDriver *drv;
1844
1845 assert(reopen_state != NULL);
1846 drv = reopen_state->bs->drv;
1847 assert(drv != NULL);
1848
1849 if (drv->bdrv_reopen_abort) {
1850 drv->bdrv_reopen_abort(reopen_state);
1851 }
1852}
1853
1854
bellardfc01f7e2003-06-30 10:03:06 +00001855void bdrv_close(BlockDriverState *bs)
1856{
Max Reitz33384422014-06-20 21:57:33 +02001857 BdrvAioNotifier *ban, *ban_next;
1858
Paolo Bonzini3cbc0022012-10-19 11:36:48 +02001859 if (bs->job) {
1860 block_job_cancel_sync(bs->job);
1861 }
Stefan Hajnoczi58fda172013-07-02 15:36:25 +02001862 bdrv_drain_all(); /* complete I/O */
1863 bdrv_flush(bs);
1864 bdrv_drain_all(); /* in case flush left pending I/O */
Paolo Bonzinid7d512f2012-08-23 11:20:36 +02001865 notifier_list_notify(&bs->close_notifiers, bs);
Kevin Wolf7094f122012-04-11 11:06:37 +02001866
Paolo Bonzini3cbc0022012-10-19 11:36:48 +02001867 if (bs->drv) {
Stefan Hajnoczi557df6a2010-04-17 10:49:06 +01001868 if (bs->backing_hd) {
Fam Zheng826b6ca2014-05-23 21:29:47 +08001869 BlockDriverState *backing_hd = bs->backing_hd;
1870 bdrv_set_backing_hd(bs, NULL);
1871 bdrv_unref(backing_hd);
Stefan Hajnoczi557df6a2010-04-17 10:49:06 +01001872 }
bellardea2384d2004-08-01 21:59:26 +00001873 bs->drv->bdrv_close(bs);
Anthony Liguori7267c092011-08-20 22:09:37 -05001874 g_free(bs->opaque);
bellardea2384d2004-08-01 21:59:26 +00001875 bs->opaque = NULL;
1876 bs->drv = NULL;
Stefan Hajnoczi53fec9d2011-11-28 16:08:47 +00001877 bs->copy_on_read = 0;
Paolo Bonzinia275fa42012-05-08 16:51:43 +02001878 bs->backing_file[0] = '\0';
1879 bs->backing_format[0] = '\0';
Paolo Bonzini64058752012-05-08 16:51:49 +02001880 bs->total_sectors = 0;
1881 bs->encrypted = 0;
1882 bs->valid_key = 0;
1883 bs->sg = 0;
1884 bs->growable = 0;
Asias He0d51b4d2013-08-22 15:24:14 +08001885 bs->zero_beyond_eof = false;
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001886 QDECREF(bs->options);
1887 bs->options = NULL;
Max Reitz91af7012014-07-18 20:24:56 +02001888 QDECREF(bs->full_open_options);
1889 bs->full_open_options = NULL;
bellardb3380822004-03-14 21:38:54 +00001890
Kevin Wolf66f82ce2010-04-14 14:17:38 +02001891 if (bs->file != NULL) {
Fam Zheng4f6fd342013-08-23 09:14:47 +08001892 bdrv_unref(bs->file);
Paolo Bonzini0ac93772012-05-08 16:51:44 +02001893 bs->file = NULL;
Kevin Wolf66f82ce2010-04-14 14:17:38 +02001894 }
bellardb3380822004-03-14 21:38:54 +00001895 }
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08001896
Markus Armbrustera7f53e22014-10-07 13:59:25 +02001897 if (bs->blk) {
1898 blk_dev_change_media_cb(bs->blk, false);
1899 }
Pavel Hrdina9ca11152012-08-09 12:44:48 +02001900
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08001901 /*throttling disk I/O limits*/
1902 if (bs->io_limits_enabled) {
1903 bdrv_io_limits_disable(bs);
1904 }
Max Reitz33384422014-06-20 21:57:33 +02001905
1906 QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
1907 g_free(ban);
1908 }
1909 QLIST_INIT(&bs->aio_notifiers);
bellardb3380822004-03-14 21:38:54 +00001910}
1911
MORITA Kazutaka2bc93fe2010-05-28 11:44:57 +09001912void bdrv_close_all(void)
1913{
1914 BlockDriverState *bs;
1915
Benoît Canetdc364f42014-01-23 21:31:32 +01001916 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02001917 AioContext *aio_context = bdrv_get_aio_context(bs);
1918
1919 aio_context_acquire(aio_context);
MORITA Kazutaka2bc93fe2010-05-28 11:44:57 +09001920 bdrv_close(bs);
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02001921 aio_context_release(aio_context);
MORITA Kazutaka2bc93fe2010-05-28 11:44:57 +09001922 }
1923}
1924
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001925/* Check if any requests are in-flight (including throttled requests) */
1926static bool bdrv_requests_pending(BlockDriverState *bs)
1927{
1928 if (!QLIST_EMPTY(&bs->tracked_requests)) {
1929 return true;
1930 }
Benoît Canetcc0681c2013-09-02 14:14:39 +02001931 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1932 return true;
1933 }
1934 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001935 return true;
1936 }
1937 if (bs->file && bdrv_requests_pending(bs->file)) {
1938 return true;
1939 }
1940 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1941 return true;
1942 }
1943 return false;
1944}
1945
Stefan Hajnoczi5b98db02014-10-21 12:03:55 +01001946static bool bdrv_drain_one(BlockDriverState *bs)
1947{
1948 bool bs_busy;
1949
1950 bdrv_flush_io_queue(bs);
1951 bdrv_start_throttled_reqs(bs);
1952 bs_busy = bdrv_requests_pending(bs);
1953 bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
1954 return bs_busy;
1955}
1956
1957/*
1958 * Wait for pending requests to complete on a single BlockDriverState subtree
1959 *
1960 * See the warning in bdrv_drain_all(). This function can only be called if
1961 * you are sure nothing can generate I/O because you have op blockers
1962 * installed.
1963 *
1964 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
1965 * AioContext.
1966 */
1967void bdrv_drain(BlockDriverState *bs)
1968{
1969 while (bdrv_drain_one(bs)) {
1970 /* Keep iterating */
1971 }
1972}
1973
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001974/*
1975 * Wait for pending requests to complete across all BlockDriverStates
1976 *
1977 * This function does not flush data to disk, use bdrv_flush_all() for that
1978 * after calling this function.
Zhi Yong Wu4c355d52012-04-12 14:00:57 +02001979 *
1980 * Note that completion of an asynchronous I/O operation can trigger any
1981 * number of other I/O operations on other devices---for example a coroutine
1982 * can be arbitrarily complex and a constant flow of I/O can come until the
1983 * coroutine is complete. Because of this, it is not possible to have a
1984 * function to drain a single device's I/O queue.
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001985 */
1986void bdrv_drain_all(void)
1987{
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001988 /* Always run first iteration so any pending completion BHs run */
1989 bool busy = true;
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001990 BlockDriverState *bs;
1991
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001992 while (busy) {
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001993 busy = false;
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001994
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001995 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1996 AioContext *aio_context = bdrv_get_aio_context(bs);
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001997
1998 aio_context_acquire(aio_context);
Stefan Hajnoczi5b98db02014-10-21 12:03:55 +01001999 busy |= bdrv_drain_one(bs);
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02002000 aio_context_release(aio_context);
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02002001 }
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00002002 }
2003}
2004
Benoît Canetdc364f42014-01-23 21:31:32 +01002005/* make a BlockDriverState anonymous by removing it from the bdrv_states and
 2006 * graph_bdrv_states lists.
Ryan Harperd22b2f42011-03-29 20:51:47 -05002007 Also, clear the node_name to prevent double removal */
2008void bdrv_make_anon(BlockDriverState *bs)
2009{
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02002010 /*
2011 * Take care to remove bs from bdrv_states only when it's actually
2012 * in it. Note that bs->device_list.tqe_prev is initially null,
2013 * and gets set to non-null by QTAILQ_INSERT_TAIL(). Establish
2014 * the useful invariant "bs in bdrv_states iff bs->tqe_prev" by
2015 * resetting it to null on remove.
2016 */
2017 if (bs->device_list.tqe_prev) {
Benoît Canetdc364f42014-01-23 21:31:32 +01002018 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02002019 bs->device_list.tqe_prev = NULL;
Ryan Harperd22b2f42011-03-29 20:51:47 -05002020 }
Benoît Canetdc364f42014-01-23 21:31:32 +01002021 if (bs->node_name[0] != '\0') {
2022 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
2023 }
2024 bs->node_name[0] = '\0';
Ryan Harperd22b2f42011-03-29 20:51:47 -05002025}
2026
Paolo Bonzinie023b2e2012-05-08 16:51:41 +02002027static void bdrv_rebind(BlockDriverState *bs)
2028{
2029 if (bs->drv && bs->drv->bdrv_rebind) {
2030 bs->drv->bdrv_rebind(bs);
2031 }
2032}
2033
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002034static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
2035 BlockDriverState *bs_src)
2036{
2037 /* move some fields that need to stay attached to the device */
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002038
2039 /* dev info */
Paolo Bonzini1b7fd722011-11-29 11:35:47 +01002040 bs_dest->guest_block_size = bs_src->guest_block_size;
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002041 bs_dest->copy_on_read = bs_src->copy_on_read;
2042
2043 bs_dest->enable_write_cache = bs_src->enable_write_cache;
2044
Benoît Canetcc0681c2013-09-02 14:14:39 +02002045 /* i/o throttled req */
2046 memcpy(&bs_dest->throttle_state,
2047 &bs_src->throttle_state,
2048 sizeof(ThrottleState));
2049 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
2050 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002051 bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
2052
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002053 /* r/w error */
2054 bs_dest->on_read_error = bs_src->on_read_error;
2055 bs_dest->on_write_error = bs_src->on_write_error;
2056
2057 /* i/o status */
2058 bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
2059 bs_dest->iostatus = bs_src->iostatus;
2060
2061 /* dirty bitmap */
Fam Zhenge4654d22013-11-13 18:29:43 +08002062 bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002063
Fam Zheng9fcb0252013-08-23 09:14:46 +08002064 /* reference count */
2065 bs_dest->refcnt = bs_src->refcnt;
2066
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002067 /* job */
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002068 bs_dest->job = bs_src->job;
2069
2070 /* keep the same entry in bdrv_states */
Benoît Canetdc364f42014-01-23 21:31:32 +01002071 bs_dest->device_list = bs_src->device_list;
Markus Armbruster7e7d56d2014-10-07 13:59:05 +02002072 bs_dest->blk = bs_src->blk;
2073
Fam Zhengfbe40ff2014-05-23 21:29:42 +08002074 memcpy(bs_dest->op_blockers, bs_src->op_blockers,
2075 sizeof(bs_dest->op_blockers));
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002076}
2077
2078/*
2079 * Swap bs contents for two image chains while they are live,
2080 * while keeping required fields on the BlockDriverState that is
2081 * actually attached to a device.
2082 *
2083 * This will modify the BlockDriverState fields, and swap contents
2084 * between bs_new and bs_old. Both bs_new and bs_old are modified.
2085 *
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02002086 * bs_new must not be attached to a BlockBackend.
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002087 *
2088 * This function does not create any image files.
2089 */
2090void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
2091{
2092 BlockDriverState tmp;
2093
Benoît Canet90ce8a02014-03-05 23:48:29 +01002094 /* The code needs to swap the node_name but simply swapping node_list won't
2095 * work so first remove the nodes from the graph list, do the swap then
2096 * insert them back if needed.
2097 */
2098 if (bs_new->node_name[0] != '\0') {
2099 QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
2100 }
2101 if (bs_old->node_name[0] != '\0') {
2102 QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
2103 }
2104
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02002105 /* bs_new must be unattached and shouldn't have anything fancy enabled */
Markus Armbruster7e7d56d2014-10-07 13:59:05 +02002106 assert(!bs_new->blk);
Fam Zhenge4654d22013-11-13 18:29:43 +08002107 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002108 assert(bs_new->job == NULL);
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002109 assert(bs_new->io_limits_enabled == false);
Benoît Canetcc0681c2013-09-02 14:14:39 +02002110 assert(!throttle_have_timer(&bs_new->throttle_state));
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002111
2112 tmp = *bs_new;
2113 *bs_new = *bs_old;
2114 *bs_old = tmp;
2115
2116 /* there are some fields that should not be swapped, move them back */
2117 bdrv_move_feature_fields(&tmp, bs_old);
2118 bdrv_move_feature_fields(bs_old, bs_new);
2119 bdrv_move_feature_fields(bs_new, &tmp);
2120
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02002121 /* bs_new must remain unattached */
Markus Armbruster7e7d56d2014-10-07 13:59:05 +02002122 assert(!bs_new->blk);
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002123
2124 /* Check a few fields that should remain attached to the device */
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002125 assert(bs_new->job == NULL);
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002126 assert(bs_new->io_limits_enabled == false);
Benoît Canetcc0681c2013-09-02 14:14:39 +02002127 assert(!throttle_have_timer(&bs_new->throttle_state));
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002128
Benoît Canet90ce8a02014-03-05 23:48:29 +01002129 /* insert the nodes back into the graph node list if needed */
2130 if (bs_new->node_name[0] != '\0') {
2131 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
2132 }
2133 if (bs_old->node_name[0] != '\0') {
2134 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
2135 }
2136
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002137 bdrv_rebind(bs_new);
2138 bdrv_rebind(bs_old);
2139}
2140
Jeff Cody8802d1f2012-02-28 15:54:06 -05002141/*
2142 * Add new bs contents at the top of an image chain while the chain is
2143 * live, while keeping required fields on the top layer.
2144 *
2145 * This will modify the BlockDriverState fields, and swap contents
2146 * between bs_new and bs_top. Both bs_new and bs_top are modified.
2147 *
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02002148 * bs_new must not be attached to a BlockBackend.
Jeff Codyf6801b82012-03-27 16:30:19 -04002149 *
Jeff Cody8802d1f2012-02-28 15:54:06 -05002150 * This function does not create any image files.
2151 */
2152void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
2153{
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002154 bdrv_swap(bs_new, bs_top);
Jeff Cody8802d1f2012-02-28 15:54:06 -05002155
2156 /* The contents of 'tmp' will become bs_top, as we are
2157 * swapping bs_new and bs_top contents. */
Fam Zheng8d24cce2014-05-23 21:29:45 +08002158 bdrv_set_backing_hd(bs_top, bs_new);
Jeff Cody8802d1f2012-02-28 15:54:06 -05002159}
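
/*
 * Usage sketch (this is what external snapshot creation and the temporary
 * snapshot code above do): 'overlay' is a freshly opened image whose backing
 * file is the current top of the chain attached to the guest.  Both names
 * are placeholders.
 *
 *     bdrv_append(overlay, bs);
 *     // 'bs' keeps its device attachment but now refers to the overlay's
 *     // contents; the old top image is reachable as bs->backing_hd
 */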
2160
Fam Zheng4f6fd342013-08-23 09:14:47 +08002161static void bdrv_delete(BlockDriverState *bs)
bellardb3380822004-03-14 21:38:54 +00002162{
Paolo Bonzini3e914652012-03-30 13:17:11 +02002163 assert(!bs->job);
Fam Zheng3718d8a2014-05-23 21:29:43 +08002164 assert(bdrv_op_blocker_is_empty(bs));
Fam Zheng4f6fd342013-08-23 09:14:47 +08002165 assert(!bs->refcnt);
Fam Zhenge4654d22013-11-13 18:29:43 +08002166 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
Markus Armbruster18846de2010-06-29 16:58:30 +02002167
Stefan Hajnoczie1b5c522013-06-27 15:32:26 +02002168 bdrv_close(bs);
2169
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002170 /* remove from list, if necessary */
Ryan Harperd22b2f42011-03-29 20:51:47 -05002171 bdrv_make_anon(bs);
aurel3234c6f052008-04-08 19:51:21 +00002172
Anthony Liguori7267c092011-08-20 22:09:37 -05002173 g_free(bs);
bellardfc01f7e2003-06-30 10:03:06 +00002174}
2175
aliguorie97fc192009-04-21 23:11:50 +00002176/*
2177 * Run consistency checks on an image
2178 *
Kevin Wolfe076f332010-06-29 11:43:13 +02002179 * Returns 0 if the check could be completed (it doesn't mean that the image is
Stefan Weila1c72732011-04-28 17:20:38 +02002180 * free of errors) or -errno when an internal error occurred. The results of the
Kevin Wolfe076f332010-06-29 11:43:13 +02002181 * check are stored in res.
aliguorie97fc192009-04-21 23:11:50 +00002182 */
Kevin Wolf4534ff52012-05-11 16:07:02 +02002183int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
aliguorie97fc192009-04-21 23:11:50 +00002184{
Max Reitz908bcd52014-08-07 22:47:55 +02002185 if (bs->drv == NULL) {
2186 return -ENOMEDIUM;
2187 }
aliguorie97fc192009-04-21 23:11:50 +00002188 if (bs->drv->bdrv_check == NULL) {
2189 return -ENOTSUP;
2190 }
2191
Kevin Wolfe076f332010-06-29 11:43:13 +02002192 memset(res, 0, sizeof(*res));
Kevin Wolf4534ff52012-05-11 16:07:02 +02002193 return bs->drv->bdrv_check(bs, res, fix);
aliguorie97fc192009-04-21 23:11:50 +00002194}
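
/*
 * Usage sketch, roughly what "qemu-img check -r all" does: ask the format
 * driver to check and repair both leaks and corruptions in one pass.  'bs'
 * is a placeholder.
 *
 *     BdrvCheckResult result = {0};
 *     ret = bdrv_check(bs, &result, BDRV_FIX_LEAKS | BDRV_FIX_ERRORS);
 *     if (ret == 0 && (result.corruptions || result.leaks)) {
 *         // image had problems; result.corruptions_fixed and
 *         // result.leaks_fixed say how many of them were repaired
 *     }
 */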
2195
Kevin Wolf8a426612010-07-16 17:17:01 +02002196#define COMMIT_BUF_SECTORS 2048
2197
bellard33e39632003-07-06 17:15:21 +00002198/* commit COW file into the raw image */
2199int bdrv_commit(BlockDriverState *bs)
2200{
bellard19cb3732006-08-19 11:45:59 +00002201 BlockDriver *drv = bs->drv;
Jeff Cody72706ea2014-01-24 09:02:35 -05002202 int64_t sector, total_sectors, length, backing_length;
Kevin Wolf8a426612010-07-16 17:17:01 +02002203 int n, ro, open_flags;
Jeff Cody0bce5972012-09-20 15:13:34 -04002204 int ret = 0;
Jeff Cody72706ea2014-01-24 09:02:35 -05002205 uint8_t *buf = NULL;
Jim Meyeringc2cba3d2012-10-04 13:09:46 +02002206 char filename[PATH_MAX];
bellard33e39632003-07-06 17:15:21 +00002207
bellard19cb3732006-08-19 11:45:59 +00002208 if (!drv)
2209 return -ENOMEDIUM;
Liu Yuan6bb45152014-09-01 13:35:21 +08002210
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002211 if (!bs->backing_hd) {
2212 return -ENOTSUP;
bellard33e39632003-07-06 17:15:21 +00002213 }
2214
Fam Zheng3718d8a2014-05-23 21:29:43 +08002215 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2216 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
Stefan Hajnoczi2d3735d2012-01-18 14:40:41 +00002217 return -EBUSY;
2218 }
2219
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002220 ro = bs->backing_hd->read_only;
Jim Meyeringc2cba3d2012-10-04 13:09:46 +02002221 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2222 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002223 open_flags = bs->backing_hd->open_flags;
2224
2225 if (ro) {
Jeff Cody0bce5972012-09-20 15:13:34 -04002226 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2227 return -EACCES;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002228 }
bellard33e39632003-07-06 17:15:21 +00002229 }
bellardea2384d2004-08-01 21:59:26 +00002230
Jeff Cody72706ea2014-01-24 09:02:35 -05002231 length = bdrv_getlength(bs);
2232 if (length < 0) {
2233 ret = length;
2234 goto ro_cleanup;
2235 }
2236
2237 backing_length = bdrv_getlength(bs->backing_hd);
2238 if (backing_length < 0) {
2239 ret = backing_length;
2240 goto ro_cleanup;
2241 }
2242
2243 /* If our top snapshot is larger than the backing file image,
2244 * grow the backing file image if possible. If not possible,
2245 * we must return an error */
2246 if (length > backing_length) {
2247 ret = bdrv_truncate(bs->backing_hd, length);
2248 if (ret < 0) {
2249 goto ro_cleanup;
2250 }
2251 }
2252
2253 total_sectors = length >> BDRV_SECTOR_BITS;
Kevin Wolf857d4f42014-05-20 13:16:51 +02002254
2255 /* qemu_try_blockalign() for bs will choose an alignment that works for
2256 * bs->backing_hd as well, so no need to compare the alignment manually. */
2257 buf = qemu_try_blockalign(bs, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2258 if (buf == NULL) {
2259 ret = -ENOMEM;
2260 goto ro_cleanup;
2261 }
bellardea2384d2004-08-01 21:59:26 +00002262
Kevin Wolf8a426612010-07-16 17:17:01 +02002263 for (sector = 0; sector < total_sectors; sector += n) {
Paolo Bonzinid6636402013-09-04 19:00:25 +02002264 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2265 if (ret < 0) {
2266 goto ro_cleanup;
2267 }
2268 if (ret) {
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002269 ret = bdrv_read(bs, sector, buf, n);
2270 if (ret < 0) {
Kevin Wolf8a426612010-07-16 17:17:01 +02002271 goto ro_cleanup;
2272 }
2273
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002274 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2275 if (ret < 0) {
Kevin Wolf8a426612010-07-16 17:17:01 +02002276 goto ro_cleanup;
2277 }
bellardea2384d2004-08-01 21:59:26 +00002278 }
2279 }
bellard95389c82005-12-18 18:28:15 +00002280
Christoph Hellwig1d449522010-01-17 12:32:30 +01002281 if (drv->bdrv_make_empty) {
2282 ret = drv->bdrv_make_empty(bs);
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002283 if (ret < 0) {
2284 goto ro_cleanup;
2285 }
Christoph Hellwig1d449522010-01-17 12:32:30 +01002286 bdrv_flush(bs);
2287 }
bellard95389c82005-12-18 18:28:15 +00002288
Christoph Hellwig3f5075a2010-01-12 13:49:23 +01002289 /*
2290 * Make sure all data we wrote to the backing device is actually
2291 * stable on disk.
2292 */
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002293 if (bs->backing_hd) {
Christoph Hellwig3f5075a2010-01-12 13:49:23 +01002294 bdrv_flush(bs->backing_hd);
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002295 }
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002296
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002297 ret = 0;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002298ro_cleanup:
Kevin Wolf857d4f42014-05-20 13:16:51 +02002299 qemu_vfree(buf);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002300
2301 if (ro) {
Jeff Cody0bce5972012-09-20 15:13:34 -04002302 /* ignoring error return here */
2303 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002304 }
2305
Christoph Hellwig1d449522010-01-17 12:32:30 +01002306 return ret;
bellard33e39632003-07-06 17:15:21 +00002307}
2308
Stefan Hajnoczie8877492012-03-05 18:10:11 +00002309int bdrv_commit_all(void)
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02002310{
2311 BlockDriverState *bs;
2312
Benoît Canetdc364f42014-01-23 21:31:32 +01002313 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02002314 AioContext *aio_context = bdrv_get_aio_context(bs);
2315
2316 aio_context_acquire(aio_context);
Jeff Cody272d2d82013-02-26 09:55:48 -05002317 if (bs->drv && bs->backing_hd) {
2318 int ret = bdrv_commit(bs);
2319 if (ret < 0) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02002320 aio_context_release(aio_context);
Jeff Cody272d2d82013-02-26 09:55:48 -05002321 return ret;
2322 }
Stefan Hajnoczie8877492012-03-05 18:10:11 +00002323 }
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02002324 aio_context_release(aio_context);
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02002325 }
Stefan Hajnoczie8877492012-03-05 18:10:11 +00002326 return 0;
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02002327}
2328
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002329/**
2330 * Remove an active request from the tracked requests list
2331 *
2332 * This function should be called when a tracked request is completing.
2333 */
2334static void tracked_request_end(BdrvTrackedRequest *req)
2335{
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002336 if (req->serialising) {
2337 req->bs->serialising_in_flight--;
2338 }
2339
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002340 QLIST_REMOVE(req, list);
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002341 qemu_co_queue_restart_all(&req->wait_queue);
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002342}
2343
2344/**
2345 * Add an active request to the tracked requests list
2346 */
2347static void tracked_request_begin(BdrvTrackedRequest *req,
2348 BlockDriverState *bs,
Kevin Wolf793ed472013-12-03 15:31:25 +01002349 int64_t offset,
2350 unsigned int bytes, bool is_write)
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002351{
2352 *req = (BdrvTrackedRequest){
2353 .bs = bs,
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002354 .offset = offset,
2355 .bytes = bytes,
2356 .is_write = is_write,
2357 .co = qemu_coroutine_self(),
2358 .serialising = false,
Kevin Wolf73271452013-12-04 17:08:50 +01002359 .overlap_offset = offset,
2360 .overlap_bytes = bytes,
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002361 };
2362
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002363 qemu_co_queue_init(&req->wait_queue);
2364
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002365 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2366}
2367
Kevin Wolfe96126f2014-02-08 10:42:18 +01002368static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002369{
Kevin Wolf73271452013-12-04 17:08:50 +01002370 int64_t overlap_offset = req->offset & ~(align - 1);
Kevin Wolfe96126f2014-02-08 10:42:18 +01002371 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2372 - overlap_offset;
Kevin Wolf73271452013-12-04 17:08:50 +01002373
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002374 if (!req->serialising) {
2375 req->bs->serialising_in_flight++;
2376 req->serialising = true;
2377 }
Kevin Wolf73271452013-12-04 17:08:50 +01002378
2379 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2380 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002381}
2382
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002383/**
2384 * Round a region to cluster boundaries
2385 */
Paolo Bonzini343bded2013-01-21 17:09:42 +01002386void bdrv_round_to_clusters(BlockDriverState *bs,
2387 int64_t sector_num, int nb_sectors,
2388 int64_t *cluster_sector_num,
2389 int *cluster_nb_sectors)
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002390{
2391 BlockDriverInfo bdi;
2392
2393 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2394 *cluster_sector_num = sector_num;
2395 *cluster_nb_sectors = nb_sectors;
2396 } else {
2397 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2398 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2399 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2400 nb_sectors, c);
2401 }
2402}
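
/*
 * Worked example: with a 64 KiB cluster size, c = 65536 / 512 = 128 sectors.
 * A request for sectors [300, 310) is widened to the cluster-aligned range
 * [256, 384), since QEMU_ALIGN_DOWN(300, 128) == 256 and
 * QEMU_ALIGN_UP(300 - 256 + 10, 128) == 128.
 */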
2403
Kevin Wolf73271452013-12-04 17:08:50 +01002404static int bdrv_get_cluster_size(BlockDriverState *bs)
Kevin Wolf793ed472013-12-03 15:31:25 +01002405{
2406 BlockDriverInfo bdi;
Kevin Wolf73271452013-12-04 17:08:50 +01002407 int ret;
Kevin Wolf793ed472013-12-03 15:31:25 +01002408
Kevin Wolf73271452013-12-04 17:08:50 +01002409 ret = bdrv_get_info(bs, &bdi);
2410 if (ret < 0 || bdi.cluster_size == 0) {
2411 return bs->request_alignment;
Kevin Wolf793ed472013-12-03 15:31:25 +01002412 } else {
Kevin Wolf73271452013-12-04 17:08:50 +01002413 return bdi.cluster_size;
Kevin Wolf793ed472013-12-03 15:31:25 +01002414 }
2415}
2416
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002417static bool tracked_request_overlaps(BdrvTrackedRequest *req,
Kevin Wolf793ed472013-12-03 15:31:25 +01002418 int64_t offset, unsigned int bytes)
2419{
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002420 /* aaaa bbbb */
Kevin Wolf73271452013-12-04 17:08:50 +01002421 if (offset >= req->overlap_offset + req->overlap_bytes) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002422 return false;
2423 }
2424 /* bbbb aaaa */
Kevin Wolf73271452013-12-04 17:08:50 +01002425 if (req->overlap_offset >= offset + bytes) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002426 return false;
2427 }
2428 return true;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002429}
2430
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002431static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002432{
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002433 BlockDriverState *bs = self->bs;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002434 BdrvTrackedRequest *req;
2435 bool retry;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002436 bool waited = false;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002437
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002438 if (!bs->serialising_in_flight) {
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002439 return false;
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002440 }
2441
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002442 do {
2443 retry = false;
2444 QLIST_FOREACH(req, &bs->tracked_requests, list) {
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002445 if (req == self || (!req->serialising && !self->serialising)) {
Kevin Wolf65afd212013-12-03 14:55:55 +01002446 continue;
2447 }
Kevin Wolf73271452013-12-04 17:08:50 +01002448 if (tracked_request_overlaps(req, self->overlap_offset,
2449 self->overlap_bytes))
2450 {
Stefan Hajnoczi5f8b6492011-11-30 12:23:42 +00002451 /* Hitting this means there was a reentrant request, for
2452 * example, a block driver issuing nested requests. This must
2453 * never happen since it means deadlock.
2454 */
2455 assert(qemu_coroutine_self() != req->co);
2456
Kevin Wolf64604402013-12-13 13:04:35 +01002457 /* If the request is already (indirectly) waiting for us, or
2458 * will wait for us as soon as it wakes up, then just go on
2459 * (instead of producing a deadlock in the former case). */
2460 if (!req->waiting_for) {
2461 self->waiting_for = req;
2462 qemu_co_queue_wait(&req->wait_queue);
2463 self->waiting_for = NULL;
2464 retry = true;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002465 waited = true;
Kevin Wolf64604402013-12-13 13:04:35 +01002466 break;
2467 }
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002468 }
2469 }
2470 } while (retry);
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002471
2472 return waited;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002473}
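
/*
 * Sketch of how the tracking helpers above fit together in a request path
 * (simplified; the real read/write functions later in this file add alignment
 * handling and copy-on-read around this skeleton, and the condition name is a
 * placeholder):
 *
 *     BdrvTrackedRequest req;
 *
 *     tracked_request_begin(&req, bs, offset, bytes, is_write);
 *     if (request_must_be_serialised) {
 *         mark_request_serialising(&req, bdrv_get_cluster_size(bs));
 *         wait_serialising_requests(&req);
 *     }
 *     // ... submit the request to the driver ...
 *     tracked_request_end(&req);
 */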
2474
Kevin Wolf756e6732010-01-12 12:55:17 +01002475/*
2476 * Return values:
2477 * 0 - success
2478 * -EINVAL - backing format specified, but no file
2479 * -ENOSPC - can't update the backing file because no space is left in the
2480 * image file header
2481 * -ENOTSUP - format driver doesn't support changing the backing file
2482 */
2483int bdrv_change_backing_file(BlockDriverState *bs,
2484 const char *backing_file, const char *backing_fmt)
2485{
2486 BlockDriver *drv = bs->drv;
Paolo Bonzini469ef352012-04-12 14:01:02 +02002487 int ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01002488
Paolo Bonzini5f377792012-04-12 14:01:01 +02002489 /* Backing file format doesn't make sense without a backing file */
2490 if (backing_fmt && !backing_file) {
2491 return -EINVAL;
2492 }
2493
Kevin Wolf756e6732010-01-12 12:55:17 +01002494 if (drv->bdrv_change_backing_file != NULL) {
Paolo Bonzini469ef352012-04-12 14:01:02 +02002495 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
Kevin Wolf756e6732010-01-12 12:55:17 +01002496 } else {
Paolo Bonzini469ef352012-04-12 14:01:02 +02002497 ret = -ENOTSUP;
Kevin Wolf756e6732010-01-12 12:55:17 +01002498 }
Paolo Bonzini469ef352012-04-12 14:01:02 +02002499
2500 if (ret == 0) {
2501 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2502 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2503 }
2504 return ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01002505}
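
/*
 * Usage sketch with placeholder names: rewrite the backing file string stored
 * in an overlay after its backing image was moved to a new path.
 *
 *     ret = bdrv_change_backing_file(overlay_bs, "/new/path/base.img", "raw");
 *     if (ret == -ENOTSUP) {
 *         // the format driver cannot rewrite its header in place
 *     }
 */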
2506
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002507/*
2508 * Finds the image layer in the chain that has 'bs' as its backing file.
2509 *
2510 * active is the current topmost image.
2511 *
2512 * Returns NULL if bs is not found in active's image chain,
2513 * or if active == bs.
Jeff Cody4caf0fc2014-06-25 15:35:26 -04002514 *
2515 * Returns the bottommost base image if bs == NULL.
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002516 */
2517BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2518 BlockDriverState *bs)
2519{
Jeff Cody4caf0fc2014-06-25 15:35:26 -04002520 while (active && bs != active->backing_hd) {
2521 active = active->backing_hd;
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002522 }
2523
Jeff Cody4caf0fc2014-06-25 15:35:26 -04002524 return active;
2525}
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002526
Jeff Cody4caf0fc2014-06-25 15:35:26 -04002527/* Given a BDS, searches for the base layer. */
2528BlockDriverState *bdrv_find_base(BlockDriverState *bs)
2529{
2530 return bdrv_find_overlay(bs, NULL);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002531}
2532
2533typedef struct BlkIntermediateStates {
2534 BlockDriverState *bs;
2535 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2536} BlkIntermediateStates;
2537
2538
2539/*
2540 * Drops images above 'base' up to and including 'top', and sets the image
2541 * above 'top' to have base as its backing file.
2542 *
2543 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 2544 * information in that overlay can be properly updated.
2545 *
2546 * E.g., this will convert the following chain:
2547 * bottom <- base <- intermediate <- top <- active
2548 *
2549 * to
2550 *
2551 * bottom <- base <- active
2552 *
2553 * It is allowed for bottom==base, in which case it converts:
2554 *
2555 * base <- intermediate <- top <- active
2556 *
2557 * to
2558 *
2559 * base <- active
2560 *
Jeff Cody54e26902014-06-25 15:40:10 -04002561 * If backing_file_str is non-NULL, it will be used when modifying top's
2562 * overlay image metadata.
2563 *
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002564 * Error conditions:
2565 * if active == top, that is considered an error
2566 *
2567 */
2568int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
Jeff Cody54e26902014-06-25 15:40:10 -04002569 BlockDriverState *base, const char *backing_file_str)
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002570{
2571 BlockDriverState *intermediate;
2572 BlockDriverState *base_bs = NULL;
2573 BlockDriverState *new_top_bs = NULL;
2574 BlkIntermediateStates *intermediate_state, *next;
2575 int ret = -EIO;
2576
2577 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2578 QSIMPLEQ_INIT(&states_to_delete);
2579
2580 if (!top->drv || !base->drv) {
2581 goto exit;
2582 }
2583
2584 new_top_bs = bdrv_find_overlay(active, top);
2585
2586 if (new_top_bs == NULL) {
2587 /* we could not find the image above 'top', this is an error */
2588 goto exit;
2589 }
2590
2591 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2592 * to do, no intermediate images */
2593 if (new_top_bs->backing_hd == base) {
2594 ret = 0;
2595 goto exit;
2596 }
2597
2598 intermediate = top;
2599
 2600    /* now we walk down the backing chain, adding each BDS we find
 2601     * to our deletion queue, until we hit the 'base'
2602 */
2603 while (intermediate) {
Markus Armbruster5839e532014-08-19 10:31:08 +02002604 intermediate_state = g_new0(BlkIntermediateStates, 1);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002605 intermediate_state->bs = intermediate;
2606 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2607
2608 if (intermediate->backing_hd == base) {
2609 base_bs = intermediate->backing_hd;
2610 break;
2611 }
2612 intermediate = intermediate->backing_hd;
2613 }
2614 if (base_bs == NULL) {
 2615        /* something went wrong: we did not end at the base. Safely
 2616         * unravel everything and exit with an error */
2617 goto exit;
2618 }
2619
2620 /* success - we can delete the intermediate states, and link top->base */
Jeff Cody54e26902014-06-25 15:40:10 -04002621 backing_file_str = backing_file_str ? backing_file_str : base_bs->filename;
2622 ret = bdrv_change_backing_file(new_top_bs, backing_file_str,
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002623 base_bs->drv ? base_bs->drv->format_name : "");
2624 if (ret) {
2625 goto exit;
2626 }
Fam Zheng920beae2014-05-23 21:29:46 +08002627 bdrv_set_backing_hd(new_top_bs, base_bs);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002628
2629 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2630 /* so that bdrv_close() does not recursively close the chain */
Fam Zheng920beae2014-05-23 21:29:46 +08002631 bdrv_set_backing_hd(intermediate_state->bs, NULL);
Fam Zheng4f6fd342013-08-23 09:14:47 +08002632 bdrv_unref(intermediate_state->bs);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002633 }
2634 ret = 0;
2635
2636exit:
2637 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2638 g_free(intermediate_state);
2639 }
2640 return ret;
2641}
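/* Illustrative sketch, not in the original source (the wrapper name is made
 * up): collapsing the chain documented above,
 *     base <- intermediate <- top <- active
 * into
 *     base <- active
 * Passing NULL as backing_file_str makes bdrv_drop_intermediate() fall back
 * to base_bs->filename when it rewrites the overlay's backing-file entry. */
static int example_collapse_chain(BlockDriverState *active,
                                  BlockDriverState *top,
                                  BlockDriverState *base)
{
    if (active == top) {
        /* documented error condition: 'top' must not be the active layer */
        return -EINVAL;
    }
    return bdrv_drop_intermediate(active, top, base, NULL);
}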
2642
2643
aliguori71d07702009-03-03 17:37:16 +00002644static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2645 size_t size)
2646{
2647 int64_t len;
2648
Kevin Wolf1dd3a442014-04-14 14:48:16 +02002649 if (size > INT_MAX) {
2650 return -EIO;
2651 }
2652
aliguori71d07702009-03-03 17:37:16 +00002653 if (!bdrv_is_inserted(bs))
2654 return -ENOMEDIUM;
2655
2656 if (bs->growable)
2657 return 0;
2658
2659 len = bdrv_getlength(bs);
2660
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02002661 if (offset < 0)
2662 return -EIO;
2663
2664 if ((offset > len) || (len - offset < size))
aliguori71d07702009-03-03 17:37:16 +00002665 return -EIO;
2666
2667 return 0;
2668}
2669
2670static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2671 int nb_sectors)
2672{
Kevin Wolf54db38a2014-04-14 14:47:14 +02002673 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
Kevin Wolf8f4754e2014-03-26 13:06:02 +01002674 return -EIO;
2675 }
2676
Jes Sorenseneb5a3162010-05-27 16:20:31 +02002677 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2678 nb_sectors * BDRV_SECTOR_SIZE);
aliguori71d07702009-03-03 17:37:16 +00002679}
2680
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002681typedef struct RwCo {
2682 BlockDriverState *bs;
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002683 int64_t offset;
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002684 QEMUIOVector *qiov;
2685 bool is_write;
2686 int ret;
Peter Lieven4105eaa2013-07-11 14:16:22 +02002687 BdrvRequestFlags flags;
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002688} RwCo;
2689
2690static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2691{
2692 RwCo *rwco = opaque;
2693
2694 if (!rwco->is_write) {
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002695 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2696 rwco->qiov->size, rwco->qiov,
Peter Lieven4105eaa2013-07-11 14:16:22 +02002697 rwco->flags);
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002698 } else {
2699 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2700 rwco->qiov->size, rwco->qiov,
2701 rwco->flags);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002702 }
2703}
2704
2705/*
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002706 * Process a vectored synchronous request using coroutines
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002707 */
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002708static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2709 QEMUIOVector *qiov, bool is_write,
2710 BdrvRequestFlags flags)
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002711{
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002712 Coroutine *co;
2713 RwCo rwco = {
2714 .bs = bs,
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002715 .offset = offset,
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002716 .qiov = qiov,
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002717 .is_write = is_write,
2718 .ret = NOT_DONE,
Peter Lieven4105eaa2013-07-11 14:16:22 +02002719 .flags = flags,
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002720 };
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002721
Zhi Yong Wu498e3862012-04-02 18:59:34 +08002722 /**
2723 * In sync call context, when the vcpu is blocked, this throttling timer
2724 * will not fire; so the I/O throttling function has to be disabled here
2725 * if it has been enabled.
2726 */
2727 if (bs->io_limits_enabled) {
2728 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2729 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2730 bdrv_io_limits_disable(bs);
2731 }
2732
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002733 if (qemu_in_coroutine()) {
2734 /* Fast-path if already in coroutine context */
2735 bdrv_rw_co_entry(&rwco);
2736 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02002737 AioContext *aio_context = bdrv_get_aio_context(bs);
2738
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002739 co = qemu_coroutine_create(bdrv_rw_co_entry);
2740 qemu_coroutine_enter(co, &rwco);
2741 while (rwco.ret == NOT_DONE) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02002742 aio_poll(aio_context, true);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002743 }
2744 }
2745 return rwco.ret;
2746}
2747
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002748/*
2749 * Process a synchronous request using coroutines
2750 */
2751static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
Peter Lieven4105eaa2013-07-11 14:16:22 +02002752 int nb_sectors, bool is_write, BdrvRequestFlags flags)
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002753{
2754 QEMUIOVector qiov;
2755 struct iovec iov = {
2756 .iov_base = (void *)buf,
2757 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2758 };
2759
Kevin Wolfda15ee52014-04-14 15:39:36 +02002760 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2761 return -EINVAL;
2762 }
2763
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002764 qemu_iovec_init_external(&qiov, &iov, 1);
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002765 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2766 &qiov, is_write, flags);
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002767}
2768
bellard19cb3732006-08-19 11:45:59 +00002769/* return < 0 if error. See bdrv_write() for the return codes */
ths5fafdf22007-09-16 21:08:06 +00002770int bdrv_read(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00002771 uint8_t *buf, int nb_sectors)
2772{
Peter Lieven4105eaa2013-07-11 14:16:22 +02002773 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
bellardfc01f7e2003-06-30 10:03:06 +00002774}
2775
Markus Armbruster07d27a42012-06-29 17:34:29 +02002776/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2777int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2778 uint8_t *buf, int nb_sectors)
2779{
2780 bool enabled;
2781 int ret;
2782
2783 enabled = bs->io_limits_enabled;
2784 bs->io_limits_enabled = false;
Peter Lieven4e7395e2013-07-18 10:37:32 +02002785 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
Markus Armbruster07d27a42012-06-29 17:34:29 +02002786 bs->io_limits_enabled = enabled;
2787 return ret;
2788}
2789
ths5fafdf22007-09-16 21:08:06 +00002790/* Return < 0 if error. Important errors are:
bellard19cb3732006-08-19 11:45:59 +00002791 -EIO generic I/O error (may happen for all errors)
2792 -ENOMEDIUM No media inserted.
2793 -EINVAL Invalid sector number or nb_sectors
2794 -EACCES Trying to write a read-only device
2795*/
ths5fafdf22007-09-16 21:08:06 +00002796int bdrv_write(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00002797 const uint8_t *buf, int nb_sectors)
2798{
Peter Lieven4105eaa2013-07-11 14:16:22 +02002799 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
bellard83f64092006-08-01 16:21:11 +00002800}
2801
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02002802int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2803 int nb_sectors, BdrvRequestFlags flags)
Peter Lieven4105eaa2013-07-11 14:16:22 +02002804{
2805 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02002806 BDRV_REQ_ZERO_WRITE | flags);
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002807}
2808
Peter Lievend75cbb52013-10-24 12:07:03 +02002809/*
2810 * Completely zero out a block device with the help of bdrv_write_zeroes.
2811 * The operation is sped up by checking the block status and only writing
 2812 * zeroes to ranges that do not already read back as zeroes. Optional
2813 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2814 *
2815 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2816 */
2817int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2818{
Markus Armbrusterd32f7c12014-06-26 13:23:18 +02002819 int64_t target_sectors, ret, nb_sectors, sector_num = 0;
Peter Lievend75cbb52013-10-24 12:07:03 +02002820 int n;
2821
Markus Armbrusterd32f7c12014-06-26 13:23:18 +02002822 target_sectors = bdrv_nb_sectors(bs);
2823 if (target_sectors < 0) {
2824 return target_sectors;
Kevin Wolf9ce10c02014-04-14 17:03:34 +02002825 }
Kevin Wolf9ce10c02014-04-14 17:03:34 +02002826
Peter Lievend75cbb52013-10-24 12:07:03 +02002827 for (;;) {
Markus Armbrusterd32f7c12014-06-26 13:23:18 +02002828 nb_sectors = target_sectors - sector_num;
Peter Lievend75cbb52013-10-24 12:07:03 +02002829 if (nb_sectors <= 0) {
2830 return 0;
2831 }
Fam Zhengf3a9cfd2014-11-10 15:07:44 +08002832 if (nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2833 nb_sectors = INT_MAX / BDRV_SECTOR_SIZE;
Peter Lievend75cbb52013-10-24 12:07:03 +02002834 }
2835 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
Peter Lieven3d94ce62013-12-12 13:57:05 +01002836 if (ret < 0) {
2837 error_report("error getting block status at sector %" PRId64 ": %s",
2838 sector_num, strerror(-ret));
2839 return ret;
2840 }
Peter Lievend75cbb52013-10-24 12:07:03 +02002841 if (ret & BDRV_BLOCK_ZERO) {
2842 sector_num += n;
2843 continue;
2844 }
2845 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2846 if (ret < 0) {
2847 error_report("error writing zeroes at sector %" PRId64 ": %s",
2848 sector_num, strerror(-ret));
2849 return ret;
2850 }
2851 sector_num += n;
2852 }
2853}
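/* Illustrative sketch (assumption; the wrapper name is made up): zeroing a
 * whole device while allowing the driver to unmap blocks where it can.
 * Because bdrv_make_zero() consults bdrv_get_block_status() first, ranges
 * that already read back as zeroes are skipped, which keeps this cheap on
 * sparse images. */
static int example_wipe_device(BlockDriverState *bs)
{
    /* error codes are the same as for bdrv_write() */
    return bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
}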
2854
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002855int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
bellard83f64092006-08-01 16:21:11 +00002856{
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002857 QEMUIOVector qiov;
2858 struct iovec iov = {
2859 .iov_base = (void *)buf,
2860 .iov_len = bytes,
2861 };
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01002862 int ret;
bellard83f64092006-08-01 16:21:11 +00002863
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002864 if (bytes < 0) {
2865 return -EINVAL;
bellard83f64092006-08-01 16:21:11 +00002866 }
2867
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002868 qemu_iovec_init_external(&qiov, &iov, 1);
2869 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2870 if (ret < 0) {
2871 return ret;
bellard83f64092006-08-01 16:21:11 +00002872 }
2873
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002874 return bytes;
bellard83f64092006-08-01 16:21:11 +00002875}
2876
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002877int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
bellard83f64092006-08-01 16:21:11 +00002878{
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01002879 int ret;
bellard83f64092006-08-01 16:21:11 +00002880
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002881 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2882 if (ret < 0) {
2883 return ret;
bellard83f64092006-08-01 16:21:11 +00002884 }
2885
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002886 return qiov->size;
2887}
2888
2889int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002890 const void *buf, int bytes)
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002891{
2892 QEMUIOVector qiov;
2893 struct iovec iov = {
2894 .iov_base = (void *) buf,
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002895 .iov_len = bytes,
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002896 };
2897
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002898 if (bytes < 0) {
2899 return -EINVAL;
2900 }
2901
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002902 qemu_iovec_init_external(&qiov, &iov, 1);
2903 return bdrv_pwritev(bs, offset, &qiov);
bellard83f64092006-08-01 16:21:11 +00002904}
bellard83f64092006-08-01 16:21:11 +00002905
Kevin Wolff08145f2010-06-16 16:38:15 +02002906/*
2907 * Writes to the file and ensures that no writes are reordered across this
2908 * request (acts as a barrier)
2909 *
2910 * Returns 0 on success, -errno in error cases.
2911 */
2912int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2913 const void *buf, int count)
2914{
2915 int ret;
2916
2917 ret = bdrv_pwrite(bs, offset, buf, count);
2918 if (ret < 0) {
2919 return ret;
2920 }
2921
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02002922 /* No flush needed for cache modes that already do it */
2923 if (bs->enable_write_cache) {
Kevin Wolff08145f2010-06-16 16:38:15 +02002924 bdrv_flush(bs);
2925 }
2926
2927 return 0;
2928}
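/* Illustrative sketch (assumption; the header-field layout is
 * hypothetical): using bdrv_pwrite_sync() as a barrier when updating
 * on-disk metadata that later writes must not overtake.  The flush inside
 * bdrv_pwrite_sync() is skipped for writethrough cache modes, which already
 * provide this ordering. */
static int example_update_header_field(BlockDriverState *bs,
                                       int64_t field_offset, uint32_t value)
{
    uint32_t le_value = cpu_to_le32(value);

    return bdrv_pwrite_sync(bs, field_offset, &le_value, sizeof(le_value));
}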
2929
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00002930static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
Stefan Hajnocziab185922011-11-17 13:40:31 +00002931 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2932{
2933 /* Perform I/O through a temporary buffer so that users who scribble over
2934 * their read buffer while the operation is in progress do not end up
2935 * modifying the image file. This is critical for zero-copy guest I/O
2936 * where anything might happen inside guest memory.
2937 */
2938 void *bounce_buffer;
2939
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00002940 BlockDriver *drv = bs->drv;
Stefan Hajnocziab185922011-11-17 13:40:31 +00002941 struct iovec iov;
2942 QEMUIOVector bounce_qiov;
2943 int64_t cluster_sector_num;
2944 int cluster_nb_sectors;
2945 size_t skip_bytes;
2946 int ret;
2947
2948 /* Cover entire cluster so no additional backing file I/O is required when
 2949     * allocating the cluster in the image file.
2950 */
Paolo Bonzini343bded2013-01-21 17:09:42 +01002951 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
2952 &cluster_sector_num, &cluster_nb_sectors);
Stefan Hajnocziab185922011-11-17 13:40:31 +00002953
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00002954 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
2955 cluster_sector_num, cluster_nb_sectors);
Stefan Hajnocziab185922011-11-17 13:40:31 +00002956
2957 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
Kevin Wolf857d4f42014-05-20 13:16:51 +02002958 iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
2959 if (bounce_buffer == NULL) {
2960 ret = -ENOMEM;
2961 goto err;
2962 }
2963
Stefan Hajnocziab185922011-11-17 13:40:31 +00002964 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
2965
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00002966 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
2967 &bounce_qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00002968 if (ret < 0) {
2969 goto err;
2970 }
2971
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00002972 if (drv->bdrv_co_write_zeroes &&
2973 buffer_is_zero(bounce_buffer, iov.iov_len)) {
Kevin Wolf621f0582012-03-20 15:12:58 +01002974 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02002975 cluster_nb_sectors, 0);
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00002976 } else {
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02002977 /* This does not change the data on the disk, it is not necessary
2978 * to flush even in cache=writethrough mode.
2979 */
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00002980 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
Stefan Hajnocziab185922011-11-17 13:40:31 +00002981 &bounce_qiov);
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00002982 }
2983
Stefan Hajnocziab185922011-11-17 13:40:31 +00002984 if (ret < 0) {
2985 /* It might be okay to ignore write errors for guest requests. If this
2986 * is a deliberate copy-on-read then we don't want to ignore the error.
2987 * Simply report it in all cases.
2988 */
2989 goto err;
2990 }
2991
2992 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
Michael Tokarev03396142012-06-07 20:17:55 +04002993 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
2994 nb_sectors * BDRV_SECTOR_SIZE);
Stefan Hajnocziab185922011-11-17 13:40:31 +00002995
2996err:
2997 qemu_vfree(bounce_buffer);
2998 return ret;
2999}
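/* Worked example (illustrative, assuming a 64 KiB cluster, i.e. 128
 * sectors): a copy-on-read of sectors [130, 135) is widened by
 * bdrv_round_to_clusters() to [128, 256) so that the CoR write allocates
 * whole clusters.  skip_bytes = (130 - 128) * BDRV_SECTOR_SIZE = 1024 then
 * selects the slice of the bounce buffer that is copied back into the
 * guest's qiov. */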
3000
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003001/*
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003002 * Forwards an already correctly aligned request to the BlockDriver. This
3003 * handles copy on read and zeroing after EOF; any other features must be
3004 * implemented by the caller.
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003005 */
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003006static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
Kevin Wolf65afd212013-12-03 14:55:55 +01003007 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
Kevin Wolfec746e12013-12-04 12:13:10 +01003008 int64_t align, QEMUIOVector *qiov, int flags)
Kevin Wolfda1fa912011-07-14 17:27:13 +02003009{
3010 BlockDriver *drv = bs->drv;
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00003011 int ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003012
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003013 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3014 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003015
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003016 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3017 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
Kevin Wolf8eb029c2014-07-01 16:09:54 +02003018 assert(!qiov || bytes == qiov->size);
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003019
3020 /* Handle Copy on Read and associated serialisation */
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003021 if (flags & BDRV_REQ_COPY_ON_READ) {
Kevin Wolf73271452013-12-04 17:08:50 +01003022 /* If we touch the same cluster it counts as an overlap. This
3023 * guarantees that allocating writes will be serialized and not race
3024 * with each other for the same cluster. For example, in copy-on-read
3025 * it ensures that the CoR read and write operations are atomic and
3026 * guest writes cannot interleave between them. */
3027 mark_request_serialising(req, bdrv_get_cluster_size(bs));
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003028 }
3029
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01003030 wait_serialising_requests(req);
Stefan Hajnoczif4658282011-11-17 13:40:29 +00003031
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003032 if (flags & BDRV_REQ_COPY_ON_READ) {
Stefan Hajnocziab185922011-11-17 13:40:31 +00003033 int pnum;
3034
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02003035 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
Stefan Hajnocziab185922011-11-17 13:40:31 +00003036 if (ret < 0) {
3037 goto out;
3038 }
3039
3040 if (!ret || pnum != nb_sectors) {
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003041 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00003042 goto out;
3043 }
3044 }
3045
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003046 /* Forward the request to the BlockDriver */
MORITA Kazutaka893a8f62013-08-06 09:53:40 +08003047 if (!(bs->zero_beyond_eof && bs->growable)) {
3048 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3049 } else {
 3050        /* Read zeroes after EOF of growable BDSes */
Markus Armbruster40490822014-06-26 13:23:19 +02003051 int64_t total_sectors, max_nb_sectors;
MORITA Kazutaka893a8f62013-08-06 09:53:40 +08003052
Markus Armbruster40490822014-06-26 13:23:19 +02003053 total_sectors = bdrv_nb_sectors(bs);
3054 if (total_sectors < 0) {
3055 ret = total_sectors;
MORITA Kazutaka893a8f62013-08-06 09:53:40 +08003056 goto out;
3057 }
3058
Kevin Wolf5f5bcd82014-02-07 16:00:09 +01003059 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3060 align >> BDRV_SECTOR_BITS);
Paolo Bonzinie012b782014-12-17 16:09:59 +01003061 if (nb_sectors < max_nb_sectors) {
3062 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3063 } else if (max_nb_sectors > 0) {
Kevin Wolf33f461e2014-07-03 13:21:24 +02003064 QEMUIOVector local_qiov;
Kevin Wolf33f461e2014-07-03 13:21:24 +02003065
3066 qemu_iovec_init(&local_qiov, qiov->niov);
3067 qemu_iovec_concat(&local_qiov, qiov, 0,
Paolo Bonzinie012b782014-12-17 16:09:59 +01003068 max_nb_sectors * BDRV_SECTOR_SIZE);
Kevin Wolf33f461e2014-07-03 13:21:24 +02003069
Paolo Bonzinie012b782014-12-17 16:09:59 +01003070 ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
Kevin Wolf33f461e2014-07-03 13:21:24 +02003071 &local_qiov);
3072
3073 qemu_iovec_destroy(&local_qiov);
MORITA Kazutaka893a8f62013-08-06 09:53:40 +08003074 } else {
3075 ret = 0;
3076 }
3077
3078 /* Reading beyond end of file is supposed to produce zeroes */
3079 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3080 uint64_t offset = MAX(0, total_sectors - sector_num);
3081 uint64_t bytes = (sector_num + nb_sectors - offset) *
3082 BDRV_SECTOR_SIZE;
3083 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3084 }
3085 }
Stefan Hajnocziab185922011-11-17 13:40:31 +00003086
3087out:
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00003088 return ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003089}
3090
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003091/*
3092 * Handle a read request in coroutine context
3093 */
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003094static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3095 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003096 BdrvRequestFlags flags)
3097{
3098 BlockDriver *drv = bs->drv;
Kevin Wolf65afd212013-12-03 14:55:55 +01003099 BdrvTrackedRequest req;
3100
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003101 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3102 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3103 uint8_t *head_buf = NULL;
3104 uint8_t *tail_buf = NULL;
3105 QEMUIOVector local_qiov;
3106 bool use_local_qiov = false;
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003107 int ret;
3108
3109 if (!drv) {
3110 return -ENOMEDIUM;
3111 }
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003112 if (bdrv_check_byte_request(bs, offset, bytes)) {
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003113 return -EIO;
3114 }
3115
3116 if (bs->copy_on_read) {
3117 flags |= BDRV_REQ_COPY_ON_READ;
3118 }
3119
3120 /* throttling disk I/O */
3121 if (bs->io_limits_enabled) {
Kevin Wolfd5103582014-01-16 13:29:10 +01003122 bdrv_io_limits_intercept(bs, bytes, false);
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003123 }
3124
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003125 /* Align read if necessary by padding qiov */
3126 if (offset & (align - 1)) {
3127 head_buf = qemu_blockalign(bs, align);
3128 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3129 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3130 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3131 use_local_qiov = true;
3132
3133 bytes += offset & (align - 1);
3134 offset = offset & ~(align - 1);
3135 }
3136
3137 if ((offset + bytes) & (align - 1)) {
3138 if (!use_local_qiov) {
3139 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3140 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3141 use_local_qiov = true;
3142 }
3143 tail_buf = qemu_blockalign(bs, align);
3144 qemu_iovec_add(&local_qiov, tail_buf,
3145 align - ((offset + bytes) & (align - 1)));
3146
3147 bytes = ROUND_UP(bytes, align);
3148 }
3149
Kevin Wolf65afd212013-12-03 14:55:55 +01003150 tracked_request_begin(&req, bs, offset, bytes, false);
Kevin Wolfec746e12013-12-04 12:13:10 +01003151 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003152 use_local_qiov ? &local_qiov : qiov,
3153 flags);
Kevin Wolf65afd212013-12-03 14:55:55 +01003154 tracked_request_end(&req);
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003155
3156 if (use_local_qiov) {
3157 qemu_iovec_destroy(&local_qiov);
3158 qemu_vfree(head_buf);
3159 qemu_vfree(tail_buf);
3160 }
3161
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003162 return ret;
3163}
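/* Worked example (illustrative, assuming request_alignment = 4096): a read
 * of bytes [5120, 6656) is widened to the aligned range [4096, 8192).  The
 * padded local_qiov consists of 1024 bytes of head_buf, the caller's 1536
 * bytes, and 1536 bytes of tail_buf; only the middle part reaches the
 * caller's qiov, and the scratch buffers are freed afterwards. */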
3164
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003165static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3166 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3167 BdrvRequestFlags flags)
3168{
3169 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3170 return -EINVAL;
3171 }
3172
3173 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3174 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3175}
3176
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003177int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
Kevin Wolfda1fa912011-07-14 17:27:13 +02003178 int nb_sectors, QEMUIOVector *qiov)
3179{
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003180 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
Kevin Wolfda1fa912011-07-14 17:27:13 +02003181
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003182 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3183}
3184
3185int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3186 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3187{
3188 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3189
3190 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3191 BDRV_REQ_COPY_ON_READ);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003192}
3193
Peter Lievenc31cb702013-10-24 12:06:58 +02003194/* if no limit is specified in the BlockLimits use a default
3195 * of 32768 512-byte sectors (16 MiB) per request.
3196 */
3197#define MAX_WRITE_ZEROES_DEFAULT 32768
3198
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003199static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003200 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003201{
3202 BlockDriver *drv = bs->drv;
3203 QEMUIOVector qiov;
Peter Lievenc31cb702013-10-24 12:06:58 +02003204 struct iovec iov = {0};
3205 int ret = 0;
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003206
Peter Lievenc31cb702013-10-24 12:06:58 +02003207 int max_write_zeroes = bs->bl.max_write_zeroes ?
3208 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
Kevin Wolf621f0582012-03-20 15:12:58 +01003209
Peter Lievenc31cb702013-10-24 12:06:58 +02003210 while (nb_sectors > 0 && !ret) {
3211 int num = nb_sectors;
3212
Paolo Bonzinib8d71c02013-11-22 13:39:48 +01003213 /* Align request. Block drivers can expect the "bulk" of the request
3214 * to be aligned.
3215 */
3216 if (bs->bl.write_zeroes_alignment
3217 && num > bs->bl.write_zeroes_alignment) {
3218 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3219 /* Make a small request up to the first aligned sector. */
Peter Lievenc31cb702013-10-24 12:06:58 +02003220 num = bs->bl.write_zeroes_alignment;
Paolo Bonzinib8d71c02013-11-22 13:39:48 +01003221 num -= sector_num % bs->bl.write_zeroes_alignment;
3222 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3223 /* Shorten the request to the last aligned sector. num cannot
3224 * underflow because num > bs->bl.write_zeroes_alignment.
3225 */
3226 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
Peter Lievenc31cb702013-10-24 12:06:58 +02003227 }
Kevin Wolf621f0582012-03-20 15:12:58 +01003228 }
Peter Lievenc31cb702013-10-24 12:06:58 +02003229
3230 /* limit request size */
3231 if (num > max_write_zeroes) {
3232 num = max_write_zeroes;
3233 }
3234
3235 ret = -ENOTSUP;
3236 /* First try the efficient write zeroes operation */
3237 if (drv->bdrv_co_write_zeroes) {
3238 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3239 }
3240
3241 if (ret == -ENOTSUP) {
3242 /* Fall back to bounce buffer if write zeroes is unsupported */
3243 iov.iov_len = num * BDRV_SECTOR_SIZE;
3244 if (iov.iov_base == NULL) {
Kevin Wolf857d4f42014-05-20 13:16:51 +02003245 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
3246 if (iov.iov_base == NULL) {
3247 ret = -ENOMEM;
3248 goto fail;
3249 }
Paolo Bonzinib8d71c02013-11-22 13:39:48 +01003250 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
Peter Lievenc31cb702013-10-24 12:06:58 +02003251 }
3252 qemu_iovec_init_external(&qiov, &iov, 1);
3253
3254 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
Paolo Bonzinib8d71c02013-11-22 13:39:48 +01003255
 3256            /* Keep the bounce buffer around if it is big enough for all
 3257             * future requests.
3258 */
3259 if (num < max_write_zeroes) {
3260 qemu_vfree(iov.iov_base);
3261 iov.iov_base = NULL;
3262 }
Peter Lievenc31cb702013-10-24 12:06:58 +02003263 }
3264
3265 sector_num += num;
3266 nb_sectors -= num;
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003267 }
3268
Kevin Wolf857d4f42014-05-20 13:16:51 +02003269fail:
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003270 qemu_vfree(iov.iov_base);
3271 return ret;
3272}
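/* Worked example (illustrative, assuming write_zeroes_alignment = 8 and a
 * large max_write_zeroes): a request for sectors [3, 30) is split into a
 * 5-sector head (3..8), one aligned 16-sector chunk (8..24) and a 6-sector
 * tail (24..30), so the driver sees the bulk of the request on its
 * preferred boundaries. */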
3273
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003274/*
Kevin Wolfb404f722013-12-03 14:02:23 +01003275 * Forwards an already correctly aligned write request to the BlockDriver.
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003276 */
Kevin Wolfb404f722013-12-03 14:02:23 +01003277static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
Kevin Wolf65afd212013-12-03 14:55:55 +01003278 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3279 QEMUIOVector *qiov, int flags)
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003280{
3281 BlockDriver *drv = bs->drv;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01003282 bool waited;
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01003283 int ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003284
Kevin Wolfb404f722013-12-03 14:02:23 +01003285 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3286 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003287
Kevin Wolfb404f722013-12-03 14:02:23 +01003288 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3289 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
Kevin Wolf8eb029c2014-07-01 16:09:54 +02003290 assert(!qiov || bytes == qiov->size);
Benoît Canetcc0681c2013-09-02 14:14:39 +02003291
Kevin Wolf28de2dc2014-01-14 11:41:35 +01003292 waited = wait_serialising_requests(req);
3293 assert(!waited || !req->serialising);
Kevin Wolfaf91f9a2014-02-07 15:35:56 +01003294 assert(req->overlap_offset <= offset);
3295 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
Kevin Wolf244eade2013-12-03 14:30:44 +01003296
Kevin Wolf65afd212013-12-03 14:55:55 +01003297 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
Stefan Hajnoczid616b222013-06-24 17:13:10 +02003298
Peter Lieven465bee12014-05-18 00:58:19 +02003299 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3300 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3301 qemu_iovec_is_zero(qiov)) {
3302 flags |= BDRV_REQ_ZERO_WRITE;
3303 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3304 flags |= BDRV_REQ_MAY_UNMAP;
3305 }
3306 }
3307
Stefan Hajnoczid616b222013-06-24 17:13:10 +02003308 if (ret < 0) {
3309 /* Do nothing, write notifier decided to fail this request */
3310 } else if (flags & BDRV_REQ_ZERO_WRITE) {
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003311 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003312 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003313 } else {
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003314 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003315 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3316 }
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003317 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01003318
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02003319 if (ret == 0 && !bs->enable_write_cache) {
3320 ret = bdrv_co_flush(bs);
3321 }
3322
Fam Zhenge4654d22013-11-13 18:29:43 +08003323 bdrv_set_dirty(bs, sector_num, nb_sectors);
Kevin Wolfda1fa912011-07-14 17:27:13 +02003324
Benoît Canet5366d0c2014-09-05 15:46:18 +02003325 block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);
Benoît Canet5e5a94b2014-09-05 15:46:16 +02003326
Paolo Bonzinidf2a6f22013-09-04 19:00:21 +02003327 if (bs->growable && ret >= 0) {
3328 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3329 }
Kevin Wolfda1fa912011-07-14 17:27:13 +02003330
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01003331 return ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003332}
3333
Kevin Wolfb404f722013-12-03 14:02:23 +01003334/*
3335 * Handle a write request in coroutine context
3336 */
Kevin Wolf66015532013-12-03 14:40:18 +01003337static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3338 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
Kevin Wolfb404f722013-12-03 14:02:23 +01003339 BdrvRequestFlags flags)
3340{
Kevin Wolf65afd212013-12-03 14:55:55 +01003341 BdrvTrackedRequest req;
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003342 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3343 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3344 uint8_t *head_buf = NULL;
3345 uint8_t *tail_buf = NULL;
3346 QEMUIOVector local_qiov;
3347 bool use_local_qiov = false;
Kevin Wolfb404f722013-12-03 14:02:23 +01003348 int ret;
3349
3350 if (!bs->drv) {
3351 return -ENOMEDIUM;
3352 }
3353 if (bs->read_only) {
3354 return -EACCES;
3355 }
Kevin Wolf66015532013-12-03 14:40:18 +01003356 if (bdrv_check_byte_request(bs, offset, bytes)) {
Kevin Wolfb404f722013-12-03 14:02:23 +01003357 return -EIO;
3358 }
3359
Kevin Wolfb404f722013-12-03 14:02:23 +01003360 /* throttling disk I/O */
3361 if (bs->io_limits_enabled) {
Kevin Wolfd5103582014-01-16 13:29:10 +01003362 bdrv_io_limits_intercept(bs, bytes, true);
Kevin Wolfb404f722013-12-03 14:02:23 +01003363 }
3364
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003365 /*
3366 * Align write if necessary by performing a read-modify-write cycle.
3367 * Pad qiov with the read parts and be sure to have a tracked request not
3368 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3369 */
Kevin Wolf65afd212013-12-03 14:55:55 +01003370 tracked_request_begin(&req, bs, offset, bytes, true);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003371
3372 if (offset & (align - 1)) {
3373 QEMUIOVector head_qiov;
3374 struct iovec head_iov;
3375
3376 mark_request_serialising(&req, align);
3377 wait_serialising_requests(&req);
3378
3379 head_buf = qemu_blockalign(bs, align);
3380 head_iov = (struct iovec) {
3381 .iov_base = head_buf,
3382 .iov_len = align,
3383 };
3384 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3385
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003386 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003387 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3388 align, &head_qiov, 0);
3389 if (ret < 0) {
3390 goto fail;
3391 }
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003392 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003393
3394 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3395 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3396 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3397 use_local_qiov = true;
3398
3399 bytes += offset & (align - 1);
3400 offset = offset & ~(align - 1);
3401 }
3402
3403 if ((offset + bytes) & (align - 1)) {
3404 QEMUIOVector tail_qiov;
3405 struct iovec tail_iov;
3406 size_t tail_bytes;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01003407 bool waited;
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003408
3409 mark_request_serialising(&req, align);
Kevin Wolf28de2dc2014-01-14 11:41:35 +01003410 waited = wait_serialising_requests(&req);
3411 assert(!waited || !use_local_qiov);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003412
3413 tail_buf = qemu_blockalign(bs, align);
3414 tail_iov = (struct iovec) {
3415 .iov_base = tail_buf,
3416 .iov_len = align,
3417 };
3418 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3419
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003420 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003421 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3422 align, &tail_qiov, 0);
3423 if (ret < 0) {
3424 goto fail;
3425 }
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003426 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003427
3428 if (!use_local_qiov) {
3429 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3430 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3431 use_local_qiov = true;
3432 }
3433
3434 tail_bytes = (offset + bytes) & (align - 1);
3435 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3436
3437 bytes = ROUND_UP(bytes, align);
3438 }
3439
3440 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3441 use_local_qiov ? &local_qiov : qiov,
3442 flags);
3443
3444fail:
Kevin Wolf65afd212013-12-03 14:55:55 +01003445 tracked_request_end(&req);
Kevin Wolfb404f722013-12-03 14:02:23 +01003446
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003447 if (use_local_qiov) {
3448 qemu_iovec_destroy(&local_qiov);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003449 }
Kevin Wolf99c4a852014-02-07 15:29:00 +01003450 qemu_vfree(head_buf);
3451 qemu_vfree(tail_buf);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003452
Kevin Wolfb404f722013-12-03 14:02:23 +01003453 return ret;
3454}
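/* Worked example (illustrative, assuming request_alignment = 4096): a
 * 4096-byte write at offset 5120 triggers a read-modify-write cycle.  The
 * head read fetches [4096, 8192) and the tail read fetches [8192, 12288);
 * the final aligned write covers [4096, 12288) and is assembled from 1024
 * bytes of head_buf, the caller's 4096 bytes, and 3072 bytes of tail_buf. */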
3455
Kevin Wolf66015532013-12-03 14:40:18 +01003456static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3457 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3458 BdrvRequestFlags flags)
3459{
3460 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3461 return -EINVAL;
3462 }
3463
3464 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3465 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3466}
3467
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003468int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3469 int nb_sectors, QEMUIOVector *qiov)
3470{
3471 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3472
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003473 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3474}
3475
3476int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003477 int64_t sector_num, int nb_sectors,
3478 BdrvRequestFlags flags)
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003479{
Paolo Bonzini94d6ff22013-11-22 13:39:45 +01003480 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003481
Peter Lievend32f35c2013-10-24 12:06:52 +02003482 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3483 flags &= ~BDRV_REQ_MAY_UNMAP;
3484 }
3485
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003486 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003487 BDRV_REQ_ZERO_WRITE | flags);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003488}
3489
bellard83f64092006-08-01 16:21:11 +00003490/**
bellard83f64092006-08-01 16:21:11 +00003491 * Truncate file to 'offset' bytes (needed only for file protocols)
3492 */
3493int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3494{
3495 BlockDriver *drv = bs->drv;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003496 int ret;
bellard83f64092006-08-01 16:21:11 +00003497 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00003498 return -ENOMEDIUM;
bellard83f64092006-08-01 16:21:11 +00003499 if (!drv->bdrv_truncate)
3500 return -ENOTSUP;
Naphtali Sprei59f26892009-10-26 16:25:16 +02003501 if (bs->read_only)
3502 return -EACCES;
Jeff Cody9c75e162014-06-25 16:55:30 -04003503
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003504 ret = drv->bdrv_truncate(bs, offset);
3505 if (ret == 0) {
3506 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
Markus Armbrustera7f53e22014-10-07 13:59:25 +02003507 if (bs->blk) {
3508 blk_dev_resize_cb(bs->blk);
3509 }
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003510 }
3511 return ret;
bellard83f64092006-08-01 16:21:11 +00003512}
3513
3514/**
Fam Zheng4a1d5e12011-07-12 19:56:39 +08003515 * Length of an allocated file in bytes. Sparse files are counted by actual
3516 * allocated space. Return < 0 if error or unknown.
3517 */
3518int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3519{
3520 BlockDriver *drv = bs->drv;
3521 if (!drv) {
3522 return -ENOMEDIUM;
3523 }
3524 if (drv->bdrv_get_allocated_file_size) {
3525 return drv->bdrv_get_allocated_file_size(bs);
3526 }
3527 if (bs->file) {
3528 return bdrv_get_allocated_file_size(bs->file);
3529 }
3530 return -ENOTSUP;
3531}
3532
3533/**
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003534 * Return number of sectors on success, -errno on error.
bellard83f64092006-08-01 16:21:11 +00003535 */
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003536int64_t bdrv_nb_sectors(BlockDriverState *bs)
bellard83f64092006-08-01 16:21:11 +00003537{
3538 BlockDriver *drv = bs->drv;
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003539
bellard83f64092006-08-01 16:21:11 +00003540 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00003541 return -ENOMEDIUM;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003542
Kevin Wolfb94a2612013-10-29 12:18:58 +01003543 if (drv->has_variable_length) {
3544 int ret = refresh_total_sectors(bs, bs->total_sectors);
3545 if (ret < 0) {
3546 return ret;
Stefan Hajnoczi46a4e4e2011-03-29 20:04:41 +01003547 }
bellard83f64092006-08-01 16:21:11 +00003548 }
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003549 return bs->total_sectors;
3550}
3551
3552/**
3553 * Return length in bytes on success, -errno on error.
3554 * The length is always a multiple of BDRV_SECTOR_SIZE.
3555 */
3556int64_t bdrv_getlength(BlockDriverState *bs)
3557{
3558 int64_t ret = bdrv_nb_sectors(bs);
3559
3560 return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
bellardfc01f7e2003-06-30 10:03:06 +00003561}
3562
bellard19cb3732006-08-19 11:45:59 +00003563/* return 0 as number of sectors if no device present or error */
ths96b8f132007-12-17 01:35:20 +00003564void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
bellardfc01f7e2003-06-30 10:03:06 +00003565{
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003566 int64_t nb_sectors = bdrv_nb_sectors(bs);
3567
3568 *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
bellardfc01f7e2003-06-30 10:03:06 +00003569}
bellardcf989512004-02-16 21:56:36 +00003570
Paolo Bonziniff06f5f2012-09-28 17:22:54 +02003571void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3572 BlockdevOnError on_write_error)
Markus Armbrusterabd7f682010-06-02 18:55:17 +02003573{
3574 bs->on_read_error = on_read_error;
3575 bs->on_write_error = on_write_error;
3576}
3577
Paolo Bonzini1ceee0d2012-09-28 17:22:56 +02003578BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
Markus Armbrusterabd7f682010-06-02 18:55:17 +02003579{
3580 return is_read ? bs->on_read_error : bs->on_write_error;
3581}
3582
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003583BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3584{
3585 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3586
3587 switch (on_err) {
3588 case BLOCKDEV_ON_ERROR_ENOSPC:
Wenchao Xiaa5895692014-06-18 08:43:30 +02003589 return (error == ENOSPC) ?
3590 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003591 case BLOCKDEV_ON_ERROR_STOP:
Wenchao Xiaa5895692014-06-18 08:43:30 +02003592 return BLOCK_ERROR_ACTION_STOP;
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003593 case BLOCKDEV_ON_ERROR_REPORT:
Wenchao Xiaa5895692014-06-18 08:43:30 +02003594 return BLOCK_ERROR_ACTION_REPORT;
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003595 case BLOCKDEV_ON_ERROR_IGNORE:
Wenchao Xiaa5895692014-06-18 08:43:30 +02003596 return BLOCK_ERROR_ACTION_IGNORE;
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003597 default:
3598 abort();
3599 }
3600}
3601
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003602static void send_qmp_error_event(BlockDriverState *bs,
3603 BlockErrorAction action,
3604 bool is_read, int error)
3605{
Peter Maydell573742a2014-10-10 20:33:03 +01003606 IoOperationType optype;
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003607
Peter Maydell573742a2014-10-10 20:33:03 +01003608 optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
3609 qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003610 bdrv_iostatus_is_enabled(bs),
Luiz Capitulino624ff572014-09-11 10:25:48 -04003611 error == ENOSPC, strerror(error),
3612 &error_abort);
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003613}
3614
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003615/* This is done by device models because, while the block layer knows
3616 * about the error, it does not know whether an operation comes from
3617 * the device or the block layer (from a job, for example).
3618 */
3619void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3620 bool is_read, int error)
3621{
3622 assert(error >= 0);
Paolo Bonzini2bd3bce2014-06-05 14:53:59 +02003623
Wenchao Xiaa5895692014-06-18 08:43:30 +02003624 if (action == BLOCK_ERROR_ACTION_STOP) {
Paolo Bonzini2bd3bce2014-06-05 14:53:59 +02003625 /* First set the iostatus, so that "info block" returns an iostatus
3626 * that matches the events raised so far (an additional error iostatus
3627 * is fine, but not a lost one).
3628 */
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003629 bdrv_iostatus_set_err(bs, error);
Paolo Bonzini2bd3bce2014-06-05 14:53:59 +02003630
3631 /* Then raise the request to stop the VM and the event.
3632 * qemu_system_vmstop_request_prepare has two effects. First,
3633 * it ensures that the STOP event always comes after the
3634 * BLOCK_IO_ERROR event. Second, it ensures that even if management
3635 * can observe the STOP event and do a "cont" before the STOP
3636 * event is issued, the VM will not stop. In this case, vm_start()
3637 * also ensures that the STOP/RESUME pair of events is emitted.
3638 */
3639 qemu_system_vmstop_request_prepare();
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003640 send_qmp_error_event(bs, action, is_read, error);
Paolo Bonzini2bd3bce2014-06-05 14:53:59 +02003641 qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
3642 } else {
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003643 send_qmp_error_event(bs, action, is_read, error);
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003644 }
3645}
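/* Illustrative sketch (assumption; simplified relative to real device
 * models, which also queue the failed request so it can be retried after a
 * 'cont'): a device model first asks the block layer which action to take
 * for a failed request and then reports it, so that the iostatus, the
 * BLOCK_IO_ERROR event and the VM stop request stay consistent. */
static BlockErrorAction example_handle_rw_error(BlockDriverState *bs,
                                                bool is_read, int error)
{
    BlockErrorAction action = bdrv_get_error_action(bs, is_read, error);

    bdrv_error_action(bs, action, is_read, error);
    return action;
}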
3646
bellardb3380822004-03-14 21:38:54 +00003647int bdrv_is_read_only(BlockDriverState *bs)
3648{
3649 return bs->read_only;
3650}
3651
ths985a03b2007-12-24 16:10:43 +00003652int bdrv_is_sg(BlockDriverState *bs)
3653{
3654 return bs->sg;
3655}
3656
Christoph Hellwige900a7b2009-09-04 19:01:15 +02003657int bdrv_enable_write_cache(BlockDriverState *bs)
3658{
3659 return bs->enable_write_cache;
3660}
3661
Paolo Bonzini425b0142012-06-06 00:04:52 +02003662void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3663{
3664 bs->enable_write_cache = wce;
Jeff Cody55b110f2012-09-20 15:13:18 -04003665
3666 /* so a reopen() will preserve wce */
3667 if (wce) {
3668 bs->open_flags |= BDRV_O_CACHE_WB;
3669 } else {
3670 bs->open_flags &= ~BDRV_O_CACHE_WB;
3671 }
Paolo Bonzini425b0142012-06-06 00:04:52 +02003672}
3673
bellardea2384d2004-08-01 21:59:26 +00003674int bdrv_is_encrypted(BlockDriverState *bs)
3675{
3676 if (bs->backing_hd && bs->backing_hd->encrypted)
3677 return 1;
3678 return bs->encrypted;
3679}
3680
aliguoric0f4ce72009-03-05 23:01:01 +00003681int bdrv_key_required(BlockDriverState *bs)
3682{
3683 BlockDriverState *backing_hd = bs->backing_hd;
3684
3685 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3686 return 1;
3687 return (bs->encrypted && !bs->valid_key);
3688}
3689
bellardea2384d2004-08-01 21:59:26 +00003690int bdrv_set_key(BlockDriverState *bs, const char *key)
3691{
3692 int ret;
3693 if (bs->backing_hd && bs->backing_hd->encrypted) {
3694 ret = bdrv_set_key(bs->backing_hd, key);
3695 if (ret < 0)
3696 return ret;
3697 if (!bs->encrypted)
3698 return 0;
3699 }
Shahar Havivifd04a2a2010-03-06 00:26:13 +02003700 if (!bs->encrypted) {
3701 return -EINVAL;
3702 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3703 return -ENOMEDIUM;
3704 }
aliguoric0f4ce72009-03-05 23:01:01 +00003705 ret = bs->drv->bdrv_set_key(bs, key);
aliguoribb5fc202009-03-05 23:01:15 +00003706 if (ret < 0) {
3707 bs->valid_key = 0;
3708 } else if (!bs->valid_key) {
3709 bs->valid_key = 1;
Markus Armbrustera7f53e22014-10-07 13:59:25 +02003710 if (bs->blk) {
3711 /* call the change callback now, we skipped it on open */
3712 blk_dev_change_media_cb(bs->blk, true);
3713 }
aliguoribb5fc202009-03-05 23:01:15 +00003714 }
aliguoric0f4ce72009-03-05 23:01:01 +00003715 return ret;
bellardea2384d2004-08-01 21:59:26 +00003716}
3717
Markus Armbrusterf8d6bba2012-06-13 10:11:48 +02003718const char *bdrv_get_format_name(BlockDriverState *bs)
bellardea2384d2004-08-01 21:59:26 +00003719{
Markus Armbrusterf8d6bba2012-06-13 10:11:48 +02003720 return bs->drv ? bs->drv->format_name : NULL;
bellardea2384d2004-08-01 21:59:26 +00003721}
3722
Stefan Hajnocziada42402014-08-27 12:08:55 +01003723static int qsort_strcmp(const void *a, const void *b)
3724{
3725 return strcmp(a, b);
3726}
3727
ths5fafdf22007-09-16 21:08:06 +00003728void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
bellardea2384d2004-08-01 21:59:26 +00003729 void *opaque)
3730{
3731 BlockDriver *drv;
Jeff Codye855e4f2014-04-28 18:29:54 -04003732 int count = 0;
Stefan Hajnocziada42402014-08-27 12:08:55 +01003733 int i;
Jeff Codye855e4f2014-04-28 18:29:54 -04003734 const char **formats = NULL;
bellardea2384d2004-08-01 21:59:26 +00003735
Stefan Hajnoczi8a22f022010-04-13 10:29:33 +01003736 QLIST_FOREACH(drv, &bdrv_drivers, list) {
Jeff Codye855e4f2014-04-28 18:29:54 -04003737 if (drv->format_name) {
3738 bool found = false;
3739 int i = count;
3740 while (formats && i && !found) {
3741 found = !strcmp(formats[--i], drv->format_name);
3742 }
3743
3744 if (!found) {
Markus Armbruster5839e532014-08-19 10:31:08 +02003745 formats = g_renew(const char *, formats, count + 1);
Jeff Codye855e4f2014-04-28 18:29:54 -04003746 formats[count++] = drv->format_name;
Jeff Codye855e4f2014-04-28 18:29:54 -04003747 }
3748 }
bellardea2384d2004-08-01 21:59:26 +00003749 }
Stefan Hajnocziada42402014-08-27 12:08:55 +01003750
3751 qsort(formats, count, sizeof(formats[0]), qsort_strcmp);
3752
3753 for (i = 0; i < count; i++) {
3754 it(opaque, formats[i]);
3755 }
3756
Jeff Codye855e4f2014-04-28 18:29:54 -04003757 g_free(formats);
bellardea2384d2004-08-01 21:59:26 +00003758}
3759
Benoît Canetdc364f42014-01-23 21:31:32 +01003760/* Find the BlockDriverState belonging to the block backend with this name */
Markus Armbruster7f06d472014-10-07 13:59:12 +02003761/* TODO convert callers to blk_by_name(), then remove */
bellardb3380822004-03-14 21:38:54 +00003762BlockDriverState *bdrv_find(const char *name)
3763{
Markus Armbruster7f06d472014-10-07 13:59:12 +02003764 BlockBackend *blk = blk_by_name(name);
bellardb3380822004-03-14 21:38:54 +00003765
Markus Armbruster7f06d472014-10-07 13:59:12 +02003766 return blk ? blk_bs(blk) : NULL;
bellardb3380822004-03-14 21:38:54 +00003767}
3768
Benoît Canetdc364f42014-01-23 21:31:32 +01003769/* Find a node in the BDS graph by its node name */
3770BlockDriverState *bdrv_find_node(const char *node_name)
3771{
3772 BlockDriverState *bs;
3773
3774 assert(node_name);
3775
3776 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3777 if (!strcmp(node_name, bs->node_name)) {
3778 return bs;
3779 }
3780 }
3781 return NULL;
3782}
3783
Benoît Canetc13163f2014-01-23 21:31:34 +01003784/* Put this QMP function here so it can access the static graph_bdrv_states. */
3785BlockDeviceInfoList *bdrv_named_nodes_list(void)
3786{
3787 BlockDeviceInfoList *list, *entry;
3788 BlockDriverState *bs;
3789
3790 list = NULL;
3791 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3792 entry = g_malloc0(sizeof(*entry));
3793 entry->value = bdrv_block_device_info(bs);
3794 entry->next = list;
3795 list = entry;
3796 }
3797
3798 return list;
3799}
3800
Benoît Canet12d3ba82014-01-23 21:31:35 +01003801BlockDriverState *bdrv_lookup_bs(const char *device,
3802 const char *node_name,
3803 Error **errp)
3804{
Markus Armbruster7f06d472014-10-07 13:59:12 +02003805 BlockBackend *blk;
3806 BlockDriverState *bs;
Benoît Canet12d3ba82014-01-23 21:31:35 +01003807
Benoît Canet12d3ba82014-01-23 21:31:35 +01003808 if (device) {
Markus Armbruster7f06d472014-10-07 13:59:12 +02003809 blk = blk_by_name(device);
Benoît Canet12d3ba82014-01-23 21:31:35 +01003810
Markus Armbruster7f06d472014-10-07 13:59:12 +02003811 if (blk) {
3812 return blk_bs(blk);
Benoît Canet12d3ba82014-01-23 21:31:35 +01003813 }
Benoît Canet12d3ba82014-01-23 21:31:35 +01003814 }
3815
Benoît Canetdd67fa52014-02-12 17:15:06 +01003816 if (node_name) {
3817 bs = bdrv_find_node(node_name);
Benoît Canet12d3ba82014-01-23 21:31:35 +01003818
Benoît Canetdd67fa52014-02-12 17:15:06 +01003819 if (bs) {
3820 return bs;
3821 }
Benoît Canet12d3ba82014-01-23 21:31:35 +01003822 }
3823
Benoît Canetdd67fa52014-02-12 17:15:06 +01003824 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3825 device ? device : "",
3826 node_name ? node_name : "");
3827 return NULL;
Benoît Canet12d3ba82014-01-23 21:31:35 +01003828}
3829
Jeff Cody5a6684d2014-06-25 15:40:09 -04003830/* If 'base' is in the same chain as 'top', return true. Otherwise,
3831 * return false. If either argument is NULL, return false. */
3832bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base)
3833{
3834 while (top && top != base) {
3835 top = top->backing_hd;
3836 }
3837
3838 return top != NULL;
3839}
3840
Fam Zheng04df7652014-10-31 11:32:54 +08003841BlockDriverState *bdrv_next_node(BlockDriverState *bs)
3842{
3843 if (!bs) {
3844 return QTAILQ_FIRST(&graph_bdrv_states);
3845 }
3846 return QTAILQ_NEXT(bs, node_list);
3847}
3848
Markus Armbruster2f399b02010-06-02 18:55:20 +02003849BlockDriverState *bdrv_next(BlockDriverState *bs)
3850{
3851 if (!bs) {
3852 return QTAILQ_FIRST(&bdrv_states);
3853 }
Benoît Canetdc364f42014-01-23 21:31:32 +01003854 return QTAILQ_NEXT(bs, device_list);
Markus Armbruster2f399b02010-06-02 18:55:20 +02003855}
3856
Fam Zheng20a9e772014-10-31 11:32:55 +08003857const char *bdrv_get_node_name(const BlockDriverState *bs)
3858{
3859 return bs->node_name;
3860}
3861
Markus Armbruster7f06d472014-10-07 13:59:12 +02003862/* TODO check what callers really want: bs->node_name or blk_name() */
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02003863const char *bdrv_get_device_name(const BlockDriverState *bs)
bellardea2384d2004-08-01 21:59:26 +00003864{
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02003865 return bs->blk ? blk_name(bs->blk) : "";
bellardea2384d2004-08-01 21:59:26 +00003866}
3867
Markus Armbrusterc8433282012-06-05 16:49:24 +02003868int bdrv_get_flags(BlockDriverState *bs)
3869{
3870 return bs->open_flags;
3871}
3872
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003873int bdrv_flush_all(void)
aliguoric6ca28d2008-10-06 13:55:43 +00003874{
3875 BlockDriverState *bs;
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003876 int result = 0;
aliguoric6ca28d2008-10-06 13:55:43 +00003877
Benoît Canetdc364f42014-01-23 21:31:32 +01003878 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02003879 AioContext *aio_context = bdrv_get_aio_context(bs);
3880 int ret;
3881
3882 aio_context_acquire(aio_context);
3883 ret = bdrv_flush(bs);
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003884 if (ret < 0 && !result) {
3885 result = ret;
3886 }
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02003887 aio_context_release(aio_context);
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01003888 }
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003889
3890 return result;
aliguoric6ca28d2008-10-06 13:55:43 +00003891}
3892
Peter Lieven3ac21622013-06-28 12:47:42 +02003893int bdrv_has_zero_init_1(BlockDriverState *bs)
3894{
3895 return 1;
3896}
3897
Kevin Wolff2feebb2010-04-14 17:30:35 +02003898int bdrv_has_zero_init(BlockDriverState *bs)
3899{
3900 assert(bs->drv);
3901
Paolo Bonzini11212d82013-09-04 19:00:27 +02003902 /* If BS is a copy on write image, it is initialized to
3903 the contents of the base image, which may not be zeroes. */
3904 if (bs->backing_hd) {
3905 return 0;
3906 }
Kevin Wolf336c1c12010-07-28 11:26:29 +02003907 if (bs->drv->bdrv_has_zero_init) {
3908 return bs->drv->bdrv_has_zero_init(bs);
Kevin Wolff2feebb2010-04-14 17:30:35 +02003909 }
3910
Peter Lieven3ac21622013-06-28 12:47:42 +02003911 /* safe default */
3912 return 0;
Kevin Wolff2feebb2010-04-14 17:30:35 +02003913}
3914
Peter Lieven4ce78692013-10-24 12:06:54 +02003915bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3916{
3917 BlockDriverInfo bdi;
3918
3919 if (bs->backing_hd) {
3920 return false;
3921 }
3922
3923 if (bdrv_get_info(bs, &bdi) == 0) {
3924 return bdi.unallocated_blocks_are_zero;
3925 }
3926
3927 return false;
3928}
3929
3930bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3931{
3932 BlockDriverInfo bdi;
3933
3934 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3935 return false;
3936 }
3937
3938 if (bdrv_get_info(bs, &bdi) == 0) {
3939 return bdi.can_write_zeroes_with_unmap;
3940 }
3941
3942 return false;
3943}
3944
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003945typedef struct BdrvCoGetBlockStatusData {
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00003946 BlockDriverState *bs;
Miroslav Rezaninab35b2bb2013-02-13 09:09:39 +01003947 BlockDriverState *base;
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00003948 int64_t sector_num;
3949 int nb_sectors;
3950 int *pnum;
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003951 int64_t ret;
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00003952 bool done;
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003953} BdrvCoGetBlockStatusData;
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00003954
thsf58c7b32008-06-05 21:53:49 +00003955/*
Fam Zheng705be722014-11-10 17:10:38 +08003956 * Returns the allocation status of the specified sectors.
3957 * Drivers not implementing the functionality are assumed to not support
3958 * backing files, hence all their sectors are reported as allocated.
thsf58c7b32008-06-05 21:53:49 +00003959 *
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003960 * If 'sector_num' is beyond the end of the disk image the return value is 0
3961 * and 'pnum' is set to 0.
3962 *
thsf58c7b32008-06-05 21:53:49 +00003963 * 'pnum' is set to the number of sectors (including and immediately following
3964 * the specified sector) that are known to be in the same
3965 * allocated/unallocated state.
3966 *
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003967 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3968 * beyond the end of the disk image it will be clamped.
thsf58c7b32008-06-05 21:53:49 +00003969 */
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003970static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3971 int64_t sector_num,
3972 int nb_sectors, int *pnum)
thsf58c7b32008-06-05 21:53:49 +00003973{
Markus Armbruster30a7f2f2014-06-26 13:23:20 +02003974 int64_t total_sectors;
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003975 int64_t n;
Paolo Bonzini5daa74a2013-09-04 19:00:38 +02003976 int64_t ret, ret2;
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003977
Markus Armbruster30a7f2f2014-06-26 13:23:20 +02003978 total_sectors = bdrv_nb_sectors(bs);
3979 if (total_sectors < 0) {
3980 return total_sectors;
Paolo Bonzini617ccb42013-09-04 19:00:23 +02003981 }
3982
Markus Armbruster30a7f2f2014-06-26 13:23:20 +02003983 if (sector_num >= total_sectors) {
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003984 *pnum = 0;
3985 return 0;
3986 }
3987
Markus Armbruster30a7f2f2014-06-26 13:23:20 +02003988 n = total_sectors - sector_num;
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003989 if (n < nb_sectors) {
3990 nb_sectors = n;
3991 }
3992
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003993 if (!bs->drv->bdrv_co_get_block_status) {
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003994 *pnum = nb_sectors;
Kevin Wolfe88ae222014-05-06 15:25:36 +02003995 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
Paolo Bonzini918e92d2013-09-04 19:00:37 +02003996 if (bs->drv->protocol_name) {
3997 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3998 }
3999 return ret;
thsf58c7b32008-06-05 21:53:49 +00004000 }
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00004001
Paolo Bonzini415b5b02013-09-04 19:00:31 +02004002 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
4003 if (ret < 0) {
Peter Lieven3e0a2332013-09-24 15:35:08 +02004004 *pnum = 0;
Paolo Bonzini415b5b02013-09-04 19:00:31 +02004005 return ret;
4006 }
4007
Peter Lieven92bc50a2013-10-08 14:43:14 +02004008 if (ret & BDRV_BLOCK_RAW) {
4009 assert(ret & BDRV_BLOCK_OFFSET_VALID);
4010 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
4011 *pnum, pnum);
4012 }
4013
Kevin Wolfe88ae222014-05-06 15:25:36 +02004014 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
4015 ret |= BDRV_BLOCK_ALLOCATED;
4016 }
4017
Peter Lievenc3d86882013-10-24 12:07:04 +02004018 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
4019 if (bdrv_unallocated_blocks_are_zero(bs)) {
Paolo Bonzinif0ad5712013-09-04 19:00:32 +02004020 ret |= BDRV_BLOCK_ZERO;
Peter Lieven1f9db222013-09-24 15:35:09 +02004021 } else if (bs->backing_hd) {
Paolo Bonzinif0ad5712013-09-04 19:00:32 +02004022 BlockDriverState *bs2 = bs->backing_hd;
Markus Armbruster30a7f2f2014-06-26 13:23:20 +02004023 int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
4024 if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
Paolo Bonzinif0ad5712013-09-04 19:00:32 +02004025 ret |= BDRV_BLOCK_ZERO;
4026 }
4027 }
Paolo Bonzini415b5b02013-09-04 19:00:31 +02004028 }
Paolo Bonzini5daa74a2013-09-04 19:00:38 +02004029
4030 if (bs->file &&
4031 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
4032 (ret & BDRV_BLOCK_OFFSET_VALID)) {
Max Reitz59c9a952014-10-22 17:00:15 +02004033 int file_pnum;
4034
Paolo Bonzini5daa74a2013-09-04 19:00:38 +02004035 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
Max Reitz59c9a952014-10-22 17:00:15 +02004036 *pnum, &file_pnum);
Paolo Bonzini5daa74a2013-09-04 19:00:38 +02004037 if (ret2 >= 0) {
4038 /* Ignore errors. This is just providing extra information, it
4039 * is useful but not necessary.
4040 */
Max Reitz59c9a952014-10-22 17:00:15 +02004041 if (!file_pnum) {
4042 /* !file_pnum indicates an offset at or beyond the EOF; it is
4043 * perfectly valid for the format block driver to point to such
4044 * offsets, so catch it and mark everything as zero */
4045 ret |= BDRV_BLOCK_ZERO;
4046 } else {
4047 /* Limit request to the range reported by the protocol driver */
4048 *pnum = file_pnum;
4049 ret |= (ret2 & BDRV_BLOCK_ZERO);
4050 }
Paolo Bonzini5daa74a2013-09-04 19:00:38 +02004051 }
4052 }
4053
Paolo Bonzini415b5b02013-09-04 19:00:31 +02004054 return ret;
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004055}
4056
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004057/* Coroutine wrapper for bdrv_get_block_status() */
4058static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004059{
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004060 BdrvCoGetBlockStatusData *data = opaque;
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004061 BlockDriverState *bs = data->bs;
4062
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004063 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4064 data->pnum);
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004065 data->done = true;
4066}
4067
4068/*
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004069 * Synchronous wrapper around bdrv_co_get_block_status().
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004070 *
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004071 * See bdrv_co_get_block_status() for details.
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004072 */
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004073int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4074 int nb_sectors, int *pnum)
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004075{
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00004076 Coroutine *co;
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004077 BdrvCoGetBlockStatusData data = {
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00004078 .bs = bs,
4079 .sector_num = sector_num,
4080 .nb_sectors = nb_sectors,
4081 .pnum = pnum,
4082 .done = false,
4083 };
4084
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004085 if (qemu_in_coroutine()) {
4086 /* Fast-path if already in coroutine context */
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004087 bdrv_get_block_status_co_entry(&data);
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004088 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004089 AioContext *aio_context = bdrv_get_aio_context(bs);
4090
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004091 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004092 qemu_coroutine_enter(co, &data);
4093 while (!data.done) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004094 aio_poll(aio_context, true);
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004095 }
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00004096 }
4097 return data.ret;
thsf58c7b32008-06-05 21:53:49 +00004098}
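
/* Editor's note: an illustrative sketch added to this annotated listing, not
 * part of the original file. It walks an image with the synchronous
 * bdrv_get_block_status() wrapper and counts allocated sectors, advancing by
 * the 'pnum' reported for each contiguous run; the helper name and the
 * 65536-sector per-call cap are arbitrary choices for the example. */
#if 0
static int64_t count_allocated_sectors(BlockDriverState *bs)
{
    int64_t total = bdrv_nb_sectors(bs);
    int64_t sector = 0, allocated = 0;

    while (sector < total) {
        int pnum;
        int n = MIN(total - sector, 65536);   /* cap each query */
        int64_t ret = bdrv_get_block_status(bs, sector, n, &pnum);

        if (ret < 0) {
            return ret;                       /* propagate the error */
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            allocated += pnum;
        }
        sector += pnum ? pnum : n;            /* always make progress */
    }
    return allocated;
}
#endif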
4099
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004100int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4101 int nb_sectors, int *pnum)
4102{
Paolo Bonzini4333bb72013-09-04 19:00:29 +02004103 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4104 if (ret < 0) {
4105 return ret;
4106 }
Kevin Wolf01fb2702014-07-07 17:00:37 +02004107 return !!(ret & BDRV_BLOCK_ALLOCATED);
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004108}
4109
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02004110/*
4111 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4112 *
4113 * Return true if the given sector is allocated in any image between
4114 * BASE and TOP (inclusive). BASE can be NULL to check if the given
4115 * sector is allocated in any image of the chain. Return false otherwise.
4116 *
4117 * 'pnum' is set to the number of sectors (including and immediately following
4118 * the specified sector) that are known to be in the same
4119 * allocated/unallocated state.
4120 *
4121 */
Paolo Bonzini4f578632013-09-04 19:00:24 +02004122int bdrv_is_allocated_above(BlockDriverState *top,
4123 BlockDriverState *base,
4124 int64_t sector_num,
4125 int nb_sectors, int *pnum)
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02004126{
4127 BlockDriverState *intermediate;
4128 int ret, n = nb_sectors;
4129
4130 intermediate = top;
4131 while (intermediate && intermediate != base) {
4132 int pnum_inter;
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004133 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4134 &pnum_inter);
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02004135 if (ret < 0) {
4136 return ret;
4137 } else if (ret) {
4138 *pnum = pnum_inter;
4139 return 1;
4140 }
4141
4142 /*
4143 * [sector_num, nb_sectors] is unallocated on top but intermediate
4144 * might have
4145 *
4146 * [sector_num+x, nb_sectors] allocated.
4147 */
Vishvananda Ishaya63ba17d2013-01-24 10:02:08 -08004148 if (n > pnum_inter &&
4149 (intermediate == top ||
4150 sector_num + pnum_inter < intermediate->total_sectors)) {
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02004151 n = pnum_inter;
4152 }
4153
4154 intermediate = intermediate->backing_hd;
4155 }
4156
4157 *pnum = n;
4158 return 0;
4159}
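
/* Editor's note: an illustrative sketch added to this annotated listing, not
 * part of the original file. It uses bdrv_is_allocated_above() to check
 * whether anything in the first nb_sectors sectors is still allocated in the
 * layers above 'base', e.g. before deciding that intermediate images can be
 * dropped; the helper name is invented. */
#if 0
static int layers_above_have_data(BlockDriverState *top,
                                  BlockDriverState *base, int nb_sectors)
{
    int64_t sector = 0;

    while (sector < nb_sectors) {
        int pnum;
        int ret = bdrv_is_allocated_above(top, base, sector,
                                          nb_sectors - sector, &pnum);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            return 1;   /* found an allocated run above 'base' */
        }
        sector += pnum;
    }
    return 0;
}
#endif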
4160
aliguori045df332009-03-05 23:00:48 +00004161const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4162{
4163 if (bs->backing_hd && bs->backing_hd->encrypted)
4164 return bs->backing_file;
4165 else if (bs->encrypted)
4166 return bs->filename;
4167 else
4168 return NULL;
4169}
4170
ths5fafdf22007-09-16 21:08:06 +00004171void bdrv_get_backing_filename(BlockDriverState *bs,
bellard83f64092006-08-01 16:21:11 +00004172 char *filename, int filename_size)
bellardea2384d2004-08-01 21:59:26 +00004173{
Kevin Wolf3574c602011-10-26 11:02:11 +02004174 pstrcpy(filename, filename_size, bs->backing_file);
bellardea2384d2004-08-01 21:59:26 +00004175}
4176
ths5fafdf22007-09-16 21:08:06 +00004177int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
bellardfaea38e2006-08-05 21:31:00 +00004178 const uint8_t *buf, int nb_sectors)
4179{
4180 BlockDriver *drv = bs->drv;
4181 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00004182 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00004183 if (!drv->bdrv_write_compressed)
4184 return -ENOTSUP;
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02004185 if (bdrv_check_request(bs, sector_num, nb_sectors))
4186 return -EIO;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004187
Fam Zhenge4654d22013-11-13 18:29:43 +08004188 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004189
bellardfaea38e2006-08-05 21:31:00 +00004190 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4191}
ths3b46e622007-09-17 08:09:54 +00004192
bellardfaea38e2006-08-05 21:31:00 +00004193int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4194{
4195 BlockDriver *drv = bs->drv;
4196 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00004197 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00004198 if (!drv->bdrv_get_info)
4199 return -ENOTSUP;
4200 memset(bdi, 0, sizeof(*bdi));
4201 return drv->bdrv_get_info(bs, bdi);
4202}
4203
Max Reitzeae041f2013-10-09 10:46:16 +02004204ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4205{
4206 BlockDriver *drv = bs->drv;
4207 if (drv && drv->bdrv_get_specific_info) {
4208 return drv->bdrv_get_specific_info(bs);
4209 }
4210 return NULL;
4211}
4212
Christoph Hellwig45566e92009-07-10 23:11:57 +02004213int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4214 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00004215{
Kevin Wolfcf8074b2013-04-05 21:27:53 +02004216 QEMUIOVector qiov;
4217 struct iovec iov = {
4218 .iov_base = (void *) buf,
4219 .iov_len = size,
4220 };
4221
4222 qemu_iovec_init_external(&qiov, &iov, 1);
4223 return bdrv_writev_vmstate(bs, &qiov, pos);
4224}
4225
4226int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4227{
aliguori178e08a2009-04-05 19:10:55 +00004228 BlockDriver *drv = bs->drv;
Kevin Wolfcf8074b2013-04-05 21:27:53 +02004229
4230 if (!drv) {
aliguori178e08a2009-04-05 19:10:55 +00004231 return -ENOMEDIUM;
Kevin Wolfcf8074b2013-04-05 21:27:53 +02004232 } else if (drv->bdrv_save_vmstate) {
4233 return drv->bdrv_save_vmstate(bs, qiov, pos);
4234 } else if (bs->file) {
4235 return bdrv_writev_vmstate(bs->file, qiov, pos);
4236 }
4237
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09004238 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00004239}
4240
Christoph Hellwig45566e92009-07-10 23:11:57 +02004241int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4242 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00004243{
4244 BlockDriver *drv = bs->drv;
4245 if (!drv)
4246 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09004247 if (drv->bdrv_load_vmstate)
4248 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4249 if (bs->file)
4250 return bdrv_load_vmstate(bs->file, buf, pos, size);
4251 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00004252}
4253
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004254void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4255{
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004256 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004257 return;
4258 }
4259
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004260 bs->drv->bdrv_debug_event(bs, event);
Kevin Wolf41c695c2012-12-06 14:32:58 +01004261}
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004262
Kevin Wolf41c695c2012-12-06 14:32:58 +01004263int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4264 const char *tag)
4265{
4266 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4267 bs = bs->file;
4268 }
4269
4270 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4271 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4272 }
4273
4274 return -ENOTSUP;
4275}
4276
Fam Zheng4cc70e92013-11-20 10:01:54 +08004277int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4278{
4279 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4280 bs = bs->file;
4281 }
4282
4283 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4284 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4285 }
4286
4287 return -ENOTSUP;
4288}
4289
Kevin Wolf41c695c2012-12-06 14:32:58 +01004290int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4291{
Max Reitz938789e2014-03-10 23:44:08 +01004292 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
Kevin Wolf41c695c2012-12-06 14:32:58 +01004293 bs = bs->file;
4294 }
4295
4296 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4297 return bs->drv->bdrv_debug_resume(bs, tag);
4298 }
4299
4300 return -ENOTSUP;
4301}
4302
4303bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4304{
4305 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4306 bs = bs->file;
4307 }
4308
4309 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4310 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4311 }
4312
4313 return false;
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004314}
4315
Blue Swirl199630b2010-07-25 20:49:34 +00004316int bdrv_is_snapshot(BlockDriverState *bs)
4317{
4318 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4319}
4320
Jeff Codyb1b1d782012-10-16 15:49:09 -04004321/* backing_file can either be relative, or absolute, or a protocol. If it is
4322 * relative, it must be relative to the chain. So, passing in bs->filename
4323 * from a BDS as backing_file should not be done, as that may be relative to
4324 * the CWD rather than the chain. */
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004325BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4326 const char *backing_file)
4327{
Jeff Codyb1b1d782012-10-16 15:49:09 -04004328 char *filename_full = NULL;
4329 char *backing_file_full = NULL;
4330 char *filename_tmp = NULL;
4331 int is_protocol = 0;
4332 BlockDriverState *curr_bs = NULL;
4333 BlockDriverState *retval = NULL;
4334
4335 if (!bs || !bs->drv || !backing_file) {
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004336 return NULL;
4337 }
4338
Jeff Codyb1b1d782012-10-16 15:49:09 -04004339 filename_full = g_malloc(PATH_MAX);
4340 backing_file_full = g_malloc(PATH_MAX);
4341 filename_tmp = g_malloc(PATH_MAX);
4342
4343 is_protocol = path_has_protocol(backing_file);
4344
4345 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4346
4347 /* If either of the filename paths is actually a protocol, then
4348 * compare unmodified paths; otherwise make paths relative */
4349 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4350 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4351 retval = curr_bs->backing_hd;
4352 break;
4353 }
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004354 } else {
Jeff Codyb1b1d782012-10-16 15:49:09 -04004355 /* If not an absolute filename path, make it relative to the current
4356 * image's filename path */
4357 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4358 backing_file);
4359
4360 /* We are going to compare absolute pathnames */
4361 if (!realpath(filename_tmp, filename_full)) {
4362 continue;
4363 }
4364
4365 /* We need to make sure the backing filename we are comparing against
4366 * is relative to the current image filename (or absolute) */
4367 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4368 curr_bs->backing_file);
4369
4370 if (!realpath(filename_tmp, backing_file_full)) {
4371 continue;
4372 }
4373
4374 if (strcmp(backing_file_full, filename_full) == 0) {
4375 retval = curr_bs->backing_hd;
4376 break;
4377 }
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004378 }
4379 }
4380
Jeff Codyb1b1d782012-10-16 15:49:09 -04004381 g_free(filename_full);
4382 g_free(backing_file_full);
4383 g_free(filename_tmp);
4384 return retval;
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004385}
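
/* Editor's note: an illustrative sketch added to this annotated listing, not
 * part of the original file. Given a chain base.qcow2 <- mid.qcow2 <-
 * top.qcow2, the backing file name passed in is resolved relative to the
 * chain (or compared as-is for protocol names), matching the rules in the
 * comment above; the file name used here is invented. */
#if 0
static BlockDriverState *find_mid_layer(BlockDriverState *top_bs)
{
    /* Returns the BDS for mid.qcow2 itself (the backing_hd of the image
     * that records it as its backing file), or NULL if it is not found. */
    return bdrv_find_backing_image(top_bs, "mid.qcow2");
}
#endif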
4386
Benoît Canetf198fd12012-08-02 10:22:47 +02004387int bdrv_get_backing_file_depth(BlockDriverState *bs)
4388{
4389 if (!bs->drv) {
4390 return 0;
4391 }
4392
4393 if (!bs->backing_hd) {
4394 return 0;
4395 }
4396
4397 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4398}
4399
bellard83f64092006-08-01 16:21:11 +00004400/**************************************************************/
4401/* async I/Os */
4402
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004403BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4404 QEMUIOVector *qiov, int nb_sectors,
Markus Armbruster097310b2014-10-07 13:59:15 +02004405 BlockCompletionFunc *cb, void *opaque)
aliguori3b69e4b2009-01-22 16:59:24 +00004406{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01004407 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4408
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004409 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004410 cb, opaque, false);
bellard83f64092006-08-01 16:21:11 +00004411}
4412
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004413BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4414 QEMUIOVector *qiov, int nb_sectors,
Markus Armbruster097310b2014-10-07 13:59:15 +02004415 BlockCompletionFunc *cb, void *opaque)
bellard83f64092006-08-01 16:21:11 +00004416{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01004417 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4418
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004419 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004420 cb, opaque, true);
bellard83f64092006-08-01 16:21:11 +00004421}
4422
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004423BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
Paolo Bonzinid5ef94d2013-11-22 13:39:46 +01004424 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
Markus Armbruster097310b2014-10-07 13:59:15 +02004425 BlockCompletionFunc *cb, void *opaque)
Paolo Bonzinid5ef94d2013-11-22 13:39:46 +01004426{
4427 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4428
4429 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4430 BDRV_REQ_ZERO_WRITE | flags,
4431 cb, opaque, true);
4432}
4433
Kevin Wolf40b4f532009-09-09 17:53:37 +02004434
4435typedef struct MultiwriteCB {
4436 int error;
4437 int num_requests;
4438 int num_callbacks;
4439 struct {
Markus Armbruster097310b2014-10-07 13:59:15 +02004440 BlockCompletionFunc *cb;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004441 void *opaque;
4442 QEMUIOVector *free_qiov;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004443 } callbacks[];
4444} MultiwriteCB;
4445
4446static void multiwrite_user_cb(MultiwriteCB *mcb)
4447{
4448 int i;
4449
4450 for (i = 0; i < mcb->num_callbacks; i++) {
4451 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
Stefan Hajnoczi1e1ea482010-04-21 20:35:45 +01004452 if (mcb->callbacks[i].free_qiov) {
4453 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4454 }
Anthony Liguori7267c092011-08-20 22:09:37 -05004455 g_free(mcb->callbacks[i].free_qiov);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004456 }
4457}
4458
4459static void multiwrite_cb(void *opaque, int ret)
4460{
4461 MultiwriteCB *mcb = opaque;
4462
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01004463 trace_multiwrite_cb(mcb, ret);
4464
Kevin Wolfcb6d3ca2010-04-01 22:48:44 +02004465 if (ret < 0 && !mcb->error) {
Kevin Wolf40b4f532009-09-09 17:53:37 +02004466 mcb->error = ret;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004467 }
4468
4469 mcb->num_requests--;
4470 if (mcb->num_requests == 0) {
Kevin Wolfde189a12010-07-01 16:08:51 +02004471 multiwrite_user_cb(mcb);
Anthony Liguori7267c092011-08-20 22:09:37 -05004472 g_free(mcb);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004473 }
4474}
4475
4476static int multiwrite_req_compare(const void *a, const void *b)
4477{
Christoph Hellwig77be4362010-05-19 20:53:10 +02004478 const BlockRequest *req1 = a, *req2 = b;
4479
4480 /*
4481 * Note that we can't simply subtract req2->sector from req1->sector
4482 * here as that could overflow the return value.
4483 */
4484 if (req1->sector > req2->sector) {
4485 return 1;
4486 } else if (req1->sector < req2->sector) {
4487 return -1;
4488 } else {
4489 return 0;
4490 }
Kevin Wolf40b4f532009-09-09 17:53:37 +02004491}
4492
4493/*
4494 * Takes a bunch of requests and tries to merge them. Returns the number of
4495 * requests that remain after merging.
4496 */
4497static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4498 int num_reqs, MultiwriteCB *mcb)
4499{
4500 int i, outidx;
4501
4502 // Sort requests by start sector
4503 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4504
4505 // Check if adjacent requests touch the same clusters. If so, combine them
4506 // into a single request; only sequential or overlapping requests qualify.
4507 outidx = 0;
4508 for (i = 1; i < num_reqs; i++) {
4509 int merge = 0;
4510 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4511
Paolo Bonzinib6a127a2012-02-21 16:43:52 +01004512 // Handle exactly sequential writes and overlapping writes.
Kevin Wolf40b4f532009-09-09 17:53:37 +02004513 if (reqs[i].sector <= oldreq_last) {
4514 merge = 1;
4515 }
4516
Christoph Hellwige2a305f2010-01-26 14:49:08 +01004517 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4518 merge = 0;
4519 }
4520
Peter Lieven6c5a42a2014-10-27 10:18:46 +01004521 if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
4522 reqs[i].nb_sectors > bs->bl.max_transfer_length) {
4523 merge = 0;
4524 }
4525
Kevin Wolf40b4f532009-09-09 17:53:37 +02004526 if (merge) {
4527 size_t size;
Anthony Liguori7267c092011-08-20 22:09:37 -05004528 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
Kevin Wolf40b4f532009-09-09 17:53:37 +02004529 qemu_iovec_init(qiov,
4530 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4531
4532 // Add the first request to the merged one. If the requests are
4533 // overlapping, drop the last sectors of the first request.
4534 size = (reqs[i].sector - reqs[outidx].sector) << 9;
Michael Tokarev1b093c42012-03-12 21:28:06 +04004535 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004536
Paolo Bonzinib6a127a2012-02-21 16:43:52 +01004537 // We shouldn't need to add any zeros between the two requests
4538 assert (reqs[i].sector <= oldreq_last);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004539
4540 // Add the second request
Michael Tokarev1b093c42012-03-12 21:28:06 +04004541 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004542
Stefan Hajnoczi391827e2014-07-30 09:53:30 +01004543 // Add tail of first request, if necessary
4544 if (qiov->size < reqs[outidx].qiov->size) {
4545 qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
4546 reqs[outidx].qiov->size - qiov->size);
4547 }
4548
Kevin Wolfcbf1dff2010-05-21 11:09:42 +02004549 reqs[outidx].nb_sectors = qiov->size >> 9;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004550 reqs[outidx].qiov = qiov;
4551
4552 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4553 } else {
4554 outidx++;
4555 reqs[outidx].sector = reqs[i].sector;
4556 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4557 reqs[outidx].qiov = reqs[i].qiov;
4558 }
4559 }
4560
4561 return outidx + 1;
4562}
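
/* Editor's note: a worked example added to this annotated listing, not part
 * of the original file (numbers invented). Take two sorted write requests
 * A = {sector 0, nb_sectors 8} and B = {sector 6, nb_sectors 8}. They merge
 * because B.sector (6) <= oldreq_last (0 + 8 = 8). The loop above copies the
 * first (6 - 0) << 9 = 3072 bytes of A, appends all 4096 bytes of B (so B's
 * data wins for the overlapping sectors 6-7), and skips the tail step since
 * 7168 >= 4096. The merged request covers sectors 0-13, i.e.
 * nb_sectors = 7168 >> 9 = 14. */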
4563
4564/*
4565 * Submit multiple AIO write requests at once.
4566 *
4567 * On success, the function returns 0 and all requests in the reqs array have
4568 * been submitted. In the error case this function returns -1, and any of the
4569 * requests may or may not be submitted yet. In particular, this means that the
4570 * callback will be called for some of the requests, for others it won't. The
4571 * caller must check the error field of the BlockRequest to wait for the right
4572 * callbacks (if error != 0, no callback will be called).
4573 *
4574 * The implementation may modify the contents of the reqs array, e.g. to merge
4575 * requests. However, the fields opaque and error are left unmodified as they
4576 * are used to signal failure for a single request to the caller.
4577 */
4578int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4579{
Kevin Wolf40b4f532009-09-09 17:53:37 +02004580 MultiwriteCB *mcb;
4581 int i;
4582
Ryan Harper301db7c2011-03-07 10:01:04 -06004583 /* don't submit writes if we don't have a medium */
4584 if (bs->drv == NULL) {
4585 for (i = 0; i < num_reqs; i++) {
4586 reqs[i].error = -ENOMEDIUM;
4587 }
4588 return -1;
4589 }
4590
Kevin Wolf40b4f532009-09-09 17:53:37 +02004591 if (num_reqs == 0) {
4592 return 0;
4593 }
4594
4595 // Create MultiwriteCB structure
Anthony Liguori7267c092011-08-20 22:09:37 -05004596 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
Kevin Wolf40b4f532009-09-09 17:53:37 +02004597 mcb->num_requests = 0;
4598 mcb->num_callbacks = num_reqs;
4599
4600 for (i = 0; i < num_reqs; i++) {
4601 mcb->callbacks[i].cb = reqs[i].cb;
4602 mcb->callbacks[i].opaque = reqs[i].opaque;
4603 }
4604
4605 // Check for mergable requests
4606 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4607
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01004608 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4609
Paolo Bonzinidf9309f2011-11-14 17:50:50 +01004610 /* Run the aio requests. */
4611 mcb->num_requests = num_reqs;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004612 for (i = 0; i < num_reqs; i++) {
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004613 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4614 reqs[i].nb_sectors, reqs[i].flags,
4615 multiwrite_cb, mcb,
4616 true);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004617 }
4618
4619 return 0;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004620}
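
/* Editor's note: an illustrative sketch added to this annotated listing, not
 * part of the original file. It submits two write requests through
 * bdrv_aio_multiwrite() in a single call; per the contract above, each
 * request's cb/opaque is invoked individually, and reqs[i].error must be
 * checked when the submission itself fails. The callback, helper name and
 * request geometry are invented, and the QEMUIOVectors are assumed to be
 * set up by the caller. */
#if 0
static void example_write_cb(void *opaque, int ret)
{
    /* called once per original request with that request's result */
}

static int submit_two_writes(BlockDriverState *bs,
                             QEMUIOVector *qiov0, QEMUIOVector *qiov1)
{
    BlockRequest reqs[2] = {
        { .sector = 0, .nb_sectors = 8,  .qiov = qiov0,
          .cb = example_write_cb, .opaque = NULL },
        { .sector = 8, .nb_sectors = 16, .qiov = qiov1,
          .cb = example_write_cb, .opaque = NULL },
    };

    if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
        /* submission failed; requests with reqs[i].error != 0 get no cb */
        return -1;
    }
    return 0;
}
#endif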
4621
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004622void bdrv_aio_cancel(BlockAIOCB *acb)
pbrookce1a14d2006-08-07 02:38:06 +00004623{
Fam Zhengca5fd112014-09-11 13:41:27 +08004624 qemu_aio_ref(acb);
4625 bdrv_aio_cancel_async(acb);
4626 while (acb->refcnt > 1) {
4627 if (acb->aiocb_info->get_aio_context) {
4628 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
4629 } else if (acb->bs) {
4630 aio_poll(bdrv_get_aio_context(acb->bs), true);
4631 } else {
4632 abort();
Fam Zheng02c50ef2014-09-11 13:41:09 +08004633 }
Fam Zheng02c50ef2014-09-11 13:41:09 +08004634 }
Fam Zheng80074292014-09-11 13:41:28 +08004635 qemu_aio_unref(acb);
Fam Zheng02c50ef2014-09-11 13:41:09 +08004636}
4637
4638/* Async version of aio cancel. The caller is not blocked if the acb implements
4639 * cancel_async; otherwise we do nothing and let the request complete normally.
4640 * In either case the completion callback must be called. */
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004641void bdrv_aio_cancel_async(BlockAIOCB *acb)
Fam Zheng02c50ef2014-09-11 13:41:09 +08004642{
4643 if (acb->aiocb_info->cancel_async) {
4644 acb->aiocb_info->cancel_async(acb);
4645 }
bellard83f64092006-08-01 16:21:11 +00004646}
4647
4648/**************************************************************/
4649/* async block device emulation */
4650
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004651typedef struct BlockAIOCBSync {
4652 BlockAIOCB common;
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02004653 QEMUBH *bh;
4654 int ret;
4655 /* vector translation state */
4656 QEMUIOVector *qiov;
4657 uint8_t *bounce;
4658 int is_write;
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004659} BlockAIOCBSync;
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02004660
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004661static const AIOCBInfo bdrv_em_aiocb_info = {
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004662 .aiocb_size = sizeof(BlockAIOCBSync),
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02004663};
4664
bellard83f64092006-08-01 16:21:11 +00004665static void bdrv_aio_bh_cb(void *opaque)
bellardbeac80c2006-06-26 20:08:57 +00004666{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004667 BlockAIOCBSync *acb = opaque;
aliguorif141eaf2009-04-07 18:43:24 +00004668
Kevin Wolf857d4f42014-05-20 13:16:51 +02004669 if (!acb->is_write && acb->ret >= 0) {
Michael Tokarev03396142012-06-07 20:17:55 +04004670 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
Kevin Wolf857d4f42014-05-20 13:16:51 +02004671 }
aliguoriceb42de2009-04-07 18:43:28 +00004672 qemu_vfree(acb->bounce);
pbrookce1a14d2006-08-07 02:38:06 +00004673 acb->common.cb(acb->common.opaque, acb->ret);
Dor Laor6a7ad292009-06-01 12:07:23 +03004674 qemu_bh_delete(acb->bh);
Avi Kivity36afc452009-06-23 16:20:36 +03004675 acb->bh = NULL;
Fam Zheng80074292014-09-11 13:41:28 +08004676 qemu_aio_unref(acb);
bellardbeac80c2006-06-26 20:08:57 +00004677}
bellardbeac80c2006-06-26 20:08:57 +00004678
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004679static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4680 int64_t sector_num,
4681 QEMUIOVector *qiov,
4682 int nb_sectors,
Markus Armbruster097310b2014-10-07 13:59:15 +02004683 BlockCompletionFunc *cb,
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004684 void *opaque,
4685 int is_write)
aliguorif141eaf2009-04-07 18:43:24 +00004686
bellardea2384d2004-08-01 21:59:26 +00004687{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004688 BlockAIOCBSync *acb;
pbrookce1a14d2006-08-07 02:38:06 +00004689
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004690 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
aliguorif141eaf2009-04-07 18:43:24 +00004691 acb->is_write = is_write;
4692 acb->qiov = qiov;
Kevin Wolf857d4f42014-05-20 13:16:51 +02004693 acb->bounce = qemu_try_blockalign(bs, qiov->size);
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004694 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
aliguorif141eaf2009-04-07 18:43:24 +00004695
Kevin Wolf857d4f42014-05-20 13:16:51 +02004696 if (acb->bounce == NULL) {
4697 acb->ret = -ENOMEM;
4698 } else if (is_write) {
Michael Tokarevd5e6b162012-06-07 20:21:06 +04004699 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
Stefan Hajnoczi1ed20ac2011-10-13 13:08:21 +01004700 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
aliguorif141eaf2009-04-07 18:43:24 +00004701 } else {
Stefan Hajnoczi1ed20ac2011-10-13 13:08:21 +01004702 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
aliguorif141eaf2009-04-07 18:43:24 +00004703 }
4704
pbrookce1a14d2006-08-07 02:38:06 +00004705 qemu_bh_schedule(acb->bh);
aliguorif141eaf2009-04-07 18:43:24 +00004706
pbrookce1a14d2006-08-07 02:38:06 +00004707 return &acb->common;
pbrook7a6cba62006-06-04 11:39:07 +00004708}
4709
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004710static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
aliguorif141eaf2009-04-07 18:43:24 +00004711 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
Markus Armbruster097310b2014-10-07 13:59:15 +02004712 BlockCompletionFunc *cb, void *opaque)
bellard83f64092006-08-01 16:21:11 +00004713{
aliguorif141eaf2009-04-07 18:43:24 +00004714 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
bellard83f64092006-08-01 16:21:11 +00004715}
4716
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004717static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
aliguorif141eaf2009-04-07 18:43:24 +00004718 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
Markus Armbruster097310b2014-10-07 13:59:15 +02004719 BlockCompletionFunc *cb, void *opaque)
aliguorif141eaf2009-04-07 18:43:24 +00004720{
4721 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4722}
4723
Kevin Wolf68485422011-06-30 10:05:46 +02004724
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004725typedef struct BlockAIOCBCoroutine {
4726 BlockAIOCB common;
Kevin Wolf68485422011-06-30 10:05:46 +02004727 BlockRequest req;
4728 bool is_write;
Kevin Wolfd318aea2012-11-13 16:35:08 +01004729 bool *done;
Kevin Wolf68485422011-06-30 10:05:46 +02004730 QEMUBH* bh;
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004731} BlockAIOCBCoroutine;
Kevin Wolf68485422011-06-30 10:05:46 +02004732
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004733static const AIOCBInfo bdrv_em_co_aiocb_info = {
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004734 .aiocb_size = sizeof(BlockAIOCBCoroutine),
Kevin Wolf68485422011-06-30 10:05:46 +02004735};
4736
Paolo Bonzini35246a62011-10-14 10:41:29 +02004737static void bdrv_co_em_bh(void *opaque)
Kevin Wolf68485422011-06-30 10:05:46 +02004738{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004739 BlockAIOCBCoroutine *acb = opaque;
Kevin Wolf68485422011-06-30 10:05:46 +02004740
4741 acb->common.cb(acb->common.opaque, acb->req.error);
Kevin Wolfd318aea2012-11-13 16:35:08 +01004742
Kevin Wolf68485422011-06-30 10:05:46 +02004743 qemu_bh_delete(acb->bh);
Fam Zheng80074292014-09-11 13:41:28 +08004744 qemu_aio_unref(acb);
Kevin Wolf68485422011-06-30 10:05:46 +02004745}
4746
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004747/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4748static void coroutine_fn bdrv_co_do_rw(void *opaque)
4749{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004750 BlockAIOCBCoroutine *acb = opaque;
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004751 BlockDriverState *bs = acb->common.bs;
4752
4753 if (!acb->is_write) {
4754 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004755 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004756 } else {
4757 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004758 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004759 }
4760
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004761 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004762 qemu_bh_schedule(acb->bh);
4763}
4764
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004765static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4766 int64_t sector_num,
4767 QEMUIOVector *qiov,
4768 int nb_sectors,
4769 BdrvRequestFlags flags,
Markus Armbruster097310b2014-10-07 13:59:15 +02004770 BlockCompletionFunc *cb,
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004771 void *opaque,
4772 bool is_write)
Kevin Wolf68485422011-06-30 10:05:46 +02004773{
4774 Coroutine *co;
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004775 BlockAIOCBCoroutine *acb;
Kevin Wolf68485422011-06-30 10:05:46 +02004776
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004777 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
Kevin Wolf68485422011-06-30 10:05:46 +02004778 acb->req.sector = sector_num;
4779 acb->req.nb_sectors = nb_sectors;
4780 acb->req.qiov = qiov;
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004781 acb->req.flags = flags;
Kevin Wolf68485422011-06-30 10:05:46 +02004782 acb->is_write = is_write;
4783
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004784 co = qemu_coroutine_create(bdrv_co_do_rw);
Kevin Wolf68485422011-06-30 10:05:46 +02004785 qemu_coroutine_enter(co, acb);
4786
4787 return &acb->common;
4788}
4789
Paolo Bonzini07f07612011-10-17 12:32:12 +02004790static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02004791{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004792 BlockAIOCBCoroutine *acb = opaque;
Paolo Bonzini07f07612011-10-17 12:32:12 +02004793 BlockDriverState *bs = acb->common.bs;
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02004794
Paolo Bonzini07f07612011-10-17 12:32:12 +02004795 acb->req.error = bdrv_co_flush(bs);
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004796 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02004797 qemu_bh_schedule(acb->bh);
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02004798}
4799
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004800BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
Markus Armbruster097310b2014-10-07 13:59:15 +02004801 BlockCompletionFunc *cb, void *opaque)
Alexander Graf016f5cf2010-05-26 17:51:49 +02004802{
Paolo Bonzini07f07612011-10-17 12:32:12 +02004803 trace_bdrv_aio_flush(bs, opaque);
Alexander Graf016f5cf2010-05-26 17:51:49 +02004804
Paolo Bonzini07f07612011-10-17 12:32:12 +02004805 Coroutine *co;
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004806 BlockAIOCBCoroutine *acb;
Alexander Graf016f5cf2010-05-26 17:51:49 +02004807
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004808 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
Kevin Wolfd318aea2012-11-13 16:35:08 +01004809
Paolo Bonzini07f07612011-10-17 12:32:12 +02004810 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4811 qemu_coroutine_enter(co, acb);
Alexander Graf016f5cf2010-05-26 17:51:49 +02004812
Alexander Graf016f5cf2010-05-26 17:51:49 +02004813 return &acb->common;
4814}
4815
Paolo Bonzini4265d622011-10-17 12:32:14 +02004816static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4817{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004818 BlockAIOCBCoroutine *acb = opaque;
Paolo Bonzini4265d622011-10-17 12:32:14 +02004819 BlockDriverState *bs = acb->common.bs;
4820
4821 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004822 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
Paolo Bonzini4265d622011-10-17 12:32:14 +02004823 qemu_bh_schedule(acb->bh);
4824}
4825
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004826BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
Paolo Bonzini4265d622011-10-17 12:32:14 +02004827 int64_t sector_num, int nb_sectors,
Markus Armbruster097310b2014-10-07 13:59:15 +02004828 BlockCompletionFunc *cb, void *opaque)
Paolo Bonzini4265d622011-10-17 12:32:14 +02004829{
4830 Coroutine *co;
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004831 BlockAIOCBCoroutine *acb;
Paolo Bonzini4265d622011-10-17 12:32:14 +02004832
4833 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4834
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004835 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
Paolo Bonzini4265d622011-10-17 12:32:14 +02004836 acb->req.sector = sector_num;
4837 acb->req.nb_sectors = nb_sectors;
4838 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4839 qemu_coroutine_enter(co, acb);
4840
4841 return &acb->common;
4842}
4843
bellardea2384d2004-08-01 21:59:26 +00004844void bdrv_init(void)
4845{
Anthony Liguori5efa9d52009-05-09 17:03:42 -05004846 module_call_init(MODULE_INIT_BLOCK);
bellardea2384d2004-08-01 21:59:26 +00004847}
pbrookce1a14d2006-08-07 02:38:06 +00004848
Markus Armbrustereb852012009-10-27 18:41:44 +01004849void bdrv_init_with_whitelist(void)
4850{
4851 use_bdrv_whitelist = 1;
4852 bdrv_init();
4853}
4854
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004855void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
Markus Armbruster097310b2014-10-07 13:59:15 +02004856 BlockCompletionFunc *cb, void *opaque)
aliguori6bbff9a2009-03-20 18:25:59 +00004857{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004858 BlockAIOCB *acb;
pbrookce1a14d2006-08-07 02:38:06 +00004859
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004860 acb = g_slice_alloc(aiocb_info->aiocb_size);
4861 acb->aiocb_info = aiocb_info;
pbrookce1a14d2006-08-07 02:38:06 +00004862 acb->bs = bs;
4863 acb->cb = cb;
4864 acb->opaque = opaque;
Fam Zhengf197fe22014-09-11 13:41:08 +08004865 acb->refcnt = 1;
pbrookce1a14d2006-08-07 02:38:06 +00004866 return acb;
4867}
4868
Fam Zhengf197fe22014-09-11 13:41:08 +08004869void qemu_aio_ref(void *p)
4870{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004871 BlockAIOCB *acb = p;
Fam Zhengf197fe22014-09-11 13:41:08 +08004872 acb->refcnt++;
4873}
4874
Fam Zheng80074292014-09-11 13:41:28 +08004875void qemu_aio_unref(void *p)
pbrookce1a14d2006-08-07 02:38:06 +00004876{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004877 BlockAIOCB *acb = p;
Fam Zhengf197fe22014-09-11 13:41:08 +08004878 assert(acb->refcnt > 0);
4879 if (--acb->refcnt == 0) {
4880 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
4881 }
pbrookce1a14d2006-08-07 02:38:06 +00004882}
bellard19cb3732006-08-19 11:45:59 +00004883
4884/**************************************************************/
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004885/* Coroutine block device emulation */
4886
4887typedef struct CoroutineIOCompletion {
4888 Coroutine *coroutine;
4889 int ret;
4890} CoroutineIOCompletion;
4891
4892static void bdrv_co_io_em_complete(void *opaque, int ret)
4893{
4894 CoroutineIOCompletion *co = opaque;
4895
4896 co->ret = ret;
4897 qemu_coroutine_enter(co->coroutine, NULL);
4898}
4899
4900static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4901 int nb_sectors, QEMUIOVector *iov,
4902 bool is_write)
4903{
4904 CoroutineIOCompletion co = {
4905 .coroutine = qemu_coroutine_self(),
4906 };
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004907 BlockAIOCB *acb;
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004908
4909 if (is_write) {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01004910 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4911 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004912 } else {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01004913 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4914 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004915 }
4916
Stefan Hajnoczi59370aa2011-09-30 17:34:58 +01004917 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004918 if (!acb) {
4919 return -EIO;
4920 }
4921 qemu_coroutine_yield();
4922
4923 return co.ret;
4924}
4925
4926static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4927 int64_t sector_num, int nb_sectors,
4928 QEMUIOVector *iov)
4929{
4930 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4931}
4932
4933static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4934 int64_t sector_num, int nb_sectors,
4935 QEMUIOVector *iov)
4936{
4937 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4938}
4939
Paolo Bonzini07f07612011-10-17 12:32:12 +02004940static void coroutine_fn bdrv_flush_co_entry(void *opaque)
Kevin Wolfe7a8a782011-07-15 16:05:00 +02004941{
Paolo Bonzini07f07612011-10-17 12:32:12 +02004942 RwCo *rwco = opaque;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02004943
Paolo Bonzini07f07612011-10-17 12:32:12 +02004944 rwco->ret = bdrv_co_flush(rwco->bs);
4945}
4946
4947int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
4948{
Kevin Wolfeb489bb2011-11-10 18:10:11 +01004949 int ret;
4950
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004951 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
Paolo Bonzini07f07612011-10-17 12:32:12 +02004952 return 0;
Kevin Wolfeb489bb2011-11-10 18:10:11 +01004953 }
4954
Kevin Wolfca716362011-11-10 18:13:59 +01004955 /* Write back cached data to the OS even with cache=unsafe */
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004956 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
Kevin Wolfeb489bb2011-11-10 18:10:11 +01004957 if (bs->drv->bdrv_co_flush_to_os) {
4958 ret = bs->drv->bdrv_co_flush_to_os(bs);
4959 if (ret < 0) {
4960 return ret;
4961 }
4962 }
4963
Kevin Wolfca716362011-11-10 18:13:59 +01004964 /* But don't actually force it to the disk with cache=unsafe */
4965 if (bs->open_flags & BDRV_O_NO_FLUSH) {
Kevin Wolfd4c82322012-08-15 12:52:45 +02004966 goto flush_parent;
Kevin Wolfca716362011-11-10 18:13:59 +01004967 }
4968
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004969 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
Kevin Wolfeb489bb2011-11-10 18:10:11 +01004970 if (bs->drv->bdrv_co_flush_to_disk) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004971 ret = bs->drv->bdrv_co_flush_to_disk(bs);
Paolo Bonzini07f07612011-10-17 12:32:12 +02004972 } else if (bs->drv->bdrv_aio_flush) {
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004973 BlockAIOCB *acb;
Paolo Bonzini07f07612011-10-17 12:32:12 +02004974 CoroutineIOCompletion co = {
4975 .coroutine = qemu_coroutine_self(),
4976 };
4977
4978 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
4979 if (acb == NULL) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004980 ret = -EIO;
Paolo Bonzini07f07612011-10-17 12:32:12 +02004981 } else {
4982 qemu_coroutine_yield();
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004983 ret = co.ret;
Paolo Bonzini07f07612011-10-17 12:32:12 +02004984 }
Paolo Bonzini07f07612011-10-17 12:32:12 +02004985 } else {
4986 /*
4987 * Some block drivers always operate in either writethrough or unsafe
4988 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
4989 * know how the server works (because the behaviour is hardcoded or
4990 * depends on server-side configuration), so we can't ensure that
4991 * everything is safe on disk. Returning an error doesn't work because
4992 * that would break guests even if the server operates in writethrough
4993 * mode.
4994 *
4995 * Let's hope the user knows what he's doing.
4996 */
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004997 ret = 0;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02004998 }
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004999 if (ret < 0) {
5000 return ret;
5001 }
5002
5003 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
5004 * in the case of cache=unsafe, so there are no useless flushes.
5005 */
Kevin Wolfd4c82322012-08-15 12:52:45 +02005006flush_parent:
Paolo Bonzini29cdb252012-03-12 18:26:01 +01005007 return bdrv_co_flush(bs->file);
Paolo Bonzini07f07612011-10-17 12:32:12 +02005008}
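
/* Editor's note: a summary comment added to this annotated listing, not part
 * of the original file. The flush path above cascades in three steps: flush
 * the driver's internal buffers to the OS (bdrv_co_flush_to_os), then force
 * the data to stable storage unless cache=unsafe is in effect
 * (bdrv_co_flush_to_disk or the bdrv_aio_flush fallback), and finally
 * recurse into bs->file so the underlying protocol layer is flushed too. */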
5009
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005010void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
Anthony Liguori0f154232011-11-14 15:09:45 -06005011{
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005012 Error *local_err = NULL;
5013 int ret;
5014
Kevin Wolf3456a8d2014-03-11 10:58:39 +01005015 if (!bs->drv) {
5016 return;
Anthony Liguori0f154232011-11-14 15:09:45 -06005017 }
Kevin Wolf3456a8d2014-03-11 10:58:39 +01005018
Alexey Kardashevskiy7ea2d262014-10-09 13:50:46 +11005019 if (!(bs->open_flags & BDRV_O_INCOMING)) {
5020 return;
5021 }
5022 bs->open_flags &= ~BDRV_O_INCOMING;
5023
Kevin Wolf3456a8d2014-03-11 10:58:39 +01005024 if (bs->drv->bdrv_invalidate_cache) {
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005025 bs->drv->bdrv_invalidate_cache(bs, &local_err);
Kevin Wolf3456a8d2014-03-11 10:58:39 +01005026 } else if (bs->file) {
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005027 bdrv_invalidate_cache(bs->file, &local_err);
5028 }
5029 if (local_err) {
5030 error_propagate(errp, local_err);
5031 return;
Kevin Wolf3456a8d2014-03-11 10:58:39 +01005032 }
5033
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005034 ret = refresh_total_sectors(bs, bs->total_sectors);
5035 if (ret < 0) {
5036 error_setg_errno(errp, -ret, "Could not refresh total sector count");
5037 return;
5038 }
Anthony Liguori0f154232011-11-14 15:09:45 -06005039}
5040
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005041void bdrv_invalidate_cache_all(Error **errp)
Anthony Liguori0f154232011-11-14 15:09:45 -06005042{
5043 BlockDriverState *bs;
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005044 Error *local_err = NULL;
Anthony Liguori0f154232011-11-14 15:09:45 -06005045
Benoît Canetdc364f42014-01-23 21:31:32 +01005046 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02005047 AioContext *aio_context = bdrv_get_aio_context(bs);
5048
5049 aio_context_acquire(aio_context);
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005050 bdrv_invalidate_cache(bs, &local_err);
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02005051 aio_context_release(aio_context);
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005052 if (local_err) {
5053 error_propagate(errp, local_err);
5054 return;
5055 }
Anthony Liguori0f154232011-11-14 15:09:45 -06005056 }
5057}
5058
Paolo Bonzini07f07612011-10-17 12:32:12 +02005059int bdrv_flush(BlockDriverState *bs)
5060{
5061 Coroutine *co;
5062 RwCo rwco = {
5063 .bs = bs,
5064 .ret = NOT_DONE,
5065 };
5066
5067 if (qemu_in_coroutine()) {
5068 /* Fast-path if already in coroutine context */
5069 bdrv_flush_co_entry(&rwco);
5070 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005071 AioContext *aio_context = bdrv_get_aio_context(bs);
5072
Paolo Bonzini07f07612011-10-17 12:32:12 +02005073 co = qemu_coroutine_create(bdrv_flush_co_entry);
5074 qemu_coroutine_enter(co, &rwco);
5075 while (rwco.ret == NOT_DONE) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005076 aio_poll(aio_context, true);
Paolo Bonzini07f07612011-10-17 12:32:12 +02005077 }
5078 }
5079
5080 return rwco.ret;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02005081}
5082
Kevin Wolf775aa8b2013-12-05 12:09:38 +01005083typedef struct DiscardCo {
5084 BlockDriverState *bs;
5085 int64_t sector_num;
5086 int nb_sectors;
5087 int ret;
5088} DiscardCo;
Paolo Bonzini4265d622011-10-17 12:32:14 +02005089static void coroutine_fn bdrv_discard_co_entry(void *opaque)
5090{
Kevin Wolf775aa8b2013-12-05 12:09:38 +01005091 DiscardCo *rwco = opaque;
Paolo Bonzini4265d622011-10-17 12:32:14 +02005092
5093 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
5094}
5095
Peter Lieven6f14da52013-10-24 12:06:59 +02005096/* If no limit is specified in the BlockLimits, use a default
5097 * of 32768 512-byte sectors (16 MiB) per request.
5098 */
5099#define MAX_DISCARD_DEFAULT 32768
5100
Paolo Bonzini4265d622011-10-17 12:32:14 +02005101int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5102 int nb_sectors)
5103{
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005104 int max_discard;
5105
Paolo Bonzini4265d622011-10-17 12:32:14 +02005106 if (!bs->drv) {
5107 return -ENOMEDIUM;
5108 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
5109 return -EIO;
5110 } else if (bs->read_only) {
5111 return -EROFS;
Paolo Bonzinidf702c92013-01-14 16:26:58 +01005112 }
5113
Fam Zhenge4654d22013-11-13 18:29:43 +08005114 bdrv_reset_dirty(bs, sector_num, nb_sectors);
Paolo Bonzinidf702c92013-01-14 16:26:58 +01005115
Paolo Bonzini9e8f1832013-02-08 14:06:11 +01005116 /* Do nothing if disabled. */
5117 if (!(bs->open_flags & BDRV_O_UNMAP)) {
5118 return 0;
5119 }
5120
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005121 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
Paolo Bonzini4265d622011-10-17 12:32:14 +02005122 return 0;
5123 }
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005124
5125 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
5126 while (nb_sectors > 0) {
5127 int ret;
5128 int num = nb_sectors;
5129
5130 /* align request */
5131 if (bs->bl.discard_alignment &&
5132 num >= bs->bl.discard_alignment &&
5133 sector_num % bs->bl.discard_alignment) {
5134 if (num > bs->bl.discard_alignment) {
5135 num = bs->bl.discard_alignment;
5136 }
5137 num -= sector_num % bs->bl.discard_alignment;
5138 }
5139
5140 /* limit request size */
5141 if (num > max_discard) {
5142 num = max_discard;
5143 }
5144
5145 if (bs->drv->bdrv_co_discard) {
5146 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5147 } else {
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02005148 BlockAIOCB *acb;
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005149 CoroutineIOCompletion co = {
5150 .coroutine = qemu_coroutine_self(),
5151 };
5152
5153 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
5154 bdrv_co_io_em_complete, &co);
5155 if (acb == NULL) {
5156 return -EIO;
5157 } else {
5158 qemu_coroutine_yield();
5159 ret = co.ret;
5160 }
5161 }
Paolo Bonzini7ce21012013-11-22 13:39:47 +01005162 if (ret && ret != -ENOTSUP) {
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005163 return ret;
5164 }
5165
5166 sector_num += num;
5167 nb_sectors -= num;
5168 }
5169 return 0;
Paolo Bonzini4265d622011-10-17 12:32:14 +02005170}
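
/* Editor's note: a worked example added to this annotated listing, not part
 * of the original file (numbers invented). With bs->bl.discard_alignment = 8
 * and a request for sectors [5, 105), the loop above issues two discards:
 * the first is trimmed to 8 - (5 % 8) = 3 sectors so the offset reaches the
 * next alignment boundary, and the second covers the remaining 97 sectors in
 * one go, which is well under the 32768-sector default cap. */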
5171
5172int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5173{
5174 Coroutine *co;
Kevin Wolf775aa8b2013-12-05 12:09:38 +01005175 DiscardCo rwco = {
Paolo Bonzini4265d622011-10-17 12:32:14 +02005176 .bs = bs,
5177 .sector_num = sector_num,
5178 .nb_sectors = nb_sectors,
5179 .ret = NOT_DONE,
5180 };
5181
5182 if (qemu_in_coroutine()) {
5183 /* Fast-path if already in coroutine context */
5184 bdrv_discard_co_entry(&rwco);
5185 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005186 AioContext *aio_context = bdrv_get_aio_context(bs);
5187
Paolo Bonzini4265d622011-10-17 12:32:14 +02005188 co = qemu_coroutine_create(bdrv_discard_co_entry);
5189 qemu_coroutine_enter(co, &rwco);
5190 while (rwco.ret == NOT_DONE) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005191 aio_poll(aio_context, true);
Paolo Bonzini4265d622011-10-17 12:32:14 +02005192 }
5193 }
5194
5195 return rwco.ret;
5196}
5197
Kevin Wolff9f05dc2011-07-15 13:50:26 +02005198/**************************************************************/
bellard19cb3732006-08-19 11:45:59 +00005199/* removable device support */
5200
5201/**
5202 * Return TRUE if the media is present
5203 */
5204int bdrv_is_inserted(BlockDriverState *bs)
5205{
5206 BlockDriver *drv = bs->drv;
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02005207
bellard19cb3732006-08-19 11:45:59 +00005208 if (!drv)
5209 return 0;
5210 if (!drv->bdrv_is_inserted)
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02005211 return 1;
5212 return drv->bdrv_is_inserted(bs);
bellard19cb3732006-08-19 11:45:59 +00005213}
5214
5215/**
Markus Armbruster8e49ca42011-08-03 15:08:08 +02005216 * Return whether the media changed since the last call to this
5217 * function, or -ENOTSUP if we don't know. Most drivers don't know.
bellard19cb3732006-08-19 11:45:59 +00005218 */
5219int bdrv_media_changed(BlockDriverState *bs)
5220{
5221 BlockDriver *drv = bs->drv;
bellard19cb3732006-08-19 11:45:59 +00005222
Markus Armbruster8e49ca42011-08-03 15:08:08 +02005223 if (drv && drv->bdrv_media_changed) {
5224 return drv->bdrv_media_changed(bs);
5225 }
5226 return -ENOTSUP;
bellard19cb3732006-08-19 11:45:59 +00005227}
5228
5229/**
5230 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
5231 */
Luiz Capitulinof36f3942012-02-03 16:24:53 -02005232void bdrv_eject(BlockDriverState *bs, bool eject_flag)
bellard19cb3732006-08-19 11:45:59 +00005233{
5234 BlockDriver *drv = bs->drv;
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02005235 const char *device_name;
bellard19cb3732006-08-19 11:45:59 +00005236
Markus Armbruster822e1cd2011-07-20 18:23:42 +02005237 if (drv && drv->bdrv_eject) {
5238 drv->bdrv_eject(bs, eject_flag);
bellard19cb3732006-08-19 11:45:59 +00005239 }
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02005240
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02005241 device_name = bdrv_get_device_name(bs);
5242 if (device_name[0] != '\0') {
5243 qapi_event_send_device_tray_moved(device_name,
Wenchao Xiaa5ee7bd2014-06-18 08:43:44 +02005244 eject_flag, &error_abort);
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02005245 }
bellard19cb3732006-08-19 11:45:59 +00005246}
5247
bellard19cb3732006-08-19 11:45:59 +00005248/**
5249 * Lock or unlock the media (if it is locked, the user won't be able
5250 * to eject it manually).
5251 */
Markus Armbruster025e8492011-09-06 18:58:47 +02005252void bdrv_lock_medium(BlockDriverState *bs, bool locked)
bellard19cb3732006-08-19 11:45:59 +00005253{
5254 BlockDriver *drv = bs->drv;
5255
Markus Armbruster025e8492011-09-06 18:58:47 +02005256 trace_bdrv_lock_medium(bs, locked);
Stefan Hajnoczib8c6d092011-03-29 20:04:40 +01005257
Markus Armbruster025e8492011-09-06 18:58:47 +02005258 if (drv && drv->bdrv_lock_medium) {
5259 drv->bdrv_lock_medium(bs, locked);
bellard19cb3732006-08-19 11:45:59 +00005260 }
5261}
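
/*
 * Illustrative sketch (not part of the original file): how a device model
 * might combine the removable-media helpers above -- unlock the tray, eject
 * the medium, and report whether a medium was present.  The helper name is
 * invented.
 */
static bool example_force_eject(BlockDriverState *bs)
{
    bool was_inserted = bdrv_is_inserted(bs);

    bdrv_lock_medium(bs, false);    /* allow the tray to be opened */
    bdrv_eject(bs, true);           /* open the tray / remove the medium */

    return was_inserted;
}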
ths985a03b2007-12-24 16:10:43 +00005262
5263/* needed for generic scsi interface */
5264
5265int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5266{
5267 BlockDriver *drv = bs->drv;
5268
5269 if (drv && drv->bdrv_ioctl)
5270 return drv->bdrv_ioctl(bs, req, buf);
5271 return -ENOTSUP;
5272}
aliguori7d780662009-03-12 19:57:08 +00005273
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02005274BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
aliguori221f7152009-03-28 17:28:41 +00005275 unsigned long int req, void *buf,
Markus Armbruster097310b2014-10-07 13:59:15 +02005276 BlockCompletionFunc *cb, void *opaque)
aliguori7d780662009-03-12 19:57:08 +00005277{
aliguori221f7152009-03-28 17:28:41 +00005278 BlockDriver *drv = bs->drv;
aliguori7d780662009-03-12 19:57:08 +00005279
aliguori221f7152009-03-28 17:28:41 +00005280 if (drv && drv->bdrv_aio_ioctl)
5281 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5282 return NULL;
aliguori7d780662009-03-12 19:57:08 +00005283}
aliguorie268ca52009-04-22 20:20:00 +00005284
Paolo Bonzini1b7fd722011-11-29 11:35:47 +01005285void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
Markus Armbruster7b6f9302011-09-06 18:58:56 +02005286{
Paolo Bonzini1b7fd722011-11-29 11:35:47 +01005287 bs->guest_block_size = align;
Markus Armbruster7b6f9302011-09-06 18:58:56 +02005288}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005289
aliguorie268ca52009-04-22 20:20:00 +00005290void *qemu_blockalign(BlockDriverState *bs, size_t size)
5291{
Kevin Wolf339064d2013-11-28 10:23:32 +01005292 return qemu_memalign(bdrv_opt_mem_align(bs), size);
aliguorie268ca52009-04-22 20:20:00 +00005293}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005294
Max Reitz9ebd8442014-10-22 14:09:27 +02005295void *qemu_blockalign0(BlockDriverState *bs, size_t size)
5296{
5297 return memset(qemu_blockalign(bs, size), 0, size);
5298}
5299
Kevin Wolf7d2a35c2014-05-20 12:24:05 +02005300void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
5301{
5302 size_t align = bdrv_opt_mem_align(bs);
5303
5304 /* Ensure that NULL is never returned on success */
5305 assert(align > 0);
5306 if (size == 0) {
5307 size = align;
5308 }
5309
5310 return qemu_try_memalign(align, size);
5311}
5312
Max Reitz9ebd8442014-10-22 14:09:27 +02005313void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
5314{
5315 void *mem = qemu_try_blockalign(bs, size);
5316
5317 if (mem) {
5318 memset(mem, 0, size);
5319 }
5320
5321 return mem;
5322}
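
/*
 * Illustrative sketch (not part of the original file): allocating an aligned
 * I/O buffer with the graceful-failure variant.  qemu_blockalign() aborts on
 * allocation failure, so qemu_try_blockalign0() is preferable for large,
 * guest-controlled sizes.  The helper name and usage are invented.
 */
static int example_alloc_bounce_buffer(BlockDriverState *bs, size_t bytes,
                                       void **out_buf)
{
    void *buf = qemu_try_blockalign0(bs, bytes);   /* zeroed and aligned */

    if (buf == NULL) {
        return -ENOMEM;
    }

    *out_buf = buf;     /* the caller releases the buffer with qemu_vfree() */
    return 0;
}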
5323
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005324/*
5325 * Check if all memory in this vector meets the device's memory alignment.
5326 */
5327bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5328{
5329 int i;
Kevin Wolf339064d2013-11-28 10:23:32 +01005330 size_t alignment = bdrv_opt_mem_align(bs);
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005331
5332 for (i = 0; i < qiov->niov; i++) {
Kevin Wolf339064d2013-11-28 10:23:32 +01005333 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005334 return false;
5335 }
Kevin Wolf339064d2013-11-28 10:23:32 +01005336 if (qiov->iov[i].iov_len % alignment) {
Kevin Wolf1ff735b2013-12-05 13:01:46 +01005337 return false;
5338 }
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005339 }
5340
5341 return true;
5342}
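
/*
 * Illustrative sketch (not part of the original file): deciding whether a
 * single-element vector can be submitted directly or needs a bounce buffer
 * (e.g. for O_DIRECT files).  The buffer and length are placeholders.
 */
static bool example_vector_needs_bounce(BlockDriverState *bs,
                                        void *buf, size_t len)
{
    QEMUIOVector qiov;
    bool needs_bounce;

    qemu_iovec_init(&qiov, 1);
    qemu_iovec_add(&qiov, buf, len);

    /* Unaligned base addresses or lengths force a bounce buffer. */
    needs_bounce = !bdrv_qiov_is_aligned(bs, &qiov);

    qemu_iovec_destroy(&qiov);
    return needs_bounce;
}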
5343
Fam Zhengb8afb522014-04-16 09:34:30 +08005344BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
5345 Error **errp)
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005346{
5347 int64_t bitmap_size;
Fam Zhenge4654d22013-11-13 18:29:43 +08005348 BdrvDirtyBitmap *bitmap;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01005349
Paolo Bonzini50717e92013-01-21 17:09:45 +01005350 assert((granularity & (granularity - 1)) == 0);
5351
Fam Zhenge4654d22013-11-13 18:29:43 +08005352 granularity >>= BDRV_SECTOR_BITS;
5353 assert(granularity);
Markus Armbruster57322b72014-06-26 13:23:22 +02005354 bitmap_size = bdrv_nb_sectors(bs);
Fam Zhengb8afb522014-04-16 09:34:30 +08005355 if (bitmap_size < 0) {
5356 error_setg_errno(errp, -bitmap_size, "could not get length of device");
5357 errno = -bitmap_size;
5358 return NULL;
5359 }
Markus Armbruster5839e532014-08-19 10:31:08 +02005360 bitmap = g_new0(BdrvDirtyBitmap, 1);
Fam Zhenge4654d22013-11-13 18:29:43 +08005361 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
5362 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5363 return bitmap;
5364}
5365
5366void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5367{
5368 BdrvDirtyBitmap *bm, *next;
5369 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5370 if (bm == bitmap) {
5371 QLIST_REMOVE(bitmap, list);
5372 hbitmap_free(bitmap->bitmap);
5373 g_free(bitmap);
5374 return;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01005375 }
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005376 }
5377}
5378
Fam Zheng21b56832013-11-13 18:29:44 +08005379BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5380{
5381 BdrvDirtyBitmap *bm;
5382 BlockDirtyInfoList *list = NULL;
5383 BlockDirtyInfoList **plist = &list;
5384
5385 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
Markus Armbruster5839e532014-08-19 10:31:08 +02005386 BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1);
5387 BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1);
Fam Zheng21b56832013-11-13 18:29:44 +08005388 info->count = bdrv_get_dirty_count(bs, bm);
5389 info->granularity =
5390 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5391 entry->value = info;
5392 *plist = entry;
5393 plist = &entry->next;
5394 }
5395
5396 return list;
5397}
5398
Fam Zhenge4654d22013-11-13 18:29:43 +08005399int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005400{
Fam Zhenge4654d22013-11-13 18:29:43 +08005401 if (bitmap) {
5402 return hbitmap_get(bitmap->bitmap, sector);
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005403 } else {
5404 return 0;
5405 }
5406}
5407
Fam Zhenge4654d22013-11-13 18:29:43 +08005408void bdrv_dirty_iter_init(BlockDriverState *bs,
5409 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
Paolo Bonzini1755da12012-10-18 16:49:18 +02005410{
Fam Zhenge4654d22013-11-13 18:29:43 +08005411 hbitmap_iter_init(hbi, bitmap->bitmap, 0);
Paolo Bonzini1755da12012-10-18 16:49:18 +02005412}
5413
5414void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5415 int nr_sectors)
5416{
Fam Zhenge4654d22013-11-13 18:29:43 +08005417 BdrvDirtyBitmap *bitmap;
5418 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5419 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
Paolo Bonzini8f0720e2013-01-21 17:09:41 +01005420 }
Liran Schouraaa0eb72010-01-26 10:31:48 +02005421}
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005422
Fam Zhenge4654d22013-11-13 18:29:43 +08005423void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
5424{
5425 BdrvDirtyBitmap *bitmap;
5426 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5427 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5428 }
5429}
5430
5431int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5432{
5433 return hbitmap_count(bitmap->bitmap);
5434}
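
/*
 * Illustrative sketch (not part of the original file): creating a dirty
 * bitmap and walking the sectors it has recorded as dirty.  The 64 KB
 * granularity and the empty processing loop are arbitrary choices.
 */
static int example_walk_dirty_sectors(BlockDriverState *bs, Error **errp)
{
    BdrvDirtyBitmap *bitmap;
    HBitmapIter hbi;
    int64_t sector;

    bitmap = bdrv_create_dirty_bitmap(bs, 65536, errp);  /* bytes, pow. of 2 */
    if (!bitmap) {
        return -1;
    }

    /* ... guest writes happen here; bdrv_set_dirty() records them ... */

    bdrv_dirty_iter_init(bs, bitmap, &hbi);
    while ((sector = hbitmap_iter_next(&hbi)) >= 0) {
        /* 'sector' is the first sector of a dirty granularity chunk. */
    }

    bdrv_release_dirty_bitmap(bs, bitmap);
    return 0;
}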
5435
Fam Zheng9fcb0252013-08-23 09:14:46 +08005436/* Get a reference to bs */
5437void bdrv_ref(BlockDriverState *bs)
5438{
5439 bs->refcnt++;
5440}
5441
5442/* Release a previously grabbed reference to bs.
5443 * If the reference count drops to zero after the release, the
5444 * BlockDriverState is deleted. */
5445void bdrv_unref(BlockDriverState *bs)
5446{
Jeff Cody9a4d5ca2014-07-23 17:22:57 -04005447 if (!bs) {
5448 return;
5449 }
Fam Zheng9fcb0252013-08-23 09:14:46 +08005450 assert(bs->refcnt > 0);
5451 if (--bs->refcnt == 0) {
5452 bdrv_delete(bs);
5453 }
5454}
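
/*
 * Illustrative sketch (not part of the original file): pinning a
 * BlockDriverState across an operation that might otherwise drop the last
 * reference and delete it.
 */
static void example_use_bs_safely(BlockDriverState *bs)
{
    bdrv_ref(bs);                   /* keep bs alive */

    /* ... code that may release other references to bs ... */

    bdrv_unref(bs);                 /* may delete bs if this was the last ref */
}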
5455
Fam Zhengfbe40ff2014-05-23 21:29:42 +08005456struct BdrvOpBlocker {
5457 Error *reason;
5458 QLIST_ENTRY(BdrvOpBlocker) list;
5459};
5460
5461bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5462{
5463 BdrvOpBlocker *blocker;
5464 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5465 if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5466 blocker = QLIST_FIRST(&bs->op_blockers[op]);
5467 if (errp) {
5468 error_setg(errp, "Device '%s' is busy: %s",
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02005469 bdrv_get_device_name(bs),
5470 error_get_pretty(blocker->reason));
Fam Zhengfbe40ff2014-05-23 21:29:42 +08005471 }
5472 return true;
5473 }
5474 return false;
5475}
5476
5477void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
5478{
5479 BdrvOpBlocker *blocker;
5480 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5481
Markus Armbruster5839e532014-08-19 10:31:08 +02005482 blocker = g_new0(BdrvOpBlocker, 1);
Fam Zhengfbe40ff2014-05-23 21:29:42 +08005483 blocker->reason = reason;
5484 QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
5485}
5486
5487void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5488{
5489 BdrvOpBlocker *blocker, *next;
5490 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5491 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5492 if (blocker->reason == reason) {
5493 QLIST_REMOVE(blocker, list);
5494 g_free(blocker);
5495 }
5496 }
5497}
5498
5499void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5500{
5501 int i;
5502 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5503 bdrv_op_block(bs, i, reason);
5504 }
5505}
5506
5507void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5508{
5509 int i;
5510 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5511 bdrv_op_unblock(bs, i, reason);
5512 }
5513}
5514
5515bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5516{
5517 int i;
5518
5519 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5520 if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5521 return false;
5522 }
5523 }
5524 return true;
5525}
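
/*
 * Illustrative sketch (not part of the original file): how a block job might
 * use the op blocker API above.  The Error object doubles as the blocker's
 * identity, so the same pointer must be passed to block and unblock.
 */
static void example_block_resize_during_job(BlockDriverState *bs)
{
    Error *reason = NULL;
    Error *local_err = NULL;

    error_setg(&reason, "block device is in use by an example job");
    bdrv_op_block(bs, BLOCK_OP_TYPE_RESIZE, reason);

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, &local_err)) {
        /* local_err now names the device and the blocking reason */
        error_free(local_err);
    }

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_RESIZE, reason);
    error_free(reason);
}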
5526
Luiz Capitulino28a72822011-09-26 17:43:50 -03005527void bdrv_iostatus_enable(BlockDriverState *bs)
5528{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03005529 bs->iostatus_enabled = true;
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03005530 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
Luiz Capitulino28a72822011-09-26 17:43:50 -03005531}
5532
5533/* The I/O status is only enabled if the drive explicitly
5534 * enables it _and_ the VM is configured to stop on errors */
5535bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5536{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03005537 return (bs->iostatus_enabled &&
Paolo Bonzini92aa5c62012-09-28 17:22:55 +02005538 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5539 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
5540 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
Luiz Capitulino28a72822011-09-26 17:43:50 -03005541}
5542
5543void bdrv_iostatus_disable(BlockDriverState *bs)
5544{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03005545 bs->iostatus_enabled = false;
Luiz Capitulino28a72822011-09-26 17:43:50 -03005546}
5547
5548void bdrv_iostatus_reset(BlockDriverState *bs)
5549{
5550 if (bdrv_iostatus_is_enabled(bs)) {
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03005551 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
Paolo Bonzini3bd293c2012-10-18 16:49:27 +02005552 if (bs->job) {
5553 block_job_iostatus_reset(bs->job);
5554 }
Luiz Capitulino28a72822011-09-26 17:43:50 -03005555 }
5556}
5557
Luiz Capitulino28a72822011-09-26 17:43:50 -03005558void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5559{
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02005560 assert(bdrv_iostatus_is_enabled(bs));
5561 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03005562 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5563 BLOCK_DEVICE_IO_STATUS_FAILED;
Luiz Capitulino28a72822011-09-26 17:43:50 -03005564 }
5565}
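
/*
 * Illustrative sketch (not part of the original file): reporting a write
 * error through the I/O status machinery.  bdrv_iostatus_set_err() asserts
 * that the status is enabled, so callers check first.
 */
static void example_report_write_error(BlockDriverState *bs, int error)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bdrv_iostatus_set_err(bs, error);   /* NOSPACE for ENOSPC, else FAILED */
    }
}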
5566
Luiz Capitulinod92ada22012-11-30 10:52:09 -02005567void bdrv_img_create(const char *filename, const char *fmt,
5568 const char *base_filename, const char *base_fmt,
Miroslav Rezaninaf382d432013-02-13 09:09:40 +01005569 char *options, uint64_t img_size, int flags,
5570 Error **errp, bool quiet)
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005571{
Chunyan Liu83d05212014-06-05 17:20:51 +08005572 QemuOptsList *create_opts = NULL;
5573 QemuOpts *opts = NULL;
5574 const char *backing_fmt, *backing_file;
5575 int64_t size;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005576 BlockDriver *drv, *proto_drv;
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00005577 BlockDriver *backing_drv = NULL;
Max Reitzcc84d902013-09-06 17:14:26 +02005578 Error *local_err = NULL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005579 int ret = 0;
5580
5581 /* Find driver and parse its options */
5582 drv = bdrv_find_format(fmt);
5583 if (!drv) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005584 error_setg(errp, "Unknown file format '%s'", fmt);
Luiz Capitulinod92ada22012-11-30 10:52:09 -02005585 return;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005586 }
5587
Kevin Wolf98289622013-07-10 15:47:39 +02005588 proto_drv = bdrv_find_protocol(filename, true);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005589 if (!proto_drv) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005590 error_setg(errp, "Unknown protocol '%s'", filename);
Luiz Capitulinod92ada22012-11-30 10:52:09 -02005591 return;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005592 }
5593
Max Reitzc6149722014-12-02 18:32:45 +01005594 if (!drv->create_opts) {
5595 error_setg(errp, "Format driver '%s' does not support image creation",
5596 drv->format_name);
5597 return;
5598 }
5599
5600 if (!proto_drv->create_opts) {
5601 error_setg(errp, "Protocol driver '%s' does not support image creation",
5602 proto_drv->format_name);
5603 return;
5604 }
5605
Chunyan Liuc282e1f2014-06-05 17:21:11 +08005606 create_opts = qemu_opts_append(create_opts, drv->create_opts);
5607 create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005608
5609 /* Create parameter list with default values */
Chunyan Liu83d05212014-06-05 17:20:51 +08005610 opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
5611 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005612
5613 /* Parse -o options */
5614 if (options) {
Chunyan Liu83d05212014-06-05 17:20:51 +08005615 if (qemu_opts_do_parse(opts, options, NULL) != 0) {
5616 error_setg(errp, "Invalid options for file format '%s'", fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005617 goto out;
5618 }
5619 }
5620
5621 if (base_filename) {
Chunyan Liu83d05212014-06-05 17:20:51 +08005622 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005623 error_setg(errp, "Backing file not supported for file format '%s'",
5624 fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005625 goto out;
5626 }
5627 }
5628
5629 if (base_fmt) {
Chunyan Liu83d05212014-06-05 17:20:51 +08005630 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005631 error_setg(errp, "Backing file format not supported for file "
5632 "format '%s'", fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005633 goto out;
5634 }
5635 }
5636
Chunyan Liu83d05212014-06-05 17:20:51 +08005637 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5638 if (backing_file) {
5639 if (!strcmp(filename, backing_file)) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005640            error_setg(errp, "Trying to create an image with the "
5641 "same filename as the backing file");
Jes Sorensen792da932010-12-16 13:52:17 +01005642 goto out;
5643 }
5644 }
5645
Chunyan Liu83d05212014-06-05 17:20:51 +08005646 backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5647 if (backing_fmt) {
5648 backing_drv = bdrv_find_format(backing_fmt);
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00005649 if (!backing_drv) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005650 error_setg(errp, "Unknown backing file format '%s'",
Chunyan Liu83d05212014-06-05 17:20:51 +08005651 backing_fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005652 goto out;
5653 }
5654 }
5655
5656    /* The size for the image must always be specified, with one exception:
5657     * if we are using a backing file, we can obtain the size from there. */
Chunyan Liu83d05212014-06-05 17:20:51 +08005658 size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5659 if (size == -1) {
5660 if (backing_file) {
Max Reitz66f6b812013-12-03 14:57:52 +01005661 BlockDriverState *bs;
Markus Armbruster52bf1e72014-06-26 13:23:25 +02005662 int64_t size;
Paolo Bonzini63090da2012-04-12 14:01:03 +02005663 int back_flags;
5664
5665 /* backing files always opened read-only */
5666 back_flags =
5667 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005668
Max Reitzf67503e2014-02-18 18:33:05 +01005669 bs = NULL;
Chunyan Liu83d05212014-06-05 17:20:51 +08005670 ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
Max Reitzcc84d902013-09-06 17:14:26 +02005671 backing_drv, &local_err);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005672 if (ret < 0) {
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005673 goto out;
5674 }
Markus Armbruster52bf1e72014-06-26 13:23:25 +02005675 size = bdrv_getlength(bs);
5676 if (size < 0) {
5677 error_setg_errno(errp, -size, "Could not get size of '%s'",
5678 backing_file);
5679 bdrv_unref(bs);
5680 goto out;
5681 }
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005682
Chunyan Liu83d05212014-06-05 17:20:51 +08005683 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);
Max Reitz66f6b812013-12-03 14:57:52 +01005684
5685 bdrv_unref(bs);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005686 } else {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005687 error_setg(errp, "Image creation needs a size parameter");
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005688 goto out;
5689 }
5690 }
5691
Miroslav Rezaninaf382d432013-02-13 09:09:40 +01005692 if (!quiet) {
Fam Zheng43c5d8f2014-12-09 15:38:04 +08005693 printf("Formatting '%s', fmt=%s", filename, fmt);
5694 qemu_opts_print(opts, " ");
Miroslav Rezaninaf382d432013-02-13 09:09:40 +01005695 puts("");
5696 }
Chunyan Liu83d05212014-06-05 17:20:51 +08005697
Chunyan Liuc282e1f2014-06-05 17:21:11 +08005698 ret = bdrv_create(drv, filename, opts, &local_err);
Chunyan Liu83d05212014-06-05 17:20:51 +08005699
Max Reitzcc84d902013-09-06 17:14:26 +02005700 if (ret == -EFBIG) {
5701 /* This is generally a better message than whatever the driver would
5702 * deliver (especially because of the cluster_size_hint), since that
5703 * is most probably not much different from "image too large". */
5704 const char *cluster_size_hint = "";
Chunyan Liu83d05212014-06-05 17:20:51 +08005705 if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
Max Reitzcc84d902013-09-06 17:14:26 +02005706 cluster_size_hint = " (try using a larger cluster size)";
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005707 }
Max Reitzcc84d902013-09-06 17:14:26 +02005708 error_setg(errp, "The image size is too large for file format '%s'"
5709 "%s", fmt, cluster_size_hint);
5710 error_free(local_err);
5711 local_err = NULL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005712 }
5713
5714out:
Chunyan Liu83d05212014-06-05 17:20:51 +08005715 qemu_opts_del(opts);
5716 qemu_opts_free(create_opts);
Markus Armbruster84d18f02014-01-30 15:07:28 +01005717 if (local_err) {
Max Reitzcc84d902013-09-06 17:14:26 +02005718 error_propagate(errp, local_err);
5719 }
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005720}
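
/*
 * Illustrative sketch (not part of the original file): creating a 1 GiB
 * qcow2 image with bdrv_img_create().  The file name, option string and size
 * are placeholders.
 */
static int example_create_qcow2(Error **errp)
{
    Error *local_err = NULL;
    char options[] = "cluster_size=65536";

    bdrv_img_create("/tmp/example.qcow2", "qcow2",
                    NULL, NULL,                 /* no backing file */
                    options,
                    1024 * 1024 * 1024,         /* 1 GiB */
                    0,                          /* flags */
                    &local_err, true /* quiet */);
    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}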
Stefan Hajnoczi85d126f2013-03-07 13:41:48 +01005721
5722AioContext *bdrv_get_aio_context(BlockDriverState *bs)
5723{
Stefan Hajnoczidcd04222014-05-08 16:34:37 +02005724 return bs->aio_context;
5725}
5726
5727void bdrv_detach_aio_context(BlockDriverState *bs)
5728{
Max Reitz33384422014-06-20 21:57:33 +02005729 BdrvAioNotifier *baf;
5730
Stefan Hajnoczidcd04222014-05-08 16:34:37 +02005731 if (!bs->drv) {
5732 return;
5733 }
5734
Max Reitz33384422014-06-20 21:57:33 +02005735 QLIST_FOREACH(baf, &bs->aio_notifiers, list) {
5736 baf->detach_aio_context(baf->opaque);
5737 }
5738
Stefan Hajnoczi13af91e2014-05-14 16:22:45 +02005739 if (bs->io_limits_enabled) {
5740 throttle_detach_aio_context(&bs->throttle_state);
5741 }
Stefan Hajnoczidcd04222014-05-08 16:34:37 +02005742 if (bs->drv->bdrv_detach_aio_context) {
5743 bs->drv->bdrv_detach_aio_context(bs);
5744 }
5745 if (bs->file) {
5746 bdrv_detach_aio_context(bs->file);
5747 }
5748 if (bs->backing_hd) {
5749 bdrv_detach_aio_context(bs->backing_hd);
5750 }
5751
5752 bs->aio_context = NULL;
5753}
5754
5755void bdrv_attach_aio_context(BlockDriverState *bs,
5756 AioContext *new_context)
5757{
Max Reitz33384422014-06-20 21:57:33 +02005758 BdrvAioNotifier *ban;
5759
Stefan Hajnoczidcd04222014-05-08 16:34:37 +02005760 if (!bs->drv) {
5761 return;
5762 }
5763
5764 bs->aio_context = new_context;
5765
5766 if (bs->backing_hd) {
5767 bdrv_attach_aio_context(bs->backing_hd, new_context);
5768 }
5769 if (bs->file) {
5770 bdrv_attach_aio_context(bs->file, new_context);
5771 }
5772 if (bs->drv->bdrv_attach_aio_context) {
5773 bs->drv->bdrv_attach_aio_context(bs, new_context);
5774 }
Stefan Hajnoczi13af91e2014-05-14 16:22:45 +02005775 if (bs->io_limits_enabled) {
5776 throttle_attach_aio_context(&bs->throttle_state, new_context);
5777 }
Max Reitz33384422014-06-20 21:57:33 +02005778
5779 QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
5780 ban->attached_aio_context(new_context, ban->opaque);
5781 }
Stefan Hajnoczidcd04222014-05-08 16:34:37 +02005782}
5783
5784void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
5785{
5786 bdrv_drain_all(); /* ensure there are no in-flight requests */
5787
5788 bdrv_detach_aio_context(bs);
5789
5790 /* This function executes in the old AioContext so acquire the new one in
5791 * case it runs in a different thread.
5792 */
5793 aio_context_acquire(new_context);
5794 bdrv_attach_aio_context(bs, new_context);
5795 aio_context_release(new_context);
Stefan Hajnoczi85d126f2013-03-07 13:41:48 +01005796}
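
/*
 * Illustrative sketch (not part of the original file): handing a
 * BlockDriverState over to another AioContext, e.g. one owned by an IOThread
 * for virtio dataplane.  How 'new_ctx' is obtained is outside this sketch.
 */
static void example_move_to_iothread(BlockDriverState *bs, AioContext *new_ctx)
{
    /* Call from the main loop; bdrv_set_aio_context() drains all in-flight
     * requests itself before detaching and re-attaching. */
    bdrv_set_aio_context(bs, new_ctx);

    /* From now on, completions for bs run in new_ctx, and other threads must
     * wrap accesses in aio_context_acquire()/aio_context_release(). */
}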
Stefan Hajnoczid616b222013-06-24 17:13:10 +02005797
Max Reitz33384422014-06-20 21:57:33 +02005798void bdrv_add_aio_context_notifier(BlockDriverState *bs,
5799 void (*attached_aio_context)(AioContext *new_context, void *opaque),
5800 void (*detach_aio_context)(void *opaque), void *opaque)
5801{
5802 BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1);
5803 *ban = (BdrvAioNotifier){
5804 .attached_aio_context = attached_aio_context,
5805 .detach_aio_context = detach_aio_context,
5806 .opaque = opaque
5807 };
5808
5809 QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list);
5810}
5811
5812void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
5813 void (*attached_aio_context)(AioContext *,
5814 void *),
5815 void (*detach_aio_context)(void *),
5816 void *opaque)
5817{
5818 BdrvAioNotifier *ban, *ban_next;
5819
5820 QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
5821 if (ban->attached_aio_context == attached_aio_context &&
5822 ban->detach_aio_context == detach_aio_context &&
5823 ban->opaque == opaque)
5824 {
5825 QLIST_REMOVE(ban, list);
5826 g_free(ban);
5827
5828 return;
5829 }
5830 }
5831
5832 abort();
5833}
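
/*
 * Illustrative sketch (not part of the original file): a device that keeps
 * per-AioContext state (timers, bottom halves) in sync using the notifier
 * API above.  The callback bodies are placeholders.
 */
static void example_attached(AioContext *new_context, void *opaque)
{
    /* re-create timers/bottom halves in new_context */
}

static void example_detached(void *opaque)
{
    /* tear down anything tied to the old AioContext */
}

static void example_register_notifiers(BlockDriverState *bs, void *dev_state)
{
    bdrv_add_aio_context_notifier(bs, example_attached, example_detached,
                                  dev_state);

    /* Removal requires the very same (callbacks, opaque) triple: */
    bdrv_remove_aio_context_notifier(bs, example_attached, example_detached,
                                     dev_state);
}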
5834
Stefan Hajnoczid616b222013-06-24 17:13:10 +02005835void bdrv_add_before_write_notifier(BlockDriverState *bs,
5836 NotifierWithReturn *notifier)
5837{
5838 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
5839}
Max Reitz6f176b42013-09-03 10:09:50 +02005840
Max Reitz77485432014-10-27 11:12:50 +01005841int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts,
5842 BlockDriverAmendStatusCB *status_cb)
Max Reitz6f176b42013-09-03 10:09:50 +02005843{
Chunyan Liuc282e1f2014-06-05 17:21:11 +08005844 if (!bs->drv->bdrv_amend_options) {
Max Reitz6f176b42013-09-03 10:09:50 +02005845 return -ENOTSUP;
5846 }
Max Reitz77485432014-10-27 11:12:50 +01005847 return bs->drv->bdrv_amend_options(bs, opts, status_cb);
Max Reitz6f176b42013-09-03 10:09:50 +02005848}
Benoît Canetf6186f42013-10-02 14:33:48 +02005849
Benoît Canetb5042a32014-03-03 19:11:34 +01005850/* This function is called by the bdrv_recurse_is_first_non_filter method of
5851 * block filters and by bdrv_is_first_non_filter.
5852 * It tests whether the given bs is the candidate, or recurses further into
5853 * the node graph.
Benoît Canet212a5a82014-01-23 21:31:36 +01005854 */
Benoît Canet212a5a82014-01-23 21:31:36 +01005855bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5856 BlockDriverState *candidate)
Benoît Canetf6186f42013-10-02 14:33:48 +02005857{
Benoît Canetb5042a32014-03-03 19:11:34 +01005858    /* return false if basic checks fail */
5859 if (!bs || !bs->drv) {
5860 return false;
5861 }
5862
5863 /* the code reached a non block filter driver -> check if the bs is
5864 * the same as the candidate. It's the recursion termination condition.
5865 */
5866 if (!bs->drv->is_filter) {
5867 return bs == candidate;
5868 }
5869 /* Down this path the driver is a block filter driver */
5870
5871 /* If the block filter recursion method is defined use it to recurse down
5872 * the node graph.
5873 */
5874 if (bs->drv->bdrv_recurse_is_first_non_filter) {
Benoît Canet212a5a82014-01-23 21:31:36 +01005875 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5876 }
5877
Benoît Canetb5042a32014-03-03 19:11:34 +01005878    /* the driver is a block filter but does not allow recursion -> return
5879     * false */
5880 return false;
Benoît Canet212a5a82014-01-23 21:31:36 +01005881}
5882
5883/* This function checks whether the candidate is the first non-filter bs down
5884 * its bs chain. Since we don't have pointers to parents, it explores all bs
5885 * chains from the top. Some filters can choose not to pass down the recursion.
5886 */
5887bool bdrv_is_first_non_filter(BlockDriverState *candidate)
5888{
5889 BlockDriverState *bs;
5890
5891 /* walk down the bs forest recursively */
5892 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5893 bool perm;
5894
Benoît Canetb5042a32014-03-03 19:11:34 +01005895 /* try to recurse in this top level bs */
Kevin Wolfe6dc8a12014-02-04 11:45:31 +01005896 perm = bdrv_recurse_is_first_non_filter(bs, candidate);
Benoît Canet212a5a82014-01-23 21:31:36 +01005897
5898 /* candidate is the first non filter */
5899 if (perm) {
5900 return true;
5901 }
5902 }
5903
5904 return false;
Benoît Canetf6186f42013-10-02 14:33:48 +02005905}
Benoît Canet09158f02014-06-27 18:25:25 +02005906
5907BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
5908{
5909 BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
Stefan Hajnoczi5a7e7a02014-10-21 12:03:58 +01005910 AioContext *aio_context;
5911
Benoît Canet09158f02014-06-27 18:25:25 +02005912 if (!to_replace_bs) {
5913 error_setg(errp, "Node name '%s' not found", node_name);
5914 return NULL;
5915 }
5916
Stefan Hajnoczi5a7e7a02014-10-21 12:03:58 +01005917 aio_context = bdrv_get_aio_context(to_replace_bs);
5918 aio_context_acquire(aio_context);
5919
Benoît Canet09158f02014-06-27 18:25:25 +02005920 if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
Stefan Hajnoczi5a7e7a02014-10-21 12:03:58 +01005921 to_replace_bs = NULL;
5922 goto out;
Benoît Canet09158f02014-06-27 18:25:25 +02005923 }
5924
5925    /* We don't want an arbitrary node of the BDS chain to be replaced; only
5926     * the top-most non-filter may be, in order to prevent data corruption.
5927     * Another benefit is that this test excludes backing files, which are
5928     * blocked by the backing blockers.
5929 */
5930 if (!bdrv_is_first_non_filter(to_replace_bs)) {
5931 error_setg(errp, "Only top most non filter can be replaced");
Stefan Hajnoczi5a7e7a02014-10-21 12:03:58 +01005932 to_replace_bs = NULL;
5933 goto out;
Benoît Canet09158f02014-06-27 18:25:25 +02005934 }
5935
Stefan Hajnoczi5a7e7a02014-10-21 12:03:58 +01005936out:
5937 aio_context_release(aio_context);
Benoît Canet09158f02014-06-27 18:25:25 +02005938 return to_replace_bs;
5939}
Ming Lei448ad912014-07-04 18:04:33 +08005940
5941void bdrv_io_plug(BlockDriverState *bs)
5942{
5943 BlockDriver *drv = bs->drv;
5944 if (drv && drv->bdrv_io_plug) {
5945 drv->bdrv_io_plug(bs);
5946 } else if (bs->file) {
5947 bdrv_io_plug(bs->file);
5948 }
5949}
5950
5951void bdrv_io_unplug(BlockDriverState *bs)
5952{
5953 BlockDriver *drv = bs->drv;
5954 if (drv && drv->bdrv_io_unplug) {
5955 drv->bdrv_io_unplug(bs);
5956 } else if (bs->file) {
5957 bdrv_io_unplug(bs->file);
5958 }
5959}
5960
5961void bdrv_flush_io_queue(BlockDriverState *bs)
5962{
5963 BlockDriver *drv = bs->drv;
5964 if (drv && drv->bdrv_flush_io_queue) {
5965 drv->bdrv_flush_io_queue(bs);
5966 } else if (bs->file) {
5967 bdrv_flush_io_queue(bs->file);
5968 }
5969}
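
/*
 * Illustrative sketch (not part of the original file): batching several
 * requests so the underlying driver (e.g. linux-aio) can submit them in one
 * go.  The per-request submission call is hypothetical and left as a comment.
 */
static void example_submit_batch(BlockDriverState *bs, int nr_requests)
{
    int i;

    bdrv_io_plug(bs);                   /* start queueing in the driver */
    for (i = 0; i < nr_requests; i++) {
        /* submit_one_request(bs, i);      hypothetical per-request call */
    }
    bdrv_io_unplug(bs);                 /* flush the queued requests */
}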
Max Reitz91af7012014-07-18 20:24:56 +02005970
5971static bool append_open_options(QDict *d, BlockDriverState *bs)
5972{
5973 const QDictEntry *entry;
5974 bool found_any = false;
5975
5976 for (entry = qdict_first(bs->options); entry;
5977 entry = qdict_next(bs->options, entry))
5978 {
5979 /* Only take options for this level and exclude all non-driver-specific
5980 * options */
5981 if (!strchr(qdict_entry_key(entry), '.') &&
5982 strcmp(qdict_entry_key(entry), "node-name"))
5983 {
5984 qobject_incref(qdict_entry_value(entry));
5985 qdict_put_obj(d, qdict_entry_key(entry), qdict_entry_value(entry));
5986 found_any = true;
5987 }
5988 }
5989
5990 return found_any;
5991}
5992
5993/* Updates the following BDS fields:
5994 * - exact_filename: A filename which may be used for opening a block device
5995 * which (mostly) equals the given BDS (even without any
5996 * other options; so reading and writing must return the same
5997 * results, but caching etc. may be different)
5998 * - full_open_options: Options which, when given when opening a block device
5999 * (without a filename), result in a BDS (mostly)
6000 * equalling the given one
6001 * - filename: If exact_filename is set, it is copied here. Otherwise,
6002 * full_open_options is converted to a JSON object, prefixed with
6003 * "json:" (for use through the JSON pseudo protocol) and put here.
6004 */
6005void bdrv_refresh_filename(BlockDriverState *bs)
6006{
6007 BlockDriver *drv = bs->drv;
6008 QDict *opts;
6009
6010 if (!drv) {
6011 return;
6012 }
6013
6014 /* This BDS's file name will most probably depend on its file's name, so
6015 * refresh that first */
6016 if (bs->file) {
6017 bdrv_refresh_filename(bs->file);
6018 }
6019
6020 if (drv->bdrv_refresh_filename) {
6021 /* Obsolete information is of no use here, so drop the old file name
6022 * information before refreshing it */
6023 bs->exact_filename[0] = '\0';
6024 if (bs->full_open_options) {
6025 QDECREF(bs->full_open_options);
6026 bs->full_open_options = NULL;
6027 }
6028
6029 drv->bdrv_refresh_filename(bs);
6030 } else if (bs->file) {
6031 /* Try to reconstruct valid information from the underlying file */
6032 bool has_open_options;
6033
6034 bs->exact_filename[0] = '\0';
6035 if (bs->full_open_options) {
6036 QDECREF(bs->full_open_options);
6037 bs->full_open_options = NULL;
6038 }
6039
6040 opts = qdict_new();
6041 has_open_options = append_open_options(opts, bs);
6042
6043 /* If no specific options have been given for this BDS, the filename of
6044 * the underlying file should suffice for this one as well */
6045 if (bs->file->exact_filename[0] && !has_open_options) {
6046 strcpy(bs->exact_filename, bs->file->exact_filename);
6047 }
6048 /* Reconstructing the full options QDict is simple for most format block
6049 * drivers, as long as the full options are known for the underlying
6050 * file BDS. The full options QDict of that file BDS should somehow
6051 * contain a representation of the filename, therefore the following
6052 * suffices without querying the (exact_)filename of this BDS. */
6053 if (bs->file->full_open_options) {
6054 qdict_put_obj(opts, "driver",
6055 QOBJECT(qstring_from_str(drv->format_name)));
6056 QINCREF(bs->file->full_open_options);
6057 qdict_put_obj(opts, "file", QOBJECT(bs->file->full_open_options));
6058
6059 bs->full_open_options = opts;
6060 } else {
6061 QDECREF(opts);
6062 }
6063 } else if (!bs->full_open_options && qdict_size(bs->options)) {
6064 /* There is no underlying file BDS (at least referenced by BDS.file),
6065 * so the full options QDict should be equal to the options given
6066 * specifically for this block device when it was opened (plus the
6067 * driver specification).
6068 * Because those options don't change, there is no need to update
6069 * full_open_options when it's already set. */
6070
6071 opts = qdict_new();
6072 append_open_options(opts, bs);
6073 qdict_put_obj(opts, "driver",
6074 QOBJECT(qstring_from_str(drv->format_name)));
6075
6076 if (bs->exact_filename[0]) {
6077 /* This may not work for all block protocol drivers (some may
6078 * require this filename to be parsed), but we have to find some
6079 * default solution here, so just include it. If some block driver
6080 * does not support pure options without any filename at all or
6081 * needs some special format of the options QDict, it needs to
6082 * implement the driver-specific bdrv_refresh_filename() function.
6083 */
6084 qdict_put_obj(opts, "filename",
6085 QOBJECT(qstring_from_str(bs->exact_filename)));
6086 }
6087
6088 bs->full_open_options = opts;
6089 }
6090
6091 if (bs->exact_filename[0]) {
6092 pstrcpy(bs->filename, sizeof(bs->filename), bs->exact_filename);
6093 } else if (bs->full_open_options) {
6094 QString *json = qobject_to_json(QOBJECT(bs->full_open_options));
6095 snprintf(bs->filename, sizeof(bs->filename), "json:%s",
6096 qstring_get_str(json));
6097 QDECREF(json);
6098 }
6099}
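
/*
 * Illustrative sketch (not part of the original file): obtaining a filename
 * (possibly a "json:{...}" pseudo-filename) that can be used to open an
 * equivalent block device.
 */
static const char *example_current_filename(BlockDriverState *bs)
{
    bdrv_refresh_filename(bs);

    /* bs->filename now holds exact_filename if it is usable, otherwise the
     * JSON representation of full_open_options. */
    return bs->filename;
}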
Benoît Canet5366d0c2014-09-05 15:46:18 +02006100
6101/* The purpose of this accessor function is to allow the device models to
6102 * access the BlockAcctStats structure embedded inside a BlockDriverState
6103 * without being aware of the BlockDriverState structure layout.
6104 * It will go away when the BlockAcctStats structure is moved inside the
6105 * device models.
6106 */
6107BlockAcctStats *bdrv_get_stats(BlockDriverState *bs)
6108{
6109 return &bs->stats;
6110}