blob: 86f2faa2665f9444492dfc0da9d9d44004526f26 [file] [log] [blame]
bellardfc01f7e2003-06-30 10:03:06 +00001/*
2 * QEMU System Emulator block driver
ths5fafdf22007-09-16 21:08:06 +00003 *
bellardfc01f7e2003-06-30 10:03:06 +00004 * Copyright (c) 2003 Fabrice Bellard
ths5fafdf22007-09-16 21:08:06 +00005 *
bellardfc01f7e2003-06-30 10:03:06 +00006 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
blueswir13990d092008-12-05 17:53:21 +000024#include "config-host.h"
pbrookfaf07962007-11-11 02:51:17 +000025#include "qemu-common.h"
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +010026#include "trace.h"
Paolo Bonzini737e1502012-12-17 18:19:44 +010027#include "block/block_int.h"
28#include "block/blockjob.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010029#include "qemu/module.h"
Paolo Bonzini7b1b5d12012-12-17 18:19:43 +010030#include "qapi/qmp/qjson.h"
Markus Armbrusterbfb197e2014-10-07 13:59:11 +020031#include "sysemu/block-backend.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010032#include "sysemu/sysemu.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010033#include "qemu/notify.h"
Paolo Bonzini737e1502012-12-17 18:19:44 +010034#include "block/coroutine.h"
Benoît Canetc13163f2014-01-23 21:31:34 +010035#include "block/qapi.h"
Luiz Capitulinob2023812011-09-21 17:16:47 -030036#include "qmp-commands.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010037#include "qemu/timer.h"
Wenchao Xiaa5ee7bd2014-06-18 08:43:44 +020038#include "qapi-event.h"
bellardfc01f7e2003-06-30 10:03:06 +000039
Juan Quintela71e72a12009-07-27 16:12:56 +020040#ifdef CONFIG_BSD
bellard7674e7b2005-04-26 21:59:26 +000041#include <sys/types.h>
42#include <sys/stat.h>
43#include <sys/ioctl.h>
Blue Swirl72cf2d42009-09-12 07:36:22 +000044#include <sys/queue.h>
blueswir1c5e97232009-03-07 20:06:23 +000045#ifndef __DragonFly__
bellard7674e7b2005-04-26 21:59:26 +000046#include <sys/disk.h>
47#endif
blueswir1c5e97232009-03-07 20:06:23 +000048#endif
bellard7674e7b2005-04-26 21:59:26 +000049
aliguori49dc7682009-03-08 16:26:59 +000050#ifdef _WIN32
51#include <windows.h>
52#endif
53
/* In-memory dirty bitmap attached to a BlockDriverState; instances are
 * linked into bs->dirty_bitmaps (initialized in bdrv_new()). */
struct BdrvDirtyBitmap {
    HBitmap *bitmap;                    /* backing bit store */
    QLIST_ENTRY(BdrvDirtyBitmap) list;  /* entry in bs->dirty_bitmaps */
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
60
/* Forward declarations for the emulated I/O paths (installed by
 * bdrv_register() when a driver lacks the corresponding callbacks). */
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
bellardec530c82006-04-25 22:36:06 +000090
/* All BlockDriverStates created as device roots (see bdrv_new_root()). */
static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

/* BlockDriverStates tracked as nodes of the block graph — NOTE(review):
 * membership criteria are established elsewhere in this file; confirm. */
static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

/* Registry of all known block drivers, populated by bdrv_register(). */
static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
102
#ifdef _WIN32
/* Return non-zero if @filename begins with a DOS drive letter ("X:"). */
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

/* Return 1 if @filename names a whole drive: exactly "X:", or a device
 * path beginning with "\\.\" or "//./". */
int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
122
Zhi Yong Wu0563e192011-11-03 16:57:25 +0800123/* throttling disk I/O limits */
Benoît Canetcc0681c2013-09-02 14:14:39 +0200124void bdrv_set_io_limits(BlockDriverState *bs,
125 ThrottleConfig *cfg)
126{
127 int i;
128
129 throttle_config(&bs->throttle_state, cfg);
130
131 for (i = 0; i < 2; i++) {
132 qemu_co_enter_next(&bs->throttled_reqs[i]);
133 }
134}
135
136/* this function drain all the throttled IOs */
137static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
138{
139 bool drained = false;
140 bool enabled = bs->io_limits_enabled;
141 int i;
142
143 bs->io_limits_enabled = false;
144
145 for (i = 0; i < 2; i++) {
146 while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
147 drained = true;
148 }
149 }
150
151 bs->io_limits_enabled = enabled;
152
153 return drained;
154}
155
Zhi Yong Wu98f90db2011-11-08 13:00:14 +0800156void bdrv_io_limits_disable(BlockDriverState *bs)
157{
158 bs->io_limits_enabled = false;
159
Benoît Canetcc0681c2013-09-02 14:14:39 +0200160 bdrv_start_throttled_reqs(bs);
Zhi Yong Wu98f90db2011-11-08 13:00:14 +0800161
Benoît Canetcc0681c2013-09-02 14:14:39 +0200162 throttle_destroy(&bs->throttle_state);
Zhi Yong Wu98f90db2011-11-08 13:00:14 +0800163}
164
Benoît Canetcc0681c2013-09-02 14:14:39 +0200165static void bdrv_throttle_read_timer_cb(void *opaque)
Zhi Yong Wu0563e192011-11-03 16:57:25 +0800166{
167 BlockDriverState *bs = opaque;
Benoît Canetcc0681c2013-09-02 14:14:39 +0200168 qemu_co_enter_next(&bs->throttled_reqs[0]);
Zhi Yong Wu0563e192011-11-03 16:57:25 +0800169}
170
Benoît Canetcc0681c2013-09-02 14:14:39 +0200171static void bdrv_throttle_write_timer_cb(void *opaque)
172{
173 BlockDriverState *bs = opaque;
174 qemu_co_enter_next(&bs->throttled_reqs[1]);
175}
176
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    /* Enabling twice would leak/clobber the throttle state. */
    assert(!bs->io_limits_enabled);
    /* Timers fire on this BDS's AioContext using the virtual clock; the
     * callbacks restart one queued request per direction. */
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
189
/* Make an I/O request wait if the throttle requires it.
 *
 * @bytes: size of the I/O in bytes (accounted against the limits)
 * @is_write: true if the I/O is a write, false for a read
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* Does this I/O have to wait for the throttle timer? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* If it must wait, or any request of the same direction is already
     * queued (preserve ordering), queue this one too and yield. */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* The I/O will be executed now; account for it. */
    throttle_account(&bs->throttle_state, is_write, bytes);


    /* If the next request must wait, a timer has been armed; do nothing. */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* Otherwise wake the next queued request for execution. */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
220
Kevin Wolf339064d2013-11-28 10:23:32 +0100221size_t bdrv_opt_mem_align(BlockDriverState *bs)
222{
223 if (!bs || !bs->drv) {
224 /* 4k should be on the safe side */
225 return 4096;
226 }
227
228 return bs->bl.opt_mem_alignment;
229}
230
/* check if the path starts with "<protocol>:" */
int path_has_protocol(const char *path)
{
    const char *sep;

#ifdef _WIN32
    /* Drive specifications like "c:" or "\\.\d:" are not protocols. */
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    sep = path + strcspn(path, ":/\\");
#else
    sep = path + strcspn(path, ":/");
#endif

    /* A protocol prefix is a ':' occurring before any path separator. */
    return sep[0] == ':';
}
248
/* Return 1 if @path is absolute for the host's path conventions. */
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return path[0] == '/' || path[0] == '\\';
#else
    return path[0] == '/';
#endif
}
261
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URL are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        /* p: start of the path part, skipping any "protocol:" prefix */
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        /* p1: last directory separator in base_path, if any */
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            /* On Windows also treat '\\' as a separator; prefer
             * whichever separator comes later in the string. */
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        /* Keep everything up to the later of the protocol prefix and
         * the final separator, i.e. base_path's "directory" part. */
        if (p1 > p)
            p = p1;
        len = p - base_path;
        /* Truncate to the destination capacity, reserving the NUL. */
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
305
Max Reitz0a828552014-11-26 17:20:25 +0100306void bdrv_get_full_backing_filename_from_filename(const char *backed,
307 const char *backing,
308 char *dest, size_t sz)
309{
310 if (backing[0] == '\0' || path_has_protocol(backing)) {
311 pstrcpy(dest, sz, backing);
312 } else {
313 path_combine(dest, sz, backed, backing);
314 }
315}
316
/* Resolve bs->backing_file relative to bs->filename into @dest. */
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    bdrv_get_full_backing_filename_from_filename(bs->filename, bs->backing_file,
                                                 dest, sz);
}
322
/* Register a block driver, filling in emulated coroutine/AIO callbacks
 * where the driver provides none, and add it to the global driver list. */
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
bellardb3380822004-03-14 21:38:54 +0000342
/* Create a new BlockDriverState and register it on the global
 * bdrv_states list of device roots. */
BlockDriverState *bdrv_new_root(void)
{
    BlockDriverState *bs = bdrv_new();

    QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    return bs;
}
350
/* Allocate and initialize an anonymous BlockDriverState: zeroed, with
 * all lists/queues/notifiers set up, refcount 1, and bound to the main
 * AioContext.  The caller owns the returned reference. */
BlockDriverState *bdrv_new(void)
{
    BlockDriverState *bs;
    int i;

    bs = g_new0(BlockDriverState, 1);
    QLIST_INIT(&bs->dirty_bitmaps);
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    /* One throttled queue per direction: [0] reads, [1] writes. */
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}
371
/* Register @notify to be called when @bs is closed. */
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}
376
bellardea2384d2004-08-01 21:59:26 +0000377BlockDriver *bdrv_find_format(const char *format_name)
378{
379 BlockDriver *drv1;
Stefan Hajnoczi8a22f022010-04-13 10:29:33 +0100380 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
381 if (!strcmp(drv1->format_name, format_name)) {
bellardea2384d2004-08-01 21:59:26 +0000382 return drv1;
Stefan Hajnoczi8a22f022010-04-13 10:29:33 +0100383 }
bellardea2384d2004-08-01 21:59:26 +0000384 }
385 return NULL;
386}
387
Fam Zhengb64ec4e2013-05-29 19:35:40 +0800388static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
Markus Armbrustereb852012009-10-27 18:41:44 +0100389{
Fam Zhengb64ec4e2013-05-29 19:35:40 +0800390 static const char *whitelist_rw[] = {
391 CONFIG_BDRV_RW_WHITELIST
392 };
393 static const char *whitelist_ro[] = {
394 CONFIG_BDRV_RO_WHITELIST
Markus Armbrustereb852012009-10-27 18:41:44 +0100395 };
396 const char **p;
397
Fam Zhengb64ec4e2013-05-29 19:35:40 +0800398 if (!whitelist_rw[0] && !whitelist_ro[0]) {
Markus Armbrustereb852012009-10-27 18:41:44 +0100399 return 1; /* no whitelist, anything goes */
Fam Zhengb64ec4e2013-05-29 19:35:40 +0800400 }
Markus Armbrustereb852012009-10-27 18:41:44 +0100401
Fam Zhengb64ec4e2013-05-29 19:35:40 +0800402 for (p = whitelist_rw; *p; p++) {
Markus Armbrustereb852012009-10-27 18:41:44 +0100403 if (!strcmp(drv->format_name, *p)) {
404 return 1;
405 }
406 }
Fam Zhengb64ec4e2013-05-29 19:35:40 +0800407 if (read_only) {
408 for (p = whitelist_ro; *p; p++) {
409 if (!strcmp(drv->format_name, *p)) {
410 return 1;
411 }
412 }
413 }
Markus Armbrustereb852012009-10-27 18:41:44 +0100414 return 0;
415}
416
Fam Zhengb64ec4e2013-05-29 19:35:40 +0800417BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
418 bool read_only)
Markus Armbrustereb852012009-10-27 18:41:44 +0100419{
420 BlockDriver *drv = bdrv_find_format(format_name);
Fam Zhengb64ec4e2013-05-29 19:35:40 +0800421 return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
Markus Armbrustereb852012009-10-27 18:41:44 +0100422}
423
/* Argument/result bundle handed to bdrv_create_co_entry() through the
 * coroutine's opaque pointer. */
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;   /* owned copy; freed by bdrv_create() */
    QemuOpts *opts;
    int ret;          /* NOT_DONE until the coroutine completes */
    Error *err;
} CreateCo;
431
/* Coroutine entry point for image creation: runs the driver's
 * bdrv_create callback and stores result and error in the CreateCo. */
static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    /* Written last: flips cco->ret away from NOT_DONE, which is what
     * bdrv_create()'s wait loop polls for. */
    cco->ret = ret;
}
446
/* Create an image with driver @drv at @filename using @opts.
 * Runs the driver's create callback in a coroutine; returns 0 on
 * success or a negative errno, setting @errp on failure. */
int bdrv_create(BlockDriver *drv, const char* filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        /* Spawn the coroutine and service the main AioContext until it
         * signals completion by changing cco.ret from NOT_DONE. */
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            aio_poll(qemu_get_aio_context(), true);
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        /* Prefer the driver's detailed error; fall back to errno text. */
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
491
Chunyan Liuc282e1f2014-06-05 17:21:11 +0800492int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
Christoph Hellwig84a12e62010-04-07 22:30:24 +0200493{
494 BlockDriver *drv;
Max Reitzcc84d902013-09-06 17:14:26 +0200495 Error *local_err = NULL;
496 int ret;
Christoph Hellwig84a12e62010-04-07 22:30:24 +0200497
Kevin Wolf98289622013-07-10 15:47:39 +0200498 drv = bdrv_find_protocol(filename, true);
Christoph Hellwig84a12e62010-04-07 22:30:24 +0200499 if (drv == NULL) {
Max Reitzcc84d902013-09-06 17:14:26 +0200500 error_setg(errp, "Could not find protocol for file '%s'", filename);
Stefan Hajnoczi16905d72010-11-30 15:14:14 +0000501 return -ENOENT;
Christoph Hellwig84a12e62010-04-07 22:30:24 +0200502 }
503
Chunyan Liuc282e1f2014-06-05 17:21:11 +0800504 ret = bdrv_create(drv, filename, opts, &local_err);
Markus Armbruster84d18f02014-01-30 15:07:28 +0100505 if (local_err) {
Max Reitzcc84d902013-09-06 17:14:26 +0200506 error_propagate(errp, local_err);
507 }
508 return ret;
Christoph Hellwig84a12e62010-04-07 22:30:24 +0200509}
510
/* Recompute bs->bl: start from zero, inherit defaults from the file and
 * backing children (recursively refreshed first), then let the driver
 * override the result.  Sets @errp and bails out on child failure. */
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        /* Optimums are maximized across children; the hard maximum is
         * the smallest non-zero child limit (zero means "no limit"). */
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
558
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    const char *dir = getenv("TMPDIR");
    int fd;

    if (dir == NULL) {
        dir = "/var/tmp";
    }
    /* Refuse silently-truncated template names. */
    if (snprintf(filename, size, "%s/vl.XXXXXX", dir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    /* close() can report delayed write errors; don't leave the file
     * behind in that case. */
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
bellardea2384d2004-08-01 21:59:26 +0000594
Christoph Hellwigf3a5d3f2009-06-15 13:55:19 +0200595/*
596 * Detect host devices. By convention, /dev/cdrom[N] is always
597 * recognized as a host CDROM.
598 */
Christoph Hellwigf3a5d3f2009-06-15 13:55:19 +0200599static BlockDriver *find_hdev_driver(const char *filename)
600{
Christoph Hellwig508c7cb2009-06-15 14:04:22 +0200601 int score_max = 0, score;
602 BlockDriver *drv = NULL, *d;
Christoph Hellwigf3a5d3f2009-06-15 13:55:19 +0200603
Stefan Hajnoczi8a22f022010-04-13 10:29:33 +0100604 QLIST_FOREACH(d, &bdrv_drivers, list) {
Christoph Hellwig508c7cb2009-06-15 14:04:22 +0200605 if (d->bdrv_probe_device) {
606 score = d->bdrv_probe_device(filename);
607 if (score > score_max) {
608 score_max = score;
609 drv = d;
610 }
611 }
Christoph Hellwigf3a5d3f2009-06-15 13:55:19 +0200612 }
613
Christoph Hellwig508c7cb2009-06-15 14:04:22 +0200614 return drv;
Christoph Hellwigf3a5d3f2009-06-15 13:55:19 +0200615}
Christoph Hellwigf3a5d3f2009-06-15 13:55:19 +0200616
/* Select the protocol-level driver for @filename.  Host-device probing
 * wins over everything; otherwise a "protocol:" prefix (when allowed)
 * selects the matching driver, and plain paths fall back to bdrv_file.
 * Returns NULL when a protocol prefix matches no registered driver. */
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return &bdrv_file;
    }

    /* Copy the "protocol" part (up to the ':') into a local buffer.
     * NOTE(review): names longer than 127 chars are silently truncated
     * here and will simply fail to match any driver. */
    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
658
Markus Armbrusterc6684242014-11-20 16:27:10 +0100659/*
660 * Guess image format by probing its contents.
661 * This is not a good idea when your image is raw (CVE-2008-2004), but
662 * we do it anyway for backward compatibility.
663 *
664 * @buf contains the image's first @buf_size bytes.
Kevin Wolf7cddd372014-11-20 16:27:11 +0100665 * @buf_size is the buffer size in bytes (generally BLOCK_PROBE_BUF_SIZE,
666 * but can be smaller if the image file is smaller)
Markus Armbrusterc6684242014-11-20 16:27:10 +0100667 * @filename is its filename.
668 *
669 * For all block drivers, call the bdrv_probe() method to get its
670 * probing score.
671 * Return the first block driver with the highest probing score.
672 */
Kevin Wolf38f3ef52014-11-20 16:27:12 +0100673BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
674 const char *filename)
Markus Armbrusterc6684242014-11-20 16:27:10 +0100675{
676 int score_max = 0, score;
677 BlockDriver *drv = NULL, *d;
678
679 QLIST_FOREACH(d, &bdrv_drivers, list) {
680 if (d->bdrv_probe) {
681 score = d->bdrv_probe(buf, buf_size, filename);
682 if (score > score_max) {
683 score_max = score;
684 drv = d;
685 }
686 }
687 }
688
689 return drv;
690}
691
/* Determine the format driver for the open image @bs by reading its
 * header and probing all drivers.  Stores the driver (or NULL) in
 * *@pdrv; returns 0 on success or a negative errno with @errp set. */
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    BlockDriver *drv;
    uint8_t buf[BLOCK_PROBE_BUF_SIZE];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        *pdrv = &bdrv_raw;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    /* ret is the number of bytes actually read; probe with that. */
    drv = bdrv_probe_all(buf, ret, filename);
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
722
Stefan Hajnoczi51762282010-04-19 16:56:41 +0100723/**
724 * Set the current 'total_sectors' value
Markus Armbruster65a9bb22014-06-26 13:23:17 +0200725 * Return 0 on success, -errno on error.
Stefan Hajnoczi51762282010-04-19 16:56:41 +0100726 */
727static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
728{
729 BlockDriver *drv = bs->drv;
730
Nicholas Bellinger396759a2010-05-17 09:46:04 -0700731 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
732 if (bs->sg)
733 return 0;
734
Stefan Hajnoczi51762282010-04-19 16:56:41 +0100735 /* query actual device if possible, otherwise just trust the hint */
736 if (drv->bdrv_getlength) {
737 int64_t length = drv->bdrv_getlength(bs);
738 if (length < 0) {
739 return length;
740 }
Fam Zheng7e382002013-11-06 19:48:06 +0800741 hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
Stefan Hajnoczi51762282010-04-19 16:56:41 +0100742 }
743
744 bs->total_sectors = hint;
745 return 0;
746}
747
Stefan Hajnoczic3993cd2011-08-04 12:26:51 +0100748/**
Paolo Bonzini9e8f1832013-02-08 14:06:11 +0100749 * Set open flags for a given discard mode
750 *
751 * Return 0 on success, -1 if the discard mode was invalid.
752 */
753int bdrv_parse_discard_flags(const char *mode, int *flags)
754{
755 *flags &= ~BDRV_O_UNMAP;
756
757 if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
758 /* do nothing */
759 } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
760 *flags |= BDRV_O_UNMAP;
761 } else {
762 return -1;
763 }
764
765 return 0;
766}
767
768/**
Stefan Hajnoczic3993cd2011-08-04 12:26:51 +0100769 * Set open flags for a given cache mode
770 *
771 * Return 0 on success, -1 if the cache mode was invalid.
772 */
773int bdrv_parse_cache_flags(const char *mode, int *flags)
774{
775 *flags &= ~BDRV_O_CACHE_MASK;
776
777 if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
778 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
Stefan Hajnoczi92196b22011-08-04 12:26:52 +0100779 } else if (!strcmp(mode, "directsync")) {
780 *flags |= BDRV_O_NOCACHE;
Stefan Hajnoczic3993cd2011-08-04 12:26:51 +0100781 } else if (!strcmp(mode, "writeback")) {
782 *flags |= BDRV_O_CACHE_WB;
783 } else if (!strcmp(mode, "unsafe")) {
784 *flags |= BDRV_O_CACHE_WB;
785 *flags |= BDRV_O_NO_FLUSH;
786 } else if (!strcmp(mode, "writethrough")) {
787 /* this is the default */
788 } else {
789 return -1;
790 }
791
792 return 0;
793}
794
Stefan Hajnoczi53fec9d2011-11-28 16:08:47 +0000795/**
796 * The copy-on-read flag is actually a reference count so multiple users may
797 * use the feature without worrying about clobbering its previous state.
798 * Copy-on-read stays enabled until all users have called to disable it.
799 */
800void bdrv_enable_copy_on_read(BlockDriverState *bs)
801{
802 bs->copy_on_read++;
803}
804
805void bdrv_disable_copy_on_read(BlockDriverState *bs)
806{
807 assert(bs->copy_on_read > 0);
808 bs->copy_on_read--;
809}
810
Kevin Wolf0b50cc82014-04-11 21:29:52 +0200811/*
Kevin Wolfb1e6fc02014-05-06 12:11:42 +0200812 * Returns the flags that a temporary snapshot should get, based on the
813 * originally requested flags (the originally requested image will have flags
814 * like a backing file)
815 */
816static int bdrv_temp_snapshot_flags(int flags)
817{
818 return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
819}
820
821/*
Kevin Wolf0b50cc82014-04-11 21:29:52 +0200822 * Returns the flags that bs->file should get, based on the given flags for
823 * the parent BDS
824 */
825static int bdrv_inherited_flags(int flags)
826{
827 /* Enable protocol handling, disable format probing for bs->file */
828 flags |= BDRV_O_PROTOCOL;
829
830 /* Our block drivers take care to send flushes and respect unmap policy,
831 * so we can enable both unconditionally on lower layers. */
832 flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;
833
Kevin Wolf0b50cc82014-04-11 21:29:52 +0200834 /* Clear flags that only apply to the top layer */
Kevin Wolf5669b442014-04-11 21:36:45 +0200835 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);
Kevin Wolf0b50cc82014-04-11 21:29:52 +0200836
837 return flags;
838}
839
Kevin Wolf317fc442014-04-25 13:27:34 +0200840/*
841 * Returns the flags that bs->backing_hd should get, based on the given flags
842 * for the parent BDS
843 */
844static int bdrv_backing_flags(int flags)
845{
846 /* backing files always opened read-only */
847 flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);
848
849 /* snapshot=on is handled on the top layer */
Kevin Wolf8bfea152014-04-11 19:16:36 +0200850 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);
Kevin Wolf317fc442014-04-25 13:27:34 +0200851
852 return flags;
853}
854
Kevin Wolf7b272452012-11-12 17:05:39 +0100855static int bdrv_open_flags(BlockDriverState *bs, int flags)
856{
857 int open_flags = flags | BDRV_O_CACHE_WB;
858
859 /*
860 * Clear flags that are internal to the block layer before opening the
861 * image.
862 */
Kevin Wolf20cca272014-06-04 14:33:27 +0200863 open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL);
Kevin Wolf7b272452012-11-12 17:05:39 +0100864
865 /*
866 * Snapshots should be writable.
867 */
Kevin Wolf8bfea152014-04-11 19:16:36 +0200868 if (flags & BDRV_O_TEMPORARY) {
Kevin Wolf7b272452012-11-12 17:05:39 +0100869 open_flags |= BDRV_O_RDWR;
870 }
871
872 return open_flags;
873}
874
Kevin Wolf636ea372014-01-24 14:11:52 +0100875static void bdrv_assign_node_name(BlockDriverState *bs,
876 const char *node_name,
877 Error **errp)
Benoît Canet6913c0c2014-01-23 21:31:33 +0100878{
879 if (!node_name) {
Kevin Wolf636ea372014-01-24 14:11:52 +0100880 return;
Benoît Canet6913c0c2014-01-23 21:31:33 +0100881 }
882
Kevin Wolf9aebf3b2014-09-25 09:54:02 +0200883 /* Check for empty string or invalid characters */
Markus Armbrusterf5bebbb2014-09-30 13:59:30 +0200884 if (!id_wellformed(node_name)) {
Kevin Wolf9aebf3b2014-09-25 09:54:02 +0200885 error_setg(errp, "Invalid node name");
Kevin Wolf636ea372014-01-24 14:11:52 +0100886 return;
Benoît Canet6913c0c2014-01-23 21:31:33 +0100887 }
888
Benoît Canet0c5e94e2014-02-12 17:15:07 +0100889 /* takes care of avoiding namespaces collisions */
Markus Armbruster7f06d472014-10-07 13:59:12 +0200890 if (blk_by_name(node_name)) {
Benoît Canet0c5e94e2014-02-12 17:15:07 +0100891 error_setg(errp, "node-name=%s is conflicting with a device id",
892 node_name);
Kevin Wolf636ea372014-01-24 14:11:52 +0100893 return;
Benoît Canet0c5e94e2014-02-12 17:15:07 +0100894 }
895
Benoît Canet6913c0c2014-01-23 21:31:33 +0100896 /* takes care of avoiding duplicates node names */
897 if (bdrv_find_node(node_name)) {
898 error_setg(errp, "Duplicate node name");
Kevin Wolf636ea372014-01-24 14:11:52 +0100899 return;
Benoît Canet6913c0c2014-01-23 21:31:33 +0100900 }
901
902 /* copy node name into the bs and insert it into the graph list */
903 pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
904 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
Benoît Canet6913c0c2014-01-23 21:31:33 +0100905}
906
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 *
 * @bs:      the BlockDriverState to initialize (must not yet have a file)
 * @file:    already-opened protocol-level BDS, or NULL
 * @options: option QDict; consumed keys (e.g. "node-name") are deleted
 * @flags:   BDRV_O_* open flags as requested by the caller
 * @drv:     driver to open the image with (must not be NULL)
 * @errp:    error destination
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    /* Prefer the filename of the already-opened protocol layer, if any */
    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    /* Validate and register an optional user-supplied node name */
    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);
    /* only protocol (leaf) nodes may grow beyond their current size */
    bs->growable = !!(flags & BDRV_O_PROTOCOL);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        /* Distinguish "read-only only" drivers from fully unlisted ones */
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                   ? "Driver '%s' can only be used for read-only devices"
                   : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename);

    bs->drv = drv;
    /* driver-private state; freed on the failure path below */
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        /* Prefer the driver's own error message when it provided one */
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto free_and_fail;
    }

    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    /* Undo the partial initialization; bs->file ownership stays with caller */
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
1044
Kevin Wolf5e5c4f62014-05-26 11:45:08 +02001045static QDict *parse_json_filename(const char *filename, Error **errp)
1046{
1047 QObject *options_obj;
1048 QDict *options;
1049 int ret;
1050
1051 ret = strstart(filename, "json:", &filename);
1052 assert(ret);
1053
1054 options_obj = qobject_from_json(filename);
1055 if (!options_obj) {
1056 error_setg(errp, "Could not parse the JSON options");
1057 return NULL;
1058 }
1059
1060 if (qobject_type(options_obj) != QTYPE_QDICT) {
1061 qobject_decref(options_obj);
1062 error_setg(errp, "Invalid JSON object given");
1063 return NULL;
1064 }
1065
1066 options = qobject_to_qdict(options_obj);
1067 qdict_flatten(options);
1068
1069 return options;
1070}
1071
/*
 * Fills in default options for opening images and converts the legacy
 * filename/flags pair to option QDict entries.
 *
 * Handles the "json:" pseudo-protocol, moves the legacy filename into the
 * options QDict, resolves the "driver" option against @drv, and runs the
 * driver's own filename parser.  *pfilename may be set to NULL when the
 * filename has been fully converted into options.
 *
 * Returns 0 on success, a negative errno on failure (with @errp set).
 */
static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
                             BlockDriver *drv, Error **errp)
{
    const char *filename = *pfilename;
    const char *drvname;
    bool protocol = flags & BDRV_O_PROTOCOL;
    bool parse_filename = false;
    Error *local_err = NULL;

    /* Parse json: pseudo-protocol */
    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(*options, json_options, false);
        QDECREF(json_options);
        /* the filename is consumed entirely by the options dict */
        *pfilename = filename = NULL;
    }

    /* Fetch the file name from the options QDict if necessary */
    if (protocol && filename) {
        if (!qdict_haskey(*options, "filename")) {
            qdict_put(*options, "filename", qstring_from_str(filename));
            /* remember to run the driver's filename parser further down */
            parse_filename = true;
        } else {
            error_setg(errp, "Can't specify 'file' and 'filename' options at "
                       "the same time");
            return -EINVAL;
        }
    }

    /* Find the right block driver */
    filename = qdict_get_try_str(*options, "filename");
    drvname = qdict_get_try_str(*options, "driver");

    if (drv) {
        /* explicit driver argument and "driver" option are exclusive */
        if (drvname) {
            error_setg(errp, "Driver specified twice");
            return -EINVAL;
        }
        drvname = drv->format_name;
        qdict_put(*options, "driver", qstring_from_str(drvname));
    } else {
        if (!drvname && protocol) {
            if (filename) {
                /* infer the protocol driver from the filename prefix */
                drv = bdrv_find_protocol(filename, parse_filename);
                if (!drv) {
                    error_setg(errp, "Unknown protocol");
                    return -EINVAL;
                }

                drvname = drv->format_name;
                qdict_put(*options, "driver", qstring_from_str(drvname));
            } else {
                error_setg(errp, "Must specify either driver or file");
                return -EINVAL;
            }
        } else if (drvname) {
            drv = bdrv_find_format(drvname);
            if (!drv) {
                error_setg(errp, "Unknown driver '%s'", drvname);
                return -ENOENT;
            }
        }
    }

    /* at the protocol level a driver must have been resolved by now */
    assert(drv || !protocol);

    /* Driver-specific filename parsing */
    if (drv && drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* the parser converted the filename into options; drop the copy
         * unless the driver still needs the raw filename */
        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        }
    }

    return 0;
}
1164
Fam Zheng8d24cce2014-05-23 21:29:45 +08001165void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
1166{
1167
Fam Zheng826b6ca2014-05-23 21:29:47 +08001168 if (bs->backing_hd) {
1169 assert(bs->backing_blocker);
1170 bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
1171 } else if (backing_hd) {
1172 error_setg(&bs->backing_blocker,
1173 "device is used as backing hd of '%s'",
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02001174 bdrv_get_device_name(bs));
Fam Zheng826b6ca2014-05-23 21:29:47 +08001175 }
1176
Fam Zheng8d24cce2014-05-23 21:29:45 +08001177 bs->backing_hd = backing_hd;
1178 if (!backing_hd) {
Fam Zheng826b6ca2014-05-23 21:29:47 +08001179 error_free(bs->backing_blocker);
1180 bs->backing_blocker = NULL;
Fam Zheng8d24cce2014-05-23 21:29:45 +08001181 goto out;
1182 }
1183 bs->open_flags &= ~BDRV_O_NO_BACKING;
1184 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
1185 pstrcpy(bs->backing_format, sizeof(bs->backing_format),
1186 backing_hd->drv ? backing_hd->drv->format_name : "");
Fam Zheng826b6ca2014-05-23 21:29:47 +08001187
1188 bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
1189 /* Otherwise we won't be able to commit due to check in bdrv_commit */
1190 bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
1191 bs->backing_blocker);
Fam Zheng8d24cce2014-05-23 21:29:45 +08001192out:
Kevin Wolf3baca892014-07-16 17:48:16 +02001193 bdrv_refresh_limits(bs, NULL);
Fam Zheng8d24cce2014-05-23 21:29:45 +08001194}
1195
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_open_backing_file.
 *
 * Returns 0 on success (including the no-op cases), negative errno on
 * failure, with @errp set.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    /* PATH_MAX-sized scratch buffer; freed on every exit path */
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    /* Already open: nothing to do, but the options reference is ours */
    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        /* the options fully specify the file; no filename needed */
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        /* no backing file configured at all: success, nothing to open */
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
    }

    if (!bs->drv || !bs->drv->supports_backing) {
        ret = -EINVAL;
        error_setg(errp, "Driver doesn't support backing files");
        QDECREF(options);
        goto free_exit;
    }

    backing_hd = bdrv_new();

    /* Default the driver to the recorded backing format, if any */
    if (bs->backing_format[0] != '\0' && !qdict_haskey(options, "driver")) {
        qdict_put(options, "driver", qstring_from_str(bs->backing_format));
    }

    assert(bs->backing_hd == NULL);
    /* bdrv_open() consumes the options reference from here on */
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), NULL, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}
1263
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001264/*
Max Reitzda557aa2013-12-20 19:28:11 +01001265 * Opens a disk image whose options are given as BlockdevRef in another block
1266 * device's options.
1267 *
Max Reitzda557aa2013-12-20 19:28:11 +01001268 * If allow_none is true, no image will be opened if filename is false and no
1269 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
1270 *
1271 * bdrev_key specifies the key for the image's BlockdevRef in the options QDict.
1272 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
1273 * itself, all options starting with "${bdref_key}." are considered part of the
1274 * BlockdevRef.
1275 *
1276 * The BlockdevRef will be removed from the options QDict.
Max Reitzf67503e2014-02-18 18:33:05 +01001277 *
1278 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
Max Reitzda557aa2013-12-20 19:28:11 +01001279 */
1280int bdrv_open_image(BlockDriverState **pbs, const char *filename,
1281 QDict *options, const char *bdref_key, int flags,
Max Reitzf7d9fd82014-02-18 18:33:12 +01001282 bool allow_none, Error **errp)
Max Reitzda557aa2013-12-20 19:28:11 +01001283{
1284 QDict *image_options;
1285 int ret;
1286 char *bdref_key_dot;
1287 const char *reference;
1288
Max Reitzf67503e2014-02-18 18:33:05 +01001289 assert(pbs);
1290 assert(*pbs == NULL);
1291
Max Reitzda557aa2013-12-20 19:28:11 +01001292 bdref_key_dot = g_strdup_printf("%s.", bdref_key);
1293 qdict_extract_subqdict(options, &image_options, bdref_key_dot);
1294 g_free(bdref_key_dot);
1295
1296 reference = qdict_get_try_str(options, bdref_key);
1297 if (!filename && !reference && !qdict_size(image_options)) {
1298 if (allow_none) {
1299 ret = 0;
1300 } else {
1301 error_setg(errp, "A block device must be specified for \"%s\"",
1302 bdref_key);
1303 ret = -EINVAL;
1304 }
Markus Armbrusterb20e61e2014-05-28 11:16:57 +02001305 QDECREF(image_options);
Max Reitzda557aa2013-12-20 19:28:11 +01001306 goto done;
1307 }
1308
Max Reitzf7d9fd82014-02-18 18:33:12 +01001309 ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);
Max Reitzda557aa2013-12-20 19:28:11 +01001310
1311done:
1312 qdict_del(options, bdref_key);
1313 return ret;
1314}
1315
Chen Gang6b8aeca2014-06-23 23:28:23 +08001316int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
Kevin Wolfb9988752014-04-03 12:09:34 +02001317{
1318 /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001319 char *tmp_filename = g_malloc0(PATH_MAX + 1);
Kevin Wolfb9988752014-04-03 12:09:34 +02001320 int64_t total_size;
Chunyan Liu83d05212014-06-05 17:20:51 +08001321 QemuOpts *opts = NULL;
Kevin Wolfb9988752014-04-03 12:09:34 +02001322 QDict *snapshot_options;
1323 BlockDriverState *bs_snapshot;
1324 Error *local_err;
1325 int ret;
1326
1327 /* if snapshot, we create a temporary backing file and open it
1328 instead of opening 'filename' directly */
1329
1330 /* Get the required size from the image */
Kevin Wolff1877432014-04-04 17:07:19 +02001331 total_size = bdrv_getlength(bs);
1332 if (total_size < 0) {
Chen Gang6b8aeca2014-06-23 23:28:23 +08001333 ret = total_size;
Kevin Wolff1877432014-04-04 17:07:19 +02001334 error_setg_errno(errp, -total_size, "Could not get image size");
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001335 goto out;
Kevin Wolff1877432014-04-04 17:07:19 +02001336 }
Kevin Wolfb9988752014-04-03 12:09:34 +02001337
1338 /* Create the temporary image */
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001339 ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
Kevin Wolfb9988752014-04-03 12:09:34 +02001340 if (ret < 0) {
1341 error_setg_errno(errp, -ret, "Could not get temporary filename");
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001342 goto out;
Kevin Wolfb9988752014-04-03 12:09:34 +02001343 }
1344
Max Reitzef810432014-12-02 18:32:42 +01001345 opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
Chunyan Liuc282e1f2014-06-05 17:21:11 +08001346 &error_abort);
Chunyan Liu83d05212014-06-05 17:20:51 +08001347 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
Max Reitzef810432014-12-02 18:32:42 +01001348 ret = bdrv_create(&bdrv_qcow2, tmp_filename, opts, &local_err);
Chunyan Liu83d05212014-06-05 17:20:51 +08001349 qemu_opts_del(opts);
Kevin Wolfb9988752014-04-03 12:09:34 +02001350 if (ret < 0) {
1351 error_setg_errno(errp, -ret, "Could not create temporary overlay "
1352 "'%s': %s", tmp_filename,
1353 error_get_pretty(local_err));
1354 error_free(local_err);
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001355 goto out;
Kevin Wolfb9988752014-04-03 12:09:34 +02001356 }
1357
1358 /* Prepare a new options QDict for the temporary file */
1359 snapshot_options = qdict_new();
1360 qdict_put(snapshot_options, "file.driver",
1361 qstring_from_str("file"));
1362 qdict_put(snapshot_options, "file.filename",
1363 qstring_from_str(tmp_filename));
1364
Markus Armbrustere4e99862014-10-07 13:59:03 +02001365 bs_snapshot = bdrv_new();
Kevin Wolfb9988752014-04-03 12:09:34 +02001366
1367 ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
Max Reitzef810432014-12-02 18:32:42 +01001368 flags, &bdrv_qcow2, &local_err);
Kevin Wolfb9988752014-04-03 12:09:34 +02001369 if (ret < 0) {
1370 error_propagate(errp, local_err);
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001371 goto out;
Kevin Wolfb9988752014-04-03 12:09:34 +02001372 }
1373
1374 bdrv_append(bs_snapshot, bs);
Benoît Canet1ba4b6a2014-04-22 17:05:27 +02001375
1376out:
1377 g_free(tmp_filename);
Chen Gang6b8aeca2014-06-23 23:28:23 +08001378 return ret;
Kevin Wolfb9988752014-04-03 12:09:34 +02001379}
1380
Max Reitzda557aa2013-12-20 19:28:11 +01001381/*
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001382 * Opens a disk image (raw, qcow2, vmdk, ...)
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001383 *
1384 * options is a QDict of options to pass to the block drivers, or NULL for an
1385 * empty set of options. The reference to the QDict belongs to the block layer
1386 * after the call (even on failure), so if the caller intends to reuse the
1387 * dictionary, it needs to use QINCREF() before calling bdrv_open.
Max Reitzf67503e2014-02-18 18:33:05 +01001388 *
1389 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
1390 * If it is not NULL, the referenced BDS will be reused.
Max Reitzddf56362014-02-18 18:33:06 +01001391 *
1392 * The reference parameter may be used to specify an existing block device which
1393 * should be opened. If specified, neither options nor a filename may be given,
1394 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001395 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        /* Remember whether options were given before consuming the caller's
         * reference; per the contract above, 'options' is consumed even on
         * the error paths. */
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        /* Reuse the existing BDS; take a reference on behalf of the caller. */
        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new();
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
    if (local_err) {
        goto fail;
    }

    /* Find the right image format driver */
    drv = NULL;
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Unknown driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    assert(drvname || !(flags & BDRV_O_PROTOCOL));
    if (drv && !drv->bdrv_file_open) {
        /* If the user explicitly wants a format driver here, we'll need to add
         * another layer for the protocol in bs->file */
        flags &= ~BDRV_O_PROTOCOL;
    }

    /* bs->options keeps the full option set (e.g. for reopen); the shallow
     * clone below is consumed piecewise by the open process. */
    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Open image file without format layer */
    if ((flags & BDRV_O_PROTOCOL) == 0) {
        if (flags & BDRV_O_RDWR) {
            flags |= BDRV_O_ALLOW_RDWR;
        }
        if (flags & BDRV_O_SNAPSHOT) {
            /* The temporary overlay is created later; here we only compute
             * the flag sets for the overlay and for the now-backing image. */
            snapshot_flags = bdrv_temp_snapshot_flags(flags);
            flags = bdrv_backing_flags(flags);
        }

        assert(file == NULL);
        ret = bdrv_open_image(&file, filename, options, "file",
                              bdrv_inherited_flags(flags),
                              true, &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Image format probing */
    bs->probed = !drv;
    if (!drv && file) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
            goto fail;
        }
    } else if (!drv) {
        error_setg(errp, "Must specify either driver or file");
        ret = -EINVAL;
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* The format layer may have taken its own reference to the protocol BDS
     * (or replaced it); drop ours if it is no longer bs->file. */
    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    bdrv_refresh_filename(bs);

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bdrv_get_device_name(bs), entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        if (bs->blk) {
            blk_dev_change_media_cb(bs->blk, true);
        }
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        /* An encrypted image needs its key before I/O can happen; only a
         * stopped guest may have the device opened without one. */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    /* Error before bdrv_open_common() succeeded: the BDS was never really
     * opened, so it must not be closed — only unreferenced if we created it. */
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
1601
/* One element of a transactional reopen queue (see bdrv_reopen_queue()). */
typedef struct BlockReopenQueueEntry {
    bool prepared;      /* true once bdrv_reopen_prepare() succeeded, so
                         * bdrv_reopen_abort() must be called on rollback */
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;
1607
/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or alternatively may be NULL, in which
 * case a new BlockReopenQueue will be created and initialized. This newly
 * created BlockReopenQueue should be passed back in for subsequent calls
 * that are intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    /* bdrv_open() masks this flag out */
    flags &= ~BDRV_O_PROTOCOL;

    if (bs->file) {
        /* Queue the protocol layer as well, with inherited flags, so the
         * whole chain is reopened in one transaction. */
        bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
1652
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags. All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 * Returns 0 on success, -1 on failure (with errp set). The queue and its
 * entries are always freed, regardless of outcome.
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    /* No I/O may be in flight while flags are switched underneath it */
    bdrv_drain_all();

    /* Phase 1: stage every reopen; bail out on the first failure */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    /* On failure, roll back only the entries that were actually prepared;
     * in every case the queue itself is freed here. */
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
1705
1706
1707/* Reopen a single BlockDriverState with the specified flags. */
1708int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1709{
1710 int ret = -1;
1711 Error *local_err = NULL;
1712 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1713
1714 ret = bdrv_reopen_multiple(queue, &local_err);
1715 if (local_err != NULL) {
1716 error_propagate(errp, local_err);
1717 }
1718 return ret;
1719}
1720
1721
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error. On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  bdrv_get_device_name(reopen_state->bs));
        goto error;
    }


    /* Flush before changing flags so no dirty data depends on the old mode */
    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            /* Prefer the driver's own error message; fall back to a generic
             * one if it set none. */
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, bdrv_get_device_name(reopen_state->bs),
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
1793
1794/*
1795 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1796 * makes them final by swapping the staging BlockDriverState contents into
1797 * the active BlockDriverState contents.
1798 */
1799void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1800{
1801 BlockDriver *drv;
1802
1803 assert(reopen_state != NULL);
1804 drv = reopen_state->bs->drv;
1805 assert(drv != NULL);
1806
1807 /* If there are any driver level actions to take */
1808 if (drv->bdrv_reopen_commit) {
1809 drv->bdrv_reopen_commit(reopen_state);
1810 }
1811
1812 /* set BDS specific flags now */
1813 reopen_state->bs->open_flags = reopen_state->flags;
1814 reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1815 BDRV_O_CACHE_WB);
1816 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
Kevin Wolf355ef4a2013-12-11 20:14:09 +01001817
Kevin Wolf3baca892014-07-16 17:48:16 +02001818 bdrv_refresh_limits(reopen_state->bs, NULL);
Jeff Codye971aa12012-09-20 15:13:19 -04001819}
1820
1821/*
1822 * Abort the reopen, and delete and free the staged changes in
1823 * reopen_state
1824 */
1825void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1826{
1827 BlockDriver *drv;
1828
1829 assert(reopen_state != NULL);
1830 drv = reopen_state->bs->drv;
1831 assert(drv != NULL);
1832
1833 if (drv->bdrv_reopen_abort) {
1834 drv->bdrv_reopen_abort(reopen_state);
1835 }
1836}
1837
1838
/* Tear down an open BDS: cancel its job, drain and flush outstanding I/O,
 * notify close listeners, release the driver state and the backing/file
 * references, and reset the fields to their "no medium" values. The BDS
 * itself is not freed (see bdrv_delete()). */
void bdrv_close(BlockDriverState *bs)
{
    BdrvAioNotifier *ban, *ban_next;

    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            /* Detach first, then drop our reference to the backing BDS */
            BlockDriverState *backing_hd = bs->backing_hd;
            bdrv_set_backing_hd(bs, NULL);
            bdrv_unref(backing_hd);
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
        bs->opaque = NULL;
        bs->drv = NULL;
        /* Reset per-open state so the BDS can be reopened cleanly */
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;
        QDECREF(bs->full_open_options);
        bs->full_open_options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    if (bs->blk) {
        /* Tell the attached device model that the medium went away */
        blk_dev_change_media_cb(bs->blk, false);
    }

    /* Disable throttling of disk I/O limits, if enabled */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }

    QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
        g_free(ban);
    }
    QLIST_INIT(&bs->aio_notifiers);
}
1895
MORITA Kazutaka2bc93fe2010-05-28 11:44:57 +09001896void bdrv_close_all(void)
1897{
1898 BlockDriverState *bs;
1899
Benoît Canetdc364f42014-01-23 21:31:32 +01001900 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02001901 AioContext *aio_context = bdrv_get_aio_context(bs);
1902
1903 aio_context_acquire(aio_context);
MORITA Kazutaka2bc93fe2010-05-28 11:44:57 +09001904 bdrv_close(bs);
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02001905 aio_context_release(aio_context);
MORITA Kazutaka2bc93fe2010-05-28 11:44:57 +09001906 }
1907}
1908
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001909/* Check if any requests are in-flight (including throttled requests) */
1910static bool bdrv_requests_pending(BlockDriverState *bs)
1911{
1912 if (!QLIST_EMPTY(&bs->tracked_requests)) {
1913 return true;
1914 }
Benoît Canetcc0681c2013-09-02 14:14:39 +02001915 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1916 return true;
1917 }
1918 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001919 return true;
1920 }
1921 if (bs->file && bdrv_requests_pending(bs->file)) {
1922 return true;
1923 }
1924 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1925 return true;
1926 }
1927 return false;
1928}
1929
Stefan Hajnoczi5b98db02014-10-21 12:03:55 +01001930static bool bdrv_drain_one(BlockDriverState *bs)
1931{
1932 bool bs_busy;
1933
1934 bdrv_flush_io_queue(bs);
1935 bdrv_start_throttled_reqs(bs);
1936 bs_busy = bdrv_requests_pending(bs);
1937 bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
1938 return bs_busy;
1939}
1940
1941/*
1942 * Wait for pending requests to complete on a single BlockDriverState subtree
1943 *
1944 * See the warning in bdrv_drain_all(). This function can only be called if
1945 * you are sure nothing can generate I/O because you have op blockers
1946 * installed.
1947 *
1948 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
1949 * AioContext.
1950 */
1951void bdrv_drain(BlockDriverState *bs)
1952{
1953 while (bdrv_drain_one(bs)) {
1954 /* Keep iterating */
1955 }
1956}
1957
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001958/*
1959 * Wait for pending requests to complete across all BlockDriverStates
1960 *
1961 * This function does not flush data to disk, use bdrv_flush_all() for that
1962 * after calling this function.
Zhi Yong Wu4c355d52012-04-12 14:00:57 +02001963 *
1964 * Note that completion of an asynchronous I/O operation can trigger any
1965 * number of other I/O operations on other devices---for example a coroutine
1966 * can be arbitrarily complex and a constant flow of I/O can come until the
1967 * coroutine is complete. Because of this, it is not possible to have a
1968 * function to drain a single device's I/O queue.
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001969 */
1970void bdrv_drain_all(void)
1971{
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001972 /* Always run first iteration so any pending completion BHs run */
1973 bool busy = true;
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001974 BlockDriverState *bs;
1975
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001976 while (busy) {
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001977 busy = false;
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001978
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001979 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1980 AioContext *aio_context = bdrv_get_aio_context(bs);
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001981
1982 aio_context_acquire(aio_context);
Stefan Hajnoczi5b98db02014-10-21 12:03:55 +01001983 busy |= bdrv_drain_one(bs);
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001984 aio_context_release(aio_context);
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001985 }
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001986 }
1987}
1988
Benoît Canetdc364f42014-01-23 21:31:32 +01001989/* make a BlockDriverState anonymous by removing from bdrv_state and
1990 * graph_bdrv_state list.
Ryan Harperd22b2f42011-03-29 20:51:47 -05001991 Also, NULL terminate the device_name to prevent double remove */
1992void bdrv_make_anon(BlockDriverState *bs)
1993{
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02001994 /*
1995 * Take care to remove bs from bdrv_states only when it's actually
1996 * in it. Note that bs->device_list.tqe_prev is initially null,
1997 * and gets set to non-null by QTAILQ_INSERT_TAIL(). Establish
1998 * the useful invariant "bs in bdrv_states iff bs->tqe_prev" by
1999 * resetting it to null on remove.
2000 */
2001 if (bs->device_list.tqe_prev) {
Benoît Canetdc364f42014-01-23 21:31:32 +01002002 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02002003 bs->device_list.tqe_prev = NULL;
Ryan Harperd22b2f42011-03-29 20:51:47 -05002004 }
Benoît Canetdc364f42014-01-23 21:31:32 +01002005 if (bs->node_name[0] != '\0') {
2006 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
2007 }
2008 bs->node_name[0] = '\0';
Ryan Harperd22b2f42011-03-29 20:51:47 -05002009}
2010
Paolo Bonzinie023b2e2012-05-08 16:51:41 +02002011static void bdrv_rebind(BlockDriverState *bs)
2012{
2013 if (bs->drv && bs->drv->bdrv_rebind) {
2014 bs->drv->bdrv_rebind(bs);
2015 }
2016}
2017
/* Copy from bs_src to bs_dest the fields that must stay attached to the
 * guest device rather than follow the image contents during bdrv_swap().
 * Called three times by bdrv_swap() to shuffle these fields back into
 * place after the wholesale struct swap. */
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */

    /* dev info */
    bs_dest->guest_block_size   = bs_src->guest_block_size;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    bs_dest->device_list = bs_src->device_list;
    bs_dest->blk = bs_src->blk;

    /* op blockers belong to the device, not the image */
    memcpy(bs_dest->op_blockers, bs_src->op_blockers,
           sizeof(bs_dest->op_blockers));
}
2061
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new must not be attached to a BlockBackend.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* The code needs to swap the node_name but simply swapping node_list won't
     * work so first remove the nodes from the graph list, do the swap then
     * insert them back if needed.
     */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
    }

    /* bs_new must be unattached and shouldn't have anything fancy enabled */
    assert(!bs_new->blk);
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    /* Swap the two structs wholesale... */
    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new must remain unattached */
    assert(!bs_new->blk);

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->job == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    /* insert the nodes back into the graph node list if needed */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
    }

    /* Let the drivers fix up any pointers into their opaque state */
    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
2124
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new must not be attached to a BlockBackend.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* After the swap, bs_top holds the new image's contents and bs_new
     * holds the old top's; chain the old top underneath as backing file. */
    bdrv_set_backing_hd(bs_top, bs_new);
}
2144
/* Free a BlockDriverState whose last reference has been dropped.
 * The BDS must be idle: no job, no op blockers, refcount zero, and no
 * dirty bitmaps still attached. Closes it and unlinks it from the
 * global lists before freeing. Called from bdrv_unref(). */
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->job);
    assert(bdrv_op_blocker_is_empty(bs));
    assert(!bs->refcnt);
    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}
2159
aliguorie97fc192009-04-21 23:11:50 +00002160/*
2161 * Run consistency checks on an image
2162 *
Kevin Wolfe076f332010-06-29 11:43:13 +02002163 * Returns 0 if the check could be completed (it doesn't mean that the image is
Stefan Weila1c72732011-04-28 17:20:38 +02002164 * free of errors) or -errno when an internal error occurred. The results of the
Kevin Wolfe076f332010-06-29 11:43:13 +02002165 * check are stored in res.
aliguorie97fc192009-04-21 23:11:50 +00002166 */
Kevin Wolf4534ff52012-05-11 16:07:02 +02002167int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
aliguorie97fc192009-04-21 23:11:50 +00002168{
Max Reitz908bcd52014-08-07 22:47:55 +02002169 if (bs->drv == NULL) {
2170 return -ENOMEDIUM;
2171 }
aliguorie97fc192009-04-21 23:11:50 +00002172 if (bs->drv->bdrv_check == NULL) {
2173 return -ENOTSUP;
2174 }
2175
Kevin Wolfe076f332010-06-29 11:43:13 +02002176 memset(res, 0, sizeof(*res));
Kevin Wolf4534ff52012-05-11 16:07:02 +02002177 return bs->drv->bdrv_check(bs, res, fix);
aliguorie97fc192009-04-21 23:11:50 +00002178}
2179
Kevin Wolf8a426612010-07-16 17:17:01 +02002180#define COMMIT_BUF_SECTORS 2048
2181
bellard33e39632003-07-06 17:15:21 +00002182/* commit COW file into the raw image */
2183int bdrv_commit(BlockDriverState *bs)
2184{
bellard19cb3732006-08-19 11:45:59 +00002185 BlockDriver *drv = bs->drv;
Jeff Cody72706ea2014-01-24 09:02:35 -05002186 int64_t sector, total_sectors, length, backing_length;
Kevin Wolf8a426612010-07-16 17:17:01 +02002187 int n, ro, open_flags;
Jeff Cody0bce5972012-09-20 15:13:34 -04002188 int ret = 0;
Jeff Cody72706ea2014-01-24 09:02:35 -05002189 uint8_t *buf = NULL;
Jim Meyeringc2cba3d2012-10-04 13:09:46 +02002190 char filename[PATH_MAX];
bellard33e39632003-07-06 17:15:21 +00002191
bellard19cb3732006-08-19 11:45:59 +00002192 if (!drv)
2193 return -ENOMEDIUM;
Liu Yuan6bb45152014-09-01 13:35:21 +08002194
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002195 if (!bs->backing_hd) {
2196 return -ENOTSUP;
bellard33e39632003-07-06 17:15:21 +00002197 }
2198
Fam Zheng3718d8a2014-05-23 21:29:43 +08002199 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2200 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
Stefan Hajnoczi2d3735d2012-01-18 14:40:41 +00002201 return -EBUSY;
2202 }
2203
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002204 ro = bs->backing_hd->read_only;
Jim Meyeringc2cba3d2012-10-04 13:09:46 +02002205 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2206 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002207 open_flags = bs->backing_hd->open_flags;
2208
2209 if (ro) {
Jeff Cody0bce5972012-09-20 15:13:34 -04002210 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2211 return -EACCES;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002212 }
bellard33e39632003-07-06 17:15:21 +00002213 }
bellardea2384d2004-08-01 21:59:26 +00002214
Jeff Cody72706ea2014-01-24 09:02:35 -05002215 length = bdrv_getlength(bs);
2216 if (length < 0) {
2217 ret = length;
2218 goto ro_cleanup;
2219 }
2220
2221 backing_length = bdrv_getlength(bs->backing_hd);
2222 if (backing_length < 0) {
2223 ret = backing_length;
2224 goto ro_cleanup;
2225 }
2226
2227 /* If our top snapshot is larger than the backing file image,
2228 * grow the backing file image if possible. If not possible,
2229 * we must return an error */
2230 if (length > backing_length) {
2231 ret = bdrv_truncate(bs->backing_hd, length);
2232 if (ret < 0) {
2233 goto ro_cleanup;
2234 }
2235 }
2236
2237 total_sectors = length >> BDRV_SECTOR_BITS;
Kevin Wolf857d4f42014-05-20 13:16:51 +02002238
2239 /* qemu_try_blockalign() for bs will choose an alignment that works for
2240 * bs->backing_hd as well, so no need to compare the alignment manually. */
2241 buf = qemu_try_blockalign(bs, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2242 if (buf == NULL) {
2243 ret = -ENOMEM;
2244 goto ro_cleanup;
2245 }
bellardea2384d2004-08-01 21:59:26 +00002246
Kevin Wolf8a426612010-07-16 17:17:01 +02002247 for (sector = 0; sector < total_sectors; sector += n) {
Paolo Bonzinid6636402013-09-04 19:00:25 +02002248 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2249 if (ret < 0) {
2250 goto ro_cleanup;
2251 }
2252 if (ret) {
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002253 ret = bdrv_read(bs, sector, buf, n);
2254 if (ret < 0) {
Kevin Wolf8a426612010-07-16 17:17:01 +02002255 goto ro_cleanup;
2256 }
2257
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002258 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2259 if (ret < 0) {
Kevin Wolf8a426612010-07-16 17:17:01 +02002260 goto ro_cleanup;
2261 }
bellardea2384d2004-08-01 21:59:26 +00002262 }
2263 }
bellard95389c82005-12-18 18:28:15 +00002264
Christoph Hellwig1d449522010-01-17 12:32:30 +01002265 if (drv->bdrv_make_empty) {
2266 ret = drv->bdrv_make_empty(bs);
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002267 if (ret < 0) {
2268 goto ro_cleanup;
2269 }
Christoph Hellwig1d449522010-01-17 12:32:30 +01002270 bdrv_flush(bs);
2271 }
bellard95389c82005-12-18 18:28:15 +00002272
Christoph Hellwig3f5075a2010-01-12 13:49:23 +01002273 /*
2274 * Make sure all data we wrote to the backing device is actually
2275 * stable on disk.
2276 */
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002277 if (bs->backing_hd) {
Christoph Hellwig3f5075a2010-01-12 13:49:23 +01002278 bdrv_flush(bs->backing_hd);
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002279 }
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002280
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002281 ret = 0;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002282ro_cleanup:
Kevin Wolf857d4f42014-05-20 13:16:51 +02002283 qemu_vfree(buf);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002284
2285 if (ro) {
Jeff Cody0bce5972012-09-20 15:13:34 -04002286 /* ignoring error return here */
2287 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002288 }
2289
Christoph Hellwig1d449522010-01-17 12:32:30 +01002290 return ret;
bellard33e39632003-07-06 17:15:21 +00002291}
2292
Stefan Hajnoczie8877492012-03-05 18:10:11 +00002293int bdrv_commit_all(void)
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02002294{
2295 BlockDriverState *bs;
2296
Benoît Canetdc364f42014-01-23 21:31:32 +01002297 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02002298 AioContext *aio_context = bdrv_get_aio_context(bs);
2299
2300 aio_context_acquire(aio_context);
Jeff Cody272d2d82013-02-26 09:55:48 -05002301 if (bs->drv && bs->backing_hd) {
2302 int ret = bdrv_commit(bs);
2303 if (ret < 0) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02002304 aio_context_release(aio_context);
Jeff Cody272d2d82013-02-26 09:55:48 -05002305 return ret;
2306 }
Stefan Hajnoczie8877492012-03-05 18:10:11 +00002307 }
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02002308 aio_context_release(aio_context);
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02002309 }
Stefan Hajnoczie8877492012-03-05 18:10:11 +00002310 return 0;
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02002311}
2312
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002313/**
2314 * Remove an active request from the tracked requests list
2315 *
2316 * This function should be called when a tracked request is completing.
2317 */
2318static void tracked_request_end(BdrvTrackedRequest *req)
2319{
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002320 if (req->serialising) {
2321 req->bs->serialising_in_flight--;
2322 }
2323
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002324 QLIST_REMOVE(req, list);
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002325 qemu_co_queue_restart_all(&req->wait_queue);
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002326}
2327
2328/**
2329 * Add an active request to the tracked requests list
2330 */
2331static void tracked_request_begin(BdrvTrackedRequest *req,
2332 BlockDriverState *bs,
Kevin Wolf793ed472013-12-03 15:31:25 +01002333 int64_t offset,
2334 unsigned int bytes, bool is_write)
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002335{
2336 *req = (BdrvTrackedRequest){
2337 .bs = bs,
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002338 .offset = offset,
2339 .bytes = bytes,
2340 .is_write = is_write,
2341 .co = qemu_coroutine_self(),
2342 .serialising = false,
Kevin Wolf73271452013-12-04 17:08:50 +01002343 .overlap_offset = offset,
2344 .overlap_bytes = bytes,
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002345 };
2346
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002347 qemu_co_queue_init(&req->wait_queue);
2348
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002349 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2350}
2351
Kevin Wolfe96126f2014-02-08 10:42:18 +01002352static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002353{
Kevin Wolf73271452013-12-04 17:08:50 +01002354 int64_t overlap_offset = req->offset & ~(align - 1);
Kevin Wolfe96126f2014-02-08 10:42:18 +01002355 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2356 - overlap_offset;
Kevin Wolf73271452013-12-04 17:08:50 +01002357
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002358 if (!req->serialising) {
2359 req->bs->serialising_in_flight++;
2360 req->serialising = true;
2361 }
Kevin Wolf73271452013-12-04 17:08:50 +01002362
2363 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2364 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002365}
2366
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002367/**
2368 * Round a region to cluster boundaries
2369 */
Paolo Bonzini343bded2013-01-21 17:09:42 +01002370void bdrv_round_to_clusters(BlockDriverState *bs,
2371 int64_t sector_num, int nb_sectors,
2372 int64_t *cluster_sector_num,
2373 int *cluster_nb_sectors)
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002374{
2375 BlockDriverInfo bdi;
2376
2377 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2378 *cluster_sector_num = sector_num;
2379 *cluster_nb_sectors = nb_sectors;
2380 } else {
2381 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2382 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2383 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2384 nb_sectors, c);
2385 }
2386}
2387
Kevin Wolf73271452013-12-04 17:08:50 +01002388static int bdrv_get_cluster_size(BlockDriverState *bs)
Kevin Wolf793ed472013-12-03 15:31:25 +01002389{
2390 BlockDriverInfo bdi;
Kevin Wolf73271452013-12-04 17:08:50 +01002391 int ret;
Kevin Wolf793ed472013-12-03 15:31:25 +01002392
Kevin Wolf73271452013-12-04 17:08:50 +01002393 ret = bdrv_get_info(bs, &bdi);
2394 if (ret < 0 || bdi.cluster_size == 0) {
2395 return bs->request_alignment;
Kevin Wolf793ed472013-12-03 15:31:25 +01002396 } else {
Kevin Wolf73271452013-12-04 17:08:50 +01002397 return bdi.cluster_size;
Kevin Wolf793ed472013-12-03 15:31:25 +01002398 }
2399}
2400
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002401static bool tracked_request_overlaps(BdrvTrackedRequest *req,
Kevin Wolf793ed472013-12-03 15:31:25 +01002402 int64_t offset, unsigned int bytes)
2403{
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002404 /* aaaa bbbb */
Kevin Wolf73271452013-12-04 17:08:50 +01002405 if (offset >= req->overlap_offset + req->overlap_bytes) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002406 return false;
2407 }
2408 /* bbbb aaaa */
Kevin Wolf73271452013-12-04 17:08:50 +01002409 if (req->overlap_offset >= offset + bytes) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002410 return false;
2411 }
2412 return true;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002413}
2414
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002415static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002416{
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002417 BlockDriverState *bs = self->bs;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002418 BdrvTrackedRequest *req;
2419 bool retry;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002420 bool waited = false;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002421
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002422 if (!bs->serialising_in_flight) {
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002423 return false;
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002424 }
2425
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002426 do {
2427 retry = false;
2428 QLIST_FOREACH(req, &bs->tracked_requests, list) {
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002429 if (req == self || (!req->serialising && !self->serialising)) {
Kevin Wolf65afd212013-12-03 14:55:55 +01002430 continue;
2431 }
Kevin Wolf73271452013-12-04 17:08:50 +01002432 if (tracked_request_overlaps(req, self->overlap_offset,
2433 self->overlap_bytes))
2434 {
Stefan Hajnoczi5f8b6492011-11-30 12:23:42 +00002435 /* Hitting this means there was a reentrant request, for
2436 * example, a block driver issuing nested requests. This must
2437 * never happen since it means deadlock.
2438 */
2439 assert(qemu_coroutine_self() != req->co);
2440
Kevin Wolf64604402013-12-13 13:04:35 +01002441 /* If the request is already (indirectly) waiting for us, or
2442 * will wait for us as soon as it wakes up, then just go on
2443 * (instead of producing a deadlock in the former case). */
2444 if (!req->waiting_for) {
2445 self->waiting_for = req;
2446 qemu_co_queue_wait(&req->wait_queue);
2447 self->waiting_for = NULL;
2448 retry = true;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002449 waited = true;
Kevin Wolf64604402013-12-13 13:04:35 +01002450 break;
2451 }
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002452 }
2453 }
2454 } while (retry);
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002455
2456 return waited;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002457}
2458
Kevin Wolf756e6732010-01-12 12:55:17 +01002459/*
2460 * Return values:
2461 * 0 - success
2462 * -EINVAL - backing format specified, but no file
2463 * -ENOSPC - can't update the backing file because no space is left in the
2464 * image file header
2465 * -ENOTSUP - format driver doesn't support changing the backing file
2466 */
2467int bdrv_change_backing_file(BlockDriverState *bs,
2468 const char *backing_file, const char *backing_fmt)
2469{
2470 BlockDriver *drv = bs->drv;
Paolo Bonzini469ef352012-04-12 14:01:02 +02002471 int ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01002472
Paolo Bonzini5f377792012-04-12 14:01:01 +02002473 /* Backing file format doesn't make sense without a backing file */
2474 if (backing_fmt && !backing_file) {
2475 return -EINVAL;
2476 }
2477
Kevin Wolf756e6732010-01-12 12:55:17 +01002478 if (drv->bdrv_change_backing_file != NULL) {
Paolo Bonzini469ef352012-04-12 14:01:02 +02002479 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
Kevin Wolf756e6732010-01-12 12:55:17 +01002480 } else {
Paolo Bonzini469ef352012-04-12 14:01:02 +02002481 ret = -ENOTSUP;
Kevin Wolf756e6732010-01-12 12:55:17 +01002482 }
Paolo Bonzini469ef352012-04-12 14:01:02 +02002483
2484 if (ret == 0) {
2485 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2486 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2487 }
2488 return ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01002489}
2490
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002491/*
2492 * Finds the image layer in the chain that has 'bs' as its backing file.
2493 *
2494 * active is the current topmost image.
2495 *
2496 * Returns NULL if bs is not found in active's image chain,
2497 * or if active == bs.
Jeff Cody4caf0fc2014-06-25 15:35:26 -04002498 *
2499 * Returns the bottommost base image if bs == NULL.
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002500 */
2501BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2502 BlockDriverState *bs)
2503{
Jeff Cody4caf0fc2014-06-25 15:35:26 -04002504 while (active && bs != active->backing_hd) {
2505 active = active->backing_hd;
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002506 }
2507
Jeff Cody4caf0fc2014-06-25 15:35:26 -04002508 return active;
2509}
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002510
Jeff Cody4caf0fc2014-06-25 15:35:26 -04002511/* Given a BDS, searches for the base layer. */
2512BlockDriverState *bdrv_find_base(BlockDriverState *bs)
2513{
2514 return bdrv_find_overlay(bs, NULL);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002515}
2516
/* One element of the deletion list built by bdrv_drop_intermediate():
 * records an intermediate image of the backing chain that is to be dropped
 * once the chain has been relinked. */
typedef struct BlkIntermediateStates {
    BlockDriverState *bs;                        /* image to drop */
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry; /* list linkage */
} BlkIntermediateStates;
2521
2522
2523/*
2524 * Drops images above 'base' up to and including 'top', and sets the image
2525 * above 'top' to have base as its backing file.
2526 *
2527 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2528 * information in 'bs' can be properly updated.
2529 *
2530 * E.g., this will convert the following chain:
2531 * bottom <- base <- intermediate <- top <- active
2532 *
2533 * to
2534 *
2535 * bottom <- base <- active
2536 *
2537 * It is allowed for bottom==base, in which case it converts:
2538 *
2539 * base <- intermediate <- top <- active
2540 *
2541 * to
2542 *
2543 * base <- active
2544 *
Jeff Cody54e26902014-06-25 15:40:10 -04002545 * If backing_file_str is non-NULL, it will be used when modifying top's
2546 * overlay image metadata.
2547 *
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002548 * Error conditions:
2549 * if active == top, that is considered an error
2550 *
2551 */
2552int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
Jeff Cody54e26902014-06-25 15:40:10 -04002553 BlockDriverState *base, const char *backing_file_str)
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002554{
2555 BlockDriverState *intermediate;
2556 BlockDriverState *base_bs = NULL;
2557 BlockDriverState *new_top_bs = NULL;
2558 BlkIntermediateStates *intermediate_state, *next;
2559 int ret = -EIO;
2560
2561 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2562 QSIMPLEQ_INIT(&states_to_delete);
2563
2564 if (!top->drv || !base->drv) {
2565 goto exit;
2566 }
2567
2568 new_top_bs = bdrv_find_overlay(active, top);
2569
2570 if (new_top_bs == NULL) {
2571 /* we could not find the image above 'top', this is an error */
2572 goto exit;
2573 }
2574
2575 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2576 * to do, no intermediate images */
2577 if (new_top_bs->backing_hd == base) {
2578 ret = 0;
2579 goto exit;
2580 }
2581
2582 intermediate = top;
2583
2584 /* now we will go down through the list, and add each BDS we find
2585 * into our deletion queue, until we hit the 'base'
2586 */
2587 while (intermediate) {
Markus Armbruster5839e532014-08-19 10:31:08 +02002588 intermediate_state = g_new0(BlkIntermediateStates, 1);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002589 intermediate_state->bs = intermediate;
2590 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2591
2592 if (intermediate->backing_hd == base) {
2593 base_bs = intermediate->backing_hd;
2594 break;
2595 }
2596 intermediate = intermediate->backing_hd;
2597 }
2598 if (base_bs == NULL) {
2599 /* something went wrong, we did not end at the base. safely
2600 * unravel everything, and exit with error */
2601 goto exit;
2602 }
2603
2604 /* success - we can delete the intermediate states, and link top->base */
Jeff Cody54e26902014-06-25 15:40:10 -04002605 backing_file_str = backing_file_str ? backing_file_str : base_bs->filename;
2606 ret = bdrv_change_backing_file(new_top_bs, backing_file_str,
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002607 base_bs->drv ? base_bs->drv->format_name : "");
2608 if (ret) {
2609 goto exit;
2610 }
Fam Zheng920beae2014-05-23 21:29:46 +08002611 bdrv_set_backing_hd(new_top_bs, base_bs);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002612
2613 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2614 /* so that bdrv_close() does not recursively close the chain */
Fam Zheng920beae2014-05-23 21:29:46 +08002615 bdrv_set_backing_hd(intermediate_state->bs, NULL);
Fam Zheng4f6fd342013-08-23 09:14:47 +08002616 bdrv_unref(intermediate_state->bs);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002617 }
2618 ret = 0;
2619
2620exit:
2621 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2622 g_free(intermediate_state);
2623 }
2624 return ret;
2625}
2626
2627
aliguori71d07702009-03-03 17:37:16 +00002628static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2629 size_t size)
2630{
2631 int64_t len;
2632
Kevin Wolf1dd3a442014-04-14 14:48:16 +02002633 if (size > INT_MAX) {
2634 return -EIO;
2635 }
2636
aliguori71d07702009-03-03 17:37:16 +00002637 if (!bdrv_is_inserted(bs))
2638 return -ENOMEDIUM;
2639
2640 if (bs->growable)
2641 return 0;
2642
2643 len = bdrv_getlength(bs);
2644
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02002645 if (offset < 0)
2646 return -EIO;
2647
2648 if ((offset > len) || (len - offset < size))
aliguori71d07702009-03-03 17:37:16 +00002649 return -EIO;
2650
2651 return 0;
2652}
2653
2654static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2655 int nb_sectors)
2656{
Kevin Wolf54db38a2014-04-14 14:47:14 +02002657 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
Kevin Wolf8f4754e2014-03-26 13:06:02 +01002658 return -EIO;
2659 }
2660
Jes Sorenseneb5a3162010-05-27 16:20:31 +02002661 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2662 nb_sectors * BDRV_SECTOR_SIZE);
aliguori71d07702009-03-03 17:37:16 +00002663}
2664
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002665typedef struct RwCo {
2666 BlockDriverState *bs;
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002667 int64_t offset;
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002668 QEMUIOVector *qiov;
2669 bool is_write;
2670 int ret;
Peter Lieven4105eaa2013-07-11 14:16:22 +02002671 BdrvRequestFlags flags;
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002672} RwCo;
2673
2674static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2675{
2676 RwCo *rwco = opaque;
2677
2678 if (!rwco->is_write) {
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002679 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2680 rwco->qiov->size, rwco->qiov,
Peter Lieven4105eaa2013-07-11 14:16:22 +02002681 rwco->flags);
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002682 } else {
2683 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2684 rwco->qiov->size, rwco->qiov,
2685 rwco->flags);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002686 }
2687}
2688
2689/*
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002690 * Process a vectored synchronous request using coroutines
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002691 */
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002692static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2693 QEMUIOVector *qiov, bool is_write,
2694 BdrvRequestFlags flags)
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002695{
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002696 Coroutine *co;
2697 RwCo rwco = {
2698 .bs = bs,
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002699 .offset = offset,
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002700 .qiov = qiov,
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002701 .is_write = is_write,
2702 .ret = NOT_DONE,
Peter Lieven4105eaa2013-07-11 14:16:22 +02002703 .flags = flags,
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002704 };
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002705
Zhi Yong Wu498e3862012-04-02 18:59:34 +08002706 /**
2707 * In sync call context, when the vcpu is blocked, this throttling timer
2708 * will not fire; so the I/O throttling function has to be disabled here
2709 * if it has been enabled.
2710 */
2711 if (bs->io_limits_enabled) {
2712 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2713 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2714 bdrv_io_limits_disable(bs);
2715 }
2716
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002717 if (qemu_in_coroutine()) {
2718 /* Fast-path if already in coroutine context */
2719 bdrv_rw_co_entry(&rwco);
2720 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02002721 AioContext *aio_context = bdrv_get_aio_context(bs);
2722
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002723 co = qemu_coroutine_create(bdrv_rw_co_entry);
2724 qemu_coroutine_enter(co, &rwco);
2725 while (rwco.ret == NOT_DONE) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02002726 aio_poll(aio_context, true);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002727 }
2728 }
2729 return rwco.ret;
2730}
2731
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002732/*
2733 * Process a synchronous request using coroutines
2734 */
2735static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
Peter Lieven4105eaa2013-07-11 14:16:22 +02002736 int nb_sectors, bool is_write, BdrvRequestFlags flags)
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002737{
2738 QEMUIOVector qiov;
2739 struct iovec iov = {
2740 .iov_base = (void *)buf,
2741 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2742 };
2743
Kevin Wolfda15ee52014-04-14 15:39:36 +02002744 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2745 return -EINVAL;
2746 }
2747
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002748 qemu_iovec_init_external(&qiov, &iov, 1);
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002749 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2750 &qiov, is_write, flags);
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002751}
2752
bellard19cb3732006-08-19 11:45:59 +00002753/* return < 0 if error. See bdrv_write() for the return codes */
ths5fafdf22007-09-16 21:08:06 +00002754int bdrv_read(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00002755 uint8_t *buf, int nb_sectors)
2756{
Peter Lieven4105eaa2013-07-11 14:16:22 +02002757 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
bellardfc01f7e2003-06-30 10:03:06 +00002758}
2759
Markus Armbruster07d27a42012-06-29 17:34:29 +02002760/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2761int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2762 uint8_t *buf, int nb_sectors)
2763{
2764 bool enabled;
2765 int ret;
2766
2767 enabled = bs->io_limits_enabled;
2768 bs->io_limits_enabled = false;
Peter Lieven4e7395e2013-07-18 10:37:32 +02002769 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
Markus Armbruster07d27a42012-06-29 17:34:29 +02002770 bs->io_limits_enabled = enabled;
2771 return ret;
2772}
2773
ths5fafdf22007-09-16 21:08:06 +00002774/* Return < 0 if error. Important errors are:
bellard19cb3732006-08-19 11:45:59 +00002775 -EIO generic I/O error (may happen for all errors)
2776 -ENOMEDIUM No media inserted.
2777 -EINVAL Invalid sector number or nb_sectors
2778 -EACCES Trying to write a read-only device
2779*/
ths5fafdf22007-09-16 21:08:06 +00002780int bdrv_write(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00002781 const uint8_t *buf, int nb_sectors)
2782{
Peter Lieven4105eaa2013-07-11 14:16:22 +02002783 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
bellard83f64092006-08-01 16:21:11 +00002784}
2785
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02002786int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2787 int nb_sectors, BdrvRequestFlags flags)
Peter Lieven4105eaa2013-07-11 14:16:22 +02002788{
2789 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02002790 BDRV_REQ_ZERO_WRITE | flags);
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002791}
2792
Peter Lievend75cbb52013-10-24 12:07:03 +02002793/*
2794 * Completely zero out a block device with the help of bdrv_write_zeroes.
2795 * The operation is sped up by checking the block status and only writing
2796 * zeroes to the device if they currently do not return zeroes. Optional
2797 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2798 *
2799 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2800 */
2801int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2802{
Markus Armbrusterd32f7c12014-06-26 13:23:18 +02002803 int64_t target_sectors, ret, nb_sectors, sector_num = 0;
Peter Lievend75cbb52013-10-24 12:07:03 +02002804 int n;
2805
Markus Armbrusterd32f7c12014-06-26 13:23:18 +02002806 target_sectors = bdrv_nb_sectors(bs);
2807 if (target_sectors < 0) {
2808 return target_sectors;
Kevin Wolf9ce10c02014-04-14 17:03:34 +02002809 }
Kevin Wolf9ce10c02014-04-14 17:03:34 +02002810
Peter Lievend75cbb52013-10-24 12:07:03 +02002811 for (;;) {
Markus Armbrusterd32f7c12014-06-26 13:23:18 +02002812 nb_sectors = target_sectors - sector_num;
Peter Lievend75cbb52013-10-24 12:07:03 +02002813 if (nb_sectors <= 0) {
2814 return 0;
2815 }
Fam Zhengf3a9cfd2014-11-10 15:07:44 +08002816 if (nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2817 nb_sectors = INT_MAX / BDRV_SECTOR_SIZE;
Peter Lievend75cbb52013-10-24 12:07:03 +02002818 }
2819 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
Peter Lieven3d94ce62013-12-12 13:57:05 +01002820 if (ret < 0) {
2821 error_report("error getting block status at sector %" PRId64 ": %s",
2822 sector_num, strerror(-ret));
2823 return ret;
2824 }
Peter Lievend75cbb52013-10-24 12:07:03 +02002825 if (ret & BDRV_BLOCK_ZERO) {
2826 sector_num += n;
2827 continue;
2828 }
2829 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2830 if (ret < 0) {
2831 error_report("error writing zeroes at sector %" PRId64 ": %s",
2832 sector_num, strerror(-ret));
2833 return ret;
2834 }
2835 sector_num += n;
2836 }
2837}
2838
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002839int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
bellard83f64092006-08-01 16:21:11 +00002840{
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002841 QEMUIOVector qiov;
2842 struct iovec iov = {
2843 .iov_base = (void *)buf,
2844 .iov_len = bytes,
2845 };
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01002846 int ret;
bellard83f64092006-08-01 16:21:11 +00002847
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002848 if (bytes < 0) {
2849 return -EINVAL;
bellard83f64092006-08-01 16:21:11 +00002850 }
2851
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002852 qemu_iovec_init_external(&qiov, &iov, 1);
2853 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2854 if (ret < 0) {
2855 return ret;
bellard83f64092006-08-01 16:21:11 +00002856 }
2857
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002858 return bytes;
bellard83f64092006-08-01 16:21:11 +00002859}
2860
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002861int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
bellard83f64092006-08-01 16:21:11 +00002862{
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01002863 int ret;
bellard83f64092006-08-01 16:21:11 +00002864
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002865 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2866 if (ret < 0) {
2867 return ret;
bellard83f64092006-08-01 16:21:11 +00002868 }
2869
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002870 return qiov->size;
2871}
2872
2873int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002874 const void *buf, int bytes)
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002875{
2876 QEMUIOVector qiov;
2877 struct iovec iov = {
2878 .iov_base = (void *) buf,
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002879 .iov_len = bytes,
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002880 };
2881
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002882 if (bytes < 0) {
2883 return -EINVAL;
2884 }
2885
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002886 qemu_iovec_init_external(&qiov, &iov, 1);
2887 return bdrv_pwritev(bs, offset, &qiov);
bellard83f64092006-08-01 16:21:11 +00002888}
bellard83f64092006-08-01 16:21:11 +00002889
Kevin Wolff08145f2010-06-16 16:38:15 +02002890/*
2891 * Writes to the file and ensures that no writes are reordered across this
2892 * request (acts as a barrier)
2893 *
2894 * Returns 0 on success, -errno in error cases.
2895 */
2896int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2897 const void *buf, int count)
2898{
2899 int ret;
2900
2901 ret = bdrv_pwrite(bs, offset, buf, count);
2902 if (ret < 0) {
2903 return ret;
2904 }
2905
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02002906 /* No flush needed for cache modes that already do it */
2907 if (bs->enable_write_cache) {
Kevin Wolff08145f2010-06-16 16:38:15 +02002908 bdrv_flush(bs);
2909 }
2910
2911 return 0;
2912}
2913
/*
 * Copy-on-read implementation: read the cluster(s) covering
 * [sector_num, sector_num + nb_sectors) through the driver into a bounce
 * buffer, write them back into the image so the data becomes locally
 * allocated, then copy the requested sub-range into @qiov.
 *
 * Returns 0 on success, -errno on failure (driver read/write error or
 * bounce buffer allocation failure).
 */
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    /* The bounce buffer holds the whole (rounded) cluster range, not just
     * the caller's sub-range. */
    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    /* If the cluster turned out to be all zeroes and the driver has an
     * efficient write-zeroes, use it instead of writing the bounce buffer. */
    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    /* Copy only the sub-range the caller asked for out of the cluster. */
    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    /* qemu_vfree(NULL) is a no-op, so this is safe on the -ENOMEM path. */
    qemu_vfree(bounce_buffer);
    return ret;
}
2984
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 *
 * @offset and @bytes must be multiples of BDRV_SECTOR_SIZE (asserted below);
 * @req is the caller's tracked request, used for request serialisation.
 * Returns 0 on success, -errno on failure.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        /* Only fall into the CoR slow path when some of the range is
         * unallocated in this layer. */
        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        /* Number of readable sectors from sector_num, rounded up to the
         * request alignment (may exceed the actual end of file). */
        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            /* Read only the in-file prefix through a shortened qiov. */
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
                                     &local_qiov);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            /* NOTE: these locals shadow the function parameters, and despite
             * its name this 'offset' is measured in sectors (it is scaled by
             * BDRV_SECTOR_SIZE below). */
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
3074
/*
 * Handle a read request in coroutine context
 *
 * Checks the request, applies I/O throttling, pads @qiov with bounce
 * buffers so that offset and length meet the device's request alignment,
 * and forwards the aligned request to bdrv_aligned_preadv() inside a
 * tracked request.  Returns 0 on success, -errno on failure.
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        /* Unaligned head: prepend a bounce buffer covering the bytes
         * between the aligned start and the requested offset. */
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        /* Unaligned tail: append a bounce buffer up to the next aligned
         * boundary. */
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, false);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
3148
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003149static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3150 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3151 BdrvRequestFlags flags)
3152{
3153 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3154 return -EINVAL;
3155 }
3156
3157 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3158 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3159}
3160
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003161int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
Kevin Wolfda1fa912011-07-14 17:27:13 +02003162 int nb_sectors, QEMUIOVector *qiov)
3163{
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003164 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
Kevin Wolfda1fa912011-07-14 17:27:13 +02003165
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003166 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3167}
3168
3169int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3170 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3171{
3172 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3173
3174 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3175 BDRV_REQ_COPY_ON_READ);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003176}
3177
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

/*
 * Write zeroes to [sector_num, sector_num + nb_sectors), splitting the range
 * into chunks that respect the driver's write_zeroes alignment and maximum
 * request size.  Tries the driver's efficient bdrv_co_write_zeroes first and
 * falls back to writing a zeroed bounce buffer when it returns -ENOTSUP.
 * Returns 0 on success, -errno on the first failing chunk.
 */
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    /* iov.iov_base doubles as the lazily-allocated, reused bounce buffer;
     * it stays NULL until the fallback path first needs it. */
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    /* Loop ends early on the first chunk error (ret != 0). */
    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector.  */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_write_zeroes) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    /* qemu_vfree(NULL) is a no-op, so this is safe whether or not the
     * bounce buffer was ever allocated. */
    qemu_vfree(iov.iov_base);
    return ret;
}
3257
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 *
 * @offset and @bytes must be multiples of BDRV_SECTOR_SIZE (asserted below)
 * and must lie within @req's overlap range.  Runs the before-write
 * notifiers, optionally turns an all-zero payload into a write-zeroes
 * request, flushes in writethrough mode, and updates dirty bitmaps,
 * accounting and total_sectors.  Returns 0 on success, -errno on failure.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    /* If we had to wait, our request cannot itself have been marked
     * serialising (it would have deadlocked otherwise). */
    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    /* Zero detection: if enabled and the payload is all zeroes, convert the
     * write into a (possibly unmapping) write-zeroes request. */
    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }
    BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);

    /* Writethrough cache mode: make the write stable before reporting
     * success. */
    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);

    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
3317
/*
 * Handle a write request in coroutine context
 *
 * Checks the request, applies I/O throttling, and if @offset/@bytes are not
 * aligned to the device's request alignment performs a read-modify-write
 * cycle: the unaligned head and/or tail are read into bounce buffers,
 * spliced around @qiov, and the enlarged aligned request is passed to
 * bdrv_aligned_pwritev().  Returns 0 on success, -errno on failure.
 */
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, true);

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        /* Serialise so no other request can modify the head sector between
         * our read and the final write. */
        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        /* Prepend the head bytes that precede the caller's data. */
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        /* If the head was handled we already waited above, so waiting again
         * here must be a no-op when local_qiov is in use. */
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        /* Append the tail bytes that follow the caller's data. */
        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    /* qemu_vfree(NULL) is a no-op for whichever buffers were not needed. */
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);

    return ret;
}
3439
Kevin Wolf66015532013-12-03 14:40:18 +01003440static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3441 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3442 BdrvRequestFlags flags)
3443{
3444 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3445 return -EINVAL;
3446 }
3447
3448 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3449 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3450}
3451
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003452int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3453 int nb_sectors, QEMUIOVector *qiov)
3454{
3455 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3456
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003457 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3458}
3459
3460int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003461 int64_t sector_num, int nb_sectors,
3462 BdrvRequestFlags flags)
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003463{
Paolo Bonzini94d6ff22013-11-22 13:39:45 +01003464 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003465
Peter Lievend32f35c2013-10-24 12:06:52 +02003466 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3467 flags &= ~BDRV_REQ_MAY_UNMAP;
3468 }
3469
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003470 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003471 BDRV_REQ_ZERO_WRITE | flags);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003472}
3473
bellard83f64092006-08-01 16:21:11 +00003474/**
bellard83f64092006-08-01 16:21:11 +00003475 * Truncate file to 'offset' bytes (needed only for file protocols)
3476 */
3477int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3478{
3479 BlockDriver *drv = bs->drv;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003480 int ret;
bellard83f64092006-08-01 16:21:11 +00003481 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00003482 return -ENOMEDIUM;
bellard83f64092006-08-01 16:21:11 +00003483 if (!drv->bdrv_truncate)
3484 return -ENOTSUP;
Naphtali Sprei59f26892009-10-26 16:25:16 +02003485 if (bs->read_only)
3486 return -EACCES;
Jeff Cody9c75e162014-06-25 16:55:30 -04003487
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003488 ret = drv->bdrv_truncate(bs, offset);
3489 if (ret == 0) {
3490 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
Markus Armbrustera7f53e22014-10-07 13:59:25 +02003491 if (bs->blk) {
3492 blk_dev_resize_cb(bs->blk);
3493 }
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003494 }
3495 return ret;
bellard83f64092006-08-01 16:21:11 +00003496}
3497
3498/**
Fam Zheng4a1d5e12011-07-12 19:56:39 +08003499 * Length of a allocated file in bytes. Sparse files are counted by actual
3500 * allocated space. Return < 0 if error or unknown.
3501 */
3502int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3503{
3504 BlockDriver *drv = bs->drv;
3505 if (!drv) {
3506 return -ENOMEDIUM;
3507 }
3508 if (drv->bdrv_get_allocated_file_size) {
3509 return drv->bdrv_get_allocated_file_size(bs);
3510 }
3511 if (bs->file) {
3512 return bdrv_get_allocated_file_size(bs->file);
3513 }
3514 return -ENOTSUP;
3515}
3516
3517/**
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003518 * Return number of sectors on success, -errno on error.
bellard83f64092006-08-01 16:21:11 +00003519 */
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003520int64_t bdrv_nb_sectors(BlockDriverState *bs)
bellard83f64092006-08-01 16:21:11 +00003521{
3522 BlockDriver *drv = bs->drv;
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003523
bellard83f64092006-08-01 16:21:11 +00003524 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00003525 return -ENOMEDIUM;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003526
Kevin Wolfb94a2612013-10-29 12:18:58 +01003527 if (drv->has_variable_length) {
3528 int ret = refresh_total_sectors(bs, bs->total_sectors);
3529 if (ret < 0) {
3530 return ret;
Stefan Hajnoczi46a4e4e2011-03-29 20:04:41 +01003531 }
bellard83f64092006-08-01 16:21:11 +00003532 }
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003533 return bs->total_sectors;
3534}
3535
3536/**
3537 * Return length in bytes on success, -errno on error.
3538 * The length is always a multiple of BDRV_SECTOR_SIZE.
3539 */
3540int64_t bdrv_getlength(BlockDriverState *bs)
3541{
3542 int64_t ret = bdrv_nb_sectors(bs);
3543
3544 return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
bellardfc01f7e2003-06-30 10:03:06 +00003545}
3546
bellard19cb3732006-08-19 11:45:59 +00003547/* return 0 as number of sectors if no device present or error */
ths96b8f132007-12-17 01:35:20 +00003548void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
bellardfc01f7e2003-06-30 10:03:06 +00003549{
Markus Armbruster65a9bb22014-06-26 13:23:17 +02003550 int64_t nb_sectors = bdrv_nb_sectors(bs);
3551
3552 *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
bellardfc01f7e2003-06-30 10:03:06 +00003553}
bellardcf989512004-02-16 21:56:36 +00003554
Paolo Bonziniff06f5f2012-09-28 17:22:54 +02003555void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3556 BlockdevOnError on_write_error)
Markus Armbrusterabd7f682010-06-02 18:55:17 +02003557{
3558 bs->on_read_error = on_read_error;
3559 bs->on_write_error = on_write_error;
3560}
3561
Paolo Bonzini1ceee0d2012-09-28 17:22:56 +02003562BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
Markus Armbrusterabd7f682010-06-02 18:55:17 +02003563{
3564 return is_read ? bs->on_read_error : bs->on_write_error;
3565}
3566
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003567BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3568{
3569 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3570
3571 switch (on_err) {
3572 case BLOCKDEV_ON_ERROR_ENOSPC:
Wenchao Xiaa5895692014-06-18 08:43:30 +02003573 return (error == ENOSPC) ?
3574 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003575 case BLOCKDEV_ON_ERROR_STOP:
Wenchao Xiaa5895692014-06-18 08:43:30 +02003576 return BLOCK_ERROR_ACTION_STOP;
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003577 case BLOCKDEV_ON_ERROR_REPORT:
Wenchao Xiaa5895692014-06-18 08:43:30 +02003578 return BLOCK_ERROR_ACTION_REPORT;
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003579 case BLOCKDEV_ON_ERROR_IGNORE:
Wenchao Xiaa5895692014-06-18 08:43:30 +02003580 return BLOCK_ERROR_ACTION_IGNORE;
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003581 default:
3582 abort();
3583 }
3584}
3585
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003586static void send_qmp_error_event(BlockDriverState *bs,
3587 BlockErrorAction action,
3588 bool is_read, int error)
3589{
Peter Maydell573742a2014-10-10 20:33:03 +01003590 IoOperationType optype;
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003591
Peter Maydell573742a2014-10-10 20:33:03 +01003592 optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
3593 qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003594 bdrv_iostatus_is_enabled(bs),
Luiz Capitulino624ff572014-09-11 10:25:48 -04003595 error == ENOSPC, strerror(error),
3596 &error_abort);
Luiz Capitulinoc7c2ff02014-08-29 16:07:27 -04003597}
3598
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 *
 * @error must be a non-negative errno value (asserted below).  For the
 * STOP action this sets the iostatus, emits the BLOCK_IO_ERROR event and
 * requests a VM stop; for all other actions only the event is emitted.
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        bdrv_iostatus_set_err(bs, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(bs, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(bs, action, is_read, error);
    }
}
3630
bellardb3380822004-03-14 21:38:54 +00003631int bdrv_is_read_only(BlockDriverState *bs)
3632{
3633 return bs->read_only;
3634}
3635
ths985a03b2007-12-24 16:10:43 +00003636int bdrv_is_sg(BlockDriverState *bs)
3637{
3638 return bs->sg;
3639}
3640
Christoph Hellwige900a7b2009-09-04 19:01:15 +02003641int bdrv_enable_write_cache(BlockDriverState *bs)
3642{
3643 return bs->enable_write_cache;
3644}
3645
Paolo Bonzini425b0142012-06-06 00:04:52 +02003646void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3647{
3648 bs->enable_write_cache = wce;
Jeff Cody55b110f2012-09-20 15:13:18 -04003649
3650 /* so a reopen() will preserve wce */
3651 if (wce) {
3652 bs->open_flags |= BDRV_O_CACHE_WB;
3653 } else {
3654 bs->open_flags &= ~BDRV_O_CACHE_WB;
3655 }
Paolo Bonzini425b0142012-06-06 00:04:52 +02003656}
3657
bellardea2384d2004-08-01 21:59:26 +00003658int bdrv_is_encrypted(BlockDriverState *bs)
3659{
3660 if (bs->backing_hd && bs->backing_hd->encrypted)
3661 return 1;
3662 return bs->encrypted;
3663}
3664
aliguoric0f4ce72009-03-05 23:01:01 +00003665int bdrv_key_required(BlockDriverState *bs)
3666{
3667 BlockDriverState *backing_hd = bs->backing_hd;
3668
3669 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3670 return 1;
3671 return (bs->encrypted && !bs->valid_key);
3672}
3673
bellardea2384d2004-08-01 21:59:26 +00003674int bdrv_set_key(BlockDriverState *bs, const char *key)
3675{
3676 int ret;
3677 if (bs->backing_hd && bs->backing_hd->encrypted) {
3678 ret = bdrv_set_key(bs->backing_hd, key);
3679 if (ret < 0)
3680 return ret;
3681 if (!bs->encrypted)
3682 return 0;
3683 }
Shahar Havivifd04a2a2010-03-06 00:26:13 +02003684 if (!bs->encrypted) {
3685 return -EINVAL;
3686 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3687 return -ENOMEDIUM;
3688 }
aliguoric0f4ce72009-03-05 23:01:01 +00003689 ret = bs->drv->bdrv_set_key(bs, key);
aliguoribb5fc202009-03-05 23:01:15 +00003690 if (ret < 0) {
3691 bs->valid_key = 0;
3692 } else if (!bs->valid_key) {
3693 bs->valid_key = 1;
Markus Armbrustera7f53e22014-10-07 13:59:25 +02003694 if (bs->blk) {
3695 /* call the change callback now, we skipped it on open */
3696 blk_dev_change_media_cb(bs->blk, true);
3697 }
aliguoribb5fc202009-03-05 23:01:15 +00003698 }
aliguoric0f4ce72009-03-05 23:01:01 +00003699 return ret;
bellardea2384d2004-08-01 21:59:26 +00003700}
3701
Markus Armbrusterf8d6bba2012-06-13 10:11:48 +02003702const char *bdrv_get_format_name(BlockDriverState *bs)
bellardea2384d2004-08-01 21:59:26 +00003703{
Markus Armbrusterf8d6bba2012-06-13 10:11:48 +02003704 return bs->drv ? bs->drv->format_name : NULL;
bellardea2384d2004-08-01 21:59:26 +00003705}
3706
/* qsort() comparator for an array of strings (const char *).
 *
 * qsort() hands the comparator pointers to the array ELEMENTS, i.e.
 * const char ** here.  Passing @a and @b straight to strcmp() would
 * compare the raw pointer bytes rather than the strings, producing an
 * arbitrary (address-dependent) order — dereference first. */
static int qsort_strcmp(const void *a, const void *b)
{
    return strcmp(*(const char *const *)a, *(const char *const *)b);
}
3711
ths5fafdf22007-09-16 21:08:06 +00003712void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
bellardea2384d2004-08-01 21:59:26 +00003713 void *opaque)
3714{
3715 BlockDriver *drv;
Jeff Codye855e4f2014-04-28 18:29:54 -04003716 int count = 0;
Stefan Hajnocziada42402014-08-27 12:08:55 +01003717 int i;
Jeff Codye855e4f2014-04-28 18:29:54 -04003718 const char **formats = NULL;
bellardea2384d2004-08-01 21:59:26 +00003719
Stefan Hajnoczi8a22f022010-04-13 10:29:33 +01003720 QLIST_FOREACH(drv, &bdrv_drivers, list) {
Jeff Codye855e4f2014-04-28 18:29:54 -04003721 if (drv->format_name) {
3722 bool found = false;
3723 int i = count;
3724 while (formats && i && !found) {
3725 found = !strcmp(formats[--i], drv->format_name);
3726 }
3727
3728 if (!found) {
Markus Armbruster5839e532014-08-19 10:31:08 +02003729 formats = g_renew(const char *, formats, count + 1);
Jeff Codye855e4f2014-04-28 18:29:54 -04003730 formats[count++] = drv->format_name;
Jeff Codye855e4f2014-04-28 18:29:54 -04003731 }
3732 }
bellardea2384d2004-08-01 21:59:26 +00003733 }
Stefan Hajnocziada42402014-08-27 12:08:55 +01003734
3735 qsort(formats, count, sizeof(formats[0]), qsort_strcmp);
3736
3737 for (i = 0; i < count; i++) {
3738 it(opaque, formats[i]);
3739 }
3740
Jeff Codye855e4f2014-04-28 18:29:54 -04003741 g_free(formats);
bellardea2384d2004-08-01 21:59:26 +00003742}
3743
Benoît Canetdc364f42014-01-23 21:31:32 +01003744/* This function is to find block backend bs */
Markus Armbruster7f06d472014-10-07 13:59:12 +02003745/* TODO convert callers to blk_by_name(), then remove */
bellardb3380822004-03-14 21:38:54 +00003746BlockDriverState *bdrv_find(const char *name)
3747{
Markus Armbruster7f06d472014-10-07 13:59:12 +02003748 BlockBackend *blk = blk_by_name(name);
bellardb3380822004-03-14 21:38:54 +00003749
Markus Armbruster7f06d472014-10-07 13:59:12 +02003750 return blk ? blk_bs(blk) : NULL;
bellardb3380822004-03-14 21:38:54 +00003751}
3752
Benoît Canetdc364f42014-01-23 21:31:32 +01003753/* This function is to find a node in the bs graph */
3754BlockDriverState *bdrv_find_node(const char *node_name)
3755{
3756 BlockDriverState *bs;
3757
3758 assert(node_name);
3759
3760 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3761 if (!strcmp(node_name, bs->node_name)) {
3762 return bs;
3763 }
3764 }
3765 return NULL;
3766}
3767
Benoît Canetc13163f2014-01-23 21:31:34 +01003768/* Put this QMP function here so it can access the static graph_bdrv_states. */
3769BlockDeviceInfoList *bdrv_named_nodes_list(void)
3770{
3771 BlockDeviceInfoList *list, *entry;
3772 BlockDriverState *bs;
3773
3774 list = NULL;
3775 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3776 entry = g_malloc0(sizeof(*entry));
3777 entry->value = bdrv_block_device_info(bs);
3778 entry->next = list;
3779 list = entry;
3780 }
3781
3782 return list;
3783}
3784
Benoît Canet12d3ba82014-01-23 21:31:35 +01003785BlockDriverState *bdrv_lookup_bs(const char *device,
3786 const char *node_name,
3787 Error **errp)
3788{
Markus Armbruster7f06d472014-10-07 13:59:12 +02003789 BlockBackend *blk;
3790 BlockDriverState *bs;
Benoît Canet12d3ba82014-01-23 21:31:35 +01003791
Benoît Canet12d3ba82014-01-23 21:31:35 +01003792 if (device) {
Markus Armbruster7f06d472014-10-07 13:59:12 +02003793 blk = blk_by_name(device);
Benoît Canet12d3ba82014-01-23 21:31:35 +01003794
Markus Armbruster7f06d472014-10-07 13:59:12 +02003795 if (blk) {
3796 return blk_bs(blk);
Benoît Canet12d3ba82014-01-23 21:31:35 +01003797 }
Benoît Canet12d3ba82014-01-23 21:31:35 +01003798 }
3799
Benoît Canetdd67fa52014-02-12 17:15:06 +01003800 if (node_name) {
3801 bs = bdrv_find_node(node_name);
Benoît Canet12d3ba82014-01-23 21:31:35 +01003802
Benoît Canetdd67fa52014-02-12 17:15:06 +01003803 if (bs) {
3804 return bs;
3805 }
Benoît Canet12d3ba82014-01-23 21:31:35 +01003806 }
3807
Benoît Canetdd67fa52014-02-12 17:15:06 +01003808 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3809 device ? device : "",
3810 node_name ? node_name : "");
3811 return NULL;
Benoît Canet12d3ba82014-01-23 21:31:35 +01003812}
3813
Jeff Cody5a6684d2014-06-25 15:40:09 -04003814/* If 'base' is in the same chain as 'top', return true. Otherwise,
3815 * return false. If either argument is NULL, return false. */
3816bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base)
3817{
3818 while (top && top != base) {
3819 top = top->backing_hd;
3820 }
3821
3822 return top != NULL;
3823}
3824
Fam Zheng04df7652014-10-31 11:32:54 +08003825BlockDriverState *bdrv_next_node(BlockDriverState *bs)
3826{
3827 if (!bs) {
3828 return QTAILQ_FIRST(&graph_bdrv_states);
3829 }
3830 return QTAILQ_NEXT(bs, node_list);
3831}
3832
Markus Armbruster2f399b02010-06-02 18:55:20 +02003833BlockDriverState *bdrv_next(BlockDriverState *bs)
3834{
3835 if (!bs) {
3836 return QTAILQ_FIRST(&bdrv_states);
3837 }
Benoît Canetdc364f42014-01-23 21:31:32 +01003838 return QTAILQ_NEXT(bs, device_list);
Markus Armbruster2f399b02010-06-02 18:55:20 +02003839}
3840
Fam Zheng20a9e772014-10-31 11:32:55 +08003841const char *bdrv_get_node_name(const BlockDriverState *bs)
3842{
3843 return bs->node_name;
3844}
3845
Markus Armbruster7f06d472014-10-07 13:59:12 +02003846/* TODO check what callers really want: bs->node_name or blk_name() */
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02003847const char *bdrv_get_device_name(const BlockDriverState *bs)
bellardea2384d2004-08-01 21:59:26 +00003848{
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02003849 return bs->blk ? blk_name(bs->blk) : "";
bellardea2384d2004-08-01 21:59:26 +00003850}
3851
Markus Armbrusterc8433282012-06-05 16:49:24 +02003852int bdrv_get_flags(BlockDriverState *bs)
3853{
3854 return bs->open_flags;
3855}
3856
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003857int bdrv_flush_all(void)
aliguoric6ca28d2008-10-06 13:55:43 +00003858{
3859 BlockDriverState *bs;
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003860 int result = 0;
aliguoric6ca28d2008-10-06 13:55:43 +00003861
Benoît Canetdc364f42014-01-23 21:31:32 +01003862 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02003863 AioContext *aio_context = bdrv_get_aio_context(bs);
3864 int ret;
3865
3866 aio_context_acquire(aio_context);
3867 ret = bdrv_flush(bs);
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003868 if (ret < 0 && !result) {
3869 result = ret;
3870 }
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02003871 aio_context_release(aio_context);
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01003872 }
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003873
3874 return result;
aliguoric6ca28d2008-10-06 13:55:43 +00003875}
3876
Peter Lieven3ac21622013-06-28 12:47:42 +02003877int bdrv_has_zero_init_1(BlockDriverState *bs)
3878{
3879 return 1;
3880}
3881
Kevin Wolff2feebb2010-04-14 17:30:35 +02003882int bdrv_has_zero_init(BlockDriverState *bs)
3883{
3884 assert(bs->drv);
3885
Paolo Bonzini11212d82013-09-04 19:00:27 +02003886 /* If BS is a copy on write image, it is initialized to
3887 the contents of the base image, which may not be zeroes. */
3888 if (bs->backing_hd) {
3889 return 0;
3890 }
Kevin Wolf336c1c12010-07-28 11:26:29 +02003891 if (bs->drv->bdrv_has_zero_init) {
3892 return bs->drv->bdrv_has_zero_init(bs);
Kevin Wolff2feebb2010-04-14 17:30:35 +02003893 }
3894
Peter Lieven3ac21622013-06-28 12:47:42 +02003895 /* safe default */
3896 return 0;
Kevin Wolff2feebb2010-04-14 17:30:35 +02003897}
3898
Peter Lieven4ce78692013-10-24 12:06:54 +02003899bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3900{
3901 BlockDriverInfo bdi;
3902
3903 if (bs->backing_hd) {
3904 return false;
3905 }
3906
3907 if (bdrv_get_info(bs, &bdi) == 0) {
3908 return bdi.unallocated_blocks_are_zero;
3909 }
3910
3911 return false;
3912}
3913
3914bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3915{
3916 BlockDriverInfo bdi;
3917
3918 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3919 return false;
3920 }
3921
3922 if (bdrv_get_info(bs, &bdi) == 0) {
3923 return bdi.can_write_zeroes_with_unmap;
3924 }
3925
3926 return false;
3927}
3928
/* Parameter/result bundle handed to bdrv_get_block_status_co_entry() by
 * the synchronous wrapper bdrv_get_block_status(). */
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;    /* device to query */
    BlockDriverState *base;  /* NOTE(review): not used by the visible
                              * callers in this file — confirm before
                              * relying on it */
    int64_t sector_num;      /* first sector of the query */
    int nb_sectors;          /* maximum extent of the query */
    int *pnum;               /* out: #sectors in the same state */
    int64_t ret;             /* out: BDRV_BLOCK_* status or -errno */
    bool done;               /* out: completion flag polled by the caller */
} BdrvCoGetBlockStatusData;
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00003938
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * The return value is either a negative errno or a bitmask of
 * BDRV_BLOCK_* flags (possibly carrying an offset in the upper bits when
 * BDRV_BLOCK_OFFSET_VALID is set).
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        /* negative value is an errno from the driver */
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    /* clamp the request to the end of the image */
    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        /* Driver cannot report holes: treat everything as allocated data,
         * and for protocol drivers the offset is trivially known. */
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        /* Driver says: "same as the protocol file at this offset" —
         * delegate the query to bs->file. */
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    }

    if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
        /* Unallocated and not known-zero: it still reads as zero if
         * unallocated blocks are zero, or if the backing file ends
         * before this offset. */
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int file_pnum;

        /* Refine the answer with the protocol layer's view of the mapped
         * range. */
        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}
4040
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004041/* Coroutine wrapper for bdrv_get_block_status() */
4042static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004043{
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004044 BdrvCoGetBlockStatusData *data = opaque;
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004045 BlockDriverState *bs = data->bs;
4046
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004047 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4048 data->pnum);
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004049 data->done = true;
4050}
4051
/*
 * Synchronous wrapper around bdrv_co_get_block_status().
 *
 * See bdrv_co_get_block_status() for details.
 */
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        /* Spawn the coroutine and block, driving the device's AioContext
         * until the entry function flips data.done. */
        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}
4083
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004084int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4085 int nb_sectors, int *pnum)
4086{
Paolo Bonzini4333bb72013-09-04 19:00:29 +02004087 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4088 if (ret < 0) {
4089 return ret;
4090 }
Kevin Wolf01fb2702014-07-07 17:00:37 +02004091 return !!(ret & BDRV_BLOCK_ALLOCATED);
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004092}
4093
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 *
 * Returns a negative errno if any per-image query fails.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            /* Allocated in this layer: report its extent and stop. */
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         *
         * Shrink the reported unallocated run accordingly, except when the
         * layer's answer merely reflects running past its own end (a
         * shorter intermediate image), which must not clamp the result.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
4144
aliguori045df332009-03-05 23:00:48 +00004145const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4146{
4147 if (bs->backing_hd && bs->backing_hd->encrypted)
4148 return bs->backing_file;
4149 else if (bs->encrypted)
4150 return bs->filename;
4151 else
4152 return NULL;
4153}
4154
ths5fafdf22007-09-16 21:08:06 +00004155void bdrv_get_backing_filename(BlockDriverState *bs,
bellard83f64092006-08-01 16:21:11 +00004156 char *filename, int filename_size)
bellardea2384d2004-08-01 21:59:26 +00004157{
Kevin Wolf3574c602011-10-26 11:02:11 +02004158 pstrcpy(filename, filename_size, bs->backing_file);
bellardea2384d2004-08-01 21:59:26 +00004159}
4160
ths5fafdf22007-09-16 21:08:06 +00004161int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
bellardfaea38e2006-08-05 21:31:00 +00004162 const uint8_t *buf, int nb_sectors)
4163{
4164 BlockDriver *drv = bs->drv;
4165 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00004166 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00004167 if (!drv->bdrv_write_compressed)
4168 return -ENOTSUP;
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02004169 if (bdrv_check_request(bs, sector_num, nb_sectors))
4170 return -EIO;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004171
Fam Zhenge4654d22013-11-13 18:29:43 +08004172 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004173
bellardfaea38e2006-08-05 21:31:00 +00004174 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4175}
ths3b46e622007-09-17 08:09:54 +00004176
bellardfaea38e2006-08-05 21:31:00 +00004177int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4178{
4179 BlockDriver *drv = bs->drv;
4180 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00004181 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00004182 if (!drv->bdrv_get_info)
4183 return -ENOTSUP;
4184 memset(bdi, 0, sizeof(*bdi));
4185 return drv->bdrv_get_info(bs, bdi);
4186}
4187
Max Reitzeae041f2013-10-09 10:46:16 +02004188ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4189{
4190 BlockDriver *drv = bs->drv;
4191 if (drv && drv->bdrv_get_specific_info) {
4192 return drv->bdrv_get_specific_info(bs);
4193 }
4194 return NULL;
4195}
4196
Christoph Hellwig45566e92009-07-10 23:11:57 +02004197int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4198 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00004199{
Kevin Wolfcf8074b2013-04-05 21:27:53 +02004200 QEMUIOVector qiov;
4201 struct iovec iov = {
4202 .iov_base = (void *) buf,
4203 .iov_len = size,
4204 };
4205
4206 qemu_iovec_init_external(&qiov, &iov, 1);
4207 return bdrv_writev_vmstate(bs, &qiov, pos);
4208}
4209
4210int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4211{
aliguori178e08a2009-04-05 19:10:55 +00004212 BlockDriver *drv = bs->drv;
Kevin Wolfcf8074b2013-04-05 21:27:53 +02004213
4214 if (!drv) {
aliguori178e08a2009-04-05 19:10:55 +00004215 return -ENOMEDIUM;
Kevin Wolfcf8074b2013-04-05 21:27:53 +02004216 } else if (drv->bdrv_save_vmstate) {
4217 return drv->bdrv_save_vmstate(bs, qiov, pos);
4218 } else if (bs->file) {
4219 return bdrv_writev_vmstate(bs->file, qiov, pos);
4220 }
4221
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09004222 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00004223}
4224
Christoph Hellwig45566e92009-07-10 23:11:57 +02004225int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4226 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00004227{
4228 BlockDriver *drv = bs->drv;
4229 if (!drv)
4230 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09004231 if (drv->bdrv_load_vmstate)
4232 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4233 if (bs->file)
4234 return bdrv_load_vmstate(bs->file, buf, pos, size);
4235 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00004236}
4237
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004238void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4239{
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004240 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004241 return;
4242 }
4243
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004244 bs->drv->bdrv_debug_event(bs, event);
Kevin Wolf41c695c2012-12-06 14:32:58 +01004245}
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004246
Kevin Wolf41c695c2012-12-06 14:32:58 +01004247int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4248 const char *tag)
4249{
4250 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4251 bs = bs->file;
4252 }
4253
4254 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4255 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4256 }
4257
4258 return -ENOTSUP;
4259}
4260
Fam Zheng4cc70e92013-11-20 10:01:54 +08004261int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4262{
4263 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4264 bs = bs->file;
4265 }
4266
4267 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4268 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4269 }
4270
4271 return -ENOTSUP;
4272}
4273
Kevin Wolf41c695c2012-12-06 14:32:58 +01004274int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4275{
Max Reitz938789e2014-03-10 23:44:08 +01004276 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
Kevin Wolf41c695c2012-12-06 14:32:58 +01004277 bs = bs->file;
4278 }
4279
4280 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4281 return bs->drv->bdrv_debug_resume(bs, tag);
4282 }
4283
4284 return -ENOTSUP;
4285}
4286
4287bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4288{
4289 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4290 bs = bs->file;
4291 }
4292
4293 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4294 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4295 }
4296
4297 return false;
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004298}
4299
Blue Swirl199630b2010-07-25 20:49:34 +00004300int bdrv_is_snapshot(BlockDriverState *bs)
4301{
4302 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4303}
4304
/* backing_file can either be relative, or absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    /* Scratch buffers for path normalization; freed on every exit path
     * below. */
    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    /* Walk down the backing chain, comparing @backing_file against each
     * node's recorded backing file name. */
    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the current
             * image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                /* e.g. the file does not exist — try the next layer */
                continue;
            }

            /* We need to make sure the backing filename we are comparing against
             * is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}
4370
Benoît Canetf198fd12012-08-02 10:22:47 +02004371int bdrv_get_backing_file_depth(BlockDriverState *bs)
4372{
4373 if (!bs->drv) {
4374 return 0;
4375 }
4376
4377 if (!bs->backing_hd) {
4378 return 0;
4379 }
4380
4381 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4382}
4383
bellard83f64092006-08-01 16:21:11 +00004384/**************************************************************/
4385/* async I/Os */
4386
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004387BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4388 QEMUIOVector *qiov, int nb_sectors,
Markus Armbruster097310b2014-10-07 13:59:15 +02004389 BlockCompletionFunc *cb, void *opaque)
aliguori3b69e4b2009-01-22 16:59:24 +00004390{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01004391 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4392
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004393 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004394 cb, opaque, false);
bellard83f64092006-08-01 16:21:11 +00004395}
4396
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004397BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4398 QEMUIOVector *qiov, int nb_sectors,
Markus Armbruster097310b2014-10-07 13:59:15 +02004399 BlockCompletionFunc *cb, void *opaque)
bellard83f64092006-08-01 16:21:11 +00004400{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01004401 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4402
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004403 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004404 cb, opaque, true);
bellard83f64092006-08-01 16:21:11 +00004405}
4406
/*
 * Submit an asynchronous zero-write of @nb_sectors sectors starting at
 * @sector_num.  Routed through the coroutine write path with
 * BDRV_REQ_ZERO_WRITE set and a NULL qiov (no payload to transfer).
 */
BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
4417
Kevin Wolf40b4f532009-09-09 17:53:37 +02004418
/*
 * Shared completion state for one bdrv_aio_multiwrite() batch.  The batch
 * completes -- and all per-request user callbacks run -- once num_requests
 * submitted writes have finished; the first error seen is recorded in
 * 'error' and reported to every callback.
 */
typedef struct MultiwriteCB {
    int error;              /* first error reported by any request, or 0 */
    int num_requests;       /* submitted (possibly merged) writes in flight */
    int num_callbacks;      /* original, pre-merge request count */
    struct {
        BlockCompletionFunc *cb;    /* user completion callback */
        void *opaque;               /* user argument for cb */
        QEMUIOVector *free_qiov;    /* merged qiov to destroy/free, or NULL */
    } callbacks[];          /* one entry per original request */
} MultiwriteCB;
4429
4430static void multiwrite_user_cb(MultiwriteCB *mcb)
4431{
4432 int i;
4433
4434 for (i = 0; i < mcb->num_callbacks; i++) {
4435 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
Stefan Hajnoczi1e1ea482010-04-21 20:35:45 +01004436 if (mcb->callbacks[i].free_qiov) {
4437 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4438 }
Anthony Liguori7267c092011-08-20 22:09:37 -05004439 g_free(mcb->callbacks[i].free_qiov);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004440 }
4441}
4442
4443static void multiwrite_cb(void *opaque, int ret)
4444{
4445 MultiwriteCB *mcb = opaque;
4446
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01004447 trace_multiwrite_cb(mcb, ret);
4448
Kevin Wolfcb6d3ca2010-04-01 22:48:44 +02004449 if (ret < 0 && !mcb->error) {
Kevin Wolf40b4f532009-09-09 17:53:37 +02004450 mcb->error = ret;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004451 }
4452
4453 mcb->num_requests--;
4454 if (mcb->num_requests == 0) {
Kevin Wolfde189a12010-07-01 16:08:51 +02004455 multiwrite_user_cb(mcb);
Anthony Liguori7267c092011-08-20 22:09:37 -05004456 g_free(mcb);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004457 }
4458}
4459
4460static int multiwrite_req_compare(const void *a, const void *b)
4461{
Christoph Hellwig77be4362010-05-19 20:53:10 +02004462 const BlockRequest *req1 = a, *req2 = b;
4463
4464 /*
4465 * Note that we can't simply subtract req2->sector from req1->sector
4466 * here as that could overflow the return value.
4467 */
4468 if (req1->sector > req2->sector) {
4469 return 1;
4470 } else if (req1->sector < req2->sector) {
4471 return -1;
4472 } else {
4473 return 0;
4474 }
Kevin Wolf40b4f532009-09-09 17:53:37 +02004475}
4476
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 *
 * A merged request gets a freshly allocated QEMUIOVector; it is recorded
 * in mcb->callbacks[i].free_qiov so that multiwrite_user_cb() can destroy
 * and free it once the batch completes.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        // A merged request must not exceed the host's iovec limit
        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        // ...nor the driver's maximum transfer length, if one is set
        if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
            reqs[i].nb_sectors > bs->bl.max_transfer_length) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // No zero-fill gap can exist between the two requests: the
            // merge condition above guarantees they are adjacent or overlap.
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            // Add tail of first request, if necessary
            if (qiov->size < reqs[outidx].qiov->size) {
                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
                                  reqs[outidx].qiov->size - qiov->size);
            }

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            // Remember the merged qiov so multiwrite_user_cb() can free it
            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
4547
4548/*
4549 * Submit multiple AIO write requests at once.
4550 *
4551 * On success, the function returns 0 and all requests in the reqs array have
4552 * been submitted. In error case this function returns -1, and any of the
4553 * requests may or may not be submitted yet. In particular, this means that the
4554 * callback will be called for some of the requests, for others it won't. The
4555 * caller must check the error field of the BlockRequest to wait for the right
4556 * callbacks (if error != 0, no callback will be called).
4557 *
4558 * The implementation may modify the contents of the reqs array, e.g. to merge
4559 * requests. However, the fields opaque and error are left unmodified as they
4560 * are used to signal failure for a single request to the caller.
4561 */
4562int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4563{
Kevin Wolf40b4f532009-09-09 17:53:37 +02004564 MultiwriteCB *mcb;
4565 int i;
4566
Ryan Harper301db7c2011-03-07 10:01:04 -06004567 /* don't submit writes if we don't have a medium */
4568 if (bs->drv == NULL) {
4569 for (i = 0; i < num_reqs; i++) {
4570 reqs[i].error = -ENOMEDIUM;
4571 }
4572 return -1;
4573 }
4574
Kevin Wolf40b4f532009-09-09 17:53:37 +02004575 if (num_reqs == 0) {
4576 return 0;
4577 }
4578
4579 // Create MultiwriteCB structure
Anthony Liguori7267c092011-08-20 22:09:37 -05004580 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
Kevin Wolf40b4f532009-09-09 17:53:37 +02004581 mcb->num_requests = 0;
4582 mcb->num_callbacks = num_reqs;
4583
4584 for (i = 0; i < num_reqs; i++) {
4585 mcb->callbacks[i].cb = reqs[i].cb;
4586 mcb->callbacks[i].opaque = reqs[i].opaque;
4587 }
4588
4589 // Check for mergable requests
4590 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4591
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01004592 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4593
Paolo Bonzinidf9309f2011-11-14 17:50:50 +01004594 /* Run the aio requests. */
4595 mcb->num_requests = num_reqs;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004596 for (i = 0; i < num_reqs; i++) {
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004597 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4598 reqs[i].nb_sectors, reqs[i].flags,
4599 multiwrite_cb, mcb,
4600 true);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004601 }
4602
4603 return 0;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004604}
4605
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004606void bdrv_aio_cancel(BlockAIOCB *acb)
pbrookce1a14d2006-08-07 02:38:06 +00004607{
Fam Zhengca5fd112014-09-11 13:41:27 +08004608 qemu_aio_ref(acb);
4609 bdrv_aio_cancel_async(acb);
4610 while (acb->refcnt > 1) {
4611 if (acb->aiocb_info->get_aio_context) {
4612 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
4613 } else if (acb->bs) {
4614 aio_poll(bdrv_get_aio_context(acb->bs), true);
4615 } else {
4616 abort();
Fam Zheng02c50ef2014-09-11 13:41:09 +08004617 }
Fam Zheng02c50ef2014-09-11 13:41:09 +08004618 }
Fam Zheng80074292014-09-11 13:41:28 +08004619 qemu_aio_unref(acb);
Fam Zheng02c50ef2014-09-11 13:41:09 +08004620}
4621
4622/* Async version of aio cancel. The caller is not blocked if the acb implements
4623 * cancel_async, otherwise we do nothing and let the request normally complete.
4624 * In either case the completion callback must be called. */
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004625void bdrv_aio_cancel_async(BlockAIOCB *acb)
Fam Zheng02c50ef2014-09-11 13:41:09 +08004626{
4627 if (acb->aiocb_info->cancel_async) {
4628 acb->aiocb_info->cancel_async(acb);
4629 }
bellard83f64092006-08-01 16:21:11 +00004630}
4631
4632/**************************************************************/
4633/* async block device emulation */
4634
/*
 * AIOCB for emulating AIO on top of a driver's synchronous bdrv_read/
 * bdrv_write: the transfer happens synchronously through a bounce buffer
 * and only completion delivery is deferred to a bottom half.
 */
typedef struct BlockAIOCBSync {
    BlockAIOCB common;
    QEMUBH *bh;             /* bottom half delivering the completion */
    int ret;                /* result handed to the completion callback */
    /* vector translation state */
    QEMUIOVector *qiov;     /* caller's scatter/gather list */
    uint8_t *bounce;        /* linear bounce buffer (NULL if alloc failed) */
    int is_write;
} BlockAIOCBSync;

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size = sizeof(BlockAIOCBSync),
};
4648
/*
 * Bottom half of the sync-emulation AIOCB: copy read data back into the
 * caller's qiov, release the bounce buffer, deliver the completion, and
 * tear the ACB down.
 */
static void bdrv_aio_bh_cb(void *opaque)
{
    BlockAIOCBSync *acb = opaque;

    /* For successful reads, hand the bounce buffer contents to the caller */
    if (!acb->is_write && acb->ret >= 0) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_unref(acb);
}
bellardbeac80c2006-06-26 20:08:57 +00004662
/*
 * Emulate vectored AIO with the driver's synchronous bdrv_read/bdrv_write.
 * The I/O itself runs synchronously right here, into or out of a bounce
 * buffer; only the completion callback is deferred to a bottom half so the
 * caller still observes asynchronous completion semantics.
 */
static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov,
                                      int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque,
                                      int is_write)

{
    BlockAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    /* may fail; reported through the completion callback below */
    acb->bounce = qemu_try_blockalign(bs, qiov->size);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);

    if (acb->bounce == NULL) {
        acb->ret = -ENOMEM;
    } else if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}
4693
/* AIO read emulated via the driver's synchronous read (bounce-buffer path). */
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}
4700
/* AIO write emulated via the driver's synchronous write (bounce-buffer path). */
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
4707
Kevin Wolf68485422011-06-30 10:05:46 +02004708
/* AIOCB for requests served by running the coroutine request path. */
typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BlockRequest req;   /* request parameters; result lands in req.error */
    bool is_write;
    /* NOTE(review): 'done' is not referenced anywhere in this chunk --
     * looks like a leftover from the old cancellation scheme; verify no
     * remaining users before removing. */
    bool *done;
    QEMUBH* bh;         /* bottom half delivering the completion */
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size = sizeof(BlockAIOCBCoroutine),
};
4720
/* Bottom half delivering the completion of a coroutine-backed AIOCB. */
static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    qemu_bh_delete(acb->bh);
    qemu_aio_unref(acb);
}
4730
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004731/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4732static void coroutine_fn bdrv_co_do_rw(void *opaque)
4733{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004734 BlockAIOCBCoroutine *acb = opaque;
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004735 BlockDriverState *bs = acb->common.bs;
4736
4737 if (!acb->is_write) {
4738 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004739 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004740 } else {
4741 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004742 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004743 }
4744
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004745 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004746 qemu_bh_schedule(acb->bh);
4747}
4748
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004749static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4750 int64_t sector_num,
4751 QEMUIOVector *qiov,
4752 int nb_sectors,
4753 BdrvRequestFlags flags,
Markus Armbruster097310b2014-10-07 13:59:15 +02004754 BlockCompletionFunc *cb,
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004755 void *opaque,
4756 bool is_write)
Kevin Wolf68485422011-06-30 10:05:46 +02004757{
4758 Coroutine *co;
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004759 BlockAIOCBCoroutine *acb;
Kevin Wolf68485422011-06-30 10:05:46 +02004760
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004761 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
Kevin Wolf68485422011-06-30 10:05:46 +02004762 acb->req.sector = sector_num;
4763 acb->req.nb_sectors = nb_sectors;
4764 acb->req.qiov = qiov;
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004765 acb->req.flags = flags;
Kevin Wolf68485422011-06-30 10:05:46 +02004766 acb->is_write = is_write;
4767
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004768 co = qemu_coroutine_create(bdrv_co_do_rw);
Kevin Wolf68485422011-06-30 10:05:46 +02004769 qemu_coroutine_enter(co, acb);
4770
4771 return &acb->common;
4772}
4773
/* Coroutine entry point for bdrv_aio_flush(); completes via a bottom half. */
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}
4783
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004784BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
Markus Armbruster097310b2014-10-07 13:59:15 +02004785 BlockCompletionFunc *cb, void *opaque)
Alexander Graf016f5cf2010-05-26 17:51:49 +02004786{
Paolo Bonzini07f07612011-10-17 12:32:12 +02004787 trace_bdrv_aio_flush(bs, opaque);
Alexander Graf016f5cf2010-05-26 17:51:49 +02004788
Paolo Bonzini07f07612011-10-17 12:32:12 +02004789 Coroutine *co;
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004790 BlockAIOCBCoroutine *acb;
Alexander Graf016f5cf2010-05-26 17:51:49 +02004791
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004792 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
Kevin Wolfd318aea2012-11-13 16:35:08 +01004793
Paolo Bonzini07f07612011-10-17 12:32:12 +02004794 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4795 qemu_coroutine_enter(co, acb);
Alexander Graf016f5cf2010-05-26 17:51:49 +02004796
Alexander Graf016f5cf2010-05-26 17:51:49 +02004797 return &acb->common;
4798}
4799
/* Coroutine entry point for bdrv_aio_discard(); completes via a bottom half. */
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}
4809
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004810BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
Paolo Bonzini4265d622011-10-17 12:32:14 +02004811 int64_t sector_num, int nb_sectors,
Markus Armbruster097310b2014-10-07 13:59:15 +02004812 BlockCompletionFunc *cb, void *opaque)
Paolo Bonzini4265d622011-10-17 12:32:14 +02004813{
4814 Coroutine *co;
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004815 BlockAIOCBCoroutine *acb;
Paolo Bonzini4265d622011-10-17 12:32:14 +02004816
4817 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4818
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004819 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
Paolo Bonzini4265d622011-10-17 12:32:14 +02004820 acb->req.sector = sector_num;
4821 acb->req.nb_sectors = nb_sectors;
4822 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4823 qemu_coroutine_enter(co, acb);
4824
4825 return &acb->common;
4826}
4827
/* Register all built-in block drivers via their module init hooks. */
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}
pbrookce1a14d2006-08-07 02:38:06 +00004832
/* Like bdrv_init(), but restrict usable formats to the configured whitelist. */
void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}
4838
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004839void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
Markus Armbruster097310b2014-10-07 13:59:15 +02004840 BlockCompletionFunc *cb, void *opaque)
aliguori6bbff9a2009-03-20 18:25:59 +00004841{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004842 BlockAIOCB *acb;
pbrookce1a14d2006-08-07 02:38:06 +00004843
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004844 acb = g_slice_alloc(aiocb_info->aiocb_size);
4845 acb->aiocb_info = aiocb_info;
pbrookce1a14d2006-08-07 02:38:06 +00004846 acb->bs = bs;
4847 acb->cb = cb;
4848 acb->opaque = opaque;
Fam Zhengf197fe22014-09-11 13:41:08 +08004849 acb->refcnt = 1;
pbrookce1a14d2006-08-07 02:38:06 +00004850 return acb;
4851}
4852
/* Take an extra reference on an AIOCB; drop it with qemu_aio_unref(). */
void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}
4858
Fam Zheng80074292014-09-11 13:41:28 +08004859void qemu_aio_unref(void *p)
pbrookce1a14d2006-08-07 02:38:06 +00004860{
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004861 BlockAIOCB *acb = p;
Fam Zhengf197fe22014-09-11 13:41:08 +08004862 assert(acb->refcnt > 0);
4863 if (--acb->refcnt == 0) {
4864 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
4865 }
pbrookce1a14d2006-08-07 02:38:06 +00004866}
bellard19cb3732006-08-19 11:45:59 +00004867
4868/**************************************************************/
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004869/* Coroutine block device emulation */
4870
4871typedef struct CoroutineIOCompletion {
4872 Coroutine *coroutine;
4873 int ret;
4874} CoroutineIOCompletion;
4875
4876static void bdrv_co_io_em_complete(void *opaque, int ret)
4877{
4878 CoroutineIOCompletion *co = opaque;
4879
4880 co->ret = ret;
4881 qemu_coroutine_enter(co->coroutine, NULL);
4882}
4883
4884static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4885 int nb_sectors, QEMUIOVector *iov,
4886 bool is_write)
4887{
4888 CoroutineIOCompletion co = {
4889 .coroutine = qemu_coroutine_self(),
4890 };
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02004891 BlockAIOCB *acb;
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004892
4893 if (is_write) {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01004894 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4895 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004896 } else {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01004897 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4898 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004899 }
4900
Stefan Hajnoczi59370aa2011-09-30 17:34:58 +01004901 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004902 if (!acb) {
4903 return -EIO;
4904 }
4905 qemu_coroutine_yield();
4906
4907 return co.ret;
4908}
4909
/* Coroutine read emulated on top of the driver's bdrv_aio_readv. */
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}
4916
/* Coroutine write emulated on top of the driver's bdrv_aio_writev. */
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
4923
/* Coroutine entry point for the synchronous bdrv_flush() wrapper. */
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
4930
/*
 * Flush cached data of @bs towards stable storage.
 *
 * Two stages: first flush the format driver's internal caches to the OS
 * (done even with cache=unsafe), then force the OS cache to the disk
 * (skipped when BDRV_O_NO_FLUSH is set).  Finally recurse into bs->file
 * so the protocol layer is flushed as well.  Returns 0 on success or a
 * negative errno.
 */
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    /* Nothing to flush without a writable, inserted medium */
    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        /* Fall back to the AIO flush hook, bridged into this coroutine */
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
4993
/*
 * Drop stale cached metadata after inbound migration: clear the
 * BDRV_O_INCOMING state, let the driver (or the underlying file) re-read
 * its metadata, and refresh the total sector count.  Errors are reported
 * through @errp.
 */
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    Error *local_err = NULL;
    int ret;

    if (!bs->drv) {
        return;
    }

    /* Only relevant while the image is still in the incoming state */
    if (!(bs->open_flags & BDRV_O_INCOMING)) {
        return;
    }
    bs->open_flags &= ~BDRV_O_INCOMING;

    if (bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs, &local_err);
    } else if (bs->file) {
        /* No driver hook: recurse into the protocol layer instead */
        bdrv_invalidate_cache(bs->file, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        return;
    }
}
5024
/*
 * Invalidate the caches of all registered BlockDriverStates, taking each
 * one's AioContext around the call.  Stops at the first error, which is
 * reported through @errp.
 */
void bdrv_invalidate_cache_all(Error **errp)
{
    BlockDriverState *bs;
    Error *local_err = NULL;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_invalidate_cache(bs, &local_err);
        aio_context_release(aio_context);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
5042
Paolo Bonzini07f07612011-10-17 12:32:12 +02005043int bdrv_flush(BlockDriverState *bs)
5044{
5045 Coroutine *co;
5046 RwCo rwco = {
5047 .bs = bs,
5048 .ret = NOT_DONE,
5049 };
5050
5051 if (qemu_in_coroutine()) {
5052 /* Fast-path if already in coroutine context */
5053 bdrv_flush_co_entry(&rwco);
5054 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005055 AioContext *aio_context = bdrv_get_aio_context(bs);
5056
Paolo Bonzini07f07612011-10-17 12:32:12 +02005057 co = qemu_coroutine_create(bdrv_flush_co_entry);
5058 qemu_coroutine_enter(co, &rwco);
5059 while (rwco.ret == NOT_DONE) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005060 aio_poll(aio_context, true);
Paolo Bonzini07f07612011-10-17 12:32:12 +02005061 }
5062 }
5063
5064 return rwco.ret;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02005065}
5066
/* Parameter bundle for running bdrv_co_discard() from a coroutine. */
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;    /* stays NOT_DONE until the coroutine finishes */
} DiscardCo;

/* Coroutine entry point for the synchronous bdrv_discard() wrapper. */
static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
5079
Peter Lieven6f14da52013-10-24 12:06:59 +02005080/* if no limit is specified in the BlockLimits use a default
5081 * of 32768 512-byte sectors (16 MiB) per request.
5082 */
5083#define MAX_DISCARD_DEFAULT 32768
5084
Paolo Bonzini4265d622011-10-17 12:32:14 +02005085int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5086 int nb_sectors)
5087{
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005088 int max_discard;
5089
Paolo Bonzini4265d622011-10-17 12:32:14 +02005090 if (!bs->drv) {
5091 return -ENOMEDIUM;
5092 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
5093 return -EIO;
5094 } else if (bs->read_only) {
5095 return -EROFS;
Paolo Bonzinidf702c92013-01-14 16:26:58 +01005096 }
5097
Fam Zhenge4654d22013-11-13 18:29:43 +08005098 bdrv_reset_dirty(bs, sector_num, nb_sectors);
Paolo Bonzinidf702c92013-01-14 16:26:58 +01005099
Paolo Bonzini9e8f1832013-02-08 14:06:11 +01005100 /* Do nothing if disabled. */
5101 if (!(bs->open_flags & BDRV_O_UNMAP)) {
5102 return 0;
5103 }
5104
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005105 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
Paolo Bonzini4265d622011-10-17 12:32:14 +02005106 return 0;
5107 }
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005108
5109 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
5110 while (nb_sectors > 0) {
5111 int ret;
5112 int num = nb_sectors;
5113
5114 /* align request */
5115 if (bs->bl.discard_alignment &&
5116 num >= bs->bl.discard_alignment &&
5117 sector_num % bs->bl.discard_alignment) {
5118 if (num > bs->bl.discard_alignment) {
5119 num = bs->bl.discard_alignment;
5120 }
5121 num -= sector_num % bs->bl.discard_alignment;
5122 }
5123
5124 /* limit request size */
5125 if (num > max_discard) {
5126 num = max_discard;
5127 }
5128
5129 if (bs->drv->bdrv_co_discard) {
5130 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5131 } else {
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02005132 BlockAIOCB *acb;
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005133 CoroutineIOCompletion co = {
5134 .coroutine = qemu_coroutine_self(),
5135 };
5136
5137 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
5138 bdrv_co_io_em_complete, &co);
5139 if (acb == NULL) {
5140 return -EIO;
5141 } else {
5142 qemu_coroutine_yield();
5143 ret = co.ret;
5144 }
5145 }
Paolo Bonzini7ce21012013-11-22 13:39:47 +01005146 if (ret && ret != -ENOTSUP) {
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005147 return ret;
5148 }
5149
5150 sector_num += num;
5151 nb_sectors -= num;
5152 }
5153 return 0;
Paolo Bonzini4265d622011-10-17 12:32:14 +02005154}
5155
5156int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5157{
5158 Coroutine *co;
Kevin Wolf775aa8b2013-12-05 12:09:38 +01005159 DiscardCo rwco = {
Paolo Bonzini4265d622011-10-17 12:32:14 +02005160 .bs = bs,
5161 .sector_num = sector_num,
5162 .nb_sectors = nb_sectors,
5163 .ret = NOT_DONE,
5164 };
5165
5166 if (qemu_in_coroutine()) {
5167 /* Fast-path if already in coroutine context */
5168 bdrv_discard_co_entry(&rwco);
5169 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005170 AioContext *aio_context = bdrv_get_aio_context(bs);
5171
Paolo Bonzini4265d622011-10-17 12:32:14 +02005172 co = qemu_coroutine_create(bdrv_discard_co_entry);
5173 qemu_coroutine_enter(co, &rwco);
5174 while (rwco.ret == NOT_DONE) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005175 aio_poll(aio_context, true);
Paolo Bonzini4265d622011-10-17 12:32:14 +02005176 }
5177 }
5178
5179 return rwco.ret;
5180}
5181
Kevin Wolff9f05dc2011-07-15 13:50:26 +02005182/**************************************************************/
bellard19cb3732006-08-19 11:45:59 +00005183/* removable device support */
5184
5185/**
5186 * Return TRUE if the media is present
5187 */
5188int bdrv_is_inserted(BlockDriverState *bs)
5189{
5190 BlockDriver *drv = bs->drv;
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02005191
bellard19cb3732006-08-19 11:45:59 +00005192 if (!drv)
5193 return 0;
5194 if (!drv->bdrv_is_inserted)
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02005195 return 1;
5196 return drv->bdrv_is_inserted(bs);
bellard19cb3732006-08-19 11:45:59 +00005197}
5198
5199/**
Markus Armbruster8e49ca42011-08-03 15:08:08 +02005200 * Return whether the media changed since the last call to this
5201 * function, or -ENOTSUP if we don't know. Most drivers don't know.
bellard19cb3732006-08-19 11:45:59 +00005202 */
5203int bdrv_media_changed(BlockDriverState *bs)
5204{
5205 BlockDriver *drv = bs->drv;
bellard19cb3732006-08-19 11:45:59 +00005206
Markus Armbruster8e49ca42011-08-03 15:08:08 +02005207 if (drv && drv->bdrv_media_changed) {
5208 return drv->bdrv_media_changed(bs);
5209 }
5210 return -ENOTSUP;
bellard19cb3732006-08-19 11:45:59 +00005211}
5212
5213/**
5214 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
5215 */
Luiz Capitulinof36f3942012-02-03 16:24:53 -02005216void bdrv_eject(BlockDriverState *bs, bool eject_flag)
bellard19cb3732006-08-19 11:45:59 +00005217{
5218 BlockDriver *drv = bs->drv;
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02005219 const char *device_name;
bellard19cb3732006-08-19 11:45:59 +00005220
Markus Armbruster822e1cd2011-07-20 18:23:42 +02005221 if (drv && drv->bdrv_eject) {
5222 drv->bdrv_eject(bs, eject_flag);
bellard19cb3732006-08-19 11:45:59 +00005223 }
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02005224
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02005225 device_name = bdrv_get_device_name(bs);
5226 if (device_name[0] != '\0') {
5227 qapi_event_send_device_tray_moved(device_name,
Wenchao Xiaa5ee7bd2014-06-18 08:43:44 +02005228 eject_flag, &error_abort);
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02005229 }
bellard19cb3732006-08-19 11:45:59 +00005230}
5231
bellard19cb3732006-08-19 11:45:59 +00005232/**
5233 * Lock or unlock the media (if it is locked, the user won't be able
5234 * to eject it manually).
5235 */
Markus Armbruster025e8492011-09-06 18:58:47 +02005236void bdrv_lock_medium(BlockDriverState *bs, bool locked)
bellard19cb3732006-08-19 11:45:59 +00005237{
5238 BlockDriver *drv = bs->drv;
5239
Markus Armbruster025e8492011-09-06 18:58:47 +02005240 trace_bdrv_lock_medium(bs, locked);
Stefan Hajnoczib8c6d092011-03-29 20:04:40 +01005241
Markus Armbruster025e8492011-09-06 18:58:47 +02005242 if (drv && drv->bdrv_lock_medium) {
5243 drv->bdrv_lock_medium(bs, locked);
bellard19cb3732006-08-19 11:45:59 +00005244 }
5245}
ths985a03b2007-12-24 16:10:43 +00005246
5247/* needed for generic scsi interface */
5248
5249int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5250{
5251 BlockDriver *drv = bs->drv;
5252
5253 if (drv && drv->bdrv_ioctl)
5254 return drv->bdrv_ioctl(bs, req, buf);
5255 return -ENOTSUP;
5256}
aliguori7d780662009-03-12 19:57:08 +00005257
Markus Armbruster7c84b1b2014-10-07 13:59:14 +02005258BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
aliguori221f7152009-03-28 17:28:41 +00005259 unsigned long int req, void *buf,
Markus Armbruster097310b2014-10-07 13:59:15 +02005260 BlockCompletionFunc *cb, void *opaque)
aliguori7d780662009-03-12 19:57:08 +00005261{
aliguori221f7152009-03-28 17:28:41 +00005262 BlockDriver *drv = bs->drv;
aliguori7d780662009-03-12 19:57:08 +00005263
aliguori221f7152009-03-28 17:28:41 +00005264 if (drv && drv->bdrv_aio_ioctl)
5265 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5266 return NULL;
aliguori7d780662009-03-12 19:57:08 +00005267}
aliguorie268ca52009-04-22 20:20:00 +00005268
/* Record the guest device's block size; stored in bs->guest_block_size
 * for later use by the block layer. */
void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
{
    bs->guest_block_size = align;
}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005273
/* Allocate a buffer with the memory alignment required for I/O on @bs
 * (see qemu_try_blockalign for the variant that can return NULL). */
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005278
Max Reitz9ebd8442014-10-22 14:09:27 +02005279void *qemu_blockalign0(BlockDriverState *bs, size_t size)
5280{
5281 return memset(qemu_blockalign(bs, size), 0, size);
5282}
5283
Kevin Wolf7d2a35c2014-05-20 12:24:05 +02005284void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
5285{
5286 size_t align = bdrv_opt_mem_align(bs);
5287
5288 /* Ensure that NULL is never returned on success */
5289 assert(align > 0);
5290 if (size == 0) {
5291 size = align;
5292 }
5293
5294 return qemu_try_memalign(align, size);
5295}
5296
Max Reitz9ebd8442014-10-22 14:09:27 +02005297void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
5298{
5299 void *mem = qemu_try_blockalign(bs, size);
5300
5301 if (mem) {
5302 memset(mem, 0, size);
5303 }
5304
5305 return mem;
5306}
5307
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005308/*
5309 * Check if all memory in this vector is sector aligned.
5310 */
5311bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5312{
5313 int i;
Kevin Wolf339064d2013-11-28 10:23:32 +01005314 size_t alignment = bdrv_opt_mem_align(bs);
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005315
5316 for (i = 0; i < qiov->niov; i++) {
Kevin Wolf339064d2013-11-28 10:23:32 +01005317 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005318 return false;
5319 }
Kevin Wolf339064d2013-11-28 10:23:32 +01005320 if (qiov->iov[i].iov_len % alignment) {
Kevin Wolf1ff735b2013-12-05 13:01:46 +01005321 return false;
5322 }
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005323 }
5324
5325 return true;
5326}
5327
/* Create a dirty bitmap covering all of @bs with the given byte
 * @granularity (must be a power of two and at least one sector) and
 * attach it to bs->dirty_bitmaps.  Returns NULL and sets @errp (and
 * errno) if the device length cannot be determined. */
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
                                          Error **errp)
{
    int64_t bitmap_size;
    BdrvDirtyBitmap *bitmap;

    /* granularity must be a power of two */
    assert((granularity & (granularity - 1)) == 0);

    /* convert bytes to sectors; must remain non-zero afterwards */
    granularity >>= BDRV_SECTOR_BITS;
    assert(granularity);
    bitmap_size = bdrv_nb_sectors(bs);
    if (bitmap_size < 0) {
        error_setg_errno(errp, -bitmap_size, "could not get length of device");
        errno = -bitmap_size;
        return NULL;
    }
    bitmap = g_new0(BdrvDirtyBitmap, 1);
    /* ffs(granularity) - 1 == log2(granularity) for a power of two */
    bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
    return bitmap;
}
5349
5350void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5351{
5352 BdrvDirtyBitmap *bm, *next;
5353 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5354 if (bm == bitmap) {
5355 QLIST_REMOVE(bitmap, list);
5356 hbitmap_free(bitmap->bitmap);
5357 g_free(bitmap);
5358 return;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01005359 }
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005360 }
5361}
5362
Fam Zheng21b56832013-11-13 18:29:44 +08005363BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5364{
5365 BdrvDirtyBitmap *bm;
5366 BlockDirtyInfoList *list = NULL;
5367 BlockDirtyInfoList **plist = &list;
5368
5369 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
Markus Armbruster5839e532014-08-19 10:31:08 +02005370 BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1);
5371 BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1);
Fam Zheng21b56832013-11-13 18:29:44 +08005372 info->count = bdrv_get_dirty_count(bs, bm);
5373 info->granularity =
5374 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5375 entry->value = info;
5376 *plist = entry;
5377 plist = &entry->next;
5378 }
5379
5380 return list;
5381}
5382
Fam Zhenge4654d22013-11-13 18:29:43 +08005383int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005384{
Fam Zhenge4654d22013-11-13 18:29:43 +08005385 if (bitmap) {
5386 return hbitmap_get(bitmap->bitmap, sector);
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005387 } else {
5388 return 0;
5389 }
5390}
5391
/* Initialize @hbi to iterate over @bitmap starting from sector 0. */
void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}
5397
5398void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5399 int nr_sectors)
5400{
Fam Zhenge4654d22013-11-13 18:29:43 +08005401 BdrvDirtyBitmap *bitmap;
5402 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5403 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
Paolo Bonzini8f0720e2013-01-21 17:09:41 +01005404 }
Liran Schouraaa0eb72010-01-26 10:31:48 +02005405}
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005406
Fam Zhenge4654d22013-11-13 18:29:43 +08005407void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
5408{
5409 BdrvDirtyBitmap *bitmap;
5410 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5411 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5412 }
5413}
5414
/* Number of dirty sectors recorded in @bitmap. */
int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}
5419
/* Get a reference to bs.  Pair every bdrv_ref() with a bdrv_unref();
 * the BDS is deleted when the count drops to zero. */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}
5425
5426/* Release a previously grabbed reference to bs.
5427 * If after releasing, reference count is zero, the BlockDriverState is
5428 * deleted. */
5429void bdrv_unref(BlockDriverState *bs)
5430{
Jeff Cody9a4d5ca2014-07-23 17:22:57 -04005431 if (!bs) {
5432 return;
5433 }
Fam Zheng9fcb0252013-08-23 09:14:46 +08005434 assert(bs->refcnt > 0);
5435 if (--bs->refcnt == 0) {
5436 bdrv_delete(bs);
5437 }
5438}
5439
/* One reason why an operation type is blocked on a BDS; linked into the
 * per-operation bs->op_blockers[op] list. */
struct BdrvOpBlocker {
    Error *reason;
    QLIST_ENTRY(BdrvOpBlocker) list;
};
5444
5445bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5446{
5447 BdrvOpBlocker *blocker;
5448 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5449 if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5450 blocker = QLIST_FIRST(&bs->op_blockers[op]);
5451 if (errp) {
5452 error_setg(errp, "Device '%s' is busy: %s",
Markus Armbrusterbfb197e2014-10-07 13:59:11 +02005453 bdrv_get_device_name(bs),
5454 error_get_pretty(blocker->reason));
Fam Zhengfbe40ff2014-05-23 21:29:42 +08005455 }
5456 return true;
5457 }
5458 return false;
5459}
5460
/* Block operation @op on @bs, recording @reason so that
 * bdrv_op_is_blocked() can report it.  @reason is not copied; the
 * caller keeps ownership and must pass the same pointer to
 * bdrv_op_unblock() later. */
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
{
    BdrvOpBlocker *blocker;
    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);

    blocker = g_new0(BdrvOpBlocker, 1);
    blocker->reason = reason;
    QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
}
5470
5471void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5472{
5473 BdrvOpBlocker *blocker, *next;
5474 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5475 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5476 if (blocker->reason == reason) {
5477 QLIST_REMOVE(blocker, list);
5478 g_free(blocker);
5479 }
5480 }
5481}
5482
5483void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5484{
5485 int i;
5486 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5487 bdrv_op_block(bs, i, reason);
5488 }
5489}
5490
5491void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5492{
5493 int i;
5494 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5495 bdrv_op_unblock(bs, i, reason);
5496 }
5497}
5498
5499bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5500{
5501 int i;
5502
5503 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5504 if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5505 return false;
5506 }
5507 }
5508 return true;
5509}
5510
/* Turn on I/O status tracking for @bs and reset the status to OK. */
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
5516
5517/* The I/O status is only enabled if the drive explicitly
5518 * enables it _and_ the VM is configured to stop on errors */
5519bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5520{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03005521 return (bs->iostatus_enabled &&
Paolo Bonzini92aa5c62012-09-28 17:22:55 +02005522 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5523 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
5524 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
Luiz Capitulino28a72822011-09-26 17:43:50 -03005525}
5526
/* Turn off I/O status tracking for @bs. */
void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}
5531
5532void bdrv_iostatus_reset(BlockDriverState *bs)
5533{
5534 if (bdrv_iostatus_is_enabled(bs)) {
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03005535 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
Paolo Bonzini3bd293c2012-10-18 16:49:27 +02005536 if (bs->job) {
5537 block_job_iostatus_reset(bs->job);
5538 }
Luiz Capitulino28a72822011-09-26 17:43:50 -03005539 }
5540}
5541
Luiz Capitulino28a72822011-09-26 17:43:50 -03005542void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5543{
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02005544 assert(bdrv_iostatus_is_enabled(bs));
5545 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03005546 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5547 BLOCK_DEVICE_IO_STATUS_FAILED;
Luiz Capitulino28a72822011-09-26 17:43:50 -03005548 }
5549}
5550
Luiz Capitulinod92ada22012-11-30 10:52:09 -02005551void bdrv_img_create(const char *filename, const char *fmt,
5552 const char *base_filename, const char *base_fmt,
Miroslav Rezaninaf382d432013-02-13 09:09:40 +01005553 char *options, uint64_t img_size, int flags,
5554 Error **errp, bool quiet)
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005555{
Chunyan Liu83d05212014-06-05 17:20:51 +08005556 QemuOptsList *create_opts = NULL;
5557 QemuOpts *opts = NULL;
5558 const char *backing_fmt, *backing_file;
5559 int64_t size;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005560 BlockDriver *drv, *proto_drv;
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00005561 BlockDriver *backing_drv = NULL;
Max Reitzcc84d902013-09-06 17:14:26 +02005562 Error *local_err = NULL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005563 int ret = 0;
5564
5565 /* Find driver and parse its options */
5566 drv = bdrv_find_format(fmt);
5567 if (!drv) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005568 error_setg(errp, "Unknown file format '%s'", fmt);
Luiz Capitulinod92ada22012-11-30 10:52:09 -02005569 return;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005570 }
5571
Kevin Wolf98289622013-07-10 15:47:39 +02005572 proto_drv = bdrv_find_protocol(filename, true);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005573 if (!proto_drv) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005574 error_setg(errp, "Unknown protocol '%s'", filename);
Luiz Capitulinod92ada22012-11-30 10:52:09 -02005575 return;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005576 }
5577
Max Reitzc6149722014-12-02 18:32:45 +01005578 if (!drv->create_opts) {
5579 error_setg(errp, "Format driver '%s' does not support image creation",
5580 drv->format_name);
5581 return;
5582 }
5583
5584 if (!proto_drv->create_opts) {
5585 error_setg(errp, "Protocol driver '%s' does not support image creation",
5586 proto_drv->format_name);
5587 return;
5588 }
5589
Chunyan Liuc282e1f2014-06-05 17:21:11 +08005590 create_opts = qemu_opts_append(create_opts, drv->create_opts);
5591 create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005592
5593 /* Create parameter list with default values */
Chunyan Liu83d05212014-06-05 17:20:51 +08005594 opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
5595 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005596
5597 /* Parse -o options */
5598 if (options) {
Chunyan Liu83d05212014-06-05 17:20:51 +08005599 if (qemu_opts_do_parse(opts, options, NULL) != 0) {
5600 error_setg(errp, "Invalid options for file format '%s'", fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005601 goto out;
5602 }
5603 }
5604
5605 if (base_filename) {
Chunyan Liu83d05212014-06-05 17:20:51 +08005606 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005607 error_setg(errp, "Backing file not supported for file format '%s'",
5608 fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005609 goto out;
5610 }
5611 }
5612
5613 if (base_fmt) {
Chunyan Liu83d05212014-06-05 17:20:51 +08005614 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005615 error_setg(errp, "Backing file format not supported for file "
5616 "format '%s'", fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005617 goto out;
5618 }
5619 }
5620
Chunyan Liu83d05212014-06-05 17:20:51 +08005621 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5622 if (backing_file) {
5623 if (!strcmp(filename, backing_file)) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005624 error_setg(errp, "Error: Trying to create an image with the "
5625 "same filename as the backing file");
Jes Sorensen792da932010-12-16 13:52:17 +01005626 goto out;
5627 }
5628 }
5629
Chunyan Liu83d05212014-06-05 17:20:51 +08005630 backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5631 if (backing_fmt) {
5632 backing_drv = bdrv_find_format(backing_fmt);
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00005633 if (!backing_drv) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005634 error_setg(errp, "Unknown backing file format '%s'",
Chunyan Liu83d05212014-06-05 17:20:51 +08005635 backing_fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005636 goto out;
5637 }
5638 }
5639
5640 // The size for the image must always be specified, with one exception:
5641 // If we are using a backing file, we can obtain the size from there
Chunyan Liu83d05212014-06-05 17:20:51 +08005642 size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5643 if (size == -1) {
5644 if (backing_file) {
Max Reitz66f6b812013-12-03 14:57:52 +01005645 BlockDriverState *bs;
Markus Armbruster52bf1e72014-06-26 13:23:25 +02005646 int64_t size;
Paolo Bonzini63090da2012-04-12 14:01:03 +02005647 int back_flags;
5648
5649 /* backing files always opened read-only */
5650 back_flags =
5651 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005652
Max Reitzf67503e2014-02-18 18:33:05 +01005653 bs = NULL;
Chunyan Liu83d05212014-06-05 17:20:51 +08005654 ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
Max Reitzcc84d902013-09-06 17:14:26 +02005655 backing_drv, &local_err);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005656 if (ret < 0) {
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005657 goto out;
5658 }
Markus Armbruster52bf1e72014-06-26 13:23:25 +02005659 size = bdrv_getlength(bs);
5660 if (size < 0) {
5661 error_setg_errno(errp, -size, "Could not get size of '%s'",
5662 backing_file);
5663 bdrv_unref(bs);
5664 goto out;
5665 }
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005666
Chunyan Liu83d05212014-06-05 17:20:51 +08005667 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);
Max Reitz66f6b812013-12-03 14:57:52 +01005668
5669 bdrv_unref(bs);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005670 } else {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005671 error_setg(errp, "Image creation needs a size parameter");
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005672 goto out;
5673 }
5674 }
5675
Miroslav Rezaninaf382d432013-02-13 09:09:40 +01005676 if (!quiet) {
Fam Zheng43c5d8f2014-12-09 15:38:04 +08005677 printf("Formatting '%s', fmt=%s", filename, fmt);
5678 qemu_opts_print(opts, " ");
Miroslav Rezaninaf382d432013-02-13 09:09:40 +01005679 puts("");
5680 }
Chunyan Liu83d05212014-06-05 17:20:51 +08005681
Chunyan Liuc282e1f2014-06-05 17:21:11 +08005682 ret = bdrv_create(drv, filename, opts, &local_err);
Chunyan Liu83d05212014-06-05 17:20:51 +08005683
Max Reitzcc84d902013-09-06 17:14:26 +02005684 if (ret == -EFBIG) {
5685 /* This is generally a better message than whatever the driver would
5686 * deliver (especially because of the cluster_size_hint), since that
5687 * is most probably not much different from "image too large". */
5688 const char *cluster_size_hint = "";
Chunyan Liu83d05212014-06-05 17:20:51 +08005689 if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
Max Reitzcc84d902013-09-06 17:14:26 +02005690 cluster_size_hint = " (try using a larger cluster size)";
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005691 }
Max Reitzcc84d902013-09-06 17:14:26 +02005692 error_setg(errp, "The image size is too large for file format '%s'"
5693 "%s", fmt, cluster_size_hint);
5694 error_free(local_err);
5695 local_err = NULL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005696 }
5697
5698out:
Chunyan Liu83d05212014-06-05 17:20:51 +08005699 qemu_opts_del(opts);
5700 qemu_opts_free(create_opts);
Markus Armbruster84d18f02014-01-30 15:07:28 +01005701 if (local_err) {
Max Reitzcc84d902013-09-06 17:14:26 +02005702 error_propagate(errp, local_err);
5703 }
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005704}
Stefan Hajnoczi85d126f2013-03-07 13:41:48 +01005705
/* Return the AioContext @bs is attached to (NULL while detached; see
 * bdrv_detach_aio_context). */
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    return bs->aio_context;
}
5710
/* Detach @bs (and, recursively, its file and backing BDSes) from its
 * AioContext.  Registered notifiers run first so listeners can release
 * per-context resources, then throttling and the driver are detached.
 * bs->aio_context is NULL afterwards. */
void bdrv_detach_aio_context(BlockDriverState *bs)
{
    BdrvAioNotifier *baf;

    if (!bs->drv) {
        return;
    }

    /* give registered listeners a chance to tear down first */
    QLIST_FOREACH(baf, &bs->aio_notifiers, list) {
        baf->detach_aio_context(baf->opaque);
    }

    if (bs->io_limits_enabled) {
        throttle_detach_aio_context(&bs->throttle_state);
    }
    if (bs->drv->bdrv_detach_aio_context) {
        bs->drv->bdrv_detach_aio_context(bs);
    }
    if (bs->file) {
        bdrv_detach_aio_context(bs->file);
    }
    if (bs->backing_hd) {
        bdrv_detach_aio_context(bs->backing_hd);
    }

    bs->aio_context = NULL;
}
5738
/* Attach @bs (and, recursively, its backing and file BDSes) to
 * @new_context.  The steps mirror bdrv_detach_aio_context() in reverse
 * order; notifiers run last, once the BDS is fully attached. */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context)
{
    BdrvAioNotifier *ban;

    if (!bs->drv) {
        return;
    }

    bs->aio_context = new_context;

    if (bs->backing_hd) {
        bdrv_attach_aio_context(bs->backing_hd, new_context);
    }
    if (bs->file) {
        bdrv_attach_aio_context(bs->file, new_context);
    }
    if (bs->drv->bdrv_attach_aio_context) {
        bs->drv->bdrv_attach_aio_context(bs, new_context);
    }
    if (bs->io_limits_enabled) {
        throttle_attach_aio_context(&bs->throttle_state, new_context);
    }

    /* listeners are notified only after everything else is attached */
    QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
        ban->attached_aio_context(new_context, ban->opaque);
    }
}
5767
/* Move @bs to @new_context: drain all I/O, detach from the old context,
 * then attach under the new context's lock. */
void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
{
    bdrv_drain_all(); /* ensure there are no in-flight requests */

    bdrv_detach_aio_context(bs);

    /* This function executes in the old AioContext so acquire the new one in
     * case it runs in a different thread.
     */
    aio_context_acquire(new_context);
    bdrv_attach_aio_context(bs, new_context);
    aio_context_release(new_context);
}
Stefan Hajnoczid616b222013-06-24 17:13:10 +02005781
Max Reitz33384422014-06-20 21:57:33 +02005782void bdrv_add_aio_context_notifier(BlockDriverState *bs,
5783 void (*attached_aio_context)(AioContext *new_context, void *opaque),
5784 void (*detach_aio_context)(void *opaque), void *opaque)
5785{
5786 BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1);
5787 *ban = (BdrvAioNotifier){
5788 .attached_aio_context = attached_aio_context,
5789 .detach_aio_context = detach_aio_context,
5790 .opaque = opaque
5791 };
5792
5793 QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list);
5794}
5795
5796void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
5797 void (*attached_aio_context)(AioContext *,
5798 void *),
5799 void (*detach_aio_context)(void *),
5800 void *opaque)
5801{
5802 BdrvAioNotifier *ban, *ban_next;
5803
5804 QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
5805 if (ban->attached_aio_context == attached_aio_context &&
5806 ban->detach_aio_context == detach_aio_context &&
5807 ban->opaque == opaque)
5808 {
5809 QLIST_REMOVE(ban, list);
5810 g_free(ban);
5811
5812 return;
5813 }
5814 }
5815
5816 abort();
5817}
5818
/* Add @notifier to bs->before_write_notifiers; notifiers on that list
 * are invoked ahead of write requests on @bs. */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
Max Reitz6f176b42013-09-03 10:09:50 +02005824
Max Reitz77485432014-10-27 11:12:50 +01005825int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts,
5826 BlockDriverAmendStatusCB *status_cb)
Max Reitz6f176b42013-09-03 10:09:50 +02005827{
Chunyan Liuc282e1f2014-06-05 17:21:11 +08005828 if (!bs->drv->bdrv_amend_options) {
Max Reitz6f176b42013-09-03 10:09:50 +02005829 return -ENOTSUP;
5830 }
Max Reitz77485432014-10-27 11:12:50 +01005831 return bs->drv->bdrv_amend_options(bs, opts, status_cb);
Max Reitz6f176b42013-09-03 10:09:50 +02005832}
Benoît Canetf6186f42013-10-02 14:33:48 +02005833
Benoît Canetb5042a32014-03-03 19:11:34 +01005834/* This function will be called by the bdrv_recurse_is_first_non_filter method
5835 * of block filter and by bdrv_is_first_non_filter.
5836 * It is used to test if the given bs is the candidate or recurse more in the
5837 * node graph.
Benoît Canet212a5a82014-01-23 21:31:36 +01005838 */
Benoît Canet212a5a82014-01-23 21:31:36 +01005839bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5840 BlockDriverState *candidate)
Benoît Canetf6186f42013-10-02 14:33:48 +02005841{
Benoît Canetb5042a32014-03-03 19:11:34 +01005842 /* return false if basic checks fails */
5843 if (!bs || !bs->drv) {
5844 return false;
5845 }
5846
5847 /* the code reached a non block filter driver -> check if the bs is
5848 * the same as the candidate. It's the recursion termination condition.
5849 */
5850 if (!bs->drv->is_filter) {
5851 return bs == candidate;
5852 }
5853 /* Down this path the driver is a block filter driver */
5854
5855 /* If the block filter recursion method is defined use it to recurse down
5856 * the node graph.
5857 */
5858 if (bs->drv->bdrv_recurse_is_first_non_filter) {
Benoît Canet212a5a82014-01-23 21:31:36 +01005859 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5860 }
5861
Benoît Canetb5042a32014-03-03 19:11:34 +01005862 /* the driver is a block filter but don't allow to recurse -> return false
5863 */
5864 return false;
Benoît Canet212a5a82014-01-23 21:31:36 +01005865}
5866
5867/* This function checks if the candidate is the first non filter bs down it's
5868 * bs chain. Since we don't have pointers to parents it explore all bs chains
5869 * from the top. Some filters can choose not to pass down the recursion.
5870 */
5871bool bdrv_is_first_non_filter(BlockDriverState *candidate)
5872{
5873 BlockDriverState *bs;
5874
5875 /* walk down the bs forest recursively */
5876 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5877 bool perm;
5878
Benoît Canetb5042a32014-03-03 19:11:34 +01005879 /* try to recurse in this top level bs */
Kevin Wolfe6dc8a12014-02-04 11:45:31 +01005880 perm = bdrv_recurse_is_first_non_filter(bs, candidate);
Benoît Canet212a5a82014-01-23 21:31:36 +01005881
5882 /* candidate is the first non filter */
5883 if (perm) {
5884 return true;
5885 }
5886 }
5887
5888 return false;
Benoît Canetf6186f42013-10-02 14:33:48 +02005889}
Benoît Canet09158f02014-06-27 18:25:25 +02005890
/* Look up @node_name and verify that the node may safely be replaced:
 * it must exist, BLOCK_OP_TYPE_REPLACE must not be blocked on it, and
 * it must be the top-most non-filter of its chain.  Returns the node,
 * or NULL with @errp set.  The node's AioContext is held only for the
 * duration of the checks. */
BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
{
    BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
    AioContext *aio_context;

    if (!to_replace_bs) {
        error_setg(errp, "Node name '%s' not found", node_name);
        return NULL;
    }

    aio_context = bdrv_get_aio_context(to_replace_bs);
    aio_context_acquire(aio_context);

    if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
        to_replace_bs = NULL;
        goto out;
    }

    /* We don't want arbitrary node of the BDS chain to be replaced only the top
     * most non filter in order to prevent data corruption.
     * Another benefit is that this tests exclude backing files which are
     * blocked by the backing blockers.
     */
    if (!bdrv_is_first_non_filter(to_replace_bs)) {
        error_setg(errp, "Only top most non filter can be replaced");
        to_replace_bs = NULL;
        goto out;
    }

out:
    aio_context_release(aio_context);
    return to_replace_bs;
}
Ming Lei448ad912014-07-04 18:04:33 +08005924
5925void bdrv_io_plug(BlockDriverState *bs)
5926{
5927 BlockDriver *drv = bs->drv;
5928 if (drv && drv->bdrv_io_plug) {
5929 drv->bdrv_io_plug(bs);
5930 } else if (bs->file) {
5931 bdrv_io_plug(bs->file);
5932 }
5933}
5934
5935void bdrv_io_unplug(BlockDriverState *bs)
5936{
5937 BlockDriver *drv = bs->drv;
5938 if (drv && drv->bdrv_io_unplug) {
5939 drv->bdrv_io_unplug(bs);
5940 } else if (bs->file) {
5941 bdrv_io_unplug(bs->file);
5942 }
5943}
5944
5945void bdrv_flush_io_queue(BlockDriverState *bs)
5946{
5947 BlockDriver *drv = bs->drv;
5948 if (drv && drv->bdrv_flush_io_queue) {
5949 drv->bdrv_flush_io_queue(bs);
5950 } else if (bs->file) {
5951 bdrv_flush_io_queue(bs->file);
5952 }
5953}
Max Reitz91af7012014-07-18 20:24:56 +02005954
5955static bool append_open_options(QDict *d, BlockDriverState *bs)
5956{
5957 const QDictEntry *entry;
5958 bool found_any = false;
5959
5960 for (entry = qdict_first(bs->options); entry;
5961 entry = qdict_next(bs->options, entry))
5962 {
5963 /* Only take options for this level and exclude all non-driver-specific
5964 * options */
5965 if (!strchr(qdict_entry_key(entry), '.') &&
5966 strcmp(qdict_entry_key(entry), "node-name"))
5967 {
5968 qobject_incref(qdict_entry_value(entry));
5969 qdict_put_obj(d, qdict_entry_key(entry), qdict_entry_value(entry));
5970 found_any = true;
5971 }
5972 }
5973
5974 return found_any;
5975}
5976
5977/* Updates the following BDS fields:
5978 * - exact_filename: A filename which may be used for opening a block device
5979 * which (mostly) equals the given BDS (even without any
5980 * other options; so reading and writing must return the same
5981 * results, but caching etc. may be different)
5982 * - full_open_options: Options which, when given when opening a block device
5983 * (without a filename), result in a BDS (mostly)
5984 * equalling the given one
5985 * - filename: If exact_filename is set, it is copied here. Otherwise,
5986 * full_open_options is converted to a JSON object, prefixed with
5987 * "json:" (for use through the JSON pseudo protocol) and put here.
5988 */
5989void bdrv_refresh_filename(BlockDriverState *bs)
5990{
5991 BlockDriver *drv = bs->drv;
5992 QDict *opts;
5993
5994 if (!drv) {
5995 return;
5996 }
5997
5998 /* This BDS's file name will most probably depend on its file's name, so
5999 * refresh that first */
6000 if (bs->file) {
6001 bdrv_refresh_filename(bs->file);
6002 }
6003
6004 if (drv->bdrv_refresh_filename) {
6005 /* Obsolete information is of no use here, so drop the old file name
6006 * information before refreshing it */
6007 bs->exact_filename[0] = '\0';
6008 if (bs->full_open_options) {
6009 QDECREF(bs->full_open_options);
6010 bs->full_open_options = NULL;
6011 }
6012
6013 drv->bdrv_refresh_filename(bs);
6014 } else if (bs->file) {
6015 /* Try to reconstruct valid information from the underlying file */
6016 bool has_open_options;
6017
6018 bs->exact_filename[0] = '\0';
6019 if (bs->full_open_options) {
6020 QDECREF(bs->full_open_options);
6021 bs->full_open_options = NULL;
6022 }
6023
6024 opts = qdict_new();
6025 has_open_options = append_open_options(opts, bs);
6026
6027 /* If no specific options have been given for this BDS, the filename of
6028 * the underlying file should suffice for this one as well */
6029 if (bs->file->exact_filename[0] && !has_open_options) {
6030 strcpy(bs->exact_filename, bs->file->exact_filename);
6031 }
6032 /* Reconstructing the full options QDict is simple for most format block
6033 * drivers, as long as the full options are known for the underlying
6034 * file BDS. The full options QDict of that file BDS should somehow
6035 * contain a representation of the filename, therefore the following
6036 * suffices without querying the (exact_)filename of this BDS. */
6037 if (bs->file->full_open_options) {
6038 qdict_put_obj(opts, "driver",
6039 QOBJECT(qstring_from_str(drv->format_name)));
6040 QINCREF(bs->file->full_open_options);
6041 qdict_put_obj(opts, "file", QOBJECT(bs->file->full_open_options));
6042
6043 bs->full_open_options = opts;
6044 } else {
6045 QDECREF(opts);
6046 }
6047 } else if (!bs->full_open_options && qdict_size(bs->options)) {
6048 /* There is no underlying file BDS (at least referenced by BDS.file),
6049 * so the full options QDict should be equal to the options given
6050 * specifically for this block device when it was opened (plus the
6051 * driver specification).
6052 * Because those options don't change, there is no need to update
6053 * full_open_options when it's already set. */
6054
6055 opts = qdict_new();
6056 append_open_options(opts, bs);
6057 qdict_put_obj(opts, "driver",
6058 QOBJECT(qstring_from_str(drv->format_name)));
6059
6060 if (bs->exact_filename[0]) {
6061 /* This may not work for all block protocol drivers (some may
6062 * require this filename to be parsed), but we have to find some
6063 * default solution here, so just include it. If some block driver
6064 * does not support pure options without any filename at all or
6065 * needs some special format of the options QDict, it needs to
6066 * implement the driver-specific bdrv_refresh_filename() function.
6067 */
6068 qdict_put_obj(opts, "filename",
6069 QOBJECT(qstring_from_str(bs->exact_filename)));
6070 }
6071
6072 bs->full_open_options = opts;
6073 }
6074
6075 if (bs->exact_filename[0]) {
6076 pstrcpy(bs->filename, sizeof(bs->filename), bs->exact_filename);
6077 } else if (bs->full_open_options) {
6078 QString *json = qobject_to_json(QOBJECT(bs->full_open_options));
6079 snprintf(bs->filename, sizeof(bs->filename), "json:%s",
6080 qstring_get_str(json));
6081 QDECREF(json);
6082 }
6083}
Benoît Canet5366d0c2014-09-05 15:46:18 +02006084
6085/* This accessor function purpose is to allow the device models to access the
6086 * BlockAcctStats structure embedded inside a BlockDriverState without being
6087 * aware of the BlockDriverState structure layout.
6088 * It will go away when the BlockAcctStats structure will be moved inside
6089 * the device models.
6090 */
6091BlockAcctStats *bdrv_get_stats(BlockDriverState *bs)
6092{
6093 return &bs->stats;
6094}