/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

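/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * wants to cap a device's throughput would enable throttling first and then
 * apply a ThrottleConfig, matching the ordering required by the comment
 * above.  The ThrottleConfig field names below follow the throttle helper
 * headers of this period and are an assumption here:
 *
 *     ThrottleConfig cfg;
 *     memset(&cfg, 0, sizeof(cfg));
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 10 * 1024 * 1024;   // ~10 MB/s
 *     bdrv_io_limits_enable(bs);
 *     bdrv_set_io_limits(bs, &cfg);
 */
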
/* This function makes an I/O request wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}

size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}

/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
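
/*
 * Illustrative examples (not part of the original file):
 *   path_has_protocol("nbd://localhost:10809/disk") -> true  ("nbd" prefix
 *                                                      before any '/')
 *   path_has_protocol("/var/lib/images/disk.qcow2") -> false ('/' comes first)
 *   path_has_protocol("c:\\images\\disk.vhd")       -> false on Windows builds
 *                                                      (drive letter, not a
 *                                                      protocol)
 */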

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
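
/*
 * Illustrative usage (not part of the original file): resolving a relative
 * backing file name against the directory of its overlay image:
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest),
 *                  "/images/overlay.qcow2", "base.qcow2");
 *     // dest now holds "/images/base.qcow2"
 */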

void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name, Error **errp)
{
    BlockDriverState *bs;
    int i;

    if (bdrv_find(device_name)) {
        error_setg(errp, "Device with id '%s' already exists",
                   device_name);
        return NULL;
    }
    if (bdrv_find_node(device_name)) {
        error_setg(errp, "Device with node-name '%s' already exists",
                   device_name);
        return NULL;
    }

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}

424 BlockDriver *drv;
425 char *filename;
426 QEMUOptionParameter *options;
427 int ret;
Max Reitzcc84d902013-09-06 17:14:26 +0200428 Error *err;
Zhi Yong Wu5b7e1542012-05-07 16:50:42 +0800429} CreateCo;
430
431static void coroutine_fn bdrv_create_co_entry(void *opaque)
432{
Max Reitzcc84d902013-09-06 17:14:26 +0200433 Error *local_err = NULL;
434 int ret;
435
Zhi Yong Wu5b7e1542012-05-07 16:50:42 +0800436 CreateCo *cco = opaque;
437 assert(cco->drv);
438
Max Reitzcc84d902013-09-06 17:14:26 +0200439 ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
Markus Armbruster84d18f02014-01-30 15:07:28 +0100440 if (local_err) {
Max Reitzcc84d902013-09-06 17:14:26 +0200441 error_propagate(&cco->err, local_err);
442 }
443 cco->ret = ret;
Zhi Yong Wu5b7e1542012-05-07 16:50:42 +0800444}
445
Kevin Wolf0e7e1982009-05-18 16:42:10 +0200446int bdrv_create(BlockDriver *drv, const char* filename,
Max Reitzcc84d902013-09-06 17:14:26 +0200447 QEMUOptionParameter *options, Error **errp)
bellardea2384d2004-08-01 21:59:26 +0000448{
Zhi Yong Wu5b7e1542012-05-07 16:50:42 +0800449 int ret;
Kevin Wolf0e7e1982009-05-18 16:42:10 +0200450
Zhi Yong Wu5b7e1542012-05-07 16:50:42 +0800451 Coroutine *co;
452 CreateCo cco = {
453 .drv = drv,
454 .filename = g_strdup(filename),
455 .options = options,
456 .ret = NOT_DONE,
Max Reitzcc84d902013-09-06 17:14:26 +0200457 .err = NULL,
Zhi Yong Wu5b7e1542012-05-07 16:50:42 +0800458 };
459
460 if (!drv->bdrv_create) {
Max Reitzcc84d902013-09-06 17:14:26 +0200461 error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
Luiz Capitulino80168bf2012-10-17 16:45:25 -0300462 ret = -ENOTSUP;
463 goto out;
Zhi Yong Wu5b7e1542012-05-07 16:50:42 +0800464 }
465
466 if (qemu_in_coroutine()) {
467 /* Fast-path if already in coroutine context */
468 bdrv_create_co_entry(&cco);
469 } else {
470 co = qemu_coroutine_create(bdrv_create_co_entry);
471 qemu_coroutine_enter(co, &cco);
472 while (cco.ret == NOT_DONE) {
473 qemu_aio_wait();
474 }
475 }
476
477 ret = cco.ret;
Max Reitzcc84d902013-09-06 17:14:26 +0200478 if (ret < 0) {
Markus Armbruster84d18f02014-01-30 15:07:28 +0100479 if (cco.err) {
Max Reitzcc84d902013-09-06 17:14:26 +0200480 error_propagate(errp, cco.err);
481 } else {
482 error_setg_errno(errp, -ret, "Could not create image");
483 }
484 }
Zhi Yong Wu5b7e1542012-05-07 16:50:42 +0800485
Luiz Capitulino80168bf2012-10-17 16:45:25 -0300486out:
487 g_free(cco.filename);
Zhi Yong Wu5b7e1542012-05-07 16:50:42 +0800488 return ret;
bellardea2384d2004-08-01 21:59:26 +0000489}
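
/*
 * Illustrative usage sketch (not part of the original file): creating a 1 GiB
 * qcow2 image through this API.  The option handling mirrors what
 * bdrv_append_temp_snapshot() below does; the file name is made up:
 *
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QEMUOptionParameter *opts =
 *         parse_option_parameters("", drv->create_options, NULL);
 *     Error *err = NULL;
 *     set_option_parameter_int(opts, BLOCK_OPT_SIZE, 1024 * 1024 * 1024);
 *     int ret = bdrv_create(drv, "/tmp/test.qcow2", opts, &err);
 *     free_option_parameters(opts);
 */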

int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

int bdrv_refresh_limits(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return 0;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file);
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd);
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        return drv->bdrv_refresh_limits(bs);
    }

    return 0;
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
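
/*
 * Illustrative usage (not part of the original file), mirroring how
 * bdrv_append_temp_snapshot() below calls this helper:
 *
 *     char *tmp = g_malloc0(PATH_MAX + 1);
 *     int ret = get_tmp_filename(tmp, PATH_MAX + 1);
 *     if (ret < 0) {
 *         // report the error; the buffer contents are then unspecified
 *     }
 *     g_free(tmp);
 */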

/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

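/*
 * Illustrative examples (not part of the original file), assuming the usual
 * set of built-in drivers is registered:
 *   bdrv_find_protocol("/images/a.qcow2", true)      -> the "file" driver
 *   bdrv_find_protocol("nbd:localhost:10809", true)  -> the "nbd" driver
 *   bdrv_find_protocol("nbd:localhost:10809", false) -> the "file" driver,
 *                                  because prefix parsing is disabled
 */
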
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

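/*
 * Illustrative usage (not part of the original file), e.g. when translating a
 * "discard=unmap" option string into open flags:
 *
 *     int flags = BDRV_O_RDWR;
 *     if (bdrv_parse_discard_flags("unmap", &flags) < 0) {
 *         // invalid mode string; report an error to the user
 *     }
 *     // flags now has BDRV_O_UNMAP set in addition to BDRV_O_RDWR
 */
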
/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}

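/*
 * Illustrative mapping (not part of the original file) of the accepted cache
 * mode strings to the flag bits set above:
 *
 *     mode          BDRV_O_CACHE_WB  BDRV_O_NOCACHE  BDRV_O_NO_FLUSH
 *     writethrough        -                -                -
 *     writeback           x                -                -
 *     none / off          x                x                -
 *     directsync          -                x                -
 *     unsafe              x                -                x
 */
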
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}

static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (bdrv_find(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}

/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() is being used directly with a protocol as drv. That layer
     * is already opened, so assign it to bs (while file becomes a closed
     * BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                   ? "Driver '%s' can only be used for read-only devices"
                   : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs);
    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is an indirect pointer to a QDict of options to pass to the block
 * drivers, or pointer to NULL for an empty set of options. If this function
 * takes ownership of the QDict reference, it will set *options to NULL;
 * otherwise, it will contain unused/unrecognized options after this function
 * returns. Then, the caller is responsible for freeing it. If it intends to
 * reuse the QDict, QINCREF() should be called beforehand.
 */
static int bdrv_file_open(BlockDriverState *bs, const char *filename,
                          QDict **options, int flags, Error **errp)
{
    BlockDriver *drv;
    const char *drvname;
    bool parse_filename = false;
    Error *local_err = NULL;
    int ret;

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(*options, "filename");
    } else if (filename && !qdict_haskey(*options, "filename")) {
        qdict_put(*options, "filename", qstring_from_str(filename));
        parse_filename = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(*options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(*options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, parse_filename);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        } else {
            filename = qdict_get_str(*options, "filename");
        }
    }

    if (!drv->bdrv_file_open) {
        ret = bdrv_open(&bs, filename, NULL, *options, flags, drv, &local_err);
        *options = NULL;
    } else {
        ret = bdrv_open_common(bs, NULL, *options, flags, drv, &local_err);
    }
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    bs->growable = 1;
    return 0;

fail:
    return ret;
}

void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{
    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bs->device_name);
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs);
}

/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriver *back_drv = NULL;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
    }

    backing_hd = bdrv_new("", errp);

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}

/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}

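/*
 * Illustrative example (not part of the original file): with a flattened
 * options QDict containing
 *
 *     "file.driver"   -> "file"
 *     "file.filename" -> "/images/disk.qcow2"
 *
 * a call such as
 *
 *     BlockDriverState *file = NULL;
 *     ret = bdrv_open_image(&file, NULL, options, "file",
 *                           bdrv_inherited_flags(flags), true, &local_err);
 *
 * extracts every "file.*" key into a sub-dictionary and opens the protocol
 * layer from it; the exact flag choice shown here is only a sketch.
 */
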
void bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    BlockDriver *bdrv_qcow2;
    QEMUOptionParameter *create_options;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }
    total_size &= BDRV_SECTOR_MASK;

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    bdrv_qcow2 = bdrv_find_format("qcow2");
    create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                             NULL);

    set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);

    ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
    free_option_parameters(create_options);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new("", &error_abort);

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
}

Max Reitz4993f7e2014-05-08 20:12:41 +02001311static QDict *parse_json_filename(const char *filename, Error **errp)
1312{
1313 QObject *options_obj;
1314 QDict *options;
1315 int ret;
1316
1317 ret = strstart(filename, "json:", &filename);
1318 assert(ret);
1319
1320 options_obj = qobject_from_json(filename);
1321 if (!options_obj) {
1322 error_setg(errp, "Could not parse the JSON options");
1323 return NULL;
1324 }
1325
1326 if (qobject_type(options_obj) != QTYPE_QDICT) {
1327 qobject_decref(options_obj);
1328 error_setg(errp, "Invalid JSON object given");
1329 return NULL;
1330 }
1331
1332 options = qobject_to_qdict(options_obj);
1333 qdict_flatten(options);
1334
1335 return options;
1336}
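/*
 * A small sketch (hypothetical input, not part of this file) of what the
 * "json:" pseudo-protocol yields: the JSON object in the filename becomes a
 * flattened options QDict that bdrv_open() later merges with qdict_join().
 */
static QDict *example_parse_json_filename(Error **errp)
{
    const char *filename =
        "json:{\"driver\": \"qcow2\", "
        "\"file\": {\"driver\": \"file\", \"filename\": \"disk.qcow2\"}}";

    /* The returned QDict contains the flattened keys "driver",
     * "file.driver" and "file.filename". */
    return parse_json_filename(filename, errp);
}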
1337
Max Reitzda557aa2013-12-20 19:28:11 +01001338/*
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001339 * Opens a disk image (raw, qcow2, vmdk, ...)
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001340 *
1341 * options is a QDict of options to pass to the block drivers, or NULL for an
1342 * empty set of options. The reference to the QDict belongs to the block layer
1343 * after the call (even on failure), so if the caller intends to reuse the
1344 * dictionary, it needs to use QINCREF() before calling bdrv_open.
Max Reitzf67503e2014-02-18 18:33:05 +01001345 *
1346 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
1347 * If it is not NULL, the referenced BDS will be reused.
Max Reitzddf56362014-02-18 18:33:06 +01001348 *
1349 * The reference parameter may be used to specify an existing block device which
1350 * should be opened. If specified, neither options nor a filename may be given,
1351 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001352 */
Max Reitzddf56362014-02-18 18:33:06 +01001353int bdrv_open(BlockDriverState **pbs, const char *filename,
1354 const char *reference, QDict *options, int flags,
1355 BlockDriver *drv, Error **errp)
bellardea2384d2004-08-01 21:59:26 +00001356{
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001357 int ret;
Max Reitzf67503e2014-02-18 18:33:05 +01001358 BlockDriverState *file = NULL, *bs;
Kevin Wolf74fe54f2013-07-09 11:09:02 +02001359 const char *drvname;
Max Reitz34b5d2c2013-09-05 14:45:29 +02001360 Error *local_err = NULL;
Kevin Wolfb1e6fc02014-05-06 12:11:42 +02001361 int snapshot_flags = 0;
bellard712e7872005-04-28 21:09:32 +00001362
Max Reitzf67503e2014-02-18 18:33:05 +01001363 assert(pbs);
1364
Max Reitzddf56362014-02-18 18:33:06 +01001365 if (reference) {
1366 bool options_non_empty = options ? qdict_size(options) : false;
1367 QDECREF(options);
1368
1369 if (*pbs) {
1370 error_setg(errp, "Cannot reuse an existing BDS when referencing "
1371 "another block device");
1372 return -EINVAL;
1373 }
1374
1375 if (filename || options_non_empty) {
1376 error_setg(errp, "Cannot reference an existing block device with "
1377 "additional options or a new filename");
1378 return -EINVAL;
1379 }
1380
1381 bs = bdrv_lookup_bs(reference, reference, errp);
1382 if (!bs) {
1383 return -ENODEV;
1384 }
1385 bdrv_ref(bs);
1386 *pbs = bs;
1387 return 0;
1388 }
1389
Max Reitzf67503e2014-02-18 18:33:05 +01001390 if (*pbs) {
1391 bs = *pbs;
1392 } else {
Kevin Wolf98522f62014-04-17 13:16:01 +02001393 bs = bdrv_new("", &error_abort);
Max Reitzf67503e2014-02-18 18:33:05 +01001394 }
1395
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001396 /* NULL means an empty set of options */
1397 if (options == NULL) {
1398 options = qdict_new();
1399 }
1400
Max Reitz4993f7e2014-05-08 20:12:41 +02001401 if (filename && g_str_has_prefix(filename, "json:")) {
1402 QDict *json_options = parse_json_filename(filename, &local_err);
1403 if (local_err) {
1404 ret = -EINVAL;
1405 goto fail;
1406 }
1407
1408 /* Options given in the filename have lower priority than options
1409 * specified directly */
1410 qdict_join(options, json_options, false);
1411 QDECREF(json_options);
1412 filename = NULL;
1413 }
1414
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001415 bs->options = options;
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001416 options = qdict_clone_shallow(options);
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001417
Max Reitz5469a2a2014-02-18 18:33:10 +01001418 if (flags & BDRV_O_PROTOCOL) {
1419 assert(!drv);
Max Reitz5acd9d82014-02-18 18:33:11 +01001420 ret = bdrv_file_open(bs, filename, &options, flags & ~BDRV_O_PROTOCOL,
Max Reitz5469a2a2014-02-18 18:33:10 +01001421 &local_err);
Max Reitz5469a2a2014-02-18 18:33:10 +01001422 if (!ret) {
Kevin Wolfeb909c72014-03-06 16:34:46 +01001423 drv = bs->drv;
Max Reitz5acd9d82014-02-18 18:33:11 +01001424 goto done;
Max Reitz5469a2a2014-02-18 18:33:10 +01001425 } else if (bs->drv) {
1426 goto close_and_fail;
1427 } else {
1428 goto fail;
1429 }
1430 }
1431
Kevin Wolff500a6d2012-11-12 17:35:27 +01001432 /* Open image file without format layer */
Jeff Codybe028ad2012-09-20 15:13:17 -04001433 if (flags & BDRV_O_RDWR) {
1434 flags |= BDRV_O_ALLOW_RDWR;
1435 }
Kevin Wolfb1e6fc02014-05-06 12:11:42 +02001436 if (flags & BDRV_O_SNAPSHOT) {
1437 snapshot_flags = bdrv_temp_snapshot_flags(flags);
1438 flags = bdrv_backing_flags(flags);
1439 }
Jeff Codybe028ad2012-09-20 15:13:17 -04001440
Max Reitzf67503e2014-02-18 18:33:05 +01001441 assert(file == NULL);
Max Reitz054963f2013-12-20 19:28:12 +01001442 ret = bdrv_open_image(&file, filename, options, "file",
Kevin Wolf0b50cc82014-04-11 21:29:52 +02001443 bdrv_inherited_flags(flags),
1444 true, &local_err);
Max Reitz054963f2013-12-20 19:28:12 +01001445 if (ret < 0) {
Kevin Wolf8bfea152014-04-11 19:16:36 +02001446 goto fail;
Kevin Wolff500a6d2012-11-12 17:35:27 +01001447 }
1448
1449 /* Find the right image format driver */
Kevin Wolf74fe54f2013-07-09 11:09:02 +02001450 drvname = qdict_get_try_str(options, "driver");
1451 if (drvname) {
Kevin Wolf8f94a6e2013-10-10 11:45:55 +02001452 drv = bdrv_find_format(drvname);
Kevin Wolf74fe54f2013-07-09 11:09:02 +02001453 qdict_del(options, "driver");
Kevin Wolf06d22aa2013-08-08 17:44:52 +02001454 if (!drv) {
1455 error_setg(errp, "Invalid driver: '%s'", drvname);
1456 ret = -EINVAL;
Kevin Wolf8bfea152014-04-11 19:16:36 +02001457 goto fail;
Kevin Wolf06d22aa2013-08-08 17:44:52 +02001458 }
Kevin Wolf74fe54f2013-07-09 11:09:02 +02001459 }
1460
Kevin Wolff500a6d2012-11-12 17:35:27 +01001461 if (!drv) {
Max Reitz2a05cbe2013-12-20 19:28:10 +01001462 if (file) {
1463 ret = find_image_format(file, filename, &drv, &local_err);
1464 } else {
1465 error_setg(errp, "Must specify either driver or file");
1466 ret = -EINVAL;
Kevin Wolf8bfea152014-04-11 19:16:36 +02001467 goto fail;
Max Reitz2a05cbe2013-12-20 19:28:10 +01001468 }
Kevin Wolff500a6d2012-11-12 17:35:27 +01001469 }
1470
1471 if (!drv) {
Kevin Wolf8bfea152014-04-11 19:16:36 +02001472 goto fail;
Kevin Wolff500a6d2012-11-12 17:35:27 +01001473 }
1474
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001475 /* Open the image */
Max Reitz34b5d2c2013-09-05 14:45:29 +02001476 ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001477 if (ret < 0) {
Kevin Wolf8bfea152014-04-11 19:16:36 +02001478 goto fail;
Christoph Hellwig69873072010-01-20 18:13:25 +01001479 }
1480
Max Reitz2a05cbe2013-12-20 19:28:10 +01001481 if (file && (bs->file != file)) {
Fam Zheng4f6fd342013-08-23 09:14:47 +08001482 bdrv_unref(file);
Kevin Wolff500a6d2012-11-12 17:35:27 +01001483 file = NULL;
1484 }
1485
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001486 /* If there is a backing file, use it */
Paolo Bonzini9156df12012-10-18 16:49:17 +02001487 if ((flags & BDRV_O_NO_BACKING) == 0) {
Kevin Wolf31ca6d02013-03-28 15:29:24 +01001488 QDict *backing_options;
1489
Benoît Canet5726d872013-09-25 13:30:01 +02001490 qdict_extract_subqdict(options, &backing_options, "backing.");
Max Reitz34b5d2c2013-09-05 14:45:29 +02001491 ret = bdrv_open_backing_file(bs, backing_options, &local_err);
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001492 if (ret < 0) {
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001493 goto close_and_fail;
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001494 }
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001495 }
1496
Kevin Wolfb9988752014-04-03 12:09:34 +02001497 /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
1498 * temporary snapshot afterwards. */
Kevin Wolfb1e6fc02014-05-06 12:11:42 +02001499 if (snapshot_flags) {
1500 bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
Kevin Wolfb9988752014-04-03 12:09:34 +02001501 if (local_err) {
1502 error_propagate(errp, local_err);
1503 goto close_and_fail;
1504 }
1505 }
1506
1507
Max Reitz5acd9d82014-02-18 18:33:11 +01001508done:
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001509 /* Check if any unknown options were used */
Max Reitz5acd9d82014-02-18 18:33:11 +01001510 if (options && (qdict_size(options) != 0)) {
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001511 const QDictEntry *entry = qdict_first(options);
Max Reitz5acd9d82014-02-18 18:33:11 +01001512 if (flags & BDRV_O_PROTOCOL) {
1513 error_setg(errp, "Block protocol '%s' doesn't support the option "
1514 "'%s'", drv->format_name, entry->key);
1515 } else {
1516 error_setg(errp, "Block format '%s' used by device '%s' doesn't "
1517 "support the option '%s'", drv->format_name,
1518 bs->device_name, entry->key);
1519 }
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001520
1521 ret = -EINVAL;
1522 goto close_and_fail;
1523 }
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001524
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001525 if (!bdrv_key_required(bs)) {
Markus Armbruster7d4b4ba2011-09-06 18:58:59 +02001526 bdrv_dev_change_media_cb(bs, true);
Markus Armbrusterc3adb582014-03-14 09:22:48 +01001527 } else if (!runstate_check(RUN_STATE_PRELAUNCH)
1528 && !runstate_check(RUN_STATE_INMIGRATE)
1529 && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
1530 error_setg(errp,
1531 "Guest must be stopped for opening of encrypted image");
1532 ret = -EBUSY;
1533 goto close_and_fail;
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001534 }
1535
Markus Armbrusterc3adb582014-03-14 09:22:48 +01001536 QDECREF(options);
Max Reitzf67503e2014-02-18 18:33:05 +01001537 *pbs = bs;
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001538 return 0;
1539
Kevin Wolf8bfea152014-04-11 19:16:36 +02001540fail:
Kevin Wolff500a6d2012-11-12 17:35:27 +01001541 if (file != NULL) {
Fam Zheng4f6fd342013-08-23 09:14:47 +08001542 bdrv_unref(file);
Kevin Wolff500a6d2012-11-12 17:35:27 +01001543 }
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001544 QDECREF(bs->options);
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001545 QDECREF(options);
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001546 bs->options = NULL;
Max Reitzf67503e2014-02-18 18:33:05 +01001547 if (!*pbs) {
1548 /* If *pbs is NULL, a new BDS has been created in this function and
1549 needs to be freed now. Otherwise, it does not need to be closed,
1550 since it has not really been opened yet. */
1551 bdrv_unref(bs);
1552 }
Markus Armbruster84d18f02014-01-30 15:07:28 +01001553 if (local_err) {
Max Reitz34b5d2c2013-09-05 14:45:29 +02001554 error_propagate(errp, local_err);
1555 }
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001556 return ret;
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001557
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001558close_and_fail:
Max Reitzf67503e2014-02-18 18:33:05 +01001559 /* See the fail path, but now the BDS always has to be closed */
1560 if (*pbs) {
1561 bdrv_close(bs);
1562 } else {
1563 bdrv_unref(bs);
1564 }
Kevin Wolfb6ad4912013-03-15 10:35:04 +01001565 QDECREF(options);
Markus Armbruster84d18f02014-01-30 15:07:28 +01001566 if (local_err) {
Max Reitz34b5d2c2013-09-05 14:45:29 +02001567 error_propagate(errp, local_err);
1568 }
Kevin Wolfb6ce07a2010-04-12 16:37:13 +02001569 return ret;
1570}
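/*
 * A minimal usage sketch (hypothetical caller, not part of this file): open an
 * image read/write with an explicit format driver given as an option.  The
 * filename, driver name and cache flag are illustrative assumptions.
 */
static int example_bdrv_open(BlockDriverState **bs, Error **errp)
{
    QDict *opts = qdict_new();

    qdict_put(opts, "driver", qstring_from_str("qcow2"));

    /* *bs == NULL asks bdrv_open() to create a fresh BDS; the reference to
     * opts belongs to the block layer after the call, even on failure. */
    *bs = NULL;
    return bdrv_open(bs, "disk.qcow2", NULL, opts,
                     BDRV_O_RDWR | BDRV_O_CACHE_WB, NULL, errp);
}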
1571
Jeff Codye971aa12012-09-20 15:13:19 -04001572typedef struct BlockReopenQueueEntry {
1573 bool prepared;
1574 BDRVReopenState state;
1575 QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
1576} BlockReopenQueueEntry;
1577
1578/*
1579 * Adds a BlockDriverState to a simple queue for an atomic, transactional
1580 * reopen of multiple devices.
1581 *
1582 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLEQ_INIT
1583 * already performed, or alternatively may be NULL, in which case a new
1584 * BlockReopenQueue will be created and initialized. The newly created queue should be
1585 * passed back in for subsequent calls that are intended to be of the same
1586 * atomic 'set'.
1587 *
1588 * bs is the BlockDriverState to add to the reopen queue.
1589 *
1590 * flags contains the open flags for the associated bs
1591 *
1592 * returns a pointer to bs_queue, which is either the newly allocated
1593 * bs_queue, or the existing bs_queue being used.
1594 *
1595 */
1596BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
1597 BlockDriverState *bs, int flags)
1598{
1599 assert(bs != NULL);
1600
1601 BlockReopenQueueEntry *bs_entry;
1602 if (bs_queue == NULL) {
1603 bs_queue = g_new0(BlockReopenQueue, 1);
1604 QSIMPLEQ_INIT(bs_queue);
1605 }
1606
Kevin Wolff1f25a22014-04-25 19:04:55 +02001607 /* bdrv_open() masks this flag out */
1608 flags &= ~BDRV_O_PROTOCOL;
1609
Jeff Codye971aa12012-09-20 15:13:19 -04001610 if (bs->file) {
Kevin Wolff1f25a22014-04-25 19:04:55 +02001611 bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
Jeff Codye971aa12012-09-20 15:13:19 -04001612 }
1613
1614 bs_entry = g_new0(BlockReopenQueueEntry, 1);
1615 QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);
1616
1617 bs_entry->state.bs = bs;
1618 bs_entry->state.flags = flags;
1619
1620 return bs_queue;
1621}
1622
1623/*
1624 * Reopen multiple BlockDriverStates atomically & transactionally.
1625 *
1626 * The queue passed in (bs_queue) must have been built up previously
1627 * via bdrv_reopen_queue().
1628 *
1629 * Reopens all BDS specified in the queue, with the appropriate
1630 * flags. All devices are prepared for reopen, and failure of any
1631 * device will cause all device changes to be abandoned, and intermediate
1632 * data cleaned up.
1633 *
1634 * If all devices prepare successfully, then the changes are committed
1635 * to all devices.
1636 *
1637 */
1638int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
1639{
1640 int ret = -1;
1641 BlockReopenQueueEntry *bs_entry, *next;
1642 Error *local_err = NULL;
1643
1644 assert(bs_queue != NULL);
1645
1646 bdrv_drain_all();
1647
1648 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1649 if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
1650 error_propagate(errp, local_err);
1651 goto cleanup;
1652 }
1653 bs_entry->prepared = true;
1654 }
1655
1656 /* If we reach this point, we have success and just need to apply the
1657 * changes
1658 */
1659 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1660 bdrv_reopen_commit(&bs_entry->state);
1661 }
1662
1663 ret = 0;
1664
1665cleanup:
1666 QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
1667 if (ret && bs_entry->prepared) {
1668 bdrv_reopen_abort(&bs_entry->state);
1669 }
1670 g_free(bs_entry);
1671 }
1672 g_free(bs_queue);
1673 return ret;
1674}
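/*
 * A usage sketch (hypothetical caller, not part of this file): reopen two
 * devices read-only as one atomic set; either both switch or neither does.
 */
static int example_reopen_pair_readonly(BlockDriverState *a,
                                        BlockDriverState *b, Error **errp)
{
    BlockReopenQueue *queue = NULL;

    queue = bdrv_reopen_queue(queue, a, a->open_flags & ~BDRV_O_RDWR);
    queue = bdrv_reopen_queue(queue, b, b->open_flags & ~BDRV_O_RDWR);

    /* Prepares all entries first, then commits them all, or aborts every
     * prepared entry on failure; the queue is freed in either case. */
    return bdrv_reopen_multiple(queue, errp);
}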
1675
1676
1677/* Reopen a single BlockDriverState with the specified flags. */
1678int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1679{
1680 int ret = -1;
1681 Error *local_err = NULL;
1682 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1683
1684 ret = bdrv_reopen_multiple(queue, &local_err);
1685 if (local_err != NULL) {
1686 error_propagate(errp, local_err);
1687 }
1688 return ret;
1689}
1690
1691
1692/*
1693 * Prepares a BlockDriverState for reopen. All changes are staged in the
1694 * 'opaque' field of the BDRVReopenState, which is used and allocated by
1695 * the block driver's .bdrv_reopen_prepare() callback.
1696 *
1697 * bs is the BlockDriverState to reopen
1698 * flags are the new open flags
1699 * queue is the reopen queue
1700 *
1701 * Returns 0 on success, non-zero on error. On error errp will be set
1702 * as well.
1703 *
1704 * On failure, bdrv_reopen_abort() will be called to clean up any data.
1705 * It is the responsibility of the caller to then call bdrv_reopen_abort() or
1706 * bdrv_reopen_commit() for any other BDS that have been left in a prepare() state.
1707 *
1708 */
1709int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
1710 Error **errp)
1711{
1712 int ret = -1;
1713 Error *local_err = NULL;
1714 BlockDriver *drv;
1715
1716 assert(reopen_state != NULL);
1717 assert(reopen_state->bs->drv != NULL);
1718 drv = reopen_state->bs->drv;
1719
1720 /* if we are to stay read-only, do not allow permission change
1721 * to r/w */
1722 if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
1723 reopen_state->flags & BDRV_O_RDWR) {
1724 error_set(errp, QERR_DEVICE_IS_READ_ONLY,
1725 reopen_state->bs->device_name);
1726 goto error;
1727 }
1728
1729
1730 ret = bdrv_flush(reopen_state->bs);
1731 if (ret) {
1732 error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
1733 strerror(-ret));
1734 goto error;
1735 }
1736
1737 if (drv->bdrv_reopen_prepare) {
1738 ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
1739 if (ret) {
1740 if (local_err != NULL) {
1741 error_propagate(errp, local_err);
1742 } else {
Luiz Capitulinod8b68952013-06-10 11:29:27 -04001743 error_setg(errp, "failed while preparing to reopen image '%s'",
1744 reopen_state->bs->filename);
Jeff Codye971aa12012-09-20 15:13:19 -04001745 }
1746 goto error;
1747 }
1748 } else {
1749 /* It is currently mandatory to have a bdrv_reopen_prepare()
1750 * handler for each supported drv. */
1751 error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
1752 drv->format_name, reopen_state->bs->device_name,
1753 "reopening of file");
1754 ret = -1;
1755 goto error;
1756 }
1757
1758 ret = 0;
1759
1760error:
1761 return ret;
1762}
1763
1764/*
1765 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1766 * makes them final by swapping the staging BlockDriverState contents into
1767 * the active BlockDriverState contents.
1768 */
1769void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1770{
1771 BlockDriver *drv;
1772
1773 assert(reopen_state != NULL);
1774 drv = reopen_state->bs->drv;
1775 assert(drv != NULL);
1776
1777 /* If there are any driver level actions to take */
1778 if (drv->bdrv_reopen_commit) {
1779 drv->bdrv_reopen_commit(reopen_state);
1780 }
1781
1782 /* set BDS specific flags now */
1783 reopen_state->bs->open_flags = reopen_state->flags;
1784 reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1785 BDRV_O_CACHE_WB);
1786 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
Kevin Wolf355ef4a2013-12-11 20:14:09 +01001787
1788 bdrv_refresh_limits(reopen_state->bs);
Jeff Codye971aa12012-09-20 15:13:19 -04001789}
1790
1791/*
1792 * Abort the reopen, and delete and free the staged changes in
1793 * reopen_state
1794 */
1795void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1796{
1797 BlockDriver *drv;
1798
1799 assert(reopen_state != NULL);
1800 drv = reopen_state->bs->drv;
1801 assert(drv != NULL);
1802
1803 if (drv->bdrv_reopen_abort) {
1804 drv->bdrv_reopen_abort(reopen_state);
1805 }
1806}
1807
1808
bellardfc01f7e2003-06-30 10:03:06 +00001809void bdrv_close(BlockDriverState *bs)
1810{
Paolo Bonzini3cbc0022012-10-19 11:36:48 +02001811 if (bs->job) {
1812 block_job_cancel_sync(bs->job);
1813 }
Stefan Hajnoczi58fda172013-07-02 15:36:25 +02001814 bdrv_drain_all(); /* complete I/O */
1815 bdrv_flush(bs);
1816 bdrv_drain_all(); /* in case flush left pending I/O */
Paolo Bonzinid7d512f2012-08-23 11:20:36 +02001817 notifier_list_notify(&bs->close_notifiers, bs);
Kevin Wolf7094f122012-04-11 11:06:37 +02001818
Paolo Bonzini3cbc0022012-10-19 11:36:48 +02001819 if (bs->drv) {
Stefan Hajnoczi557df6a2010-04-17 10:49:06 +01001820 if (bs->backing_hd) {
Fam Zheng826b6ca2014-05-23 21:29:47 +08001821 BlockDriverState *backing_hd = bs->backing_hd;
1822 bdrv_set_backing_hd(bs, NULL);
1823 bdrv_unref(backing_hd);
Stefan Hajnoczi557df6a2010-04-17 10:49:06 +01001824 }
bellardea2384d2004-08-01 21:59:26 +00001825 bs->drv->bdrv_close(bs);
Anthony Liguori7267c092011-08-20 22:09:37 -05001826 g_free(bs->opaque);
bellardea2384d2004-08-01 21:59:26 +00001827 bs->opaque = NULL;
1828 bs->drv = NULL;
Stefan Hajnoczi53fec9d2011-11-28 16:08:47 +00001829 bs->copy_on_read = 0;
Paolo Bonzinia275fa42012-05-08 16:51:43 +02001830 bs->backing_file[0] = '\0';
1831 bs->backing_format[0] = '\0';
Paolo Bonzini64058752012-05-08 16:51:49 +02001832 bs->total_sectors = 0;
1833 bs->encrypted = 0;
1834 bs->valid_key = 0;
1835 bs->sg = 0;
1836 bs->growable = 0;
Asias He0d51b4d2013-08-22 15:24:14 +08001837 bs->zero_beyond_eof = false;
Kevin Wolfde9c0ce2013-03-15 10:35:02 +01001838 QDECREF(bs->options);
1839 bs->options = NULL;
bellardb3380822004-03-14 21:38:54 +00001840
Kevin Wolf66f82ce2010-04-14 14:17:38 +02001841 if (bs->file != NULL) {
Fam Zheng4f6fd342013-08-23 09:14:47 +08001842 bdrv_unref(bs->file);
Paolo Bonzini0ac93772012-05-08 16:51:44 +02001843 bs->file = NULL;
Kevin Wolf66f82ce2010-04-14 14:17:38 +02001844 }
bellardb3380822004-03-14 21:38:54 +00001845 }
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08001846
Pavel Hrdina9ca11152012-08-09 12:44:48 +02001847 bdrv_dev_change_media_cb(bs, false);
1848
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08001849 /*throttling disk I/O limits*/
1850 if (bs->io_limits_enabled) {
1851 bdrv_io_limits_disable(bs);
1852 }
bellardb3380822004-03-14 21:38:54 +00001853}
1854
MORITA Kazutaka2bc93fe2010-05-28 11:44:57 +09001855void bdrv_close_all(void)
1856{
1857 BlockDriverState *bs;
1858
Benoît Canetdc364f42014-01-23 21:31:32 +01001859 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02001860 AioContext *aio_context = bdrv_get_aio_context(bs);
1861
1862 aio_context_acquire(aio_context);
MORITA Kazutaka2bc93fe2010-05-28 11:44:57 +09001863 bdrv_close(bs);
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02001864 aio_context_release(aio_context);
MORITA Kazutaka2bc93fe2010-05-28 11:44:57 +09001865 }
1866}
1867
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001868/* Check if any requests are in-flight (including throttled requests) */
1869static bool bdrv_requests_pending(BlockDriverState *bs)
1870{
1871 if (!QLIST_EMPTY(&bs->tracked_requests)) {
1872 return true;
1873 }
Benoît Canetcc0681c2013-09-02 14:14:39 +02001874 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1875 return true;
1876 }
1877 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001878 return true;
1879 }
1880 if (bs->file && bdrv_requests_pending(bs->file)) {
1881 return true;
1882 }
1883 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1884 return true;
1885 }
1886 return false;
1887}
1888
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001889/*
1890 * Wait for pending requests to complete across all BlockDriverStates
1891 *
1892 * This function does not flush data to disk, use bdrv_flush_all() for that
1893 * after calling this function.
Zhi Yong Wu4c355d52012-04-12 14:00:57 +02001894 *
1895 * Note that completion of an asynchronous I/O operation can trigger any
1896 * number of other I/O operations on other devices---for example a coroutine
1897 * can be arbitrarily complex and a constant flow of I/O can come until the
1898 * coroutine is complete. Because of this, it is not possible to have a
1899 * function to drain a single device's I/O queue.
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001900 */
1901void bdrv_drain_all(void)
1902{
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001903 /* Always run first iteration so any pending completion BHs run */
1904 bool busy = true;
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001905 BlockDriverState *bs;
1906
Stefan Hajnoczi88266f52013-04-11 15:41:13 +02001907 while (busy) {
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001908 busy = false;
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001909
Stefan Hajnoczi9b536ad2014-05-08 16:34:36 +02001910 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1911 AioContext *aio_context = bdrv_get_aio_context(bs);
1912 bool bs_busy;
1913
1914 aio_context_acquire(aio_context);
1915 bdrv_start_throttled_reqs(bs);
1916 bs_busy = bdrv_requests_pending(bs);
1917 bs_busy |= aio_poll(aio_context, bs_busy);
1918 aio_context_release(aio_context);
1919
1920 busy |= bs_busy;
1921 }
Stefan Hajnoczi922453b2011-11-30 12:23:43 +00001922 }
1923}
1924
Benoît Canetdc364f42014-01-23 21:31:32 +01001925/* make a BlockDriverState anonymous by removing it from the bdrv_states and
1926 * graph_bdrv_states lists.
Ryan Harperd22b2f42011-03-29 20:51:47 -05001927 * Also, NUL-terminate the device_name to prevent a double remove. */
1928void bdrv_make_anon(BlockDriverState *bs)
1929{
1930 if (bs->device_name[0] != '\0') {
Benoît Canetdc364f42014-01-23 21:31:32 +01001931 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
Ryan Harperd22b2f42011-03-29 20:51:47 -05001932 }
1933 bs->device_name[0] = '\0';
Benoît Canetdc364f42014-01-23 21:31:32 +01001934 if (bs->node_name[0] != '\0') {
1935 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
1936 }
1937 bs->node_name[0] = '\0';
Ryan Harperd22b2f42011-03-29 20:51:47 -05001938}
1939
Paolo Bonzinie023b2e2012-05-08 16:51:41 +02001940static void bdrv_rebind(BlockDriverState *bs)
1941{
1942 if (bs->drv && bs->drv->bdrv_rebind) {
1943 bs->drv->bdrv_rebind(bs);
1944 }
1945}
1946
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001947static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
1948 BlockDriverState *bs_src)
1949{
1950 /* move some fields that need to stay attached to the device */
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001951
1952 /* dev info */
1953 bs_dest->dev_ops = bs_src->dev_ops;
1954 bs_dest->dev_opaque = bs_src->dev_opaque;
1955 bs_dest->dev = bs_src->dev;
Paolo Bonzini1b7fd722011-11-29 11:35:47 +01001956 bs_dest->guest_block_size = bs_src->guest_block_size;
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001957 bs_dest->copy_on_read = bs_src->copy_on_read;
1958
1959 bs_dest->enable_write_cache = bs_src->enable_write_cache;
1960
Benoît Canetcc0681c2013-09-02 14:14:39 +02001961 /* i/o throttled req */
1962 memcpy(&bs_dest->throttle_state,
1963 &bs_src->throttle_state,
1964 sizeof(ThrottleState));
1965 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
1966 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001967 bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
1968
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001969 /* r/w error */
1970 bs_dest->on_read_error = bs_src->on_read_error;
1971 bs_dest->on_write_error = bs_src->on_write_error;
1972
1973 /* i/o status */
1974 bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
1975 bs_dest->iostatus = bs_src->iostatus;
1976
1977 /* dirty bitmap */
Fam Zhenge4654d22013-11-13 18:29:43 +08001978 bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001979
Fam Zheng9fcb0252013-08-23 09:14:46 +08001980 /* reference count */
1981 bs_dest->refcnt = bs_src->refcnt;
1982
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001983 /* job */
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001984 bs_dest->job = bs_src->job;
1985
1986 /* keep the same entry in bdrv_states */
1987 pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
1988 bs_src->device_name);
Benoît Canetdc364f42014-01-23 21:31:32 +01001989 bs_dest->device_list = bs_src->device_list;
Fam Zhengfbe40ff2014-05-23 21:29:42 +08001990 memcpy(bs_dest->op_blockers, bs_src->op_blockers,
1991 sizeof(bs_dest->op_blockers));
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001992}
1993
1994/*
1995 * Swap bs contents for two image chains while they are live,
1996 * while keeping required fields on the BlockDriverState that is
1997 * actually attached to a device.
1998 *
1999 * This will modify the BlockDriverState fields, and swap contents
2000 * between bs_new and bs_old. Both bs_new and bs_old are modified.
2001 *
2002 * bs_new is required to be anonymous.
2003 *
2004 * This function does not create any image files.
2005 */
2006void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
2007{
2008 BlockDriverState tmp;
2009
Benoît Canet90ce8a02014-03-05 23:48:29 +01002010 /* The code needs to swap the node_name, but simply swapping node_list won't
2011 * work, so first remove the nodes from the graph list, do the swap, and then
2012 * insert them back if needed.
2013 */
2014 if (bs_new->node_name[0] != '\0') {
2015 QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
2016 }
2017 if (bs_old->node_name[0] != '\0') {
2018 QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
2019 }
2020
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002021 /* bs_new must be anonymous and shouldn't have anything fancy enabled */
2022 assert(bs_new->device_name[0] == '\0');
Fam Zhenge4654d22013-11-13 18:29:43 +08002023 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002024 assert(bs_new->job == NULL);
2025 assert(bs_new->dev == NULL);
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002026 assert(bs_new->io_limits_enabled == false);
Benoît Canetcc0681c2013-09-02 14:14:39 +02002027 assert(!throttle_have_timer(&bs_new->throttle_state));
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002028
2029 tmp = *bs_new;
2030 *bs_new = *bs_old;
2031 *bs_old = tmp;
2032
2033 /* there are some fields that should not be swapped, move them back */
2034 bdrv_move_feature_fields(&tmp, bs_old);
2035 bdrv_move_feature_fields(bs_old, bs_new);
2036 bdrv_move_feature_fields(bs_new, &tmp);
2037
2038 /* bs_new shouldn't be in bdrv_states even after the swap! */
2039 assert(bs_new->device_name[0] == '\0');
2040
2041 /* Check a few fields that should remain attached to the device */
2042 assert(bs_new->dev == NULL);
2043 assert(bs_new->job == NULL);
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002044 assert(bs_new->io_limits_enabled == false);
Benoît Canetcc0681c2013-09-02 14:14:39 +02002045 assert(!throttle_have_timer(&bs_new->throttle_state));
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002046
Benoît Canet90ce8a02014-03-05 23:48:29 +01002047 /* insert the nodes back into the graph node list if needed */
2048 if (bs_new->node_name[0] != '\0') {
2049 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
2050 }
2051 if (bs_old->node_name[0] != '\0') {
2052 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
2053 }
2054
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002055 bdrv_rebind(bs_new);
2056 bdrv_rebind(bs_old);
2057}
2058
Jeff Cody8802d1f2012-02-28 15:54:06 -05002059/*
2060 * Add new bs contents at the top of an image chain while the chain is
2061 * live, while keeping required fields on the top layer.
2062 *
2063 * This will modify the BlockDriverState fields, and swap contents
2064 * between bs_new and bs_top. Both bs_new and bs_top are modified.
2065 *
Jeff Codyf6801b82012-03-27 16:30:19 -04002066 * bs_new is required to be anonymous.
2067 *
Jeff Cody8802d1f2012-02-28 15:54:06 -05002068 * This function does not create any image files.
2069 */
2070void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
2071{
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02002072 bdrv_swap(bs_new, bs_top);
Jeff Cody8802d1f2012-02-28 15:54:06 -05002073
2074 /* bdrv_swap() has exchanged the contents of bs_new and bs_top, so bs_new
2075 * now holds the old top layer; make it the backing file of bs_top. */
Fam Zheng8d24cce2014-05-23 21:29:45 +08002076 bdrv_set_backing_hd(bs_top, bs_new);
Jeff Cody8802d1f2012-02-28 15:54:06 -05002077}
2078
Fam Zheng4f6fd342013-08-23 09:14:47 +08002079static void bdrv_delete(BlockDriverState *bs)
bellardb3380822004-03-14 21:38:54 +00002080{
Markus Armbrusterfa879d62011-08-03 15:07:40 +02002081 assert(!bs->dev);
Paolo Bonzini3e914652012-03-30 13:17:11 +02002082 assert(!bs->job);
Fam Zheng3718d8a2014-05-23 21:29:43 +08002083 assert(bdrv_op_blocker_is_empty(bs));
Fam Zheng4f6fd342013-08-23 09:14:47 +08002084 assert(!bs->refcnt);
Fam Zhenge4654d22013-11-13 18:29:43 +08002085 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
Markus Armbruster18846de2010-06-29 16:58:30 +02002086
Stefan Hajnoczie1b5c522013-06-27 15:32:26 +02002087 bdrv_close(bs);
2088
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002089 /* remove from list, if necessary */
Ryan Harperd22b2f42011-03-29 20:51:47 -05002090 bdrv_make_anon(bs);
aurel3234c6f052008-04-08 19:51:21 +00002091
Anthony Liguori7267c092011-08-20 22:09:37 -05002092 g_free(bs);
bellardfc01f7e2003-06-30 10:03:06 +00002093}
2094
Markus Armbrusterfa879d62011-08-03 15:07:40 +02002095int bdrv_attach_dev(BlockDriverState *bs, void *dev)
2096/* TODO change to DeviceState *dev when all users are qdevified */
Markus Armbruster18846de2010-06-29 16:58:30 +02002097{
Markus Armbrusterfa879d62011-08-03 15:07:40 +02002098 if (bs->dev) {
Markus Armbruster18846de2010-06-29 16:58:30 +02002099 return -EBUSY;
2100 }
Markus Armbrusterfa879d62011-08-03 15:07:40 +02002101 bs->dev = dev;
Luiz Capitulino28a72822011-09-26 17:43:50 -03002102 bdrv_iostatus_reset(bs);
Markus Armbruster18846de2010-06-29 16:58:30 +02002103 return 0;
2104}
2105
Markus Armbrusterfa879d62011-08-03 15:07:40 +02002106/* TODO qdevified devices don't use this, remove when devices are qdevified */
2107void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
Markus Armbruster18846de2010-06-29 16:58:30 +02002108{
Markus Armbrusterfa879d62011-08-03 15:07:40 +02002109 if (bdrv_attach_dev(bs, dev) < 0) {
2110 abort();
2111 }
2112}
2113
2114void bdrv_detach_dev(BlockDriverState *bs, void *dev)
2115/* TODO change to DeviceState *dev when all users are qdevified */
2116{
2117 assert(bs->dev == dev);
2118 bs->dev = NULL;
Markus Armbruster0e49de52011-08-03 15:07:41 +02002119 bs->dev_ops = NULL;
2120 bs->dev_opaque = NULL;
Paolo Bonzini1b7fd722011-11-29 11:35:47 +01002121 bs->guest_block_size = 512;
Markus Armbruster18846de2010-06-29 16:58:30 +02002122}
2123
Markus Armbrusterfa879d62011-08-03 15:07:40 +02002124/* TODO change to return DeviceState * when all users are qdevified */
2125void *bdrv_get_attached_dev(BlockDriverState *bs)
Markus Armbruster18846de2010-06-29 16:58:30 +02002126{
Markus Armbrusterfa879d62011-08-03 15:07:40 +02002127 return bs->dev;
Markus Armbruster18846de2010-06-29 16:58:30 +02002128}
2129
Markus Armbruster0e49de52011-08-03 15:07:41 +02002130void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
2131 void *opaque)
2132{
2133 bs->dev_ops = ops;
2134 bs->dev_opaque = opaque;
2135}
2136
Paolo Bonzini32c81a42012-09-28 17:22:58 +02002137void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
2138 enum MonitorEvent ev,
2139 BlockErrorAction action, bool is_read)
Luiz Capitulino329c0a42012-01-25 16:59:43 -02002140{
2141 QObject *data;
2142 const char *action_str;
2143
2144 switch (action) {
2145 case BDRV_ACTION_REPORT:
2146 action_str = "report";
2147 break;
2148 case BDRV_ACTION_IGNORE:
2149 action_str = "ignore";
2150 break;
2151 case BDRV_ACTION_STOP:
2152 action_str = "stop";
2153 break;
2154 default:
2155 abort();
2156 }
2157
2158 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
2159 bdrv->device_name,
2160 action_str,
2161 is_read ? "read" : "write");
Paolo Bonzini32c81a42012-09-28 17:22:58 +02002162 monitor_protocol_event(ev, data);
Luiz Capitulino329c0a42012-01-25 16:59:43 -02002163
2164 qobject_decref(data);
2165}
2166
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02002167static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
2168{
2169 QObject *data;
2170
2171 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
2172 bdrv_get_device_name(bs), ejected);
2173 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
2174
2175 qobject_decref(data);
2176}
2177
Markus Armbruster7d4b4ba2011-09-06 18:58:59 +02002178static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
Markus Armbruster0e49de52011-08-03 15:07:41 +02002179{
Markus Armbruster145feb12011-08-03 15:07:42 +02002180 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02002181 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
Markus Armbruster7d4b4ba2011-09-06 18:58:59 +02002182 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02002183 if (tray_was_closed) {
2184 /* tray open */
2185 bdrv_emit_qmp_eject_event(bs, true);
2186 }
2187 if (load) {
2188 /* tray close */
2189 bdrv_emit_qmp_eject_event(bs, false);
2190 }
Markus Armbruster145feb12011-08-03 15:07:42 +02002191 }
2192}
2193
Markus Armbruster2c6942f2011-09-06 18:58:51 +02002194bool bdrv_dev_has_removable_media(BlockDriverState *bs)
2195{
2196 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
2197}
2198
Paolo Bonzini025ccaa2011-11-07 17:50:13 +01002199void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
2200{
2201 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
2202 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
2203 }
2204}
2205
Markus Armbrustere4def802011-09-06 18:58:53 +02002206bool bdrv_dev_is_tray_open(BlockDriverState *bs)
2207{
2208 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
2209 return bs->dev_ops->is_tray_open(bs->dev_opaque);
2210 }
2211 return false;
2212}
2213
Markus Armbruster145feb12011-08-03 15:07:42 +02002214static void bdrv_dev_resize_cb(BlockDriverState *bs)
2215{
2216 if (bs->dev_ops && bs->dev_ops->resize_cb) {
2217 bs->dev_ops->resize_cb(bs->dev_opaque);
Markus Armbruster0e49de52011-08-03 15:07:41 +02002218 }
2219}
2220
Markus Armbrusterf1076392011-09-06 18:58:46 +02002221bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2222{
2223 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2224 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2225 }
2226 return false;
2227}
2228
aliguorie97fc192009-04-21 23:11:50 +00002229/*
2230 * Run consistency checks on an image
2231 *
Kevin Wolfe076f332010-06-29 11:43:13 +02002232 * Returns 0 if the check could be completed (it doesn't mean that the image is
Stefan Weila1c72732011-04-28 17:20:38 +02002233 * free of errors) or -errno when an internal error occurred. The results of the
Kevin Wolfe076f332010-06-29 11:43:13 +02002234 * check are stored in res.
aliguorie97fc192009-04-21 23:11:50 +00002235 */
Kevin Wolf4534ff52012-05-11 16:07:02 +02002236int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
aliguorie97fc192009-04-21 23:11:50 +00002237{
2238 if (bs->drv->bdrv_check == NULL) {
2239 return -ENOTSUP;
2240 }
2241
Kevin Wolfe076f332010-06-29 11:43:13 +02002242 memset(res, 0, sizeof(*res));
Kevin Wolf4534ff52012-05-11 16:07:02 +02002243 return bs->drv->bdrv_check(bs, res, fix);
aliguorie97fc192009-04-21 23:11:50 +00002244}
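/*
 * A minimal usage sketch (hypothetical caller, not part of this file): run a
 * repairing consistency check.  The BDRV_FIX_* flags and the BdrvCheckResult
 * counters (e.g. corruptions, leaks) are assumed from the public block API.
 */
static int example_check_and_repair(BlockDriverState *bs)
{
    BdrvCheckResult res;

    /* ask the format driver to repair whatever it can */
    return bdrv_check(bs, &res, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
}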
2245
Kevin Wolf8a426612010-07-16 17:17:01 +02002246#define COMMIT_BUF_SECTORS 2048
2247
bellard33e39632003-07-06 17:15:21 +00002248/* commit COW file into the raw image */
2249int bdrv_commit(BlockDriverState *bs)
2250{
bellard19cb3732006-08-19 11:45:59 +00002251 BlockDriver *drv = bs->drv;
Jeff Cody72706ea2014-01-24 09:02:35 -05002252 int64_t sector, total_sectors, length, backing_length;
Kevin Wolf8a426612010-07-16 17:17:01 +02002253 int n, ro, open_flags;
Jeff Cody0bce5972012-09-20 15:13:34 -04002254 int ret = 0;
Jeff Cody72706ea2014-01-24 09:02:35 -05002255 uint8_t *buf = NULL;
Jim Meyeringc2cba3d2012-10-04 13:09:46 +02002256 char filename[PATH_MAX];
bellard33e39632003-07-06 17:15:21 +00002257
bellard19cb3732006-08-19 11:45:59 +00002258 if (!drv)
2259 return -ENOMEDIUM;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002260
2261 if (!bs->backing_hd) {
2262 return -ENOTSUP;
bellard33e39632003-07-06 17:15:21 +00002263 }
2264
Fam Zheng3718d8a2014-05-23 21:29:43 +08002265 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2266 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
Stefan Hajnoczi2d3735d2012-01-18 14:40:41 +00002267 return -EBUSY;
2268 }
2269
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002270 ro = bs->backing_hd->read_only;
Jim Meyeringc2cba3d2012-10-04 13:09:46 +02002271 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2272 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002273 open_flags = bs->backing_hd->open_flags;
2274
2275 if (ro) {
Jeff Cody0bce5972012-09-20 15:13:34 -04002276 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2277 return -EACCES;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002278 }
bellard33e39632003-07-06 17:15:21 +00002279 }
bellardea2384d2004-08-01 21:59:26 +00002280
Jeff Cody72706ea2014-01-24 09:02:35 -05002281 length = bdrv_getlength(bs);
2282 if (length < 0) {
2283 ret = length;
2284 goto ro_cleanup;
2285 }
2286
2287 backing_length = bdrv_getlength(bs->backing_hd);
2288 if (backing_length < 0) {
2289 ret = backing_length;
2290 goto ro_cleanup;
2291 }
2292
2293 /* If our top snapshot is larger than the backing file image,
2294 * grow the backing file image if possible. If not possible,
2295 * we must return an error */
2296 if (length > backing_length) {
2297 ret = bdrv_truncate(bs->backing_hd, length);
2298 if (ret < 0) {
2299 goto ro_cleanup;
2300 }
2301 }
2302
2303 total_sectors = length >> BDRV_SECTOR_BITS;
Anthony Liguori7267c092011-08-20 22:09:37 -05002304 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
bellardea2384d2004-08-01 21:59:26 +00002305
Kevin Wolf8a426612010-07-16 17:17:01 +02002306 for (sector = 0; sector < total_sectors; sector += n) {
Paolo Bonzinid6636402013-09-04 19:00:25 +02002307 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2308 if (ret < 0) {
2309 goto ro_cleanup;
2310 }
2311 if (ret) {
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002312 ret = bdrv_read(bs, sector, buf, n);
2313 if (ret < 0) {
Kevin Wolf8a426612010-07-16 17:17:01 +02002314 goto ro_cleanup;
2315 }
2316
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002317 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2318 if (ret < 0) {
Kevin Wolf8a426612010-07-16 17:17:01 +02002319 goto ro_cleanup;
2320 }
bellardea2384d2004-08-01 21:59:26 +00002321 }
2322 }
bellard95389c82005-12-18 18:28:15 +00002323
Christoph Hellwig1d449522010-01-17 12:32:30 +01002324 if (drv->bdrv_make_empty) {
2325 ret = drv->bdrv_make_empty(bs);
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002326 if (ret < 0) {
2327 goto ro_cleanup;
2328 }
Christoph Hellwig1d449522010-01-17 12:32:30 +01002329 bdrv_flush(bs);
2330 }
bellard95389c82005-12-18 18:28:15 +00002331
Christoph Hellwig3f5075a2010-01-12 13:49:23 +01002332 /*
2333 * Make sure all data we wrote to the backing device is actually
2334 * stable on disk.
2335 */
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002336 if (bs->backing_hd) {
Christoph Hellwig3f5075a2010-01-12 13:49:23 +01002337 bdrv_flush(bs->backing_hd);
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002338 }
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002339
Kevin Wolfdabfa6c2014-01-24 14:00:43 +01002340 ret = 0;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002341ro_cleanup:
Anthony Liguori7267c092011-08-20 22:09:37 -05002342 g_free(buf);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002343
2344 if (ro) {
Jeff Cody0bce5972012-09-20 15:13:34 -04002345 /* ignoring error return here */
2346 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02002347 }
2348
Christoph Hellwig1d449522010-01-17 12:32:30 +01002349 return ret;
bellard33e39632003-07-06 17:15:21 +00002350}
2351
Stefan Hajnoczie8877492012-03-05 18:10:11 +00002352int bdrv_commit_all(void)
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02002353{
2354 BlockDriverState *bs;
2355
Benoît Canetdc364f42014-01-23 21:31:32 +01002356 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02002357 AioContext *aio_context = bdrv_get_aio_context(bs);
2358
2359 aio_context_acquire(aio_context);
Jeff Cody272d2d82013-02-26 09:55:48 -05002360 if (bs->drv && bs->backing_hd) {
2361 int ret = bdrv_commit(bs);
2362 if (ret < 0) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02002363 aio_context_release(aio_context);
Jeff Cody272d2d82013-02-26 09:55:48 -05002364 return ret;
2365 }
Stefan Hajnoczie8877492012-03-05 18:10:11 +00002366 }
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02002367 aio_context_release(aio_context);
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02002368 }
Stefan Hajnoczie8877492012-03-05 18:10:11 +00002369 return 0;
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02002370}
2371
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002372/**
2373 * Remove an active request from the tracked requests list
2374 *
2375 * This function should be called when a tracked request is completing.
2376 */
2377static void tracked_request_end(BdrvTrackedRequest *req)
2378{
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002379 if (req->serialising) {
2380 req->bs->serialising_in_flight--;
2381 }
2382
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002383 QLIST_REMOVE(req, list);
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002384 qemu_co_queue_restart_all(&req->wait_queue);
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002385}
2386
2387/**
2388 * Add an active request to the tracked requests list
2389 */
2390static void tracked_request_begin(BdrvTrackedRequest *req,
2391 BlockDriverState *bs,
Kevin Wolf793ed472013-12-03 15:31:25 +01002392 int64_t offset,
2393 unsigned int bytes, bool is_write)
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002394{
2395 *req = (BdrvTrackedRequest){
2396 .bs = bs,
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002397 .offset = offset,
2398 .bytes = bytes,
2399 .is_write = is_write,
2400 .co = qemu_coroutine_self(),
2401 .serialising = false,
Kevin Wolf73271452013-12-04 17:08:50 +01002402 .overlap_offset = offset,
2403 .overlap_bytes = bytes,
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002404 };
2405
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002406 qemu_co_queue_init(&req->wait_queue);
2407
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002408 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2409}
2410
Kevin Wolfe96126f2014-02-08 10:42:18 +01002411static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002412{
Kevin Wolf73271452013-12-04 17:08:50 +01002413 int64_t overlap_offset = req->offset & ~(align - 1);
Kevin Wolfe96126f2014-02-08 10:42:18 +01002414 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2415 - overlap_offset;
Kevin Wolf73271452013-12-04 17:08:50 +01002416
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002417 if (!req->serialising) {
2418 req->bs->serialising_in_flight++;
2419 req->serialising = true;
2420 }
Kevin Wolf73271452013-12-04 17:08:50 +01002421
2422 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2423 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002424}
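/*
 * Worked example (illustrative numbers only): serialising a request with
 * offset 1536 and bytes 1024 at a 4096-byte alignment widens its overlap
 * window to [0, 4096):
 *   overlap_offset = 1536 & ~(4096 - 1)              = 0
 *   overlap_bytes  = ROUND_UP(1536 + 1024, 4096) - 0 = 4096
 */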
2425
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002426/**
2427 * Round a region to cluster boundaries
2428 */
Paolo Bonzini343bded2013-01-21 17:09:42 +01002429void bdrv_round_to_clusters(BlockDriverState *bs,
2430 int64_t sector_num, int nb_sectors,
2431 int64_t *cluster_sector_num,
2432 int *cluster_nb_sectors)
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002433{
2434 BlockDriverInfo bdi;
2435
2436 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2437 *cluster_sector_num = sector_num;
2438 *cluster_nb_sectors = nb_sectors;
2439 } else {
2440 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2441 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2442 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2443 nb_sectors, c);
2444 }
2445}
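/*
 * A worked example (illustrative numbers, not part of this file): with an
 * image reporting a 64 KiB cluster size (128 sectors of 512 bytes), a request
 * covering sectors [100, 160) is widened to the aligned range [0, 256).
 */
static void example_round_to_clusters(BlockDriverState *bs)
{
    int64_t cluster_sector_num;
    int cluster_nb_sectors;

    bdrv_round_to_clusters(bs, 100, 60, &cluster_sector_num,
                           &cluster_nb_sectors);
    /* assuming the 64 KiB cluster size above:
     * cluster_sector_num == 0 and cluster_nb_sectors == 256 */
}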
2446
Kevin Wolf73271452013-12-04 17:08:50 +01002447static int bdrv_get_cluster_size(BlockDriverState *bs)
Kevin Wolf793ed472013-12-03 15:31:25 +01002448{
2449 BlockDriverInfo bdi;
Kevin Wolf73271452013-12-04 17:08:50 +01002450 int ret;
Kevin Wolf793ed472013-12-03 15:31:25 +01002451
Kevin Wolf73271452013-12-04 17:08:50 +01002452 ret = bdrv_get_info(bs, &bdi);
2453 if (ret < 0 || bdi.cluster_size == 0) {
2454 return bs->request_alignment;
Kevin Wolf793ed472013-12-03 15:31:25 +01002455 } else {
Kevin Wolf73271452013-12-04 17:08:50 +01002456 return bdi.cluster_size;
Kevin Wolf793ed472013-12-03 15:31:25 +01002457 }
2458}
2459
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002460static bool tracked_request_overlaps(BdrvTrackedRequest *req,
Kevin Wolf793ed472013-12-03 15:31:25 +01002461 int64_t offset, unsigned int bytes)
2462{
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002463 /* aaaa bbbb */
Kevin Wolf73271452013-12-04 17:08:50 +01002464 if (offset >= req->overlap_offset + req->overlap_bytes) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002465 return false;
2466 }
2467 /* bbbb aaaa */
Kevin Wolf73271452013-12-04 17:08:50 +01002468 if (req->overlap_offset >= offset + bytes) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00002469 return false;
2470 }
2471 return true;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002472}
2473
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002474static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002475{
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002476 BlockDriverState *bs = self->bs;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002477 BdrvTrackedRequest *req;
2478 bool retry;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002479 bool waited = false;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002480
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002481 if (!bs->serialising_in_flight) {
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002482 return false;
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002483 }
2484
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002485 do {
2486 retry = false;
2487 QLIST_FOREACH(req, &bs->tracked_requests, list) {
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01002488 if (req == self || (!req->serialising && !self->serialising)) {
Kevin Wolf65afd212013-12-03 14:55:55 +01002489 continue;
2490 }
Kevin Wolf73271452013-12-04 17:08:50 +01002491 if (tracked_request_overlaps(req, self->overlap_offset,
2492 self->overlap_bytes))
2493 {
Stefan Hajnoczi5f8b6492011-11-30 12:23:42 +00002494 /* Hitting this means there was a reentrant request, for
2495 * example, a block driver issuing nested requests. This must
2496 * never happen since it means deadlock.
2497 */
2498 assert(qemu_coroutine_self() != req->co);
2499
Kevin Wolf64604402013-12-13 13:04:35 +01002500 /* If the request is already (indirectly) waiting for us, or
2501 * will wait for us as soon as it wakes up, then just go on
2502 * (instead of producing a deadlock in the former case). */
2503 if (!req->waiting_for) {
2504 self->waiting_for = req;
2505 qemu_co_queue_wait(&req->wait_queue);
2506 self->waiting_for = NULL;
2507 retry = true;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002508 waited = true;
Kevin Wolf64604402013-12-13 13:04:35 +01002509 break;
2510 }
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002511 }
2512 }
2513 } while (retry);
Kevin Wolf28de2dc2014-01-14 11:41:35 +01002514
2515 return waited;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002516}
2517
Kevin Wolf756e6732010-01-12 12:55:17 +01002518/*
2519 * Return values:
2520 * 0 - success
2521 * -EINVAL - backing format specified, but no file
2522 * -ENOSPC - can't update the backing file because no space is left in the
2523 * image file header
2524 * -ENOTSUP - format driver doesn't support changing the backing file
2525 */
2526int bdrv_change_backing_file(BlockDriverState *bs,
2527 const char *backing_file, const char *backing_fmt)
2528{
2529 BlockDriver *drv = bs->drv;
Paolo Bonzini469ef352012-04-12 14:01:02 +02002530 int ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01002531
Paolo Bonzini5f377792012-04-12 14:01:01 +02002532 /* Backing file format doesn't make sense without a backing file */
2533 if (backing_fmt && !backing_file) {
2534 return -EINVAL;
2535 }
2536
Kevin Wolf756e6732010-01-12 12:55:17 +01002537 if (drv->bdrv_change_backing_file != NULL) {
Paolo Bonzini469ef352012-04-12 14:01:02 +02002538 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
Kevin Wolf756e6732010-01-12 12:55:17 +01002539 } else {
Paolo Bonzini469ef352012-04-12 14:01:02 +02002540 ret = -ENOTSUP;
Kevin Wolf756e6732010-01-12 12:55:17 +01002541 }
Paolo Bonzini469ef352012-04-12 14:01:02 +02002542
2543 if (ret == 0) {
2544 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2545 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2546 }
2547 return ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01002548}
2549
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002550/*
2551 * Finds the image layer in the chain that has 'bs' as its backing file.
2552 *
2553 * active is the current topmost image.
2554 *
2555 * Returns NULL if bs is not found in active's image chain,
2556 * or if active == bs.
2557 */
2558BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2559 BlockDriverState *bs)
2560{
2561 BlockDriverState *overlay = NULL;
2562 BlockDriverState *intermediate;
2563
2564 assert(active != NULL);
2565 assert(bs != NULL);
2566
2567 /* if bs is the same as active, then by definition it has no overlay
2568 */
2569 if (active == bs) {
2570 return NULL;
2571 }
2572
2573 intermediate = active;
2574 while (intermediate->backing_hd) {
2575 if (intermediate->backing_hd == bs) {
2576 overlay = intermediate;
2577 break;
2578 }
2579 intermediate = intermediate->backing_hd;
2580 }
2581
2582 return overlay;
2583}
2584
2585typedef struct BlkIntermediateStates {
2586 BlockDriverState *bs;
2587 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2588} BlkIntermediateStates;
2589
2590
2591/*
2592 * Drops images above 'base' up to and including 'top', and sets the image
2593 * above 'top' to have base as its backing file.
2594 *
2595 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2596 * information in 'bs' can be properly updated.
2597 *
2598 * E.g., this will convert the following chain:
2599 * bottom <- base <- intermediate <- top <- active
2600 *
2601 * to
2602 *
2603 * bottom <- base <- active
2604 *
2605 * It is allowed for bottom==base, in which case it converts:
2606 *
2607 * base <- intermediate <- top <- active
2608 *
2609 * to
2610 *
2611 * base <- active
2612 *
2613 * Error conditions:
2614 * if active == top, that is considered an error
2615 *
2616 */
2617int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2618 BlockDriverState *base)
2619{
2620 BlockDriverState *intermediate;
2621 BlockDriverState *base_bs = NULL;
2622 BlockDriverState *new_top_bs = NULL;
2623 BlkIntermediateStates *intermediate_state, *next;
2624 int ret = -EIO;
2625
2626 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2627 QSIMPLEQ_INIT(&states_to_delete);
2628
2629 if (!top->drv || !base->drv) {
2630 goto exit;
2631 }
2632
2633 new_top_bs = bdrv_find_overlay(active, top);
2634
2635 if (new_top_bs == NULL) {
2636 /* we could not find the image above 'top', this is an error */
2637 goto exit;
2638 }
2639
2640 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2641 * to do, no intermediate images */
2642 if (new_top_bs->backing_hd == base) {
2643 ret = 0;
2644 goto exit;
2645 }
2646
2647 intermediate = top;
2648
2649 /* now we will go down through the list, and add each BDS we find
2650 * into our deletion queue, until we hit the 'base'
2651 */
2652 while (intermediate) {
2653 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2654 intermediate_state->bs = intermediate;
2655 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2656
2657 if (intermediate->backing_hd == base) {
2658 base_bs = intermediate->backing_hd;
2659 break;
2660 }
2661 intermediate = intermediate->backing_hd;
2662 }
2663 if (base_bs == NULL) {
 2664 /* Something went wrong; we did not end at the base. Safely
 2665 * unravel everything and exit with an error */
2666 goto exit;
2667 }
2668
2669 /* success - we can delete the intermediate states, and link top->base */
2670 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
2671 base_bs->drv ? base_bs->drv->format_name : "");
2672 if (ret) {
2673 goto exit;
2674 }
Fam Zheng920beae2014-05-23 21:29:46 +08002675 bdrv_set_backing_hd(new_top_bs, base_bs);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002676
2677 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2678 /* so that bdrv_close() does not recursively close the chain */
Fam Zheng920beae2014-05-23 21:29:46 +08002679 bdrv_set_backing_hd(intermediate_state->bs, NULL);
Fam Zheng4f6fd342013-08-23 09:14:47 +08002680 bdrv_unref(intermediate_state->bs);
Jeff Cody6ebdcee2012-09-27 13:29:12 -04002681 }
2682 ret = 0;
2683
2684exit:
2685 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2686 g_free(intermediate_state);
2687 }
2688 return ret;
2689}
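
/*
 * Illustrative sketch, an assumption rather than original code: collapsing
 * everything between an intermediate layer and its base, e.g. after a
 * completed streaming job.  With a chain base <- mid <- top <- active this
 * leaves base <- active.
 */
static int example_collapse_chain(BlockDriverState *active,
                                  BlockDriverState *top,
                                  BlockDriverState *base)
{
    int ret = bdrv_drop_intermediate(active, top, base);
    if (ret < 0) {
        error_report("dropping intermediate images failed: %s",
                     strerror(-ret));
    }
    return ret;
}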
2690
2691
aliguori71d07702009-03-03 17:37:16 +00002692static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2693 size_t size)
2694{
2695 int64_t len;
2696
Kevin Wolf1dd3a442014-04-14 14:48:16 +02002697 if (size > INT_MAX) {
2698 return -EIO;
2699 }
2700
aliguori71d07702009-03-03 17:37:16 +00002701 if (!bdrv_is_inserted(bs))
2702 return -ENOMEDIUM;
2703
2704 if (bs->growable)
2705 return 0;
2706
2707 len = bdrv_getlength(bs);
2708
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02002709 if (offset < 0)
2710 return -EIO;
2711
2712 if ((offset > len) || (len - offset < size))
aliguori71d07702009-03-03 17:37:16 +00002713 return -EIO;
2714
2715 return 0;
2716}
2717
2718static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2719 int nb_sectors)
2720{
Kevin Wolf54db38a2014-04-14 14:47:14 +02002721 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
Kevin Wolf8f4754e2014-03-26 13:06:02 +01002722 return -EIO;
2723 }
2724
Jes Sorenseneb5a3162010-05-27 16:20:31 +02002725 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2726 nb_sectors * BDRV_SECTOR_SIZE);
aliguori71d07702009-03-03 17:37:16 +00002727}
2728
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002729typedef struct RwCo {
2730 BlockDriverState *bs;
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002731 int64_t offset;
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002732 QEMUIOVector *qiov;
2733 bool is_write;
2734 int ret;
Peter Lieven4105eaa2013-07-11 14:16:22 +02002735 BdrvRequestFlags flags;
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002736} RwCo;
2737
2738static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2739{
2740 RwCo *rwco = opaque;
2741
2742 if (!rwco->is_write) {
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002743 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2744 rwco->qiov->size, rwco->qiov,
Peter Lieven4105eaa2013-07-11 14:16:22 +02002745 rwco->flags);
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002746 } else {
2747 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2748 rwco->qiov->size, rwco->qiov,
2749 rwco->flags);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002750 }
2751}
2752
2753/*
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002754 * Process a vectored synchronous request using coroutines
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002755 */
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002756static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2757 QEMUIOVector *qiov, bool is_write,
2758 BdrvRequestFlags flags)
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002759{
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002760 Coroutine *co;
2761 RwCo rwco = {
2762 .bs = bs,
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002763 .offset = offset,
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002764 .qiov = qiov,
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002765 .is_write = is_write,
2766 .ret = NOT_DONE,
Peter Lieven4105eaa2013-07-11 14:16:22 +02002767 .flags = flags,
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002768 };
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002769
Zhi Yong Wu498e3862012-04-02 18:59:34 +08002770 /**
 2771 * In a synchronous call context the vCPU is blocked, so this throttling
 2772 * timer will never fire; I/O throttling therefore has to be disabled here
 2773 * if it has been enabled.
2774 */
2775 if (bs->io_limits_enabled) {
2776 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2777 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2778 bdrv_io_limits_disable(bs);
2779 }
2780
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002781 if (qemu_in_coroutine()) {
2782 /* Fast-path if already in coroutine context */
2783 bdrv_rw_co_entry(&rwco);
2784 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02002785 AioContext *aio_context = bdrv_get_aio_context(bs);
2786
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002787 co = qemu_coroutine_create(bdrv_rw_co_entry);
2788 qemu_coroutine_enter(co, &rwco);
2789 while (rwco.ret == NOT_DONE) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02002790 aio_poll(aio_context, true);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01002791 }
2792 }
2793 return rwco.ret;
2794}
2795
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002796/*
2797 * Process a synchronous request using coroutines
2798 */
2799static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
Peter Lieven4105eaa2013-07-11 14:16:22 +02002800 int nb_sectors, bool is_write, BdrvRequestFlags flags)
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002801{
2802 QEMUIOVector qiov;
2803 struct iovec iov = {
2804 .iov_base = (void *)buf,
2805 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2806 };
2807
Kevin Wolfda15ee52014-04-14 15:39:36 +02002808 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2809 return -EINVAL;
2810 }
2811
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002812 qemu_iovec_init_external(&qiov, &iov, 1);
Kevin Wolf775aa8b2013-12-05 12:09:38 +01002813 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2814 &qiov, is_write, flags);
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002815}
2816
bellard19cb3732006-08-19 11:45:59 +00002817/* return < 0 if error. See bdrv_write() for the return codes */
ths5fafdf22007-09-16 21:08:06 +00002818int bdrv_read(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00002819 uint8_t *buf, int nb_sectors)
2820{
Peter Lieven4105eaa2013-07-11 14:16:22 +02002821 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
bellardfc01f7e2003-06-30 10:03:06 +00002822}
2823
Markus Armbruster07d27a42012-06-29 17:34:29 +02002824/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2825int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2826 uint8_t *buf, int nb_sectors)
2827{
2828 bool enabled;
2829 int ret;
2830
2831 enabled = bs->io_limits_enabled;
2832 bs->io_limits_enabled = false;
Peter Lieven4e7395e2013-07-18 10:37:32 +02002833 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
Markus Armbruster07d27a42012-06-29 17:34:29 +02002834 bs->io_limits_enabled = enabled;
2835 return ret;
2836}
2837
ths5fafdf22007-09-16 21:08:06 +00002838/* Return < 0 if error. Important errors are:
bellard19cb3732006-08-19 11:45:59 +00002839 -EIO generic I/O error (may happen for all errors)
2840 -ENOMEDIUM No media inserted.
2841 -EINVAL Invalid sector number or nb_sectors
2842 -EACCES Trying to write a read-only device
2843*/
ths5fafdf22007-09-16 21:08:06 +00002844int bdrv_write(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00002845 const uint8_t *buf, int nb_sectors)
2846{
Peter Lieven4105eaa2013-07-11 14:16:22 +02002847 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
bellard83f64092006-08-01 16:21:11 +00002848}
2849
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02002850int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2851 int nb_sectors, BdrvRequestFlags flags)
Peter Lieven4105eaa2013-07-11 14:16:22 +02002852{
2853 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02002854 BDRV_REQ_ZERO_WRITE | flags);
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002855}
2856
Peter Lievend75cbb52013-10-24 12:07:03 +02002857/*
2858 * Completely zero out a block device with the help of bdrv_write_zeroes.
2859 * The operation is sped up by checking the block status and only writing
2860 * zeroes to the device if they currently do not return zeroes. Optional
2861 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2862 *
2863 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2864 */
2865int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2866{
Kevin Wolf9ce10c02014-04-14 17:03:34 +02002867 int64_t target_size;
Peter Lievend75cbb52013-10-24 12:07:03 +02002868 int64_t ret, nb_sectors, sector_num = 0;
2869 int n;
2870
Kevin Wolf9ce10c02014-04-14 17:03:34 +02002871 target_size = bdrv_getlength(bs);
2872 if (target_size < 0) {
2873 return target_size;
2874 }
2875 target_size /= BDRV_SECTOR_SIZE;
2876
Peter Lievend75cbb52013-10-24 12:07:03 +02002877 for (;;) {
2878 nb_sectors = target_size - sector_num;
2879 if (nb_sectors <= 0) {
2880 return 0;
2881 }
2882 if (nb_sectors > INT_MAX) {
2883 nb_sectors = INT_MAX;
2884 }
2885 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
Peter Lieven3d94ce62013-12-12 13:57:05 +01002886 if (ret < 0) {
2887 error_report("error getting block status at sector %" PRId64 ": %s",
2888 sector_num, strerror(-ret));
2889 return ret;
2890 }
Peter Lievend75cbb52013-10-24 12:07:03 +02002891 if (ret & BDRV_BLOCK_ZERO) {
2892 sector_num += n;
2893 continue;
2894 }
2895 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2896 if (ret < 0) {
2897 error_report("error writing zeroes at sector %" PRId64 ": %s",
2898 sector_num, strerror(-ret));
2899 return ret;
2900 }
2901 sector_num += n;
2902 }
2903}
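
/*
 * Illustrative sketch (assumption): zero a whole device while letting the
 * driver unmap ranges instead of writing explicit zeroes where it can.
 */
static int example_wipe_device(BlockDriverState *bs)
{
    int ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
    if (ret < 0) {
        error_report("wiping device failed: %s", strerror(-ret));
    }
    return ret;
}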
2904
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002905int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
bellard83f64092006-08-01 16:21:11 +00002906{
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002907 QEMUIOVector qiov;
2908 struct iovec iov = {
2909 .iov_base = (void *)buf,
2910 .iov_len = bytes,
2911 };
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01002912 int ret;
bellard83f64092006-08-01 16:21:11 +00002913
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002914 if (bytes < 0) {
2915 return -EINVAL;
bellard83f64092006-08-01 16:21:11 +00002916 }
2917
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002918 qemu_iovec_init_external(&qiov, &iov, 1);
2919 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2920 if (ret < 0) {
2921 return ret;
bellard83f64092006-08-01 16:21:11 +00002922 }
2923
Kevin Wolfa3ef6572013-12-05 12:29:59 +01002924 return bytes;
bellard83f64092006-08-01 16:21:11 +00002925}
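
/*
 * Illustrative sketch (assumption): byte-granularity reads are convenient for
 * fixed-size image format headers.  The offset 0 and the helper name are made
 * up for the example.
 */
static int example_read_header(BlockDriverState *bs, void *header, int size)
{
    int ret = bdrv_pread(bs, 0, header, size);

    /* bdrv_pread() returns the number of bytes read on success */
    return ret < 0 ? ret : 0;
}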
2926
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002927int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
bellard83f64092006-08-01 16:21:11 +00002928{
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01002929 int ret;
bellard83f64092006-08-01 16:21:11 +00002930
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002931 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2932 if (ret < 0) {
2933 return ret;
bellard83f64092006-08-01 16:21:11 +00002934 }
2935
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002936 return qiov->size;
2937}
2938
2939int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002940 const void *buf, int bytes)
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002941{
2942 QEMUIOVector qiov;
2943 struct iovec iov = {
2944 .iov_base = (void *) buf,
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002945 .iov_len = bytes,
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002946 };
2947
Kevin Wolf8407d5d2013-12-05 12:34:02 +01002948 if (bytes < 0) {
2949 return -EINVAL;
2950 }
2951
Kevin Wolf8d3b1a22013-04-05 21:27:55 +02002952 qemu_iovec_init_external(&qiov, &iov, 1);
2953 return bdrv_pwritev(bs, offset, &qiov);
bellard83f64092006-08-01 16:21:11 +00002954}
bellard83f64092006-08-01 16:21:11 +00002955
Kevin Wolff08145f2010-06-16 16:38:15 +02002956/*
2957 * Writes to the file and ensures that no writes are reordered across this
2958 * request (acts as a barrier)
2959 *
2960 * Returns 0 on success, -errno in error cases.
2961 */
2962int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2963 const void *buf, int count)
2964{
2965 int ret;
2966
2967 ret = bdrv_pwrite(bs, offset, buf, count);
2968 if (ret < 0) {
2969 return ret;
2970 }
2971
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02002972 /* No flush needed for cache modes that already do it */
2973 if (bs->enable_write_cache) {
Kevin Wolff08145f2010-06-16 16:38:15 +02002974 bdrv_flush(bs);
2975 }
2976
2977 return 0;
2978}
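
/*
 * Illustrative sketch (assumption): persist a small metadata update and make
 * sure it is stable on disk before any later write, relying on the barrier
 * semantics documented above.  Writing at offset 0 is just an example.
 */
static int example_update_header(BlockDriverState *bs,
                                 const void *header, int header_size)
{
    return bdrv_pwrite_sync(bs, 0, header, header_size);
}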
2979
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00002980static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
Stefan Hajnocziab185922011-11-17 13:40:31 +00002981 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2982{
2983 /* Perform I/O through a temporary buffer so that users who scribble over
2984 * their read buffer while the operation is in progress do not end up
2985 * modifying the image file. This is critical for zero-copy guest I/O
2986 * where anything might happen inside guest memory.
2987 */
2988 void *bounce_buffer;
2989
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00002990 BlockDriver *drv = bs->drv;
Stefan Hajnocziab185922011-11-17 13:40:31 +00002991 struct iovec iov;
2992 QEMUIOVector bounce_qiov;
2993 int64_t cluster_sector_num;
2994 int cluster_nb_sectors;
2995 size_t skip_bytes;
2996 int ret;
2997
 2998 /* Cover the entire cluster so that no additional backing file I/O is required
 2999 * when allocating a cluster in the image file.
3000 */
Paolo Bonzini343bded2013-01-21 17:09:42 +01003001 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
3002 &cluster_sector_num, &cluster_nb_sectors);
Stefan Hajnocziab185922011-11-17 13:40:31 +00003003
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003004 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
3005 cluster_sector_num, cluster_nb_sectors);
Stefan Hajnocziab185922011-11-17 13:40:31 +00003006
3007 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
3008 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
3009 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
3010
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00003011 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
3012 &bounce_qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00003013 if (ret < 0) {
3014 goto err;
3015 }
3016
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00003017 if (drv->bdrv_co_write_zeroes &&
3018 buffer_is_zero(bounce_buffer, iov.iov_len)) {
Kevin Wolf621f0582012-03-20 15:12:58 +01003019 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003020 cluster_nb_sectors, 0);
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00003021 } else {
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02003022 /* This does not change the data on the disk, it is not necessary
3023 * to flush even in cache=writethrough mode.
3024 */
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00003025 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
Stefan Hajnocziab185922011-11-17 13:40:31 +00003026 &bounce_qiov);
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00003027 }
3028
Stefan Hajnocziab185922011-11-17 13:40:31 +00003029 if (ret < 0) {
3030 /* It might be okay to ignore write errors for guest requests. If this
3031 * is a deliberate copy-on-read then we don't want to ignore the error.
3032 * Simply report it in all cases.
3033 */
3034 goto err;
3035 }
3036
3037 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
Michael Tokarev03396142012-06-07 20:17:55 +04003038 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
3039 nb_sectors * BDRV_SECTOR_SIZE);
Stefan Hajnocziab185922011-11-17 13:40:31 +00003040
3041err:
3042 qemu_vfree(bounce_buffer);
3043 return ret;
3044}
3045
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003046/*
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003047 * Forwards an already correctly aligned request to the BlockDriver. This
3048 * handles copy on read and zeroing after EOF; any other features must be
3049 * implemented by the caller.
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003050 */
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003051static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
Kevin Wolf65afd212013-12-03 14:55:55 +01003052 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
Kevin Wolfec746e12013-12-04 12:13:10 +01003053 int64_t align, QEMUIOVector *qiov, int flags)
Kevin Wolfda1fa912011-07-14 17:27:13 +02003054{
3055 BlockDriver *drv = bs->drv;
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00003056 int ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003057
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003058 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3059 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003060
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003061 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3062 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3063
3064 /* Handle Copy on Read and associated serialisation */
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003065 if (flags & BDRV_REQ_COPY_ON_READ) {
Kevin Wolf73271452013-12-04 17:08:50 +01003066 /* If we touch the same cluster it counts as an overlap. This
3067 * guarantees that allocating writes will be serialized and not race
3068 * with each other for the same cluster. For example, in copy-on-read
3069 * it ensures that the CoR read and write operations are atomic and
3070 * guest writes cannot interleave between them. */
3071 mark_request_serialising(req, bdrv_get_cluster_size(bs));
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003072 }
3073
Kevin Wolf2dbafdc2013-12-04 16:43:44 +01003074 wait_serialising_requests(req);
Stefan Hajnoczif4658282011-11-17 13:40:29 +00003075
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003076 if (flags & BDRV_REQ_COPY_ON_READ) {
Stefan Hajnocziab185922011-11-17 13:40:31 +00003077 int pnum;
3078
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02003079 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
Stefan Hajnocziab185922011-11-17 13:40:31 +00003080 if (ret < 0) {
3081 goto out;
3082 }
3083
3084 if (!ret || pnum != nb_sectors) {
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003085 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00003086 goto out;
3087 }
3088 }
3089
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003090 /* Forward the request to the BlockDriver */
MORITA Kazutaka893a8f62013-08-06 09:53:40 +08003091 if (!(bs->zero_beyond_eof && bs->growable)) {
3092 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3093 } else {
3094 /* Read zeros after EOF of growable BDSes */
3095 int64_t len, total_sectors, max_nb_sectors;
3096
3097 len = bdrv_getlength(bs);
3098 if (len < 0) {
3099 ret = len;
3100 goto out;
3101 }
3102
Fam Zhengd055a1f2013-09-26 19:55:33 +08003103 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
Kevin Wolf5f5bcd82014-02-07 16:00:09 +01003104 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3105 align >> BDRV_SECTOR_BITS);
MORITA Kazutaka893a8f62013-08-06 09:53:40 +08003106 if (max_nb_sectors > 0) {
3107 ret = drv->bdrv_co_readv(bs, sector_num,
3108 MIN(nb_sectors, max_nb_sectors), qiov);
3109 } else {
3110 ret = 0;
3111 }
3112
3113 /* Reading beyond end of file is supposed to produce zeroes */
3114 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3115 uint64_t offset = MAX(0, total_sectors - sector_num);
3116 uint64_t bytes = (sector_num + nb_sectors - offset) *
3117 BDRV_SECTOR_SIZE;
3118 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3119 }
3120 }
Stefan Hajnocziab185922011-11-17 13:40:31 +00003121
3122out:
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00003123 return ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003124}
3125
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003126/*
3127 * Handle a read request in coroutine context
3128 */
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003129static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3130 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003131 BdrvRequestFlags flags)
3132{
3133 BlockDriver *drv = bs->drv;
Kevin Wolf65afd212013-12-03 14:55:55 +01003134 BdrvTrackedRequest req;
3135
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003136 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3137 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3138 uint8_t *head_buf = NULL;
3139 uint8_t *tail_buf = NULL;
3140 QEMUIOVector local_qiov;
3141 bool use_local_qiov = false;
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003142 int ret;
3143
3144 if (!drv) {
3145 return -ENOMEDIUM;
3146 }
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003147 if (bdrv_check_byte_request(bs, offset, bytes)) {
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003148 return -EIO;
3149 }
3150
3151 if (bs->copy_on_read) {
3152 flags |= BDRV_REQ_COPY_ON_READ;
3153 }
3154
3155 /* throttling disk I/O */
3156 if (bs->io_limits_enabled) {
Kevin Wolfd5103582014-01-16 13:29:10 +01003157 bdrv_io_limits_intercept(bs, bytes, false);
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003158 }
3159
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003160 /* Align read if necessary by padding qiov */
3161 if (offset & (align - 1)) {
3162 head_buf = qemu_blockalign(bs, align);
3163 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3164 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3165 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3166 use_local_qiov = true;
3167
3168 bytes += offset & (align - 1);
3169 offset = offset & ~(align - 1);
3170 }
3171
3172 if ((offset + bytes) & (align - 1)) {
3173 if (!use_local_qiov) {
3174 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3175 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3176 use_local_qiov = true;
3177 }
3178 tail_buf = qemu_blockalign(bs, align);
3179 qemu_iovec_add(&local_qiov, tail_buf,
3180 align - ((offset + bytes) & (align - 1)));
3181
3182 bytes = ROUND_UP(bytes, align);
3183 }
3184
Kevin Wolf65afd212013-12-03 14:55:55 +01003185 tracked_request_begin(&req, bs, offset, bytes, false);
Kevin Wolfec746e12013-12-04 12:13:10 +01003186 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003187 use_local_qiov ? &local_qiov : qiov,
3188 flags);
Kevin Wolf65afd212013-12-03 14:55:55 +01003189 tracked_request_end(&req);
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003190
3191 if (use_local_qiov) {
3192 qemu_iovec_destroy(&local_qiov);
3193 qemu_vfree(head_buf);
3194 qemu_vfree(tail_buf);
3195 }
3196
Kevin Wolfd0c7f642013-12-02 15:07:48 +01003197 return ret;
3198}
3199
Kevin Wolf1b0288a2013-12-02 16:09:46 +01003200static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3201 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3202 BdrvRequestFlags flags)
3203{
3204 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3205 return -EINVAL;
3206 }
3207
3208 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3209 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3210}
3211
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003212int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
Kevin Wolfda1fa912011-07-14 17:27:13 +02003213 int nb_sectors, QEMUIOVector *qiov)
3214{
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003215 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
Kevin Wolfda1fa912011-07-14 17:27:13 +02003216
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003217 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3218}
3219
3220int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3221 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3222{
3223 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3224
3225 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3226 BDRV_REQ_COPY_ON_READ);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003227}
3228
Peter Lievenc31cb702013-10-24 12:06:58 +02003229/* If no limit is specified in the BlockLimits, use a default
 3230 * of 32768 512-byte sectors (16 MiB) per request.
3231 */
3232#define MAX_WRITE_ZEROES_DEFAULT 32768
3233
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003234static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003235 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003236{
3237 BlockDriver *drv = bs->drv;
3238 QEMUIOVector qiov;
Peter Lievenc31cb702013-10-24 12:06:58 +02003239 struct iovec iov = {0};
3240 int ret = 0;
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003241
Peter Lievenc31cb702013-10-24 12:06:58 +02003242 int max_write_zeroes = bs->bl.max_write_zeroes ?
3243 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
Kevin Wolf621f0582012-03-20 15:12:58 +01003244
Peter Lievenc31cb702013-10-24 12:06:58 +02003245 while (nb_sectors > 0 && !ret) {
3246 int num = nb_sectors;
3247
Paolo Bonzinib8d71c02013-11-22 13:39:48 +01003248 /* Align request. Block drivers can expect the "bulk" of the request
3249 * to be aligned.
3250 */
3251 if (bs->bl.write_zeroes_alignment
3252 && num > bs->bl.write_zeroes_alignment) {
3253 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3254 /* Make a small request up to the first aligned sector. */
Peter Lievenc31cb702013-10-24 12:06:58 +02003255 num = bs->bl.write_zeroes_alignment;
Paolo Bonzinib8d71c02013-11-22 13:39:48 +01003256 num -= sector_num % bs->bl.write_zeroes_alignment;
3257 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3258 /* Shorten the request to the last aligned sector. num cannot
3259 * underflow because num > bs->bl.write_zeroes_alignment.
3260 */
3261 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
Peter Lievenc31cb702013-10-24 12:06:58 +02003262 }
Kevin Wolf621f0582012-03-20 15:12:58 +01003263 }
Peter Lievenc31cb702013-10-24 12:06:58 +02003264
3265 /* limit request size */
3266 if (num > max_write_zeroes) {
3267 num = max_write_zeroes;
3268 }
3269
3270 ret = -ENOTSUP;
3271 /* First try the efficient write zeroes operation */
3272 if (drv->bdrv_co_write_zeroes) {
3273 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3274 }
3275
3276 if (ret == -ENOTSUP) {
3277 /* Fall back to bounce buffer if write zeroes is unsupported */
3278 iov.iov_len = num * BDRV_SECTOR_SIZE;
3279 if (iov.iov_base == NULL) {
Paolo Bonzinib8d71c02013-11-22 13:39:48 +01003280 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3281 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
Peter Lievenc31cb702013-10-24 12:06:58 +02003282 }
3283 qemu_iovec_init_external(&qiov, &iov, 1);
3284
3285 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
Paolo Bonzinib8d71c02013-11-22 13:39:48 +01003286
 3287 /* Keep the bounce buffer around if it is big enough for all
 3288 * future requests.
3289 */
3290 if (num < max_write_zeroes) {
3291 qemu_vfree(iov.iov_base);
3292 iov.iov_base = NULL;
3293 }
Peter Lievenc31cb702013-10-24 12:06:58 +02003294 }
3295
3296 sector_num += num;
3297 nb_sectors -= num;
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003298 }
3299
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003300 qemu_vfree(iov.iov_base);
3301 return ret;
3302}
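
/*
 * Illustrative sketch (assumption): the head/tail splitting used above, shown
 * in isolation.  With sector_num = 3, nb_sectors = 100 and an alignment of 8,
 * successive calls yield chunks of 5, 88 and 7 sectors.
 */
static int example_write_zeroes_chunk(int64_t sector_num, int nb_sectors,
                                      int alignment)
{
    int num = nb_sectors;

    if (alignment && num > alignment) {
        if (sector_num % alignment != 0) {
            /* Small chunk up to the first aligned sector */
            num = alignment - sector_num % alignment;
        } else if ((sector_num + num) % alignment != 0) {
            /* Shorten the chunk so it ends on an aligned sector */
            num -= (sector_num + num) % alignment;
        }
    }
    return num;
}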
3303
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003304/*
Kevin Wolfb404f722013-12-03 14:02:23 +01003305 * Forwards an already correctly aligned write request to the BlockDriver.
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003306 */
Kevin Wolfb404f722013-12-03 14:02:23 +01003307static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
Kevin Wolf65afd212013-12-03 14:55:55 +01003308 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3309 QEMUIOVector *qiov, int flags)
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003310{
3311 BlockDriver *drv = bs->drv;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01003312 bool waited;
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01003313 int ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003314
Kevin Wolfb404f722013-12-03 14:02:23 +01003315 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3316 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003317
Kevin Wolfb404f722013-12-03 14:02:23 +01003318 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3319 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
Benoît Canetcc0681c2013-09-02 14:14:39 +02003320
Kevin Wolf28de2dc2014-01-14 11:41:35 +01003321 waited = wait_serialising_requests(req);
3322 assert(!waited || !req->serialising);
Kevin Wolfaf91f9a2014-02-07 15:35:56 +01003323 assert(req->overlap_offset <= offset);
3324 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
Kevin Wolf244eade2013-12-03 14:30:44 +01003325
Kevin Wolf65afd212013-12-03 14:55:55 +01003326 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
Stefan Hajnoczid616b222013-06-24 17:13:10 +02003327
Peter Lieven465bee12014-05-18 00:58:19 +02003328 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3329 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3330 qemu_iovec_is_zero(qiov)) {
3331 flags |= BDRV_REQ_ZERO_WRITE;
3332 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3333 flags |= BDRV_REQ_MAY_UNMAP;
3334 }
3335 }
3336
Stefan Hajnoczid616b222013-06-24 17:13:10 +02003337 if (ret < 0) {
3338 /* Do nothing, write notifier decided to fail this request */
3339 } else if (flags & BDRV_REQ_ZERO_WRITE) {
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003340 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003341 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003342 } else {
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003343 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003344 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3345 }
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003346 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01003347
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02003348 if (ret == 0 && !bs->enable_write_cache) {
3349 ret = bdrv_co_flush(bs);
3350 }
3351
Fam Zhenge4654d22013-11-13 18:29:43 +08003352 bdrv_set_dirty(bs, sector_num, nb_sectors);
Kevin Wolfda1fa912011-07-14 17:27:13 +02003353
3354 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
3355 bs->wr_highest_sector = sector_num + nb_sectors - 1;
3356 }
Paolo Bonzinidf2a6f22013-09-04 19:00:21 +02003357 if (bs->growable && ret >= 0) {
3358 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3359 }
Kevin Wolfda1fa912011-07-14 17:27:13 +02003360
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01003361 return ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02003362}
3363
Kevin Wolfb404f722013-12-03 14:02:23 +01003364/*
3365 * Handle a write request in coroutine context
3366 */
Kevin Wolf66015532013-12-03 14:40:18 +01003367static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3368 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
Kevin Wolfb404f722013-12-03 14:02:23 +01003369 BdrvRequestFlags flags)
3370{
Kevin Wolf65afd212013-12-03 14:55:55 +01003371 BdrvTrackedRequest req;
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003372 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3373 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3374 uint8_t *head_buf = NULL;
3375 uint8_t *tail_buf = NULL;
3376 QEMUIOVector local_qiov;
3377 bool use_local_qiov = false;
Kevin Wolfb404f722013-12-03 14:02:23 +01003378 int ret;
3379
3380 if (!bs->drv) {
3381 return -ENOMEDIUM;
3382 }
3383 if (bs->read_only) {
3384 return -EACCES;
3385 }
Kevin Wolf66015532013-12-03 14:40:18 +01003386 if (bdrv_check_byte_request(bs, offset, bytes)) {
Kevin Wolfb404f722013-12-03 14:02:23 +01003387 return -EIO;
3388 }
3389
Kevin Wolfb404f722013-12-03 14:02:23 +01003390 /* throttling disk I/O */
3391 if (bs->io_limits_enabled) {
Kevin Wolfd5103582014-01-16 13:29:10 +01003392 bdrv_io_limits_intercept(bs, bytes, true);
Kevin Wolfb404f722013-12-03 14:02:23 +01003393 }
3394
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003395 /*
3396 * Align write if necessary by performing a read-modify-write cycle.
3397 * Pad qiov with the read parts and be sure to have a tracked request not
3398 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3399 */
Kevin Wolf65afd212013-12-03 14:55:55 +01003400 tracked_request_begin(&req, bs, offset, bytes, true);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003401
3402 if (offset & (align - 1)) {
3403 QEMUIOVector head_qiov;
3404 struct iovec head_iov;
3405
3406 mark_request_serialising(&req, align);
3407 wait_serialising_requests(&req);
3408
3409 head_buf = qemu_blockalign(bs, align);
3410 head_iov = (struct iovec) {
3411 .iov_base = head_buf,
3412 .iov_len = align,
3413 };
3414 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3415
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003416 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003417 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3418 align, &head_qiov, 0);
3419 if (ret < 0) {
3420 goto fail;
3421 }
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003422 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003423
3424 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3425 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3426 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3427 use_local_qiov = true;
3428
3429 bytes += offset & (align - 1);
3430 offset = offset & ~(align - 1);
3431 }
3432
3433 if ((offset + bytes) & (align - 1)) {
3434 QEMUIOVector tail_qiov;
3435 struct iovec tail_iov;
3436 size_t tail_bytes;
Kevin Wolf28de2dc2014-01-14 11:41:35 +01003437 bool waited;
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003438
3439 mark_request_serialising(&req, align);
Kevin Wolf28de2dc2014-01-14 11:41:35 +01003440 waited = wait_serialising_requests(&req);
3441 assert(!waited || !use_local_qiov);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003442
3443 tail_buf = qemu_blockalign(bs, align);
3444 tail_iov = (struct iovec) {
3445 .iov_base = tail_buf,
3446 .iov_len = align,
3447 };
3448 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3449
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003450 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003451 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3452 align, &tail_qiov, 0);
3453 if (ret < 0) {
3454 goto fail;
3455 }
Kevin Wolf9e1cb962014-01-14 15:37:03 +01003456 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003457
3458 if (!use_local_qiov) {
3459 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3460 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3461 use_local_qiov = true;
3462 }
3463
3464 tail_bytes = (offset + bytes) & (align - 1);
3465 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3466
3467 bytes = ROUND_UP(bytes, align);
3468 }
3469
3470 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3471 use_local_qiov ? &local_qiov : qiov,
3472 flags);
3473
3474fail:
Kevin Wolf65afd212013-12-03 14:55:55 +01003475 tracked_request_end(&req);
Kevin Wolfb404f722013-12-03 14:02:23 +01003476
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003477 if (use_local_qiov) {
3478 qemu_iovec_destroy(&local_qiov);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003479 }
Kevin Wolf99c4a852014-02-07 15:29:00 +01003480 qemu_vfree(head_buf);
3481 qemu_vfree(tail_buf);
Kevin Wolf3b8242e2013-12-03 16:34:41 +01003482
Kevin Wolfb404f722013-12-03 14:02:23 +01003483 return ret;
3484}
3485
Kevin Wolf66015532013-12-03 14:40:18 +01003486static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3487 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3488 BdrvRequestFlags flags)
3489{
3490 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3491 return -EINVAL;
3492 }
3493
3494 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3495 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3496}
3497
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003498int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3499 int nb_sectors, QEMUIOVector *qiov)
3500{
3501 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3502
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003503 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3504}
3505
3506int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003507 int64_t sector_num, int nb_sectors,
3508 BdrvRequestFlags flags)
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003509{
Paolo Bonzini94d6ff22013-11-22 13:39:45 +01003510 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003511
Peter Lievend32f35c2013-10-24 12:06:52 +02003512 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3513 flags &= ~BDRV_REQ_MAY_UNMAP;
3514 }
3515
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003516 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
Peter Lievenaa7bfbf2013-10-24 12:06:51 +02003517 BDRV_REQ_ZERO_WRITE | flags);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01003518}
3519
bellard83f64092006-08-01 16:21:11 +00003520/**
bellard83f64092006-08-01 16:21:11 +00003521 * Truncate file to 'offset' bytes (needed only for file protocols)
3522 */
3523int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3524{
3525 BlockDriver *drv = bs->drv;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003526 int ret;
bellard83f64092006-08-01 16:21:11 +00003527 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00003528 return -ENOMEDIUM;
bellard83f64092006-08-01 16:21:11 +00003529 if (!drv->bdrv_truncate)
3530 return -ENOTSUP;
Naphtali Sprei59f26892009-10-26 16:25:16 +02003531 if (bs->read_only)
3532 return -EACCES;
Fam Zheng3718d8a2014-05-23 21:29:43 +08003533 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
Marcelo Tosatti85916752011-01-26 12:12:35 -02003534 return -EBUSY;
Fam Zheng3718d8a2014-05-23 21:29:43 +08003535 }
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003536 ret = drv->bdrv_truncate(bs, offset);
3537 if (ret == 0) {
3538 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
Markus Armbruster145feb12011-08-03 15:07:42 +02003539 bdrv_dev_resize_cb(bs);
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003540 }
3541 return ret;
bellard83f64092006-08-01 16:21:11 +00003542}
3543
3544/**
Fam Zheng4a1d5e12011-07-12 19:56:39 +08003545 * Length of an allocated file in bytes. Sparse files are counted by actual
3546 * allocated space. Return < 0 if error or unknown.
3547 */
3548int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3549{
3550 BlockDriver *drv = bs->drv;
3551 if (!drv) {
3552 return -ENOMEDIUM;
3553 }
3554 if (drv->bdrv_get_allocated_file_size) {
3555 return drv->bdrv_get_allocated_file_size(bs);
3556 }
3557 if (bs->file) {
3558 return bdrv_get_allocated_file_size(bs->file);
3559 }
3560 return -ENOTSUP;
3561}
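
/*
 * Illustrative sketch (assumption): compare the virtual size with the space
 * actually allocated in the host file, e.g. to report how sparse an image is.
 */
static void example_report_usage(BlockDriverState *bs)
{
    int64_t virtual_size = bdrv_getlength(bs);
    int64_t allocated = bdrv_get_allocated_file_size(bs);

    if (virtual_size < 0 || allocated < 0) {
        return; /* error or unknown */
    }
    fprintf(stderr, "%" PRId64 " of %" PRId64 " bytes allocated\n",
            allocated, virtual_size);
}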
3562
3563/**
bellard83f64092006-08-01 16:21:11 +00003564 * Length of a file in bytes. Return < 0 if error or unknown.
3565 */
3566int64_t bdrv_getlength(BlockDriverState *bs)
3567{
3568 BlockDriver *drv = bs->drv;
3569 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00003570 return -ENOMEDIUM;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01003571
Kevin Wolfb94a2612013-10-29 12:18:58 +01003572 if (drv->has_variable_length) {
3573 int ret = refresh_total_sectors(bs, bs->total_sectors);
3574 if (ret < 0) {
3575 return ret;
Stefan Hajnoczi46a4e4e2011-03-29 20:04:41 +01003576 }
bellard83f64092006-08-01 16:21:11 +00003577 }
Stefan Hajnoczi46a4e4e2011-03-29 20:04:41 +01003578 return bs->total_sectors * BDRV_SECTOR_SIZE;
bellardfc01f7e2003-06-30 10:03:06 +00003579}
3580
bellard19cb3732006-08-19 11:45:59 +00003581/* return 0 as number of sectors if no device present or error */
ths96b8f132007-12-17 01:35:20 +00003582void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
bellardfc01f7e2003-06-30 10:03:06 +00003583{
bellard19cb3732006-08-19 11:45:59 +00003584 int64_t length;
3585 length = bdrv_getlength(bs);
3586 if (length < 0)
3587 length = 0;
3588 else
Jan Kiszka6ea44302009-11-30 18:21:19 +01003589 length = length >> BDRV_SECTOR_BITS;
bellard19cb3732006-08-19 11:45:59 +00003590 *nb_sectors_ptr = length;
bellardfc01f7e2003-06-30 10:03:06 +00003591}
bellardcf989512004-02-16 21:56:36 +00003592
Paolo Bonziniff06f5f2012-09-28 17:22:54 +02003593void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3594 BlockdevOnError on_write_error)
Markus Armbrusterabd7f682010-06-02 18:55:17 +02003595{
3596 bs->on_read_error = on_read_error;
3597 bs->on_write_error = on_write_error;
3598}
3599
Paolo Bonzini1ceee0d2012-09-28 17:22:56 +02003600BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
Markus Armbrusterabd7f682010-06-02 18:55:17 +02003601{
3602 return is_read ? bs->on_read_error : bs->on_write_error;
3603}
3604
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003605BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3606{
3607 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3608
3609 switch (on_err) {
3610 case BLOCKDEV_ON_ERROR_ENOSPC:
3611 return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
3612 case BLOCKDEV_ON_ERROR_STOP:
3613 return BDRV_ACTION_STOP;
3614 case BLOCKDEV_ON_ERROR_REPORT:
3615 return BDRV_ACTION_REPORT;
3616 case BLOCKDEV_ON_ERROR_IGNORE:
3617 return BDRV_ACTION_IGNORE;
3618 default:
3619 abort();
3620 }
3621}
3622
3623/* This is done by device models because, while the block layer knows
3624 * about the error, it does not know whether an operation comes from
3625 * the device or the block layer (from a job, for example).
3626 */
3627void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3628 bool is_read, int error)
3629{
3630 assert(error >= 0);
Paolo Bonzini32c81a42012-09-28 17:22:58 +02003631 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
Paolo Bonzini3e1caa52012-09-28 17:22:57 +02003632 if (action == BDRV_ACTION_STOP) {
3633 vm_stop(RUN_STATE_IO_ERROR);
3634 bdrv_iostatus_set_err(bs, error);
3635 }
3636}
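
/*
 * Illustrative sketch (assumption): how a device model might combine
 * bdrv_get_error_action() and bdrv_error_action() when a write request fails
 * with errno 'error' (a positive value, as asserted above).
 */
static void example_handle_write_error(BlockDriverState *bs, int error)
{
    BlockErrorAction action = bdrv_get_error_action(bs, false, error);

    /* Complete, retry or fail the guest request depending on 'action',
     * then let the block layer emit the QMP event and stop the VM if
     * that is the configured policy. */
    bdrv_error_action(bs, action, false, error);
}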
3637
bellardb3380822004-03-14 21:38:54 +00003638int bdrv_is_read_only(BlockDriverState *bs)
3639{
3640 return bs->read_only;
3641}
3642
ths985a03b2007-12-24 16:10:43 +00003643int bdrv_is_sg(BlockDriverState *bs)
3644{
3645 return bs->sg;
3646}
3647
Christoph Hellwige900a7b2009-09-04 19:01:15 +02003648int bdrv_enable_write_cache(BlockDriverState *bs)
3649{
3650 return bs->enable_write_cache;
3651}
3652
Paolo Bonzini425b0142012-06-06 00:04:52 +02003653void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3654{
3655 bs->enable_write_cache = wce;
Jeff Cody55b110f2012-09-20 15:13:18 -04003656
3657 /* so a reopen() will preserve wce */
3658 if (wce) {
3659 bs->open_flags |= BDRV_O_CACHE_WB;
3660 } else {
3661 bs->open_flags &= ~BDRV_O_CACHE_WB;
3662 }
Paolo Bonzini425b0142012-06-06 00:04:52 +02003663}
3664
bellardea2384d2004-08-01 21:59:26 +00003665int bdrv_is_encrypted(BlockDriverState *bs)
3666{
3667 if (bs->backing_hd && bs->backing_hd->encrypted)
3668 return 1;
3669 return bs->encrypted;
3670}
3671
aliguoric0f4ce72009-03-05 23:01:01 +00003672int bdrv_key_required(BlockDriverState *bs)
3673{
3674 BlockDriverState *backing_hd = bs->backing_hd;
3675
3676 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3677 return 1;
3678 return (bs->encrypted && !bs->valid_key);
3679}
3680
bellardea2384d2004-08-01 21:59:26 +00003681int bdrv_set_key(BlockDriverState *bs, const char *key)
3682{
3683 int ret;
3684 if (bs->backing_hd && bs->backing_hd->encrypted) {
3685 ret = bdrv_set_key(bs->backing_hd, key);
3686 if (ret < 0)
3687 return ret;
3688 if (!bs->encrypted)
3689 return 0;
3690 }
Shahar Havivifd04a2a2010-03-06 00:26:13 +02003691 if (!bs->encrypted) {
3692 return -EINVAL;
3693 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3694 return -ENOMEDIUM;
3695 }
aliguoric0f4ce72009-03-05 23:01:01 +00003696 ret = bs->drv->bdrv_set_key(bs, key);
aliguoribb5fc202009-03-05 23:01:15 +00003697 if (ret < 0) {
3698 bs->valid_key = 0;
3699 } else if (!bs->valid_key) {
3700 bs->valid_key = 1;
3701 /* call the change callback now, we skipped it on open */
Markus Armbruster7d4b4ba2011-09-06 18:58:59 +02003702 bdrv_dev_change_media_cb(bs, true);
aliguoribb5fc202009-03-05 23:01:15 +00003703 }
aliguoric0f4ce72009-03-05 23:01:01 +00003704 return ret;
bellardea2384d2004-08-01 21:59:26 +00003705}
3706
Markus Armbrusterf8d6bba2012-06-13 10:11:48 +02003707const char *bdrv_get_format_name(BlockDriverState *bs)
bellardea2384d2004-08-01 21:59:26 +00003708{
Markus Armbrusterf8d6bba2012-06-13 10:11:48 +02003709 return bs->drv ? bs->drv->format_name : NULL;
bellardea2384d2004-08-01 21:59:26 +00003710}
3711
ths5fafdf22007-09-16 21:08:06 +00003712void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
bellardea2384d2004-08-01 21:59:26 +00003713 void *opaque)
3714{
3715 BlockDriver *drv;
Jeff Codye855e4f2014-04-28 18:29:54 -04003716 int count = 0;
3717 const char **formats = NULL;
bellardea2384d2004-08-01 21:59:26 +00003718
Stefan Hajnoczi8a22f022010-04-13 10:29:33 +01003719 QLIST_FOREACH(drv, &bdrv_drivers, list) {
Jeff Codye855e4f2014-04-28 18:29:54 -04003720 if (drv->format_name) {
3721 bool found = false;
3722 int i = count;
3723 while (formats && i && !found) {
3724 found = !strcmp(formats[--i], drv->format_name);
3725 }
3726
3727 if (!found) {
3728 formats = g_realloc(formats, (count + 1) * sizeof(char *));
3729 formats[count++] = drv->format_name;
3730 it(opaque, drv->format_name);
3731 }
3732 }
bellardea2384d2004-08-01 21:59:26 +00003733 }
Jeff Codye855e4f2014-04-28 18:29:54 -04003734 g_free(formats);
bellardea2384d2004-08-01 21:59:26 +00003735}
3736
Benoît Canetdc364f42014-01-23 21:31:32 +01003737/* This function finds the block backend with the given device name */
bellardb3380822004-03-14 21:38:54 +00003738BlockDriverState *bdrv_find(const char *name)
3739{
3740 BlockDriverState *bs;
3741
Benoît Canetdc364f42014-01-23 21:31:32 +01003742 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01003743 if (!strcmp(name, bs->device_name)) {
bellardb3380822004-03-14 21:38:54 +00003744 return bs;
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01003745 }
bellardb3380822004-03-14 21:38:54 +00003746 }
3747 return NULL;
3748}
3749
Benoît Canetdc364f42014-01-23 21:31:32 +01003750/* This function finds a node in the BDS graph by its node name */
3751BlockDriverState *bdrv_find_node(const char *node_name)
3752{
3753 BlockDriverState *bs;
3754
3755 assert(node_name);
3756
3757 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3758 if (!strcmp(node_name, bs->node_name)) {
3759 return bs;
3760 }
3761 }
3762 return NULL;
3763}
3764
Benoît Canetc13163f2014-01-23 21:31:34 +01003765/* Put this QMP function here so it can access the static graph_bdrv_states. */
3766BlockDeviceInfoList *bdrv_named_nodes_list(void)
3767{
3768 BlockDeviceInfoList *list, *entry;
3769 BlockDriverState *bs;
3770
3771 list = NULL;
3772 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3773 entry = g_malloc0(sizeof(*entry));
3774 entry->value = bdrv_block_device_info(bs);
3775 entry->next = list;
3776 list = entry;
3777 }
3778
3779 return list;
3780}
3781
Benoît Canet12d3ba82014-01-23 21:31:35 +01003782BlockDriverState *bdrv_lookup_bs(const char *device,
3783 const char *node_name,
3784 Error **errp)
3785{
3786 BlockDriverState *bs = NULL;
3787
Benoît Canet12d3ba82014-01-23 21:31:35 +01003788 if (device) {
3789 bs = bdrv_find(device);
3790
Benoît Canetdd67fa52014-02-12 17:15:06 +01003791 if (bs) {
3792 return bs;
Benoît Canet12d3ba82014-01-23 21:31:35 +01003793 }
Benoît Canet12d3ba82014-01-23 21:31:35 +01003794 }
3795
Benoît Canetdd67fa52014-02-12 17:15:06 +01003796 if (node_name) {
3797 bs = bdrv_find_node(node_name);
Benoît Canet12d3ba82014-01-23 21:31:35 +01003798
Benoît Canetdd67fa52014-02-12 17:15:06 +01003799 if (bs) {
3800 return bs;
3801 }
Benoît Canet12d3ba82014-01-23 21:31:35 +01003802 }
3803
Benoît Canetdd67fa52014-02-12 17:15:06 +01003804 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3805 device ? device : "",
3806 node_name ? node_name : "");
3807 return NULL;
Benoît Canet12d3ba82014-01-23 21:31:35 +01003808}
3809
Markus Armbruster2f399b02010-06-02 18:55:20 +02003810BlockDriverState *bdrv_next(BlockDriverState *bs)
3811{
3812 if (!bs) {
3813 return QTAILQ_FIRST(&bdrv_states);
3814 }
Benoît Canetdc364f42014-01-23 21:31:32 +01003815 return QTAILQ_NEXT(bs, device_list);
Markus Armbruster2f399b02010-06-02 18:55:20 +02003816}
3817
aliguori51de9762009-03-05 23:00:43 +00003818void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
bellard81d09122004-07-14 17:21:37 +00003819{
3820 BlockDriverState *bs;
3821
Benoît Canetdc364f42014-01-23 21:31:32 +01003822 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
aliguori51de9762009-03-05 23:00:43 +00003823 it(opaque, bs);
bellard81d09122004-07-14 17:21:37 +00003824 }
3825}
3826
bellardea2384d2004-08-01 21:59:26 +00003827const char *bdrv_get_device_name(BlockDriverState *bs)
3828{
3829 return bs->device_name;
3830}
3831
Markus Armbrusterc8433282012-06-05 16:49:24 +02003832int bdrv_get_flags(BlockDriverState *bs)
3833{
3834 return bs->open_flags;
3835}
3836
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003837int bdrv_flush_all(void)
aliguoric6ca28d2008-10-06 13:55:43 +00003838{
3839 BlockDriverState *bs;
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003840 int result = 0;
aliguoric6ca28d2008-10-06 13:55:43 +00003841
Benoît Canetdc364f42014-01-23 21:31:32 +01003842 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02003843 AioContext *aio_context = bdrv_get_aio_context(bs);
3844 int ret;
3845
3846 aio_context_acquire(aio_context);
3847 ret = bdrv_flush(bs);
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003848 if (ret < 0 && !result) {
3849 result = ret;
3850 }
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02003851 aio_context_release(aio_context);
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01003852 }
Kevin Wolff0f0fdf2013-07-05 13:48:01 +02003853
3854 return result;
aliguoric6ca28d2008-10-06 13:55:43 +00003855}
3856
Peter Lieven3ac21622013-06-28 12:47:42 +02003857int bdrv_has_zero_init_1(BlockDriverState *bs)
3858{
3859 return 1;
3860}
3861
Kevin Wolff2feebb2010-04-14 17:30:35 +02003862int bdrv_has_zero_init(BlockDriverState *bs)
3863{
3864 assert(bs->drv);
3865
Paolo Bonzini11212d82013-09-04 19:00:27 +02003866 /* If BS is a copy on write image, it is initialized to
3867 the contents of the base image, which may not be zeroes. */
3868 if (bs->backing_hd) {
3869 return 0;
3870 }
Kevin Wolf336c1c12010-07-28 11:26:29 +02003871 if (bs->drv->bdrv_has_zero_init) {
3872 return bs->drv->bdrv_has_zero_init(bs);
Kevin Wolff2feebb2010-04-14 17:30:35 +02003873 }
3874
Peter Lieven3ac21622013-06-28 12:47:42 +02003875 /* safe default */
3876 return 0;
Kevin Wolff2feebb2010-04-14 17:30:35 +02003877}
3878
Peter Lieven4ce78692013-10-24 12:06:54 +02003879bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3880{
3881 BlockDriverInfo bdi;
3882
3883 if (bs->backing_hd) {
3884 return false;
3885 }
3886
3887 if (bdrv_get_info(bs, &bdi) == 0) {
3888 return bdi.unallocated_blocks_are_zero;
3889 }
3890
3891 return false;
3892}
3893
3894bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3895{
3896 BlockDriverInfo bdi;
3897
3898 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3899 return false;
3900 }
3901
3902 if (bdrv_get_info(bs, &bdi) == 0) {
3903 return bdi.can_write_zeroes_with_unmap;
3904 }
3905
3906 return false;
3907}
3908
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003909typedef struct BdrvCoGetBlockStatusData {
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00003910 BlockDriverState *bs;
Miroslav Rezaninab35b2bb2013-02-13 09:09:39 +01003911 BlockDriverState *base;
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00003912 int64_t sector_num;
3913 int nb_sectors;
3914 int *pnum;
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003915 int64_t ret;
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00003916 bool done;
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003917} BdrvCoGetBlockStatusData;
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00003918
thsf58c7b32008-06-05 21:53:49 +00003919/*
 3920 * Returns the allocation status of the specified sectors. Drivers
3921 * not implementing the functionality are assumed to not support backing files,
3922 * hence all their sectors are reported as allocated.
3923 *
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003924 * If 'sector_num' is beyond the end of the disk image the return value is 0
3925 * and 'pnum' is set to 0.
3926 *
thsf58c7b32008-06-05 21:53:49 +00003927 * 'pnum' is set to the number of sectors (including and immediately following
3928 * the specified sector) that are known to be in the same
3929 * allocated/unallocated state.
3930 *
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003931 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3932 * beyond the end of the disk image it will be clamped.
thsf58c7b32008-06-05 21:53:49 +00003933 */
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003934static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3935 int64_t sector_num,
3936 int nb_sectors, int *pnum)
thsf58c7b32008-06-05 21:53:49 +00003937{
Paolo Bonzini617ccb42013-09-04 19:00:23 +02003938 int64_t length;
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003939 int64_t n;
Paolo Bonzini5daa74a2013-09-04 19:00:38 +02003940 int64_t ret, ret2;
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003941
Paolo Bonzini617ccb42013-09-04 19:00:23 +02003942 length = bdrv_getlength(bs);
3943 if (length < 0) {
3944 return length;
3945 }
3946
3947 if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003948 *pnum = 0;
3949 return 0;
3950 }
3951
3952 n = bs->total_sectors - sector_num;
3953 if (n < nb_sectors) {
3954 nb_sectors = n;
3955 }
3956
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02003957 if (!bs->drv->bdrv_co_get_block_status) {
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00003958 *pnum = nb_sectors;
Kevin Wolfe88ae222014-05-06 15:25:36 +02003959 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
Paolo Bonzini918e92d2013-09-04 19:00:37 +02003960 if (bs->drv->protocol_name) {
3961 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3962 }
3963 return ret;
thsf58c7b32008-06-05 21:53:49 +00003964 }
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00003965
Paolo Bonzini415b5b02013-09-04 19:00:31 +02003966 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3967 if (ret < 0) {
Peter Lieven3e0a2332013-09-24 15:35:08 +02003968 *pnum = 0;
Paolo Bonzini415b5b02013-09-04 19:00:31 +02003969 return ret;
3970 }
3971
Peter Lieven92bc50a2013-10-08 14:43:14 +02003972 if (ret & BDRV_BLOCK_RAW) {
3973 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3974 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3975 *pnum, pnum);
3976 }
3977
Kevin Wolfe88ae222014-05-06 15:25:36 +02003978 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
3979 ret |= BDRV_BLOCK_ALLOCATED;
3980 }
3981
Peter Lievenc3d86882013-10-24 12:07:04 +02003982 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
3983 if (bdrv_unallocated_blocks_are_zero(bs)) {
Paolo Bonzinif0ad5712013-09-04 19:00:32 +02003984 ret |= BDRV_BLOCK_ZERO;
Peter Lieven1f9db222013-09-24 15:35:09 +02003985 } else if (bs->backing_hd) {
Paolo Bonzinif0ad5712013-09-04 19:00:32 +02003986 BlockDriverState *bs2 = bs->backing_hd;
3987 int64_t length2 = bdrv_getlength(bs2);
3988 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
3989 ret |= BDRV_BLOCK_ZERO;
3990 }
3991 }
Paolo Bonzini415b5b02013-09-04 19:00:31 +02003992 }
Paolo Bonzini5daa74a2013-09-04 19:00:38 +02003993
3994 if (bs->file &&
3995 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
3996 (ret & BDRV_BLOCK_OFFSET_VALID)) {
3997 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3998 *pnum, pnum);
3999 if (ret2 >= 0) {
4000 /* Ignore errors. This is just providing extra information, it
4001 * is useful but not necessary.
4002 */
4003 ret |= (ret2 & BDRV_BLOCK_ZERO);
4004 }
4005 }
4006
Paolo Bonzini415b5b02013-09-04 19:00:31 +02004007 return ret;
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004008}
4009
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004010/* Coroutine wrapper for bdrv_get_block_status() */
4011static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004012{
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004013 BdrvCoGetBlockStatusData *data = opaque;
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004014 BlockDriverState *bs = data->bs;
4015
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004016 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4017 data->pnum);
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004018 data->done = true;
4019}
4020
4021/*
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004022 * Synchronous wrapper around bdrv_co_get_block_status().
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004023 *
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004024 * See bdrv_co_get_block_status() for details.
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004025 */
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004026int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4027 int nb_sectors, int *pnum)
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00004028{
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00004029 Coroutine *co;
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004030 BdrvCoGetBlockStatusData data = {
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00004031 .bs = bs,
4032 .sector_num = sector_num,
4033 .nb_sectors = nb_sectors,
4034 .pnum = pnum,
4035 .done = false,
4036 };
4037
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004038 if (qemu_in_coroutine()) {
4039 /* Fast-path if already in coroutine context */
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004040 bdrv_get_block_status_co_entry(&data);
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004041 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004042 AioContext *aio_context = bdrv_get_aio_context(bs);
4043
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004044 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004045 qemu_coroutine_enter(co, &data);
4046 while (!data.done) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004047 aio_poll(aio_context, true);
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004048 }
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00004049 }
4050 return data.ret;
thsf58c7b32008-06-05 21:53:49 +00004051}
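/*
 * Illustrative sketch, not part of the original file: how a caller might walk
 * an image with bdrv_get_block_status(), advancing by *pnum each step and
 * interpreting the BDRV_BLOCK_* flags.  The helper name and the fprintf()
 * output are assumptions made for this example only.
 */
static __attribute__((unused)) void
example_walk_block_status(BlockDriverState *bs)
{
    int64_t length = bdrv_getlength(bs);
    int64_t total_sectors, sector_num = 0;

    if (length < 0) {
        return;                         /* cannot determine the image length */
    }
    total_sectors = length >> BDRV_SECTOR_BITS;

    while (sector_num < total_sectors) {
        int pnum;
        int64_t ret = bdrv_get_block_status(bs, sector_num,
                                            (int)MIN(total_sectors - sector_num,
                                                     INT_MAX), &pnum);
        if (ret < 0 || pnum == 0) {
            break;
        }
        fprintf(stderr, "sectors %" PRId64 "+%d:%s%s%s\n", sector_num, pnum,
                (ret & BDRV_BLOCK_DATA) ? " data" : "",
                (ret & BDRV_BLOCK_ZERO) ? " zero" : "",
                (ret & BDRV_BLOCK_OFFSET_VALID) ? " offset-valid" : "");
        sector_num += pnum;             /* 'pnum' covers the answered range */
    }
}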
4052
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004053int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4054 int nb_sectors, int *pnum)
4055{
Paolo Bonzini4333bb72013-09-04 19:00:29 +02004056 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4057 if (ret < 0) {
4058 return ret;
4059 }
Kevin Wolfe88ae222014-05-06 15:25:36 +02004060 return (ret & BDRV_BLOCK_ALLOCATED);
Paolo Bonzinib6b8a332013-09-04 19:00:28 +02004061}
4062
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02004063/*
4064 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4065 *
4066 * Return true if the given sector is allocated in any image between
4067 * BASE and TOP (inclusive). BASE can be NULL to check if the given
4068 * sector is allocated in any image of the chain. Return false otherwise.
4069 *
4070 * 'pnum' is set to the number of sectors (including and immediately following
4071 * the specified sector) that are known to be in the same
4072 * allocated/unallocated state.
4073 *
4074 */
Paolo Bonzini4f578632013-09-04 19:00:24 +02004075int bdrv_is_allocated_above(BlockDriverState *top,
4076 BlockDriverState *base,
4077 int64_t sector_num,
4078 int nb_sectors, int *pnum)
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02004079{
4080 BlockDriverState *intermediate;
4081 int ret, n = nb_sectors;
4082
4083 intermediate = top;
4084 while (intermediate && intermediate != base) {
4085 int pnum_inter;
Paolo Bonzinibdad13b2013-09-04 19:00:22 +02004086 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4087 &pnum_inter);
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02004088 if (ret < 0) {
4089 return ret;
4090 } else if (ret) {
4091 *pnum = pnum_inter;
4092 return 1;
4093 }
4094
4095 /*
4096 * [sector_num, nb_sectors] is unallocated on top but intermediate
4097 * might have
4098 *
4099 * [sector_num+x, nb_sectors] allocated.
4100 */
Vishvananda Ishaya63ba17d2013-01-24 10:02:08 -08004101 if (n > pnum_inter &&
4102 (intermediate == top ||
4103 sector_num + pnum_inter < intermediate->total_sectors)) {
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02004104 n = pnum_inter;
4105 }
4106
4107 intermediate = intermediate->backing_hd;
4108 }
4109
4110 *pnum = n;
4111 return 0;
4112}
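/*
 * Illustrative sketch, not part of the original file: using
 * bdrv_is_allocated_above() to decide whether a sector range is backed by any
 * image between 'top' and 'base', as a commit/stream-style loop might do.
 * The helper name is an assumption made for this example only.
 */
static __attribute__((unused)) int
example_range_is_backed(BlockDriverState *top, BlockDriverState *base,
                        int64_t sector_num, int nb_sectors)
{
    int pnum;
    int ret = bdrv_is_allocated_above(top, base, sector_num, nb_sectors, &pnum);

    if (ret < 0) {
        return ret;                     /* error from the block layer */
    }
    /* Non-zero means some image in [base, top] allocates these sectors;
     * 'pnum' says for how many sectors starting at sector_num this holds. */
    return ret;
}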
4113
aliguori045df332009-03-05 23:00:48 +00004114const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4115{
4116 if (bs->backing_hd && bs->backing_hd->encrypted)
4117 return bs->backing_file;
4118 else if (bs->encrypted)
4119 return bs->filename;
4120 else
4121 return NULL;
4122}
4123
ths5fafdf22007-09-16 21:08:06 +00004124void bdrv_get_backing_filename(BlockDriverState *bs,
bellard83f64092006-08-01 16:21:11 +00004125 char *filename, int filename_size)
bellardea2384d2004-08-01 21:59:26 +00004126{
Kevin Wolf3574c602011-10-26 11:02:11 +02004127 pstrcpy(filename, filename_size, bs->backing_file);
bellardea2384d2004-08-01 21:59:26 +00004128}
4129
ths5fafdf22007-09-16 21:08:06 +00004130int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
bellardfaea38e2006-08-05 21:31:00 +00004131 const uint8_t *buf, int nb_sectors)
4132{
4133 BlockDriver *drv = bs->drv;
4134 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00004135 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00004136 if (!drv->bdrv_write_compressed)
4137 return -ENOTSUP;
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02004138 if (bdrv_check_request(bs, sector_num, nb_sectors))
4139 return -EIO;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004140
Fam Zhenge4654d22013-11-13 18:29:43 +08004141 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004142
bellardfaea38e2006-08-05 21:31:00 +00004143 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4144}
ths3b46e622007-09-17 08:09:54 +00004145
bellardfaea38e2006-08-05 21:31:00 +00004146int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4147{
4148 BlockDriver *drv = bs->drv;
4149 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00004150 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00004151 if (!drv->bdrv_get_info)
4152 return -ENOTSUP;
4153 memset(bdi, 0, sizeof(*bdi));
4154 return drv->bdrv_get_info(bs, bdi);
4155}
4156
Max Reitzeae041f2013-10-09 10:46:16 +02004157ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4158{
4159 BlockDriver *drv = bs->drv;
4160 if (drv && drv->bdrv_get_specific_info) {
4161 return drv->bdrv_get_specific_info(bs);
4162 }
4163 return NULL;
4164}
4165
Christoph Hellwig45566e92009-07-10 23:11:57 +02004166int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4167 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00004168{
Kevin Wolfcf8074b2013-04-05 21:27:53 +02004169 QEMUIOVector qiov;
4170 struct iovec iov = {
4171 .iov_base = (void *) buf,
4172 .iov_len = size,
4173 };
4174
4175 qemu_iovec_init_external(&qiov, &iov, 1);
4176 return bdrv_writev_vmstate(bs, &qiov, pos);
4177}
4178
4179int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4180{
aliguori178e08a2009-04-05 19:10:55 +00004181 BlockDriver *drv = bs->drv;
Kevin Wolfcf8074b2013-04-05 21:27:53 +02004182
4183 if (!drv) {
aliguori178e08a2009-04-05 19:10:55 +00004184 return -ENOMEDIUM;
Kevin Wolfcf8074b2013-04-05 21:27:53 +02004185 } else if (drv->bdrv_save_vmstate) {
4186 return drv->bdrv_save_vmstate(bs, qiov, pos);
4187 } else if (bs->file) {
4188 return bdrv_writev_vmstate(bs->file, qiov, pos);
4189 }
4190
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09004191 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00004192}
4193
Christoph Hellwig45566e92009-07-10 23:11:57 +02004194int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4195 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00004196{
4197 BlockDriver *drv = bs->drv;
4198 if (!drv)
4199 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09004200 if (drv->bdrv_load_vmstate)
4201 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4202 if (bs->file)
4203 return bdrv_load_vmstate(bs->file, buf, pos, size);
4204 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00004205}
4206
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004207void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4208{
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004209 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004210 return;
4211 }
4212
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004213 bs->drv->bdrv_debug_event(bs, event);
Kevin Wolf41c695c2012-12-06 14:32:58 +01004214}
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004215
Kevin Wolf41c695c2012-12-06 14:32:58 +01004216int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4217 const char *tag)
4218{
4219 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4220 bs = bs->file;
4221 }
4222
4223 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4224 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4225 }
4226
4227 return -ENOTSUP;
4228}
4229
Fam Zheng4cc70e92013-11-20 10:01:54 +08004230int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4231{
4232 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4233 bs = bs->file;
4234 }
4235
4236 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4237 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4238 }
4239
4240 return -ENOTSUP;
4241}
4242
Kevin Wolf41c695c2012-12-06 14:32:58 +01004243int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4244{
Max Reitz938789e2014-03-10 23:44:08 +01004245 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
Kevin Wolf41c695c2012-12-06 14:32:58 +01004246 bs = bs->file;
4247 }
4248
4249 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4250 return bs->drv->bdrv_debug_resume(bs, tag);
4251 }
4252
4253 return -ENOTSUP;
4254}
4255
4256bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4257{
4258 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4259 bs = bs->file;
4260 }
4261
4262 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4263 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4264 }
4265
4266 return false;
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01004267}
4268
Blue Swirl199630b2010-07-25 20:49:34 +00004269int bdrv_is_snapshot(BlockDriverState *bs)
4270{
4271 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4272}
4273
Jeff Codyb1b1d782012-10-16 15:49:09 -04004274/* backing_file can be relative, absolute, or a protocol. If it is
4275 * relative, it must be relative to the chain. So, passing in bs->filename
4276 * from a BDS as backing_file should not be done, as that may be relative to
4277 * the CWD rather than the chain. */
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004278BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4279 const char *backing_file)
4280{
Jeff Codyb1b1d782012-10-16 15:49:09 -04004281 char *filename_full = NULL;
4282 char *backing_file_full = NULL;
4283 char *filename_tmp = NULL;
4284 int is_protocol = 0;
4285 BlockDriverState *curr_bs = NULL;
4286 BlockDriverState *retval = NULL;
4287
4288 if (!bs || !bs->drv || !backing_file) {
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004289 return NULL;
4290 }
4291
Jeff Codyb1b1d782012-10-16 15:49:09 -04004292 filename_full = g_malloc(PATH_MAX);
4293 backing_file_full = g_malloc(PATH_MAX);
4294 filename_tmp = g_malloc(PATH_MAX);
4295
4296 is_protocol = path_has_protocol(backing_file);
4297
4298 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4299
4300 /* If either of the filename paths is actually a protocol, then
4301 * compare unmodified paths; otherwise make paths relative */
4302 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4303 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4304 retval = curr_bs->backing_hd;
4305 break;
4306 }
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004307 } else {
Jeff Codyb1b1d782012-10-16 15:49:09 -04004308 /* If not an absolute filename path, make it relative to the current
4309 * image's filename path */
4310 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4311 backing_file);
4312
4313 /* We are going to compare absolute pathnames */
4314 if (!realpath(filename_tmp, filename_full)) {
4315 continue;
4316 }
4317
4318 /* We need to make sure the backing filename we are comparing against
4319 * is relative to the current image filename (or absolute) */
4320 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4321 curr_bs->backing_file);
4322
4323 if (!realpath(filename_tmp, backing_file_full)) {
4324 continue;
4325 }
4326
4327 if (strcmp(backing_file_full, filename_full) == 0) {
4328 retval = curr_bs->backing_hd;
4329 break;
4330 }
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004331 }
4332 }
4333
Jeff Codyb1b1d782012-10-16 15:49:09 -04004334 g_free(filename_full);
4335 g_free(backing_file_full);
4336 g_free(filename_tmp);
4337 return retval;
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00004338}
4339
Benoît Canetf198fd12012-08-02 10:22:47 +02004340int bdrv_get_backing_file_depth(BlockDriverState *bs)
4341{
4342 if (!bs->drv) {
4343 return 0;
4344 }
4345
4346 if (!bs->backing_hd) {
4347 return 0;
4348 }
4349
4350 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4351}
4352
Jeff Cody79fac562012-09-27 13:29:15 -04004353BlockDriverState *bdrv_find_base(BlockDriverState *bs)
4354{
4355 BlockDriverState *curr_bs = NULL;
4356
4357 if (!bs) {
4358 return NULL;
4359 }
4360
4361 curr_bs = bs;
4362
4363 while (curr_bs->backing_hd) {
4364 curr_bs = curr_bs->backing_hd;
4365 }
4366 return curr_bs;
4367}
4368
bellard83f64092006-08-01 16:21:11 +00004369/**************************************************************/
4370/* async I/Os */
4371
aliguori3b69e4b2009-01-22 16:59:24 +00004372BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
aliguorif141eaf2009-04-07 18:43:24 +00004373 QEMUIOVector *qiov, int nb_sectors,
aliguori3b69e4b2009-01-22 16:59:24 +00004374 BlockDriverCompletionFunc *cb, void *opaque)
4375{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01004376 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4377
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004378 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004379 cb, opaque, false);
bellard83f64092006-08-01 16:21:11 +00004380}
4381
aliguorif141eaf2009-04-07 18:43:24 +00004382BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4383 QEMUIOVector *qiov, int nb_sectors,
4384 BlockDriverCompletionFunc *cb, void *opaque)
bellard83f64092006-08-01 16:21:11 +00004385{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01004386 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4387
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004388 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004389 cb, opaque, true);
bellard83f64092006-08-01 16:21:11 +00004390}
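/*
 * Illustrative sketch, not part of the original file: issuing an asynchronous
 * read with bdrv_aio_readv().  The completion callback receives the opaque
 * pointer and the request's return value.  The helper and callback names, and
 * the caller-provided iov/qiov storage, are assumptions for this example only.
 */
static __attribute__((unused)) void example_read_done(void *opaque, int ret)
{
    bool *done = opaque;

    if (ret < 0) {
        fprintf(stderr, "aio read failed: %d\n", ret);
    }
    *done = true;                       /* signal the submitter */
}

static __attribute__((unused)) BlockDriverAIOCB *
example_submit_read(BlockDriverState *bs, uint8_t *buf, int64_t sector_num,
                    int nb_sectors, QEMUIOVector *qiov, struct iovec *iov,
                    bool *done)
{
    /* Wrap the caller's buffer in a single-element I/O vector */
    iov->iov_base = buf;
    iov->iov_len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(qiov, iov, 1);

    *done = false;
    return bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                          example_read_done, done);
}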
4391
Paolo Bonzinid5ef94d2013-11-22 13:39:46 +01004392BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
4393 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
4394 BlockDriverCompletionFunc *cb, void *opaque)
4395{
4396 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4397
4398 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4399 BDRV_REQ_ZERO_WRITE | flags,
4400 cb, opaque, true);
4401}
4402
Kevin Wolf40b4f532009-09-09 17:53:37 +02004403
4404typedef struct MultiwriteCB {
4405 int error;
4406 int num_requests;
4407 int num_callbacks;
4408 struct {
4409 BlockDriverCompletionFunc *cb;
4410 void *opaque;
4411 QEMUIOVector *free_qiov;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004412 } callbacks[];
4413} MultiwriteCB;
4414
4415static void multiwrite_user_cb(MultiwriteCB *mcb)
4416{
4417 int i;
4418
4419 for (i = 0; i < mcb->num_callbacks; i++) {
4420 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
Stefan Hajnoczi1e1ea482010-04-21 20:35:45 +01004421 if (mcb->callbacks[i].free_qiov) {
4422 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4423 }
Anthony Liguori7267c092011-08-20 22:09:37 -05004424 g_free(mcb->callbacks[i].free_qiov);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004425 }
4426}
4427
4428static void multiwrite_cb(void *opaque, int ret)
4429{
4430 MultiwriteCB *mcb = opaque;
4431
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01004432 trace_multiwrite_cb(mcb, ret);
4433
Kevin Wolfcb6d3ca2010-04-01 22:48:44 +02004434 if (ret < 0 && !mcb->error) {
Kevin Wolf40b4f532009-09-09 17:53:37 +02004435 mcb->error = ret;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004436 }
4437
4438 mcb->num_requests--;
4439 if (mcb->num_requests == 0) {
Kevin Wolfde189a12010-07-01 16:08:51 +02004440 multiwrite_user_cb(mcb);
Anthony Liguori7267c092011-08-20 22:09:37 -05004441 g_free(mcb);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004442 }
4443}
4444
4445static int multiwrite_req_compare(const void *a, const void *b)
4446{
Christoph Hellwig77be4362010-05-19 20:53:10 +02004447 const BlockRequest *req1 = a, *req2 = b;
4448
4449 /*
4450 * Note that we can't simply subtract req2->sector from req1->sector
4451 * here as that could overflow the return value.
4452 */
4453 if (req1->sector > req2->sector) {
4454 return 1;
4455 } else if (req1->sector < req2->sector) {
4456 return -1;
4457 } else {
4458 return 0;
4459 }
Kevin Wolf40b4f532009-09-09 17:53:37 +02004460}
4461
4462/*
4463 * Takes a bunch of requests and tries to merge them. Returns the number of
4464 * requests that remain after merging.
4465 */
4466static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4467 int num_reqs, MultiwriteCB *mcb)
4468{
4469 int i, outidx;
4470
4471 // Sort requests by start sector
4472 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4473
4474 // Check if adjacent requests are exactly sequential or overlapping;
4475 // if so, combine them into a single request.
4476 outidx = 0;
4477 for (i = 1; i < num_reqs; i++) {
4478 int merge = 0;
4479 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4480
Paolo Bonzinib6a127a2012-02-21 16:43:52 +01004481 // Handle exactly sequential writes and overlapping writes.
Kevin Wolf40b4f532009-09-09 17:53:37 +02004482 if (reqs[i].sector <= oldreq_last) {
4483 merge = 1;
4484 }
4485
Christoph Hellwige2a305f2010-01-26 14:49:08 +01004486 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4487 merge = 0;
4488 }
4489
Kevin Wolf40b4f532009-09-09 17:53:37 +02004490 if (merge) {
4491 size_t size;
Anthony Liguori7267c092011-08-20 22:09:37 -05004492 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
Kevin Wolf40b4f532009-09-09 17:53:37 +02004493 qemu_iovec_init(qiov,
4494 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4495
4496 // Add the first request to the merged one. If the requests are
4497 // overlapping, drop the last sectors of the first request.
4498 size = (reqs[i].sector - reqs[outidx].sector) << 9;
Michael Tokarev1b093c42012-03-12 21:28:06 +04004499 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004500
Paolo Bonzinib6a127a2012-02-21 16:43:52 +01004501 // We should not need to add any zeros between the two requests
4502 assert(reqs[i].sector <= oldreq_last);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004503
4504 // Add the second request
Michael Tokarev1b093c42012-03-12 21:28:06 +04004505 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004506
Kevin Wolfcbf1dff2010-05-21 11:09:42 +02004507 reqs[outidx].nb_sectors = qiov->size >> 9;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004508 reqs[outidx].qiov = qiov;
4509
4510 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4511 } else {
4512 outidx++;
4513 reqs[outidx].sector = reqs[i].sector;
4514 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4515 reqs[outidx].qiov = reqs[i].qiov;
4516 }
4517 }
4518
4519 return outidx + 1;
4520}
4521
4522/*
4523 * Submit multiple AIO write requests at once.
4524 *
4525 * On success, the function returns 0 and all requests in the reqs array have
4526 * been submitted. In the error case this function returns -1, and any of the
4527 * requests may or may not have been submitted yet. In particular, this means that the
4528 * callback will be called for some of the requests but not for others. The
4529 * caller must check the error field of the BlockRequest to wait for the right
4530 * callbacks (if error != 0, no callback will be called).
4531 *
4532 * The implementation may modify the contents of the reqs array, e.g. to merge
4533 * requests. However, the fields opaque and error are left unmodified as they
4534 * are used to signal failure for a single request to the caller.
4535 */
4536int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4537{
Kevin Wolf40b4f532009-09-09 17:53:37 +02004538 MultiwriteCB *mcb;
4539 int i;
4540
Ryan Harper301db7c2011-03-07 10:01:04 -06004541 /* don't submit writes if we don't have a medium */
4542 if (bs->drv == NULL) {
4543 for (i = 0; i < num_reqs; i++) {
4544 reqs[i].error = -ENOMEDIUM;
4545 }
4546 return -1;
4547 }
4548
Kevin Wolf40b4f532009-09-09 17:53:37 +02004549 if (num_reqs == 0) {
4550 return 0;
4551 }
4552
4553 // Create MultiwriteCB structure
Anthony Liguori7267c092011-08-20 22:09:37 -05004554 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
Kevin Wolf40b4f532009-09-09 17:53:37 +02004555 mcb->num_requests = 0;
4556 mcb->num_callbacks = num_reqs;
4557
4558 for (i = 0; i < num_reqs; i++) {
4559 mcb->callbacks[i].cb = reqs[i].cb;
4560 mcb->callbacks[i].opaque = reqs[i].opaque;
4561 }
4562
4563 // Check for mergable requests
4564 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4565
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01004566 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4567
Paolo Bonzinidf9309f2011-11-14 17:50:50 +01004568 /* Run the aio requests. */
4569 mcb->num_requests = num_reqs;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004570 for (i = 0; i < num_reqs; i++) {
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004571 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4572 reqs[i].nb_sectors, reqs[i].flags,
4573 multiwrite_cb, mcb,
4574 true);
Kevin Wolf40b4f532009-09-09 17:53:37 +02004575 }
4576
4577 return 0;
Kevin Wolf40b4f532009-09-09 17:53:37 +02004578}
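/*
 * Illustrative sketch, not part of the original file: batching two writes with
 * bdrv_aio_multiwrite().  Each BlockRequest carries its own qiov, callback and
 * opaque pointer; if submission fails, the per-request 'error' field tells the
 * caller which callbacks will still run.  The helper/callback names and the
 * sector offsets are assumptions for this example only.
 */
static __attribute__((unused)) void example_multiwrite_done(void *opaque, int ret)
{
    int *remaining = opaque;

    if (ret < 0) {
        fprintf(stderr, "multiwrite request failed: %d\n", ret);
    }
    (*remaining)--;
}

static __attribute__((unused)) int
example_submit_two_writes(BlockDriverState *bs, QEMUIOVector *qiov0,
                          QEMUIOVector *qiov1, int *remaining)
{
    BlockRequest reqs[2] = {
        {
            .sector     = 0,
            .nb_sectors = (int)(qiov0->size >> BDRV_SECTOR_BITS),
            .qiov       = qiov0,
            .cb         = example_multiwrite_done,
            .opaque     = remaining,
        },
        {
            .sector     = 2048,
            .nb_sectors = (int)(qiov1->size >> BDRV_SECTOR_BITS),
            .qiov       = qiov1,
            .cb         = example_multiwrite_done,
            .opaque     = remaining,
        },
    };

    *remaining = 2;
    if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
        /* Requests whose 'error' field is non-zero will get no callback. */
        return -1;
    }
    return 0;
}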
4579
bellard83f64092006-08-01 16:21:11 +00004580void bdrv_aio_cancel(BlockDriverAIOCB *acb)
pbrookce1a14d2006-08-07 02:38:06 +00004581{
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004582 acb->aiocb_info->cancel(acb);
bellard83f64092006-08-01 16:21:11 +00004583}
4584
4585/**************************************************************/
4586/* async block device emulation */
4587
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02004588typedef struct BlockDriverAIOCBSync {
4589 BlockDriverAIOCB common;
4590 QEMUBH *bh;
4591 int ret;
4592 /* vector translation state */
4593 QEMUIOVector *qiov;
4594 uint8_t *bounce;
4595 int is_write;
4596} BlockDriverAIOCBSync;
4597
4598static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
4599{
Kevin Wolfb666d232010-05-05 11:44:39 +02004600 BlockDriverAIOCBSync *acb =
4601 container_of(blockacb, BlockDriverAIOCBSync, common);
Dor Laor6a7ad292009-06-01 12:07:23 +03004602 qemu_bh_delete(acb->bh);
Avi Kivity36afc452009-06-23 16:20:36 +03004603 acb->bh = NULL;
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02004604 qemu_aio_release(acb);
4605}
4606
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004607static const AIOCBInfo bdrv_em_aiocb_info = {
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02004608 .aiocb_size = sizeof(BlockDriverAIOCBSync),
4609 .cancel = bdrv_aio_cancel_em,
4610};
4611
bellard83f64092006-08-01 16:21:11 +00004612static void bdrv_aio_bh_cb(void *opaque)
bellardbeac80c2006-06-26 20:08:57 +00004613{
pbrookce1a14d2006-08-07 02:38:06 +00004614 BlockDriverAIOCBSync *acb = opaque;
aliguorif141eaf2009-04-07 18:43:24 +00004615
aliguorif141eaf2009-04-07 18:43:24 +00004616 if (!acb->is_write)
Michael Tokarev03396142012-06-07 20:17:55 +04004617 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
aliguoriceb42de2009-04-07 18:43:28 +00004618 qemu_vfree(acb->bounce);
pbrookce1a14d2006-08-07 02:38:06 +00004619 acb->common.cb(acb->common.opaque, acb->ret);
Dor Laor6a7ad292009-06-01 12:07:23 +03004620 qemu_bh_delete(acb->bh);
Avi Kivity36afc452009-06-23 16:20:36 +03004621 acb->bh = NULL;
pbrookce1a14d2006-08-07 02:38:06 +00004622 qemu_aio_release(acb);
bellardbeac80c2006-06-26 20:08:57 +00004623}
bellardbeac80c2006-06-26 20:08:57 +00004624
aliguorif141eaf2009-04-07 18:43:24 +00004625static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4626 int64_t sector_num,
4627 QEMUIOVector *qiov,
4628 int nb_sectors,
4629 BlockDriverCompletionFunc *cb,
4630 void *opaque,
4631 int is_write)
bellardea2384d2004-08-01 21:59:26 +00004633{
pbrookce1a14d2006-08-07 02:38:06 +00004634 BlockDriverAIOCBSync *acb;
pbrookce1a14d2006-08-07 02:38:06 +00004635
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004636 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
aliguorif141eaf2009-04-07 18:43:24 +00004637 acb->is_write = is_write;
4638 acb->qiov = qiov;
aliguorie268ca52009-04-22 20:20:00 +00004639 acb->bounce = qemu_blockalign(bs, qiov->size);
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004640 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
aliguorif141eaf2009-04-07 18:43:24 +00004641
4642 if (is_write) {
Michael Tokarevd5e6b162012-06-07 20:21:06 +04004643 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
Stefan Hajnoczi1ed20ac2011-10-13 13:08:21 +01004644 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
aliguorif141eaf2009-04-07 18:43:24 +00004645 } else {
Stefan Hajnoczi1ed20ac2011-10-13 13:08:21 +01004646 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
aliguorif141eaf2009-04-07 18:43:24 +00004647 }
4648
pbrookce1a14d2006-08-07 02:38:06 +00004649 qemu_bh_schedule(acb->bh);
aliguorif141eaf2009-04-07 18:43:24 +00004650
pbrookce1a14d2006-08-07 02:38:06 +00004651 return &acb->common;
pbrook7a6cba62006-06-04 11:39:07 +00004652}
4653
aliguorif141eaf2009-04-07 18:43:24 +00004654static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4655 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
pbrookce1a14d2006-08-07 02:38:06 +00004656 BlockDriverCompletionFunc *cb, void *opaque)
bellard83f64092006-08-01 16:21:11 +00004657{
aliguorif141eaf2009-04-07 18:43:24 +00004658 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
bellard83f64092006-08-01 16:21:11 +00004659}
4660
aliguorif141eaf2009-04-07 18:43:24 +00004661static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4662 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4663 BlockDriverCompletionFunc *cb, void *opaque)
4664{
4665 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4666}
4667
Kevin Wolf68485422011-06-30 10:05:46 +02004668
4669typedef struct BlockDriverAIOCBCoroutine {
4670 BlockDriverAIOCB common;
4671 BlockRequest req;
4672 bool is_write;
Kevin Wolfd318aea2012-11-13 16:35:08 +01004673 bool *done;
Kevin Wolf68485422011-06-30 10:05:46 +02004674 QEMUBH* bh;
4675} BlockDriverAIOCBCoroutine;
4676
4677static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
4678{
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004679 AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
Kevin Wolfd318aea2012-11-13 16:35:08 +01004680 BlockDriverAIOCBCoroutine *acb =
4681 container_of(blockacb, BlockDriverAIOCBCoroutine, common);
4682 bool done = false;
4683
4684 acb->done = &done;
4685 while (!done) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004686 aio_poll(aio_context, true);
Kevin Wolfd318aea2012-11-13 16:35:08 +01004687 }
Kevin Wolf68485422011-06-30 10:05:46 +02004688}
4689
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004690static const AIOCBInfo bdrv_em_co_aiocb_info = {
Kevin Wolf68485422011-06-30 10:05:46 +02004691 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
4692 .cancel = bdrv_aio_co_cancel_em,
4693};
4694
Paolo Bonzini35246a62011-10-14 10:41:29 +02004695static void bdrv_co_em_bh(void *opaque)
Kevin Wolf68485422011-06-30 10:05:46 +02004696{
4697 BlockDriverAIOCBCoroutine *acb = opaque;
4698
4699 acb->common.cb(acb->common.opaque, acb->req.error);
Kevin Wolfd318aea2012-11-13 16:35:08 +01004700
4701 if (acb->done) {
4702 *acb->done = true;
4703 }
4704
Kevin Wolf68485422011-06-30 10:05:46 +02004705 qemu_bh_delete(acb->bh);
4706 qemu_aio_release(acb);
4707}
4708
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004709/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4710static void coroutine_fn bdrv_co_do_rw(void *opaque)
4711{
4712 BlockDriverAIOCBCoroutine *acb = opaque;
4713 BlockDriverState *bs = acb->common.bs;
4714
4715 if (!acb->is_write) {
4716 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004717 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004718 } else {
4719 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004720 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004721 }
4722
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004723 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01004724 qemu_bh_schedule(acb->bh);
4725}
4726
Kevin Wolf68485422011-06-30 10:05:46 +02004727static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4728 int64_t sector_num,
4729 QEMUIOVector *qiov,
4730 int nb_sectors,
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004731 BdrvRequestFlags flags,
Kevin Wolf68485422011-06-30 10:05:46 +02004732 BlockDriverCompletionFunc *cb,
4733 void *opaque,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004734 bool is_write)
Kevin Wolf68485422011-06-30 10:05:46 +02004735{
4736 Coroutine *co;
4737 BlockDriverAIOCBCoroutine *acb;
4738
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004739 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
Kevin Wolf68485422011-06-30 10:05:46 +02004740 acb->req.sector = sector_num;
4741 acb->req.nb_sectors = nb_sectors;
4742 acb->req.qiov = qiov;
Paolo Bonzinid20d9b72013-11-22 13:39:44 +01004743 acb->req.flags = flags;
Kevin Wolf68485422011-06-30 10:05:46 +02004744 acb->is_write = is_write;
Kevin Wolfd318aea2012-11-13 16:35:08 +01004745 acb->done = NULL;
Kevin Wolf68485422011-06-30 10:05:46 +02004746
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01004747 co = qemu_coroutine_create(bdrv_co_do_rw);
Kevin Wolf68485422011-06-30 10:05:46 +02004748 qemu_coroutine_enter(co, acb);
4749
4750 return &acb->common;
4751}
4752
Paolo Bonzini07f07612011-10-17 12:32:12 +02004753static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02004754{
Paolo Bonzini07f07612011-10-17 12:32:12 +02004755 BlockDriverAIOCBCoroutine *acb = opaque;
4756 BlockDriverState *bs = acb->common.bs;
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02004757
Paolo Bonzini07f07612011-10-17 12:32:12 +02004758 acb->req.error = bdrv_co_flush(bs);
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004759 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02004760 qemu_bh_schedule(acb->bh);
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02004761}
4762
Paolo Bonzini07f07612011-10-17 12:32:12 +02004763BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
Alexander Graf016f5cf2010-05-26 17:51:49 +02004764 BlockDriverCompletionFunc *cb, void *opaque)
4765{
Paolo Bonzini07f07612011-10-17 12:32:12 +02004766 trace_bdrv_aio_flush(bs, opaque);
Alexander Graf016f5cf2010-05-26 17:51:49 +02004767
Paolo Bonzini07f07612011-10-17 12:32:12 +02004768 Coroutine *co;
4769 BlockDriverAIOCBCoroutine *acb;
Alexander Graf016f5cf2010-05-26 17:51:49 +02004770
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004771 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
Kevin Wolfd318aea2012-11-13 16:35:08 +01004772 acb->done = NULL;
4773
Paolo Bonzini07f07612011-10-17 12:32:12 +02004774 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4775 qemu_coroutine_enter(co, acb);
Alexander Graf016f5cf2010-05-26 17:51:49 +02004776
Alexander Graf016f5cf2010-05-26 17:51:49 +02004777 return &acb->common;
4778}
4779
Paolo Bonzini4265d622011-10-17 12:32:14 +02004780static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4781{
4782 BlockDriverAIOCBCoroutine *acb = opaque;
4783 BlockDriverState *bs = acb->common.bs;
4784
4785 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02004786 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
Paolo Bonzini4265d622011-10-17 12:32:14 +02004787 qemu_bh_schedule(acb->bh);
4788}
4789
4790BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
4791 int64_t sector_num, int nb_sectors,
4792 BlockDriverCompletionFunc *cb, void *opaque)
4793{
4794 Coroutine *co;
4795 BlockDriverAIOCBCoroutine *acb;
4796
4797 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4798
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004799 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
Paolo Bonzini4265d622011-10-17 12:32:14 +02004800 acb->req.sector = sector_num;
4801 acb->req.nb_sectors = nb_sectors;
Kevin Wolfd318aea2012-11-13 16:35:08 +01004802 acb->done = NULL;
Paolo Bonzini4265d622011-10-17 12:32:14 +02004803 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4804 qemu_coroutine_enter(co, acb);
4805
4806 return &acb->common;
4807}
4808
bellardea2384d2004-08-01 21:59:26 +00004809void bdrv_init(void)
4810{
Anthony Liguori5efa9d52009-05-09 17:03:42 -05004811 module_call_init(MODULE_INIT_BLOCK);
bellardea2384d2004-08-01 21:59:26 +00004812}
pbrookce1a14d2006-08-07 02:38:06 +00004813
Markus Armbrustereb852012009-10-27 18:41:44 +01004814void bdrv_init_with_whitelist(void)
4815{
4816 use_bdrv_whitelist = 1;
4817 bdrv_init();
4818}
4819
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004820void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02004821 BlockDriverCompletionFunc *cb, void *opaque)
aliguori6bbff9a2009-03-20 18:25:59 +00004822{
pbrookce1a14d2006-08-07 02:38:06 +00004823 BlockDriverAIOCB *acb;
4824
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004825 acb = g_slice_alloc(aiocb_info->aiocb_size);
4826 acb->aiocb_info = aiocb_info;
pbrookce1a14d2006-08-07 02:38:06 +00004827 acb->bs = bs;
4828 acb->cb = cb;
4829 acb->opaque = opaque;
4830 return acb;
4831}
4832
4833void qemu_aio_release(void *p)
4834{
Stefan Hajnoczid37c9752012-10-31 16:34:36 +01004835 BlockDriverAIOCB *acb = p;
Stefan Hajnoczid7331be2012-10-31 16:34:37 +01004836 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
pbrookce1a14d2006-08-07 02:38:06 +00004837}
bellard19cb3732006-08-19 11:45:59 +00004838
4839/**************************************************************/
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004840/* Coroutine block device emulation */
4841
4842typedef struct CoroutineIOCompletion {
4843 Coroutine *coroutine;
4844 int ret;
4845} CoroutineIOCompletion;
4846
4847static void bdrv_co_io_em_complete(void *opaque, int ret)
4848{
4849 CoroutineIOCompletion *co = opaque;
4850
4851 co->ret = ret;
4852 qemu_coroutine_enter(co->coroutine, NULL);
4853}
4854
4855static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4856 int nb_sectors, QEMUIOVector *iov,
4857 bool is_write)
4858{
4859 CoroutineIOCompletion co = {
4860 .coroutine = qemu_coroutine_self(),
4861 };
4862 BlockDriverAIOCB *acb;
4863
4864 if (is_write) {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01004865 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4866 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004867 } else {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01004868 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4869 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004870 }
4871
Stefan Hajnoczi59370aa2011-09-30 17:34:58 +01004872 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02004873 if (!acb) {
4874 return -EIO;
4875 }
4876 qemu_coroutine_yield();
4877
4878 return co.ret;
4879}
4880
4881static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4882 int64_t sector_num, int nb_sectors,
4883 QEMUIOVector *iov)
4884{
4885 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4886}
4887
4888static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4889 int64_t sector_num, int nb_sectors,
4890 QEMUIOVector *iov)
4891{
4892 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4893}
4894
Paolo Bonzini07f07612011-10-17 12:32:12 +02004895static void coroutine_fn bdrv_flush_co_entry(void *opaque)
Kevin Wolfe7a8a782011-07-15 16:05:00 +02004896{
Paolo Bonzini07f07612011-10-17 12:32:12 +02004897 RwCo *rwco = opaque;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02004898
Paolo Bonzini07f07612011-10-17 12:32:12 +02004899 rwco->ret = bdrv_co_flush(rwco->bs);
4900}
4901
4902int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
4903{
Kevin Wolfeb489bb2011-11-10 18:10:11 +01004904 int ret;
4905
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004906 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
Paolo Bonzini07f07612011-10-17 12:32:12 +02004907 return 0;
Kevin Wolfeb489bb2011-11-10 18:10:11 +01004908 }
4909
Kevin Wolfca716362011-11-10 18:13:59 +01004910 /* Write back cached data to the OS even with cache=unsafe */
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004911 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
Kevin Wolfeb489bb2011-11-10 18:10:11 +01004912 if (bs->drv->bdrv_co_flush_to_os) {
4913 ret = bs->drv->bdrv_co_flush_to_os(bs);
4914 if (ret < 0) {
4915 return ret;
4916 }
4917 }
4918
Kevin Wolfca716362011-11-10 18:13:59 +01004919 /* But don't actually force it to the disk with cache=unsafe */
4920 if (bs->open_flags & BDRV_O_NO_FLUSH) {
Kevin Wolfd4c82322012-08-15 12:52:45 +02004921 goto flush_parent;
Kevin Wolfca716362011-11-10 18:13:59 +01004922 }
4923
Kevin Wolfbf736fe2013-06-05 15:17:55 +02004924 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
Kevin Wolfeb489bb2011-11-10 18:10:11 +01004925 if (bs->drv->bdrv_co_flush_to_disk) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004926 ret = bs->drv->bdrv_co_flush_to_disk(bs);
Paolo Bonzini07f07612011-10-17 12:32:12 +02004927 } else if (bs->drv->bdrv_aio_flush) {
4928 BlockDriverAIOCB *acb;
4929 CoroutineIOCompletion co = {
4930 .coroutine = qemu_coroutine_self(),
4931 };
4932
4933 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
4934 if (acb == NULL) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004935 ret = -EIO;
Paolo Bonzini07f07612011-10-17 12:32:12 +02004936 } else {
4937 qemu_coroutine_yield();
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004938 ret = co.ret;
Paolo Bonzini07f07612011-10-17 12:32:12 +02004939 }
Paolo Bonzini07f07612011-10-17 12:32:12 +02004940 } else {
4941 /*
4942 * Some block drivers always operate in either writethrough or unsafe
4943 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
4944 * know how the server works (because the behaviour is hardcoded or
4945 * depends on server-side configuration), so we can't ensure that
4946 * everything is safe on disk. Returning an error doesn't work because
4947 * that would break guests even if the server operates in writethrough
4948 * mode.
4949 *
4950 * Let's hope the user knows what he's doing.
4951 */
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004952 ret = 0;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02004953 }
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004954 if (ret < 0) {
4955 return ret;
4956 }
4957
4958 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
4959 * in the case of cache=unsafe, so there are no useless flushes.
4960 */
Kevin Wolfd4c82322012-08-15 12:52:45 +02004961flush_parent:
Paolo Bonzini29cdb252012-03-12 18:26:01 +01004962 return bdrv_co_flush(bs->file);
Paolo Bonzini07f07612011-10-17 12:32:12 +02004963}
4964
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01004965void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
Anthony Liguori0f154232011-11-14 15:09:45 -06004966{
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01004967 Error *local_err = NULL;
4968 int ret;
4969
Kevin Wolf3456a8d2014-03-11 10:58:39 +01004970 if (!bs->drv) {
4971 return;
Anthony Liguori0f154232011-11-14 15:09:45 -06004972 }
Kevin Wolf3456a8d2014-03-11 10:58:39 +01004973
4974 if (bs->drv->bdrv_invalidate_cache) {
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01004975 bs->drv->bdrv_invalidate_cache(bs, &local_err);
Kevin Wolf3456a8d2014-03-11 10:58:39 +01004976 } else if (bs->file) {
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01004977 bdrv_invalidate_cache(bs->file, &local_err);
4978 }
4979 if (local_err) {
4980 error_propagate(errp, local_err);
4981 return;
Kevin Wolf3456a8d2014-03-11 10:58:39 +01004982 }
4983
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01004984 ret = refresh_total_sectors(bs, bs->total_sectors);
4985 if (ret < 0) {
4986 error_setg_errno(errp, -ret, "Could not refresh total sector count");
4987 return;
4988 }
Anthony Liguori0f154232011-11-14 15:09:45 -06004989}
4990
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01004991void bdrv_invalidate_cache_all(Error **errp)
Anthony Liguori0f154232011-11-14 15:09:45 -06004992{
4993 BlockDriverState *bs;
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01004994 Error *local_err = NULL;
Anthony Liguori0f154232011-11-14 15:09:45 -06004995
Benoît Canetdc364f42014-01-23 21:31:32 +01004996 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02004997 AioContext *aio_context = bdrv_get_aio_context(bs);
4998
4999 aio_context_acquire(aio_context);
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005000 bdrv_invalidate_cache(bs, &local_err);
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02005001 aio_context_release(aio_context);
Kevin Wolf5a8a30d2014-03-12 15:59:16 +01005002 if (local_err) {
5003 error_propagate(errp, local_err);
5004 return;
5005 }
Anthony Liguori0f154232011-11-14 15:09:45 -06005006 }
5007}
5008
Benoît Canet07789262012-03-23 08:36:49 +01005009void bdrv_clear_incoming_migration_all(void)
5010{
5011 BlockDriverState *bs;
5012
Benoît Canetdc364f42014-01-23 21:31:32 +01005013 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02005014 AioContext *aio_context = bdrv_get_aio_context(bs);
5015
5016 aio_context_acquire(aio_context);
Benoît Canet07789262012-03-23 08:36:49 +01005017 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
Stefan Hajnoczied78cda2014-05-08 16:34:35 +02005018 aio_context_release(aio_context);
Benoît Canet07789262012-03-23 08:36:49 +01005019 }
5020}
5021
Paolo Bonzini07f07612011-10-17 12:32:12 +02005022int bdrv_flush(BlockDriverState *bs)
5023{
5024 Coroutine *co;
5025 RwCo rwco = {
5026 .bs = bs,
5027 .ret = NOT_DONE,
5028 };
5029
5030 if (qemu_in_coroutine()) {
5031 /* Fast-path if already in coroutine context */
5032 bdrv_flush_co_entry(&rwco);
5033 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005034 AioContext *aio_context = bdrv_get_aio_context(bs);
5035
Paolo Bonzini07f07612011-10-17 12:32:12 +02005036 co = qemu_coroutine_create(bdrv_flush_co_entry);
5037 qemu_coroutine_enter(co, &rwco);
5038 while (rwco.ret == NOT_DONE) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005039 aio_poll(aio_context, true);
Paolo Bonzini07f07612011-10-17 12:32:12 +02005040 }
5041 }
5042
5043 return rwco.ret;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02005044}
5045
Kevin Wolf775aa8b2013-12-05 12:09:38 +01005046typedef struct DiscardCo {
5047 BlockDriverState *bs;
5048 int64_t sector_num;
5049 int nb_sectors;
5050 int ret;
5051} DiscardCo;
Paolo Bonzini4265d622011-10-17 12:32:14 +02005052static void coroutine_fn bdrv_discard_co_entry(void *opaque)
5053{
Kevin Wolf775aa8b2013-12-05 12:09:38 +01005054 DiscardCo *rwco = opaque;
Paolo Bonzini4265d622011-10-17 12:32:14 +02005055
5056 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
5057}
5058
Peter Lieven6f14da52013-10-24 12:06:59 +02005059/* If no limit is specified in the BlockLimits, use a default
5060 * of 32768 512-byte sectors (16 MiB) per request.
5061 */
5062#define MAX_DISCARD_DEFAULT 32768
5063
Paolo Bonzini4265d622011-10-17 12:32:14 +02005064int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5065 int nb_sectors)
5066{
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005067 int max_discard;
5068
Paolo Bonzini4265d622011-10-17 12:32:14 +02005069 if (!bs->drv) {
5070 return -ENOMEDIUM;
5071 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
5072 return -EIO;
5073 } else if (bs->read_only) {
5074 return -EROFS;
Paolo Bonzinidf702c92013-01-14 16:26:58 +01005075 }
5076
Fam Zhenge4654d22013-11-13 18:29:43 +08005077 bdrv_reset_dirty(bs, sector_num, nb_sectors);
Paolo Bonzinidf702c92013-01-14 16:26:58 +01005078
Paolo Bonzini9e8f1832013-02-08 14:06:11 +01005079 /* Do nothing if disabled. */
5080 if (!(bs->open_flags & BDRV_O_UNMAP)) {
5081 return 0;
5082 }
5083
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005084 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
Paolo Bonzini4265d622011-10-17 12:32:14 +02005085 return 0;
5086 }
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005087
5088 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
5089 while (nb_sectors > 0) {
5090 int ret;
5091 int num = nb_sectors;
5092
5093 /* align request */
5094 if (bs->bl.discard_alignment &&
5095 num >= bs->bl.discard_alignment &&
5096 sector_num % bs->bl.discard_alignment) {
5097 if (num > bs->bl.discard_alignment) {
5098 num = bs->bl.discard_alignment;
5099 }
5100 num -= sector_num % bs->bl.discard_alignment;
5101 }
5102
5103 /* limit request size */
5104 if (num > max_discard) {
5105 num = max_discard;
5106 }
5107
5108 if (bs->drv->bdrv_co_discard) {
5109 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5110 } else {
5111 BlockDriverAIOCB *acb;
5112 CoroutineIOCompletion co = {
5113 .coroutine = qemu_coroutine_self(),
5114 };
5115
5116 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
5117 bdrv_co_io_em_complete, &co);
5118 if (acb == NULL) {
5119 return -EIO;
5120 } else {
5121 qemu_coroutine_yield();
5122 ret = co.ret;
5123 }
5124 }
Paolo Bonzini7ce21012013-11-22 13:39:47 +01005125 if (ret && ret != -ENOTSUP) {
Paolo Bonzinid51e9fe2013-11-22 13:39:43 +01005126 return ret;
5127 }
5128
5129 sector_num += num;
5130 nb_sectors -= num;
5131 }
5132 return 0;
Paolo Bonzini4265d622011-10-17 12:32:14 +02005133}
5134
5135int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5136{
5137 Coroutine *co;
Kevin Wolf775aa8b2013-12-05 12:09:38 +01005138 DiscardCo rwco = {
Paolo Bonzini4265d622011-10-17 12:32:14 +02005139 .bs = bs,
5140 .sector_num = sector_num,
5141 .nb_sectors = nb_sectors,
5142 .ret = NOT_DONE,
5143 };
5144
5145 if (qemu_in_coroutine()) {
5146 /* Fast-path if already in coroutine context */
5147 bdrv_discard_co_entry(&rwco);
5148 } else {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005149 AioContext *aio_context = bdrv_get_aio_context(bs);
5150
Paolo Bonzini4265d622011-10-17 12:32:14 +02005151 co = qemu_coroutine_create(bdrv_discard_co_entry);
5152 qemu_coroutine_enter(co, &rwco);
5153 while (rwco.ret == NOT_DONE) {
Stefan Hajnoczi2572b372014-05-08 16:34:34 +02005154 aio_poll(aio_context, true);
Paolo Bonzini4265d622011-10-17 12:32:14 +02005155 }
5156 }
5157
5158 return rwco.ret;
5159}
5160
Kevin Wolff9f05dc2011-07-15 13:50:26 +02005161/**************************************************************/
bellard19cb3732006-08-19 11:45:59 +00005162/* removable device support */
5163
5164/**
5165 * Return TRUE if the media is present
5166 */
5167int bdrv_is_inserted(BlockDriverState *bs)
5168{
5169 BlockDriver *drv = bs->drv;
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02005170
bellard19cb3732006-08-19 11:45:59 +00005171 if (!drv)
5172 return 0;
5173 if (!drv->bdrv_is_inserted)
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02005174 return 1;
5175 return drv->bdrv_is_inserted(bs);
bellard19cb3732006-08-19 11:45:59 +00005176}
5177
5178/**
Markus Armbruster8e49ca42011-08-03 15:08:08 +02005179 * Return whether the media changed since the last call to this
5180 * function, or -ENOTSUP if we don't know. Most drivers don't know.
bellard19cb3732006-08-19 11:45:59 +00005181 */
5182int bdrv_media_changed(BlockDriverState *bs)
5183{
5184 BlockDriver *drv = bs->drv;
bellard19cb3732006-08-19 11:45:59 +00005185
Markus Armbruster8e49ca42011-08-03 15:08:08 +02005186 if (drv && drv->bdrv_media_changed) {
5187 return drv->bdrv_media_changed(bs);
5188 }
5189 return -ENOTSUP;
bellard19cb3732006-08-19 11:45:59 +00005190}
5191
5192/**
5193 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
5194 */
Luiz Capitulinof36f3942012-02-03 16:24:53 -02005195void bdrv_eject(BlockDriverState *bs, bool eject_flag)
bellard19cb3732006-08-19 11:45:59 +00005196{
5197 BlockDriver *drv = bs->drv;
bellard19cb3732006-08-19 11:45:59 +00005198
Markus Armbruster822e1cd2011-07-20 18:23:42 +02005199 if (drv && drv->bdrv_eject) {
5200 drv->bdrv_eject(bs, eject_flag);
bellard19cb3732006-08-19 11:45:59 +00005201 }
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02005202
5203 if (bs->device_name[0] != '\0') {
5204 bdrv_emit_qmp_eject_event(bs, eject_flag);
5205 }
bellard19cb3732006-08-19 11:45:59 +00005206}
5207
bellard19cb3732006-08-19 11:45:59 +00005208/**
5209 * Lock or unlock the media (if it is locked, the user won't be able
5210 * to eject it manually).
5211 */
Markus Armbruster025e8492011-09-06 18:58:47 +02005212void bdrv_lock_medium(BlockDriverState *bs, bool locked)
bellard19cb3732006-08-19 11:45:59 +00005213{
5214 BlockDriver *drv = bs->drv;
5215
Markus Armbruster025e8492011-09-06 18:58:47 +02005216 trace_bdrv_lock_medium(bs, locked);
Stefan Hajnoczib8c6d092011-03-29 20:04:40 +01005217
Markus Armbruster025e8492011-09-06 18:58:47 +02005218 if (drv && drv->bdrv_lock_medium) {
5219 drv->bdrv_lock_medium(bs, locked);
bellard19cb3732006-08-19 11:45:59 +00005220 }
5221}
ths985a03b2007-12-24 16:10:43 +00005222
5223/* needed for generic scsi interface */
5224
5225int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5226{
5227 BlockDriver *drv = bs->drv;
5228
5229 if (drv && drv->bdrv_ioctl)
5230 return drv->bdrv_ioctl(bs, req, buf);
5231 return -ENOTSUP;
5232}
aliguori7d780662009-03-12 19:57:08 +00005233
aliguori221f7152009-03-28 17:28:41 +00005234BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
5235 unsigned long int req, void *buf,
5236 BlockDriverCompletionFunc *cb, void *opaque)
aliguori7d780662009-03-12 19:57:08 +00005237{
aliguori221f7152009-03-28 17:28:41 +00005238 BlockDriver *drv = bs->drv;
aliguori7d780662009-03-12 19:57:08 +00005239
aliguori221f7152009-03-28 17:28:41 +00005240 if (drv && drv->bdrv_aio_ioctl)
5241 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5242 return NULL;
aliguori7d780662009-03-12 19:57:08 +00005243}
aliguorie268ca52009-04-22 20:20:00 +00005244
Paolo Bonzini1b7fd722011-11-29 11:35:47 +01005245void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
Markus Armbruster7b6f9302011-09-06 18:58:56 +02005246{
Paolo Bonzini1b7fd722011-11-29 11:35:47 +01005247 bs->guest_block_size = align;
Markus Armbruster7b6f9302011-09-06 18:58:56 +02005248}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005249
aliguorie268ca52009-04-22 20:20:00 +00005250void *qemu_blockalign(BlockDriverState *bs, size_t size)
5251{
Kevin Wolf339064d2013-11-28 10:23:32 +01005252 return qemu_memalign(bdrv_opt_mem_align(bs), size);
aliguorie268ca52009-04-22 20:20:00 +00005253}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005254
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005255/*
5256 * Check if all memory in this vector is sector aligned.
5257 */
5258bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5259{
5260 int i;
Kevin Wolf339064d2013-11-28 10:23:32 +01005261 size_t alignment = bdrv_opt_mem_align(bs);
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005262
5263 for (i = 0; i < qiov->niov; i++) {
Kevin Wolf339064d2013-11-28 10:23:32 +01005264 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005265 return false;
5266 }
Kevin Wolf339064d2013-11-28 10:23:32 +01005267 if (qiov->iov[i].iov_len % alignment) {
Kevin Wolf1ff735b2013-12-05 13:01:46 +01005268 return false;
5269 }
Stefan Hajnoczic53b1c52013-01-11 16:41:27 +01005270 }
5271
5272 return true;
5273}
5274
Fam Zhengb8afb522014-04-16 09:34:30 +08005275BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
5276 Error **errp)
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005277{
5278 int64_t bitmap_size;
Fam Zhenge4654d22013-11-13 18:29:43 +08005279 BdrvDirtyBitmap *bitmap;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01005280
Paolo Bonzini50717e92013-01-21 17:09:45 +01005281 assert((granularity & (granularity - 1)) == 0);
5282
Fam Zhenge4654d22013-11-13 18:29:43 +08005283 granularity >>= BDRV_SECTOR_BITS;
5284 assert(granularity);
Fam Zhengb8afb522014-04-16 09:34:30 +08005285 bitmap_size = bdrv_getlength(bs);
5286 if (bitmap_size < 0) {
5287 error_setg_errno(errp, -bitmap_size, "could not get length of device");
5288 errno = -bitmap_size;
5289 return NULL;
5290 }
5291 bitmap_size >>= BDRV_SECTOR_BITS;
Fam Zhenge4654d22013-11-13 18:29:43 +08005292 bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
5293 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
5294 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5295 return bitmap;
5296}
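/*
 * Illustrative sketch, not part of the original file: a round trip through the
 * dirty bitmap API defined in this file.  Granularity is given in bytes and
 * must be a power of two; the block layer marks sectors via bdrv_set_dirty()
 * on writes.  The helper name and the 64 KB granularity are assumptions for
 * this example only.
 */
static __attribute__((unused)) void
example_dirty_bitmap_roundtrip(BlockDriverState *bs, Error **errp)
{
    BdrvDirtyBitmap *bitmap = bdrv_create_dirty_bitmap(bs, 65536, errp);

    if (!bitmap) {
        return;                         /* errp carries the reason */
    }

    bdrv_set_dirty(bs, 0, 128);         /* mark sectors 0..127 dirty */
    assert(bdrv_get_dirty(bs, bitmap, 0));
    assert(bdrv_get_dirty_count(bs, bitmap) >= 128);

    bdrv_reset_dirty(bs, 0, 128);       /* clear them again */
    bdrv_release_dirty_bitmap(bs, bitmap);
}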
5297
5298void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5299{
5300 BdrvDirtyBitmap *bm, *next;
5301 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5302 if (bm == bitmap) {
5303 QLIST_REMOVE(bitmap, list);
5304 hbitmap_free(bitmap->bitmap);
5305 g_free(bitmap);
5306 return;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01005307 }
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005308 }
5309}
5310
Fam Zheng21b56832013-11-13 18:29:44 +08005311BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5312{
5313 BdrvDirtyBitmap *bm;
5314 BlockDirtyInfoList *list = NULL;
5315 BlockDirtyInfoList **plist = &list;
5316
5317 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5318 BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
5319 BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
5320 info->count = bdrv_get_dirty_count(bs, bm);
5321 info->granularity =
5322 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5323 entry->value = info;
5324 *plist = entry;
5325 plist = &entry->next;
5326 }
5327
5328 return list;
5329}
5330
Fam Zhenge4654d22013-11-13 18:29:43 +08005331int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005332{
Fam Zhenge4654d22013-11-13 18:29:43 +08005333 if (bitmap) {
5334 return hbitmap_get(bitmap->bitmap, sector);
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02005335 } else {
5336 return 0;
5337 }
5338}
5339
Fam Zhenge4654d22013-11-13 18:29:43 +08005340void bdrv_dirty_iter_init(BlockDriverState *bs,
5341 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
Paolo Bonzini1755da12012-10-18 16:49:18 +02005342{
Fam Zhenge4654d22013-11-13 18:29:43 +08005343 hbitmap_iter_init(hbi, bitmap->bitmap, 0);
Paolo Bonzini1755da12012-10-18 16:49:18 +02005344}
5345
5346void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5347 int nr_sectors)
5348{
Fam Zhenge4654d22013-11-13 18:29:43 +08005349 BdrvDirtyBitmap *bitmap;
5350 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5351 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
Paolo Bonzini8f0720e2013-01-21 17:09:41 +01005352 }
Liran Schouraaa0eb72010-01-26 10:31:48 +02005353}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
    }
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}
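
/* Usage sketch (illustrative): the count returned here is in sectors, so a
 * caller that wants a byte figure, e.g. for job progress reporting, might do:
 *
 *     int64_t dirty_bytes =
 *         bdrv_get_dirty_count(bs, bitmap) * BDRV_SECTOR_SIZE;
 */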

/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If, after releasing, the reference count drops to zero, the
 * BlockDriverState is deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}
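
/* Usage sketch (illustrative): code that stashes a BlockDriverState pointer
 * beyond the current call chain is expected to take a reference and drop it
 * when done.  do_something_async() is a hypothetical helper.
 *
 *     bdrv_ref(bs);             // keep bs alive while we use it
 *     do_something_async(bs);
 *     ...
 *     bdrv_unref(bs);           // may delete bs if this was the last ref
 */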
5384
Fam Zhengfbe40ff2014-05-23 21:29:42 +08005385struct BdrvOpBlocker {
5386 Error *reason;
5387 QLIST_ENTRY(BdrvOpBlocker) list;
5388};
5389
5390bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5391{
5392 BdrvOpBlocker *blocker;
5393 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5394 if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5395 blocker = QLIST_FIRST(&bs->op_blockers[op]);
5396 if (errp) {
5397 error_setg(errp, "Device '%s' is busy: %s",
5398 bs->device_name, error_get_pretty(blocker->reason));
5399 }
5400 return true;
5401 }
5402 return false;
5403}
5404
5405void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
5406{
5407 BdrvOpBlocker *blocker;
5408 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5409
5410 blocker = g_malloc0(sizeof(BdrvOpBlocker));
5411 blocker->reason = reason;
5412 QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
5413}
5414
5415void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5416{
5417 BdrvOpBlocker *blocker, *next;
5418 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5419 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5420 if (blocker->reason == reason) {
5421 QLIST_REMOVE(blocker, list);
5422 g_free(blocker);
5423 }
5424 }
5425}
5426
5427void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5428{
5429 int i;
5430 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5431 bdrv_op_block(bs, i, reason);
5432 }
5433}
5434
5435void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5436{
5437 int i;
5438 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5439 bdrv_op_unblock(bs, i, reason);
5440 }
5441}
5442
5443bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5444{
5445 int i;
5446
5447 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5448 if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5449 return false;
5450 }
5451 }
5452 return true;
5453}
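
/* Usage sketch (illustrative): this is roughly the pattern block jobs follow,
 * namely install a blocker on the device for the lifetime of the job and drop
 * it on completion.  BLOCK_OP_TYPE_RESIZE is used here only as an example
 * member of the BlockOpType enum.
 *
 *     Error *blocker = NULL;
 *
 *     error_setg(&blocker, "block device is in use by a job");
 *     bdrv_op_block_all(bs, blocker);
 *     ...
 *     // elsewhere, a would-be user of the device checks before acting:
 *     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, errp)) {
 *         return;
 *     }
 *     ...
 *     bdrv_op_unblock_all(bs, blocker);
 *     error_free(blocker);
 */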

void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
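
/* Usage sketch (illustrative): an error path would typically report the errno
 * to the I/O status machinery only when it is enabled, matching the assertion
 * in bdrv_iostatus_set_err().
 *
 *     if (bdrv_iostatus_is_enabled(bs)) {
 *         bdrv_iostatus_set_err(bs, ENOSPC);  // -> BLOCK_DEVICE_IO_STATUS_NOSPACE
 *     }
 */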
5494
Christoph Hellwiga597e792011-08-25 08:26:01 +02005495void
5496bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
5497 enum BlockAcctType type)
5498{
5499 assert(type < BDRV_MAX_IOTYPE);
5500
5501 cookie->bytes = bytes;
Christoph Hellwigc488c7f2011-08-25 08:26:10 +02005502 cookie->start_time_ns = get_clock();
Christoph Hellwiga597e792011-08-25 08:26:01 +02005503 cookie->type = type;
5504}
5505
5506void
5507bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
5508{
5509 assert(cookie->type < BDRV_MAX_IOTYPE);
5510
5511 bs->nr_bytes[cookie->type] += cookie->bytes;
5512 bs->nr_ops[cookie->type]++;
Christoph Hellwigc488c7f2011-08-25 08:26:10 +02005513 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
Christoph Hellwiga597e792011-08-25 08:26:01 +02005514}
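
/* Usage sketch (illustrative): device emulation brackets each request with an
 * accounting cookie, roughly as IDE or virtio-blk do.
 *
 *     BlockAcctCookie acct;
 *
 *     bdrv_acct_start(bs, &acct, nb_sectors * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
 *     // ... submit the read and wait for completion ...
 *     bdrv_acct_done(bs, &acct);
 */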
5515
Luiz Capitulinod92ada22012-11-30 10:52:09 -02005516void bdrv_img_create(const char *filename, const char *fmt,
5517 const char *base_filename, const char *base_fmt,
Miroslav Rezaninaf382d432013-02-13 09:09:40 +01005518 char *options, uint64_t img_size, int flags,
5519 Error **errp, bool quiet)
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005520{
5521 QEMUOptionParameter *param = NULL, *create_options = NULL;
Kevin Wolfd2208942011-06-01 14:03:31 +02005522 QEMUOptionParameter *backing_fmt, *backing_file, *size;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005523 BlockDriver *drv, *proto_drv;
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00005524 BlockDriver *backing_drv = NULL;
Max Reitzcc84d902013-09-06 17:14:26 +02005525 Error *local_err = NULL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005526 int ret = 0;
5527
5528 /* Find driver and parse its options */
5529 drv = bdrv_find_format(fmt);
5530 if (!drv) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005531 error_setg(errp, "Unknown file format '%s'", fmt);
Luiz Capitulinod92ada22012-11-30 10:52:09 -02005532 return;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005533 }
5534
Kevin Wolf98289622013-07-10 15:47:39 +02005535 proto_drv = bdrv_find_protocol(filename, true);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005536 if (!proto_drv) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005537 error_setg(errp, "Unknown protocol '%s'", filename);
Luiz Capitulinod92ada22012-11-30 10:52:09 -02005538 return;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005539 }
5540
5541 create_options = append_option_parameters(create_options,
5542 drv->create_options);
5543 create_options = append_option_parameters(create_options,
5544 proto_drv->create_options);
5545
5546 /* Create parameter list with default values */
5547 param = parse_option_parameters("", create_options, param);
5548
5549 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
5550
5551 /* Parse -o options */
5552 if (options) {
5553 param = parse_option_parameters(options, create_options, param);
5554 if (param == NULL) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005555 error_setg(errp, "Invalid options for file format '%s'.", fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005556 goto out;
5557 }
5558 }
5559
5560 if (base_filename) {
5561 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
5562 base_filename)) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005563 error_setg(errp, "Backing file not supported for file format '%s'",
5564 fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005565 goto out;
5566 }
5567 }
5568
5569 if (base_fmt) {
5570 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005571 error_setg(errp, "Backing file format not supported for file "
5572 "format '%s'", fmt);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005573 goto out;
5574 }
5575 }
5576
Jes Sorensen792da932010-12-16 13:52:17 +01005577 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
5578 if (backing_file && backing_file->value.s) {
5579 if (!strcmp(filename, backing_file->value.s)) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005580 error_setg(errp, "Error: Trying to create an image with the "
5581 "same filename as the backing file");
Jes Sorensen792da932010-12-16 13:52:17 +01005582 goto out;
5583 }
5584 }
5585
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005586 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
5587 if (backing_fmt && backing_fmt->value.s) {
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00005588 backing_drv = bdrv_find_format(backing_fmt->value.s);
5589 if (!backing_drv) {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005590 error_setg(errp, "Unknown backing file format '%s'",
5591 backing_fmt->value.s);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005592 goto out;
5593 }
5594 }
5595
5596 // The size for the image must always be specified, with one exception:
5597 // If we are using a backing file, we can obtain the size from there
Kevin Wolfd2208942011-06-01 14:03:31 +02005598 size = get_option_parameter(param, BLOCK_OPT_SIZE);
5599 if (size && size->value.n == -1) {
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005600 if (backing_file && backing_file->value.s) {
Max Reitz66f6b812013-12-03 14:57:52 +01005601 BlockDriverState *bs;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005602 uint64_t size;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005603 char buf[32];
Paolo Bonzini63090da2012-04-12 14:01:03 +02005604 int back_flags;
5605
5606 /* backing files always opened read-only */
5607 back_flags =
5608 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005609
Max Reitzf67503e2014-02-18 18:33:05 +01005610 bs = NULL;
Max Reitzddf56362014-02-18 18:33:06 +01005611 ret = bdrv_open(&bs, backing_file->value.s, NULL, NULL, back_flags,
Max Reitzcc84d902013-09-06 17:14:26 +02005612 backing_drv, &local_err);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005613 if (ret < 0) {
Max Reitzcc84d902013-09-06 17:14:26 +02005614 error_setg_errno(errp, -ret, "Could not open '%s': %s",
5615 backing_file->value.s,
5616 error_get_pretty(local_err));
5617 error_free(local_err);
5618 local_err = NULL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005619 goto out;
5620 }
5621 bdrv_get_geometry(bs, &size);
5622 size *= 512;
5623
5624 snprintf(buf, sizeof(buf), "%" PRId64, size);
5625 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
Max Reitz66f6b812013-12-03 14:57:52 +01005626
5627 bdrv_unref(bs);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005628 } else {
Luiz Capitulino71c79812012-11-30 10:52:04 -02005629 error_setg(errp, "Image creation needs a size parameter");
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005630 goto out;
5631 }
5632 }
5633
Miroslav Rezaninaf382d432013-02-13 09:09:40 +01005634 if (!quiet) {
5635 printf("Formatting '%s', fmt=%s ", filename, fmt);
5636 print_option_parameters(param);
5637 puts("");
5638 }
Max Reitzcc84d902013-09-06 17:14:26 +02005639 ret = bdrv_create(drv, filename, param, &local_err);
5640 if (ret == -EFBIG) {
5641 /* This is generally a better message than whatever the driver would
5642 * deliver (especially because of the cluster_size_hint), since that
5643 * is most probably not much different from "image too large". */
5644 const char *cluster_size_hint = "";
5645 if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
5646 cluster_size_hint = " (try using a larger cluster size)";
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005647 }
Max Reitzcc84d902013-09-06 17:14:26 +02005648 error_setg(errp, "The image size is too large for file format '%s'"
5649 "%s", fmt, cluster_size_hint);
5650 error_free(local_err);
5651 local_err = NULL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005652 }
5653
5654out:
5655 free_option_parameters(create_options);
5656 free_option_parameters(param);
5657
Markus Armbruster84d18f02014-01-30 15:07:28 +01005658 if (local_err) {
Max Reitzcc84d902013-09-06 17:14:26 +02005659 error_propagate(errp, local_err);
5660 }
Jes Sorensenf88e1a42010-12-16 13:52:15 +01005661}
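
/* Usage sketch (illustrative): roughly how qemu-img create drives this
 * function; a 1 GiB qcow2 image with no backing file and default options.
 *
 *     Error *local_err = NULL;
 *
 *     bdrv_img_create("test.qcow2", "qcow2", NULL, NULL, NULL,
 *                     1024 * 1024 * 1024, 0, &local_err, false);
 *     if (local_err) {
 *         error_report("%s", error_get_pretty(local_err));
 *         error_free(local_err);
 *     }
 */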

AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    return bs->aio_context;
}

void bdrv_detach_aio_context(BlockDriverState *bs)
{
    if (!bs->drv) {
        return;
    }

    if (bs->drv->bdrv_detach_aio_context) {
        bs->drv->bdrv_detach_aio_context(bs);
    }
    if (bs->file) {
        bdrv_detach_aio_context(bs->file);
    }
    if (bs->backing_hd) {
        bdrv_detach_aio_context(bs->backing_hd);
    }

    bs->aio_context = NULL;
}

void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context)
{
    if (!bs->drv) {
        return;
    }

    bs->aio_context = new_context;

    if (bs->backing_hd) {
        bdrv_attach_aio_context(bs->backing_hd, new_context);
    }
    if (bs->file) {
        bdrv_attach_aio_context(bs->file, new_context);
    }
    if (bs->drv->bdrv_attach_aio_context) {
        bs->drv->bdrv_attach_aio_context(bs, new_context);
    }
}

void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
{
    bdrv_drain_all(); /* ensure there are no in-flight requests */

    bdrv_detach_aio_context(bs);

    /* This function executes in the old AioContext so acquire the new one in
     * case it runs in a different thread.
     */
    aio_context_acquire(new_context);
    bdrv_attach_aio_context(bs, new_context);
    aio_context_release(new_context);
}
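
/* Usage sketch (illustrative): dataplane-style code moves a device to an
 * IOThread's AioContext before submitting requests from that thread.
 * iothread_get_aio_context() is assumed to come from the IOThread object API.
 *
 *     AioContext *ctx = iothread_get_aio_context(iothread);
 *
 *     bdrv_set_aio_context(bs, ctx);
 *     // from now on, bs must only be accessed with ctx acquired
 */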

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
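
/* Usage sketch (illustrative): the backup job registers a notifier of this
 * shape so it can copy out old data before a guest write lands.  The callback
 * name is hypothetical.
 *
 *     static int coroutine_fn before_write_notify(NotifierWithReturn *notifier,
 *                                                 void *opaque)
 *     {
 *         // opaque is the in-flight write request being tracked
 *         return 0;   // a negative return fails the guest write
 *     }
 *
 *     NotifierWithReturn before_write = { .notify = before_write_notify };
 *     bdrv_add_before_write_notifier(bs, &before_write);
 */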

int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
{
    if (bs->drv->bdrv_amend_options == NULL) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, options);
}

/* This function is called by a block filter driver's
 * bdrv_recurse_is_first_non_filter method and by bdrv_is_first_non_filter.
 * It tests whether the given bs is the candidate, or recurses further down
 * the node graph.
 */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    /* return false if basic checks fail */
    if (!bs || !bs->drv) {
        return false;
    }

    /* The code reached a non-filter driver -> check whether this bs is
     * the same as the candidate.  This is the recursion termination condition.
     */
    if (!bs->drv->is_filter) {
        return bs == candidate;
    }
    /* Down this path the driver is a block filter driver */

    /* If the block filter defines its own recursion method, use it to recurse
     * down the node graph.
     */
    if (bs->drv->bdrv_recurse_is_first_non_filter) {
        return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
    }

    /* The driver is a block filter but does not allow further recursion ->
     * return false.
     */
    return false;
}

/* This function checks whether the candidate is the first non-filter bs down
 * its bs chain.  Since we do not have pointers to parents, it explores all bs
 * chains from the top.  Some filters can choose not to pass down the
 * recursion.
 */
bool bdrv_is_first_non_filter(BlockDriverState *candidate)
{
    BlockDriverState *bs;

    /* walk down the bs forest recursively */
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bool perm;

        /* try to recurse into this top-level bs */
        perm = bdrv_recurse_is_first_non_filter(bs, candidate);

        /* candidate is the first non-filter */
        if (perm) {
            return true;
        }
    }

    return false;
}
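
/* Usage sketch (illustrative): QMP commands that must operate on the top
 * image of a chain rather than on a filter node (block_resize, for example)
 * can guard themselves with this check.  The error message here is only an
 * example, not the actual one used by any command.
 *
 *     if (!bdrv_is_first_non_filter(bs)) {
 *         error_setg(errp, "Operation is only supported on top-level nodes");
 *         return;
 *     }
 */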