/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor.h"
#include "block_int.h"
#include "module.h"
#include "qjson.h"
#include "qemu-coroutine.h"
#include "qmp-commands.h"
#include "qemu-timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);

static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
        double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, int64_t *wait);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* The device to use for VM snapshots */
static BlockDriverState *bs_snapshots;

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    while (qemu_co_queue_next(&bs->throttled_reqs));

    if (bs->block_timer) {
        qemu_del_timer(bs->block_timer);
        qemu_free_timer(bs->block_timer);
        bs->block_timer = NULL;
    }

    bs->slice_start = 0;
    bs->slice_end = 0;
    bs->slice_time = 0;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
}

static void bdrv_block_timer(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_queue_next(&bs->throttled_reqs);
}

void bdrv_io_limits_enable(BlockDriverState *bs)
{
    qemu_co_queue_init(&bs->throttled_reqs);
    bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
    bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
    bs->slice_start = qemu_get_clock_ns(vm_clock);
    bs->slice_end = bs->slice_start + bs->slice_time;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
    bs->io_limits_enabled = true;
}

bool bdrv_io_limits_enabled(BlockDriverState *bs)
{
    BlockIOLimit *io_limits = &bs->io_limits;
    return io_limits->bps[BLOCK_IO_LIMIT_READ]
         || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
         || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
         || io_limits->iops[BLOCK_IO_LIMIT_READ]
         || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
         || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
}

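/*
 * Illustrative usage sketch, not part of the original file: a configuration
 * path would typically fill in bs->io_limits and then turn throttling on.
 * The field names are the ones used by bdrv_io_limits_enabled() above; the
 * 1 MB/s figure is only an example value.
 *
 *     bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL] = 1 * 1024 * 1024;
 *     if (bdrv_io_limits_enabled(bs)) {
 *         bdrv_io_limits_enable(bs);
 *     }
 *
 * bdrv_io_limits_disable() undoes this and releases the slice timer.
 */
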
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     bool is_write, int nb_sectors)
{
    int64_t wait_time = -1;

    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
        qemu_co_queue_wait(&bs->throttled_reqs);
    }

    /* Throttled requests are dequeued in FIFO order so that each request
     * keeps its relative timing.  The next throttled request is not dequeued
     * until the current one has been allowed to proceed, so if the current
     * request still exceeds the limits it is re-inserted at the head of the
     * queue and all requests behind it stay in throttled_reqs.
     */

    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
        qemu_mod_timer(bs->block_timer,
                       wait_time + qemu_get_clock_ns(vm_clock));
        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
    }

    qemu_co_queue_next(&bs->throttled_reqs);
}

/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

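/*
 * Illustrative examples, not from the original source, of how the helper
 * above classifies a few typical paths:
 *
 *     path_has_protocol("nbd:localhost:10809");     returns 1 ("nbd" before ':')
 *     path_has_protocol("/var/lib/images/a.img");   returns 0 ('/' comes first)
 *     path_has_protocol("d:\\images\\a.img");       returns 0 on Win32 (drive letter)
 */
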
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

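/*
 * Worked example (illustrative, not from the original source): combining a
 * relative backing file name with the path of the image that refers to it.
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/vm/disk.qcow2", "base.qcow2");
 *     (dest now holds "/vm/base.qcow2")
 *
 * With an absolute filename such as "/other/base.qcow2" the base path is
 * ignored and the filename is copied verbatim.
 */
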
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    return bs;
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv)
{
    static const char *whitelist[] = {
        CONFIG_BDRV_WHITELIST
    };
    const char **p;

    if (!whitelist[0])
        return 1;               /* no whitelist, anything goes */

    for (p = whitelist; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    CreateCo *cco = opaque;
    assert(cco->drv);

    cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
}

int bdrv_create(BlockDriver *drv, const char* filename,
    QEMUOptionParameter *options)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
    };

    if (!drv->bdrv_create) {
        return -ENOTSUP;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    g_free(cco.filename);

    return ret;
}

int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
{
    BlockDriver *drv;

    drv = bdrv_find_protocol(filename);
    if (drv == NULL) {
        return -ENOENT;
    }

    return bdrv_create(drv, filename, options);
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0 || close(fd)) {
        return -errno;
    }
    return 0;
#endif
}

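/*
 * Illustrative usage sketch, not part of the original file: the
 * BDRV_O_SNAPSHOT path in bdrv_open() below uses it roughly like this.
 *
 *     char tmp_filename[PATH_MAX];
 *     int ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *
 * On failure ret is -EOVERFLOW, a negative errno from mkstemp(), or
 * -GetLastError() on Windows; on success tmp_filename names an empty
 * temporary file owned by the caller.
 */
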
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename)) {
        return bdrv_find_format("file");
    }
    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

static int find_image_format(const char *filename, BlockDriver **pdrv)
{
    int ret, score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    BlockDriverState *bs;

    ret = bdrv_file_open(&bs, filename, 0);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs)) {
        bdrv_delete(bs);
        drv = bdrv_find_format("raw");
        if (!drv) {
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    bdrv_delete(bs);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}

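/*
 * Illustrative usage sketch, not from the original source: turning a
 * "cache=..." option string from the command line into BDRV_O_* open flags.
 *
 *     int flags = BDRV_O_RDWR;
 *     if (bdrv_parse_cache_flags("none", &flags) < 0) {
 *         (report an error: unknown cache mode)
 *     }
 *
 * Afterwards flags has BDRV_O_NOCACHE | BDRV_O_CACHE_WB set in addition to
 * BDRV_O_RDWR; any previous BDRV_O_CACHE_MASK bits were cleared first.
 */
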
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

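/*
 * Illustrative sketch, not part of the original file: because the flag is a
 * reference count, each user pairs an enable with a disable and nested users
 * do not clobber each other.  A caller wanting copy-on-read for the duration
 * of some operation would do roughly:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... reads may now populate bs by writing fetched sectors back ...
 *     bdrv_disable_copy_on_read(bs);
 */
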
/*
 * Common part for opening disk images and files
 */
static int bdrv_open_common(BlockDriverState *bs, const char *filename,
    int flags, BlockDriver *drv)
{
    int ret, open_flags;

    assert(drv != NULL);
    assert(bs->file == NULL);

    trace_bdrv_open_common(bs, filename, flags, drv->format_name);

    bs->open_flags = flags;
    bs->buffer_alignment = 512;

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
        bdrv_enable_copy_on_read(bs);
    }

    pstrcpy(bs->filename, sizeof(bs->filename), filename);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
        return -ENOTSUP;
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
    open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        ret = drv->bdrv_file_open(bs, filename, open_flags);
    } else {
        ret = bdrv_file_open(&bs->file, filename, open_flags);
        if (ret >= 0) {
            ret = drv->bdrv_open(bs, open_flags);
        }
    }

    if (ret < 0) {
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        unlink(filename);
    }
#endif
    return 0;

free_and_fail:
    if (bs->file) {
        bdrv_delete(bs->file);
        bs->file = NULL;
    }
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    int ret;

    drv = bdrv_find_protocol(filename);
    if (!drv) {
        return -ENOENT;
    }

    bs = bdrv_new("");
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        bdrv_delete(bs);
        return ret;
    }
    bs->growable = 1;
    *pbs = bs;
    return 0;
}

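/*
 * Illustrative usage sketch, not from the original source: opening a file at
 * the protocol level, much as find_image_format() above does when probing.
 * The path name is only an example.
 *
 *     BlockDriverState *file;
 *     int ret = bdrv_file_open(&file, "/var/lib/images/test.img", 0);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ... bdrv_pread()/bdrv_pwrite() on 'file' ...
 *     bdrv_delete(file);
 */
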
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 */
int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
              BlockDriver *drv)
{
    int ret;
    char tmp_filename[PATH_MAX];

    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        int is_protocol = 0;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *options;
        char backing_filename[PATH_MAX];

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, 0, drv);
        if (ret < 0) {
            bdrv_delete(bs1);
            return ret;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        if (bs1->drv && bs1->drv->protocol_name)
            is_protocol = 1;

        bdrv_delete(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            return ret;
        }

        /* Real path is meaningless for protocols */
        if (is_protocol)
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        else if (!realpath(filename, backing_filename))
            return -errno;

        bdrv_qcow2 = bdrv_find_format("qcow2");
        options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);

        set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
        if (drv) {
            set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
                drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
        free_option_parameters(options);
        if (ret < 0) {
            return ret;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Find the right image format driver */
    if (!drv) {
        ret = find_image_format(filename, &drv);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
        char backing_filename[PATH_MAX];
        int back_flags;
        BlockDriver *back_drv = NULL;

        bs->backing_hd = bdrv_new("");
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));

        if (bs->backing_format[0] != '\0') {
            back_drv = bdrv_find_format(bs->backing_format);
        }

        /* backing files always opened read-only */
        back_flags =
            flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

        ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
        if (ret < 0) {
            bdrv_close(bs);
            return ret;
        }
        if (bs->is_temporary) {
            bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
        } else {
            /* base image inherits from "parent" */
            bs->backing_hd->keep_read_only = bs->keep_read_only;
        }
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_enable(bs);
    }

    return 0;

unlink_and_fail:
    if (bs->is_temporary) {
        unlink(filename);
    }
    return ret;
}

void bdrv_close(BlockDriverState *bs)
{
    bdrv_flush(bs);
    if (bs->drv) {
        if (bs->job) {
            block_job_cancel_sync(bs->job);
        }
        bdrv_drain_all();

        if (bs == bs_snapshots) {
            bs_snapshots = NULL;
        }
        if (bs->backing_hd) {
            bdrv_delete(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;

        if (bs->file != NULL) {
            bdrv_delete(bs->file);
            bs->file = NULL;
        }

        bdrv_dev_change_media_cb(bs, false);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    BlockDriverState *bs;
    bool busy;

    do {
        busy = qemu_aio_wait();

        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                qemu_co_queue_restart_all(&bs->throttled_reqs);
                busy = true;
            }
        }
    } while (busy);

    /* If requests are still pending there is a bug somewhere */
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        assert(QLIST_EMPTY(&bs->tracked_requests));
        assert(qemu_co_queue_empty(&bs->throttled_reqs));
    }
}
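
/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * callers that need a quiesced block layer before manipulating the
 * BlockDriverState graph.
 *
 *     bdrv_drain_all();    wait for in-flight requests on all devices
 *     bdrv_flush_all();    separately flush data to disk if required
 *     ... safe to detach or swap BlockDriverStates here ...
 *
 * As the comment above explains, there is deliberately no per-device variant.
 */
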
956
Ryan Harperd22b2f42011-03-29 20:51:47 -0500957/* make a BlockDriverState anonymous by removing from bdrv_state list.
958 Also, NULL terminate the device_name to prevent double remove */
959void bdrv_make_anon(BlockDriverState *bs)
960{
961 if (bs->device_name[0] != '\0') {
962 QTAILQ_REMOVE(&bdrv_states, bs, list);
963 }
964 bs->device_name[0] = '\0';
965}
966
Paolo Bonzinie023b2e2012-05-08 16:51:41 +0200967static void bdrv_rebind(BlockDriverState *bs)
968{
969 if (bs->drv && bs->drv->bdrv_rebind) {
970 bs->drv->bdrv_rebind(bs);
971 }
972}
973
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +0200974static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
975 BlockDriverState *bs_src)
976{
977 /* move some fields that need to stay attached to the device */
978 bs_dest->open_flags = bs_src->open_flags;
979
980 /* dev info */
981 bs_dest->dev_ops = bs_src->dev_ops;
982 bs_dest->dev_opaque = bs_src->dev_opaque;
983 bs_dest->dev = bs_src->dev;
984 bs_dest->buffer_alignment = bs_src->buffer_alignment;
985 bs_dest->copy_on_read = bs_src->copy_on_read;
986
987 bs_dest->enable_write_cache = bs_src->enable_write_cache;
988
989 /* i/o timing parameters */
990 bs_dest->slice_time = bs_src->slice_time;
991 bs_dest->slice_start = bs_src->slice_start;
992 bs_dest->slice_end = bs_src->slice_end;
993 bs_dest->io_limits = bs_src->io_limits;
994 bs_dest->io_base = bs_src->io_base;
995 bs_dest->throttled_reqs = bs_src->throttled_reqs;
996 bs_dest->block_timer = bs_src->block_timer;
997 bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
998
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +0200999 /* r/w error */
1000 bs_dest->on_read_error = bs_src->on_read_error;
1001 bs_dest->on_write_error = bs_src->on_write_error;
1002
1003 /* i/o status */
1004 bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
1005 bs_dest->iostatus = bs_src->iostatus;
1006
1007 /* dirty bitmap */
1008 bs_dest->dirty_count = bs_src->dirty_count;
1009 bs_dest->dirty_bitmap = bs_src->dirty_bitmap;
1010
1011 /* job */
1012 bs_dest->in_use = bs_src->in_use;
1013 bs_dest->job = bs_src->job;
1014
1015 /* keep the same entry in bdrv_states */
1016 pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
1017 bs_src->device_name);
1018 bs_dest->list = bs_src->list;
1019}
1020
1021/*
1022 * Swap bs contents for two image chains while they are live,
1023 * while keeping required fields on the BlockDriverState that is
1024 * actually attached to a device.
1025 *
1026 * This will modify the BlockDriverState fields, and swap contents
1027 * between bs_new and bs_old. Both bs_new and bs_old are modified.
1028 *
1029 * bs_new is required to be anonymous.
1030 *
1031 * This function does not create any image files.
1032 */
1033void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
1034{
1035 BlockDriverState tmp;
1036
1037 /* bs_new must be anonymous and shouldn't have anything fancy enabled */
1038 assert(bs_new->device_name[0] == '\0');
1039 assert(bs_new->dirty_bitmap == NULL);
1040 assert(bs_new->job == NULL);
1041 assert(bs_new->dev == NULL);
1042 assert(bs_new->in_use == 0);
1043 assert(bs_new->io_limits_enabled == false);
1044 assert(bs_new->block_timer == NULL);
1045
1046 tmp = *bs_new;
1047 *bs_new = *bs_old;
1048 *bs_old = tmp;
1049
1050 /* there are some fields that should not be swapped, move them back */
1051 bdrv_move_feature_fields(&tmp, bs_old);
1052 bdrv_move_feature_fields(bs_old, bs_new);
1053 bdrv_move_feature_fields(bs_new, &tmp);
1054
1055 /* bs_new shouldn't be in bdrv_states even after the swap! */
1056 assert(bs_new->device_name[0] == '\0');
1057
1058 /* Check a few fields that should remain attached to the device */
1059 assert(bs_new->dev == NULL);
1060 assert(bs_new->job == NULL);
1061 assert(bs_new->in_use == 0);
1062 assert(bs_new->io_limits_enabled == false);
1063 assert(bs_new->block_timer == NULL);
1064
1065 bdrv_rebind(bs_new);
1066 bdrv_rebind(bs_old);
1067}
1068
Jeff Cody8802d1f2012-02-28 15:54:06 -05001069/*
1070 * Add new bs contents at the top of an image chain while the chain is
1071 * live, while keeping required fields on the top layer.
1072 *
1073 * This will modify the BlockDriverState fields, and swap contents
1074 * between bs_new and bs_top. Both bs_new and bs_top are modified.
1075 *
Jeff Codyf6801b82012-03-27 16:30:19 -04001076 * bs_new is required to be anonymous.
1077 *
Jeff Cody8802d1f2012-02-28 15:54:06 -05001078 * This function does not create any image files.
1079 */
1080void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
1081{
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001082 bdrv_swap(bs_new, bs_top);
Jeff Cody8802d1f2012-02-28 15:54:06 -05001083
1084 /* The contents of 'tmp' will become bs_top, as we are
1085 * swapping bs_new and bs_top contents. */
Paolo Bonzini4ddc07c2012-06-14 16:55:02 +02001086 bs_top->backing_hd = bs_new;
1087 bs_top->open_flags &= ~BDRV_O_NO_BACKING;
1088 pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
1089 bs_new->filename);
1090 pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
1091 bs_new->drv ? bs_new->drv->format_name : "");
Jeff Cody8802d1f2012-02-28 15:54:06 -05001092}
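
/*
 * Illustrative sketch, not from the original source: live snapshot code uses
 * bdrv_append() roughly like this, assuming 'overlay' was just created over
 * the current image and opened with the current image as its backing file.
 *
 *     BlockDriverState *overlay = bdrv_new("");    anonymous, as required
 *     ... bdrv_open(overlay, overlay_filename, flags, drv) ...
 *     bdrv_append(overlay, bs);
 *
 * Afterwards 'bs' (still attached to the device) presents the new overlay and
 * its former contents live on in 'overlay' as the backing file.
 */
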
1093
bellardb3380822004-03-14 21:38:54 +00001094void bdrv_delete(BlockDriverState *bs)
1095{
Markus Armbrusterfa879d62011-08-03 15:07:40 +02001096 assert(!bs->dev);
Paolo Bonzini3e914652012-03-30 13:17:11 +02001097 assert(!bs->job);
1098 assert(!bs->in_use);
Markus Armbruster18846de2010-06-29 16:58:30 +02001099
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01001100 /* remove from list, if necessary */
Ryan Harperd22b2f42011-03-29 20:51:47 -05001101 bdrv_make_anon(bs);
aurel3234c6f052008-04-08 19:51:21 +00001102
bellardb3380822004-03-14 21:38:54 +00001103 bdrv_close(bs);
Kevin Wolf66f82ce2010-04-14 14:17:38 +02001104
Markus Armbrusterf9092b12010-06-25 10:33:39 +02001105 assert(bs != bs_snapshots);
Anthony Liguori7267c092011-08-20 22:09:37 -05001106 g_free(bs);
bellardfc01f7e2003-06-30 10:03:06 +00001107}
1108
Markus Armbrusterfa879d62011-08-03 15:07:40 +02001109int bdrv_attach_dev(BlockDriverState *bs, void *dev)
1110/* TODO change to DeviceState *dev when all users are qdevified */
Markus Armbruster18846de2010-06-29 16:58:30 +02001111{
Markus Armbrusterfa879d62011-08-03 15:07:40 +02001112 if (bs->dev) {
Markus Armbruster18846de2010-06-29 16:58:30 +02001113 return -EBUSY;
1114 }
Markus Armbrusterfa879d62011-08-03 15:07:40 +02001115 bs->dev = dev;
Luiz Capitulino28a72822011-09-26 17:43:50 -03001116 bdrv_iostatus_reset(bs);
Markus Armbruster18846de2010-06-29 16:58:30 +02001117 return 0;
1118}
1119
Markus Armbrusterfa879d62011-08-03 15:07:40 +02001120/* TODO qdevified devices don't use this, remove when devices are qdevified */
1121void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
Markus Armbruster18846de2010-06-29 16:58:30 +02001122{
Markus Armbrusterfa879d62011-08-03 15:07:40 +02001123 if (bdrv_attach_dev(bs, dev) < 0) {
1124 abort();
1125 }
1126}
1127
1128void bdrv_detach_dev(BlockDriverState *bs, void *dev)
1129/* TODO change to DeviceState *dev when all users are qdevified */
1130{
1131 assert(bs->dev == dev);
1132 bs->dev = NULL;
Markus Armbruster0e49de52011-08-03 15:07:41 +02001133 bs->dev_ops = NULL;
1134 bs->dev_opaque = NULL;
Markus Armbruster29e05f22011-09-06 18:58:57 +02001135 bs->buffer_alignment = 512;
Markus Armbruster18846de2010-06-29 16:58:30 +02001136}
1137
Markus Armbrusterfa879d62011-08-03 15:07:40 +02001138/* TODO change to return DeviceState * when all users are qdevified */
1139void *bdrv_get_attached_dev(BlockDriverState *bs)
Markus Armbruster18846de2010-06-29 16:58:30 +02001140{
Markus Armbrusterfa879d62011-08-03 15:07:40 +02001141 return bs->dev;
Markus Armbruster18846de2010-06-29 16:58:30 +02001142}
1143
Markus Armbruster0e49de52011-08-03 15:07:41 +02001144void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
1145 void *opaque)
1146{
1147 bs->dev_ops = ops;
1148 bs->dev_opaque = opaque;
Markus Armbruster2c6942f2011-09-06 18:58:51 +02001149 if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
1150 bs_snapshots = NULL;
1151 }
Markus Armbruster0e49de52011-08-03 15:07:41 +02001152}
1153
Luiz Capitulino329c0a42012-01-25 16:59:43 -02001154void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
1155 BlockQMPEventAction action, int is_read)
1156{
1157 QObject *data;
1158 const char *action_str;
1159
1160 switch (action) {
1161 case BDRV_ACTION_REPORT:
1162 action_str = "report";
1163 break;
1164 case BDRV_ACTION_IGNORE:
1165 action_str = "ignore";
1166 break;
1167 case BDRV_ACTION_STOP:
1168 action_str = "stop";
1169 break;
1170 default:
1171 abort();
1172 }
1173
1174 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
1175 bdrv->device_name,
1176 action_str,
1177 is_read ? "read" : "write");
1178 monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
1179
1180 qobject_decref(data);
1181}
1182
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02001183static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
1184{
1185 QObject *data;
1186
1187 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
1188 bdrv_get_device_name(bs), ejected);
1189 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
1190
1191 qobject_decref(data);
1192}
1193
Markus Armbruster7d4b4ba2011-09-06 18:58:59 +02001194static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
Markus Armbruster0e49de52011-08-03 15:07:41 +02001195{
Markus Armbruster145feb12011-08-03 15:07:42 +02001196 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02001197 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
Markus Armbruster7d4b4ba2011-09-06 18:58:59 +02001198 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02001199 if (tray_was_closed) {
1200 /* tray open */
1201 bdrv_emit_qmp_eject_event(bs, true);
1202 }
1203 if (load) {
1204 /* tray close */
1205 bdrv_emit_qmp_eject_event(bs, false);
1206 }
Markus Armbruster145feb12011-08-03 15:07:42 +02001207 }
1208}
1209
Markus Armbruster2c6942f2011-09-06 18:58:51 +02001210bool bdrv_dev_has_removable_media(BlockDriverState *bs)
1211{
1212 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
1213}
1214
Paolo Bonzini025ccaa2011-11-07 17:50:13 +01001215void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
1216{
1217 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
1218 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
1219 }
1220}
1221
Markus Armbrustere4def802011-09-06 18:58:53 +02001222bool bdrv_dev_is_tray_open(BlockDriverState *bs)
1223{
1224 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
1225 return bs->dev_ops->is_tray_open(bs->dev_opaque);
1226 }
1227 return false;
1228}
1229
Markus Armbruster145feb12011-08-03 15:07:42 +02001230static void bdrv_dev_resize_cb(BlockDriverState *bs)
1231{
1232 if (bs->dev_ops && bs->dev_ops->resize_cb) {
1233 bs->dev_ops->resize_cb(bs->dev_opaque);
Markus Armbruster0e49de52011-08-03 15:07:41 +02001234 }
1235}
1236
Markus Armbrusterf1076392011-09-06 18:58:46 +02001237bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
1238{
1239 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
1240 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
1241 }
1242 return false;
1243}
1244
aliguorie97fc192009-04-21 23:11:50 +00001245/*
1246 * Run consistency checks on an image
1247 *
Kevin Wolfe076f332010-06-29 11:43:13 +02001248 * Returns 0 if the check could be completed (it doesn't mean that the image is
Stefan Weila1c72732011-04-28 17:20:38 +02001249 * free of errors) or -errno when an internal error occurred. The results of the
Kevin Wolfe076f332010-06-29 11:43:13 +02001250 * check are stored in res.
aliguorie97fc192009-04-21 23:11:50 +00001251 */
Kevin Wolf4534ff52012-05-11 16:07:02 +02001252int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
aliguorie97fc192009-04-21 23:11:50 +00001253{
1254 if (bs->drv->bdrv_check == NULL) {
1255 return -ENOTSUP;
1256 }
1257
Kevin Wolfe076f332010-06-29 11:43:13 +02001258 memset(res, 0, sizeof(*res));
Kevin Wolf4534ff52012-05-11 16:07:02 +02001259 return bs->drv->bdrv_check(bs, res, fix);
aliguorie97fc192009-04-21 23:11:50 +00001260}
1261
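/*
 * Illustrative usage sketch, not part of the original file: roughly how an
 * image-checking caller such as qemu-img drives bdrv_check().  The result
 * field names are assumptions for illustration.
 *
 *     BdrvCheckResult result;
 *     int ret = bdrv_check(bs, &result, 0);    0 = report only, fix nothing
 *     if (ret < 0) {
 *         -ENOTSUP if the format has no checker, or another internal error
 *     } else {
 *         result.corruptions and result.leaks describe what was found
 *     }
 */
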
Kevin Wolf8a426612010-07-16 17:17:01 +02001262#define COMMIT_BUF_SECTORS 2048
1263
bellard33e39632003-07-06 17:15:21 +00001264/* commit COW file into the raw image */
1265int bdrv_commit(BlockDriverState *bs)
1266{
bellard19cb3732006-08-19 11:45:59 +00001267 BlockDriver *drv = bs->drv;
Kevin Wolfee181192010-08-05 13:05:22 +02001268 BlockDriver *backing_drv;
Kevin Wolf8a426612010-07-16 17:17:01 +02001269 int64_t sector, total_sectors;
1270 int n, ro, open_flags;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001271 int ret = 0, rw_ret = 0;
Kevin Wolf8a426612010-07-16 17:17:01 +02001272 uint8_t *buf;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001273 char filename[1024];
1274 BlockDriverState *bs_rw, *bs_ro;
bellard33e39632003-07-06 17:15:21 +00001275
bellard19cb3732006-08-19 11:45:59 +00001276 if (!drv)
1277 return -ENOMEDIUM;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001278
1279 if (!bs->backing_hd) {
1280 return -ENOTSUP;
bellard33e39632003-07-06 17:15:21 +00001281 }
1282
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001283 if (bs->backing_hd->keep_read_only) {
1284 return -EACCES;
1285 }
Kevin Wolfee181192010-08-05 13:05:22 +02001286
Stefan Hajnoczi2d3735d2012-01-18 14:40:41 +00001287 if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
1288 return -EBUSY;
1289 }
1290
Kevin Wolfee181192010-08-05 13:05:22 +02001291 backing_drv = bs->backing_hd->drv;
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001292 ro = bs->backing_hd->read_only;
1293 strncpy(filename, bs->backing_hd->filename, sizeof(filename));
1294 open_flags = bs->backing_hd->open_flags;
1295
1296 if (ro) {
1297 /* re-open as RW */
1298 bdrv_delete(bs->backing_hd);
1299 bs->backing_hd = NULL;
1300 bs_rw = bdrv_new("");
Kevin Wolfee181192010-08-05 13:05:22 +02001301 rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
1302 backing_drv);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001303 if (rw_ret < 0) {
1304 bdrv_delete(bs_rw);
1305 /* try to re-open read-only */
1306 bs_ro = bdrv_new("");
Kevin Wolfee181192010-08-05 13:05:22 +02001307 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
1308 backing_drv);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001309 if (ret < 0) {
1310 bdrv_delete(bs_ro);
1311 /* drive not functional anymore */
1312 bs->drv = NULL;
1313 return ret;
1314 }
1315 bs->backing_hd = bs_ro;
1316 return rw_ret;
1317 }
1318 bs->backing_hd = bs_rw;
bellard33e39632003-07-06 17:15:21 +00001319 }
bellardea2384d2004-08-01 21:59:26 +00001320
Jan Kiszka6ea44302009-11-30 18:21:19 +01001321 total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
Anthony Liguori7267c092011-08-20 22:09:37 -05001322 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
bellardea2384d2004-08-01 21:59:26 +00001323
Kevin Wolf8a426612010-07-16 17:17:01 +02001324 for (sector = 0; sector < total_sectors; sector += n) {
Stefan Hajnoczi05c4af52011-11-14 12:44:18 +00001325 if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
Kevin Wolf8a426612010-07-16 17:17:01 +02001326
1327 if (bdrv_read(bs, sector, buf, n) != 0) {
1328 ret = -EIO;
1329 goto ro_cleanup;
1330 }
1331
1332 if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
1333 ret = -EIO;
1334 goto ro_cleanup;
1335 }
bellardea2384d2004-08-01 21:59:26 +00001336 }
1337 }
bellard95389c82005-12-18 18:28:15 +00001338
Christoph Hellwig1d449522010-01-17 12:32:30 +01001339 if (drv->bdrv_make_empty) {
1340 ret = drv->bdrv_make_empty(bs);
1341 bdrv_flush(bs);
1342 }
bellard95389c82005-12-18 18:28:15 +00001343
Christoph Hellwig3f5075a2010-01-12 13:49:23 +01001344 /*
1345 * Make sure all data we wrote to the backing device is actually
1346 * stable on disk.
1347 */
1348 if (bs->backing_hd)
1349 bdrv_flush(bs->backing_hd);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001350
1351ro_cleanup:
Anthony Liguori7267c092011-08-20 22:09:37 -05001352 g_free(buf);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001353
1354 if (ro) {
1355 /* re-open as RO */
1356 bdrv_delete(bs->backing_hd);
1357 bs->backing_hd = NULL;
1358 bs_ro = bdrv_new("");
Kevin Wolfee181192010-08-05 13:05:22 +02001359 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
1360 backing_drv);
Naphtali Sprei4dca4b62010-02-14 13:39:18 +02001361 if (ret < 0) {
1362 bdrv_delete(bs_ro);
1363 /* drive not functional anymore */
1364 bs->drv = NULL;
1365 return ret;
1366 }
1367 bs->backing_hd = bs_ro;
1368 bs->backing_hd->keep_read_only = 0;
1369 }
1370
Christoph Hellwig1d449522010-01-17 12:32:30 +01001371 return ret;
bellard33e39632003-07-06 17:15:21 +00001372}
1373
Stefan Hajnoczie8877492012-03-05 18:10:11 +00001374int bdrv_commit_all(void)
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02001375{
1376 BlockDriverState *bs;
1377
1378 QTAILQ_FOREACH(bs, &bdrv_states, list) {
Stefan Hajnoczie8877492012-03-05 18:10:11 +00001379 int ret = bdrv_commit(bs);
1380 if (ret < 0) {
1381 return ret;
1382 }
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02001383 }
Stefan Hajnoczie8877492012-03-05 18:10:11 +00001384 return 0;
Markus Armbruster6ab4b5a2010-06-02 18:55:18 +02001385}
1386
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001387struct BdrvTrackedRequest {
1388 BlockDriverState *bs;
1389 int64_t sector_num;
1390 int nb_sectors;
1391 bool is_write;
1392 QLIST_ENTRY(BdrvTrackedRequest) list;
Stefan Hajnoczi5f8b6492011-11-30 12:23:42 +00001393 Coroutine *co; /* owner, used for deadlock detection */
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001394 CoQueue wait_queue; /* coroutines blocked on this request */
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001395};
1396
1397/**
1398 * Remove an active request from the tracked requests list
1399 *
1400 * This function should be called when a tracked request is completing.
1401 */
1402static void tracked_request_end(BdrvTrackedRequest *req)
1403{
1404 QLIST_REMOVE(req, list);
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001405 qemu_co_queue_restart_all(&req->wait_queue);
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001406}
1407
1408/**
1409 * Add an active request to the tracked requests list
1410 */
1411static void tracked_request_begin(BdrvTrackedRequest *req,
1412 BlockDriverState *bs,
1413 int64_t sector_num,
1414 int nb_sectors, bool is_write)
1415{
1416 *req = (BdrvTrackedRequest){
1417 .bs = bs,
1418 .sector_num = sector_num,
1419 .nb_sectors = nb_sectors,
1420 .is_write = is_write,
Stefan Hajnoczi5f8b6492011-11-30 12:23:42 +00001421 .co = qemu_coroutine_self(),
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001422 };
1423
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001424 qemu_co_queue_init(&req->wait_queue);
1425
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001426 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
1427}
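
/*
 * Illustrative sketch (not part of the original file): the intended pairing of
 * tracked_request_begin()/tracked_request_end() around a single request.  The
 * helper name and the do_driver_io callback are hypothetical; the real callers
 * are bdrv_co_do_readv() and bdrv_co_do_writev() below.
 */
static int coroutine_fn example_tracked_io(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, bool is_write,
    int coroutine_fn (*do_driver_io)(BlockDriverState *, int64_t, int,
                                     QEMUIOVector *))
{
    BdrvTrackedRequest req;
    int ret;

    /* Make the request visible to other coroutines */
    tracked_request_begin(&req, bs, sector_num, nb_sectors, is_write);

    ret = do_driver_io(bs, sector_num, nb_sectors, qiov);

    /* Unlink the request and wake any coroutine waiting on it */
    tracked_request_end(&req);
    return ret;
}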
1428
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001429/**
1430 * Round a region to cluster boundaries
1431 */
1432static void round_to_clusters(BlockDriverState *bs,
1433 int64_t sector_num, int nb_sectors,
1434 int64_t *cluster_sector_num,
1435 int *cluster_nb_sectors)
1436{
1437 BlockDriverInfo bdi;
1438
1439 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
1440 *cluster_sector_num = sector_num;
1441 *cluster_nb_sectors = nb_sectors;
1442 } else {
1443 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
1444 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
1445 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
1446 nb_sectors, c);
1447 }
1448}
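
/*
 * Illustrative sketch (not part of the original file): a worked example of
 * round_to_clusters().  The 64 KiB cluster size is an assumption for the
 * example; it corresponds to 128 sectors of BDRV_SECTOR_SIZE (512) bytes.
 */
static void example_round_to_clusters(BlockDriverState *bs)
{
    int64_t cluster_sector_num;
    int cluster_nb_sectors;

    /* Suppose bdrv_get_info() reports bdi.cluster_size == 64 * 1024.  A
     * request touching sectors [130, 130 + 10) is then widened to the
     * cluster-aligned range [128, 128 + 128), i.e. the whole second cluster.
     */
    round_to_clusters(bs, 130, 10, &cluster_sector_num, &cluster_nb_sectors);
    /* cluster_sector_num == 128, cluster_nb_sectors == 128 */
}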
1449
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001450static bool tracked_request_overlaps(BdrvTrackedRequest *req,
1451 int64_t sector_num, int nb_sectors) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001452 /* aaaa bbbb */
1453 if (sector_num >= req->sector_num + req->nb_sectors) {
1454 return false;
1455 }
1456 /* bbbb aaaa */
1457 if (req->sector_num >= sector_num + nb_sectors) {
1458 return false;
1459 }
1460 return true;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001461}
1462
1463static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
1464 int64_t sector_num, int nb_sectors)
1465{
1466 BdrvTrackedRequest *req;
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001467 int64_t cluster_sector_num;
1468 int cluster_nb_sectors;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001469 bool retry;
1470
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001471 /* If we touch the same cluster it counts as an overlap. This guarantees
1472 * that allocating writes will be serialized and not race with each other
1473 * for the same cluster. For example, in copy-on-read it ensures that the
1474 * CoR read and write operations are atomic and guest writes cannot
1475 * interleave between them.
1476 */
1477 round_to_clusters(bs, sector_num, nb_sectors,
1478 &cluster_sector_num, &cluster_nb_sectors);
1479
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001480 do {
1481 retry = false;
1482 QLIST_FOREACH(req, &bs->tracked_requests, list) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001483 if (tracked_request_overlaps(req, cluster_sector_num,
1484 cluster_nb_sectors)) {
Stefan Hajnoczi5f8b6492011-11-30 12:23:42 +00001485 /* Hitting this means there was a reentrant request, for
1486 * example, a block driver issuing nested requests. This must
1487 * never happen since it means deadlock.
1488 */
1489 assert(qemu_coroutine_self() != req->co);
1490
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001491 qemu_co_queue_wait(&req->wait_queue);
1492 retry = true;
1493 break;
1494 }
1495 }
1496 } while (retry);
1497}
1498
Kevin Wolf756e6732010-01-12 12:55:17 +01001499/*
1500 * Return values:
1501 * 0 - success
1502 * -EINVAL - backing format specified, but no file
1503 * -ENOSPC - can't update the backing file because no space is left in the
1504 * image file header
1505 * -ENOTSUP - format driver doesn't support changing the backing file
1506 */
1507int bdrv_change_backing_file(BlockDriverState *bs,
1508 const char *backing_file, const char *backing_fmt)
1509{
1510 BlockDriver *drv = bs->drv;
Paolo Bonzini469ef352012-04-12 14:01:02 +02001511 int ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01001512
Paolo Bonzini5f377792012-04-12 14:01:01 +02001513 /* Backing file format doesn't make sense without a backing file */
1514 if (backing_fmt && !backing_file) {
1515 return -EINVAL;
1516 }
1517
Kevin Wolf756e6732010-01-12 12:55:17 +01001518 if (drv->bdrv_change_backing_file != NULL) {
Paolo Bonzini469ef352012-04-12 14:01:02 +02001519 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
Kevin Wolf756e6732010-01-12 12:55:17 +01001520 } else {
Paolo Bonzini469ef352012-04-12 14:01:02 +02001521 ret = -ENOTSUP;
Kevin Wolf756e6732010-01-12 12:55:17 +01001522 }
Paolo Bonzini469ef352012-04-12 14:01:02 +02001523
1524 if (ret == 0) {
1525 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
1526 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
1527 }
1528 return ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01001529}
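
/*
 * Illustrative sketch (not part of the original file): rewriting an image's
 * backing file reference and reacting to the error codes documented above.
 * The file name and format are hypothetical.
 */
static int example_rebase_backing_file(BlockDriverState *bs)
{
    int ret = bdrv_change_backing_file(bs, "new-base.qcow2", "qcow2");

    if (ret == -ENOTSUP) {
        /* the format driver cannot rewrite its header in place */
    } else if (ret == -ENOSPC) {
        /* the image header has no room for the longer backing file name */
    }
    return ret;    /* 0 on success */
}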
1530
aliguori71d07702009-03-03 17:37:16 +00001531static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
1532 size_t size)
1533{
1534 int64_t len;
1535
1536 if (!bdrv_is_inserted(bs))
1537 return -ENOMEDIUM;
1538
1539 if (bs->growable)
1540 return 0;
1541
1542 len = bdrv_getlength(bs);
1543
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02001544 if (offset < 0)
1545 return -EIO;
1546
1547 if ((offset > len) || (len - offset < size))
aliguori71d07702009-03-03 17:37:16 +00001548 return -EIO;
1549
1550 return 0;
1551}
1552
1553static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
1554 int nb_sectors)
1555{
Jes Sorenseneb5a3162010-05-27 16:20:31 +02001556 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
1557 nb_sectors * BDRV_SECTOR_SIZE);
aliguori71d07702009-03-03 17:37:16 +00001558}
1559
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001560typedef struct RwCo {
1561 BlockDriverState *bs;
1562 int64_t sector_num;
1563 int nb_sectors;
1564 QEMUIOVector *qiov;
1565 bool is_write;
1566 int ret;
1567} RwCo;
1568
1569static void coroutine_fn bdrv_rw_co_entry(void *opaque)
1570{
1571 RwCo *rwco = opaque;
1572
1573 if (!rwco->is_write) {
1574 rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001575 rwco->nb_sectors, rwco->qiov, 0);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001576 } else {
1577 rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001578 rwco->nb_sectors, rwco->qiov, 0);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001579 }
1580}
1581
1582/*
1583 * Process a synchronous request using coroutines
1584 */
1585static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
1586 int nb_sectors, bool is_write)
1587{
1588 QEMUIOVector qiov;
1589 struct iovec iov = {
1590 .iov_base = (void *)buf,
1591 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
1592 };
1593 Coroutine *co;
1594 RwCo rwco = {
1595 .bs = bs,
1596 .sector_num = sector_num,
1597 .nb_sectors = nb_sectors,
1598 .qiov = &qiov,
1599 .is_write = is_write,
1600 .ret = NOT_DONE,
1601 };
1602
1603 qemu_iovec_init_external(&qiov, &iov, 1);
1604
Zhi Yong Wu498e3862012-04-02 18:59:34 +08001605 /**
1606 * In a synchronous call context the vcpu is blocked, so this throttling
1607 * timer cannot fire; I/O throttling therefore has to be disabled here
1608 * if it has been enabled.
1609 */
1610 if (bs->io_limits_enabled) {
1611 fprintf(stderr, "Disabling I/O throttling on '%s' due "
1612 "to synchronous I/O.\n", bdrv_get_device_name(bs));
1613 bdrv_io_limits_disable(bs);
1614 }
1615
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001616 if (qemu_in_coroutine()) {
1617 /* Fast-path if already in coroutine context */
1618 bdrv_rw_co_entry(&rwco);
1619 } else {
1620 co = qemu_coroutine_create(bdrv_rw_co_entry);
1621 qemu_coroutine_enter(co, &rwco);
1622 while (rwco.ret == NOT_DONE) {
1623 qemu_aio_wait();
1624 }
1625 }
1626 return rwco.ret;
1627}
1628
bellard19cb3732006-08-19 11:45:59 +00001629/* return < 0 if error. See bdrv_write() for the return codes */
ths5fafdf22007-09-16 21:08:06 +00001630int bdrv_read(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00001631 uint8_t *buf, int nb_sectors)
1632{
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001633 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
bellardfc01f7e2003-06-30 10:03:06 +00001634}
1635
Markus Armbruster07d27a42012-06-29 17:34:29 +02001636/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
1637int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
1638 uint8_t *buf, int nb_sectors)
1639{
1640 bool enabled;
1641 int ret;
1642
1643 enabled = bs->io_limits_enabled;
1644 bs->io_limits_enabled = false;
1645 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
1646 bs->io_limits_enabled = enabled;
1647 return ret;
1648}
1649
Paolo Bonzini71df14f2012-04-12 14:01:04 +02001650#define BITS_PER_LONG (sizeof(unsigned long) * 8)
1651
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02001652static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
Jan Kiszkaa55eb922009-11-30 18:21:19 +01001653 int nb_sectors, int dirty)
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02001654{
1655 int64_t start, end;
Jan Kiszkac6d22832009-11-30 18:21:20 +01001656 unsigned long val, idx, bit;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01001657
Jan Kiszka6ea44302009-11-30 18:21:19 +01001658 start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
Jan Kiszkac6d22832009-11-30 18:21:20 +01001659 end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01001660
1661 for (; start <= end; start++) {
Paolo Bonzini71df14f2012-04-12 14:01:04 +02001662 idx = start / BITS_PER_LONG;
1663 bit = start % BITS_PER_LONG;
Jan Kiszkac6d22832009-11-30 18:21:20 +01001664 val = bs->dirty_bitmap[idx];
1665 if (dirty) {
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02001666 if (!(val & (1UL << bit))) {
Liran Schouraaa0eb72010-01-26 10:31:48 +02001667 bs->dirty_count++;
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02001668 val |= 1UL << bit;
Liran Schouraaa0eb72010-01-26 10:31:48 +02001669 }
Jan Kiszkac6d22832009-11-30 18:21:20 +01001670 } else {
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02001671 if (val & (1UL << bit)) {
Liran Schouraaa0eb72010-01-26 10:31:48 +02001672 bs->dirty_count--;
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02001673 val &= ~(1UL << bit);
Liran Schouraaa0eb72010-01-26 10:31:48 +02001674 }
Jan Kiszkac6d22832009-11-30 18:21:20 +01001675 }
1676 bs->dirty_bitmap[idx] = val;
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02001677 }
1678}
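
/*
 * Illustrative sketch (not part of the original file): the index arithmetic
 * performed by set_dirty_bitmap() for one sector, assuming a 64-bit unsigned
 * long and a BDRV_SECTORS_PER_DIRTY_CHUNK granularity of 2048 sectors (1 MiB);
 * both values are assumptions for the example.  For sector_num = 5000000:
 *
 *   chunk = 5000000 / 2048 = 2441
 *   idx   = 2441 / 64      = 38
 *   bit   = 2441 % 64      = 9
 *
 * so marking the sector dirty sets bit 9 of bs->dirty_bitmap[38] and bumps
 * bs->dirty_count if that bit was previously clear.
 */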
1679
ths5fafdf22007-09-16 21:08:06 +00001680/* Return < 0 if error. Important errors are:
bellard19cb3732006-08-19 11:45:59 +00001681 -EIO generic I/O error (may happen for all errors)
1682 -ENOMEDIUM No media inserted.
1683 -EINVAL Invalid sector number or nb_sectors
1684 -EACCES Trying to write a read-only device
1685*/
ths5fafdf22007-09-16 21:08:06 +00001686int bdrv_write(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00001687 const uint8_t *buf, int nb_sectors)
1688{
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001689 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
bellard83f64092006-08-01 16:21:11 +00001690}
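
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * read-modify-write of one sector with bdrv_read()/bdrv_write(), checking the
 * error codes documented above.  The single-byte patch is hypothetical.
 */
static int example_patch_sector(BlockDriverState *bs, int64_t sector_num,
                                uint8_t new_first_byte)
{
    uint8_t buf[BDRV_SECTOR_SIZE];
    int ret;

    ret = bdrv_read(bs, sector_num, buf, 1);
    if (ret < 0) {
        return ret;              /* -ENOMEDIUM, -EINVAL, -EIO, ... */
    }

    buf[0] = new_first_byte;

    ret = bdrv_write(bs, sector_num, buf, 1);
    if (ret < 0) {
        return ret;              /* -EACCES on a read-only device, etc. */
    }
    return 0;
}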
1691
aliguorieda578e2009-03-12 19:57:16 +00001692int bdrv_pread(BlockDriverState *bs, int64_t offset,
1693 void *buf, int count1)
bellard83f64092006-08-01 16:21:11 +00001694{
Jan Kiszka6ea44302009-11-30 18:21:19 +01001695 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
bellard83f64092006-08-01 16:21:11 +00001696 int len, nb_sectors, count;
1697 int64_t sector_num;
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001698 int ret;
bellard83f64092006-08-01 16:21:11 +00001699
1700 count = count1;
1701 /* first read to align to sector start */
Jan Kiszka6ea44302009-11-30 18:21:19 +01001702 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
bellard83f64092006-08-01 16:21:11 +00001703 if (len > count)
1704 len = count;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001705 sector_num = offset >> BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001706 if (len > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001707 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1708 return ret;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001709 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
bellard83f64092006-08-01 16:21:11 +00001710 count -= len;
1711 if (count == 0)
1712 return count1;
1713 sector_num++;
1714 buf += len;
1715 }
1716
1717 /* read the sectors "in place" */
Jan Kiszka6ea44302009-11-30 18:21:19 +01001718 nb_sectors = count >> BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001719 if (nb_sectors > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001720 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
1721 return ret;
bellard83f64092006-08-01 16:21:11 +00001722 sector_num += nb_sectors;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001723 len = nb_sectors << BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001724 buf += len;
1725 count -= len;
1726 }
1727
1728 /* add data from the last sector */
1729 if (count > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001730 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1731 return ret;
bellard83f64092006-08-01 16:21:11 +00001732 memcpy(buf, tmp_buf, count);
1733 }
1734 return count1;
1735}
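
/*
 * Illustrative sketch (not part of the original file): bdrv_pread() accepts
 * byte-aligned offsets and lengths, bouncing the unaligned head and tail
 * through a sector-sized temporary buffer as shown above.  The 37-byte read
 * at offset 7 is an arbitrary example.
 */
static int example_probe_header(BlockDriverState *bs)
{
    uint8_t header[37];
    int ret;

    ret = bdrv_pread(bs, 7, header, sizeof(header));
    if (ret < 0) {
        return ret;              /* -ENOMEDIUM, -EIO, ... */
    }
    /* on success ret == sizeof(header), i.e. the requested byte count */
    return 0;
}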
1736
aliguorieda578e2009-03-12 19:57:16 +00001737int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
1738 const void *buf, int count1)
bellard83f64092006-08-01 16:21:11 +00001739{
Jan Kiszka6ea44302009-11-30 18:21:19 +01001740 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
bellard83f64092006-08-01 16:21:11 +00001741 int len, nb_sectors, count;
1742 int64_t sector_num;
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001743 int ret;
bellard83f64092006-08-01 16:21:11 +00001744
1745 count = count1;
1746 /* first write to align to sector start */
Jan Kiszka6ea44302009-11-30 18:21:19 +01001747 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
bellard83f64092006-08-01 16:21:11 +00001748 if (len > count)
1749 len = count;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001750 sector_num = offset >> BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001751 if (len > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001752 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1753 return ret;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001754 memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001755 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1756 return ret;
bellard83f64092006-08-01 16:21:11 +00001757 count -= len;
1758 if (count == 0)
1759 return count1;
1760 sector_num++;
1761 buf += len;
1762 }
1763
1764 /* write the sectors "in place" */
Jan Kiszka6ea44302009-11-30 18:21:19 +01001765 nb_sectors = count >> BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001766 if (nb_sectors > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001767 if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
1768 return ret;
bellard83f64092006-08-01 16:21:11 +00001769 sector_num += nb_sectors;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001770 len = nb_sectors << BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001771 buf += len;
1772 count -= len;
1773 }
1774
1775 /* add data from the last sector */
1776 if (count > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001777 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1778 return ret;
bellard83f64092006-08-01 16:21:11 +00001779 memcpy(tmp_buf, buf, count);
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001780 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1781 return ret;
bellard83f64092006-08-01 16:21:11 +00001782 }
1783 return count1;
1784}
bellard83f64092006-08-01 16:21:11 +00001785
Kevin Wolff08145f2010-06-16 16:38:15 +02001786/*
1787 * Writes to the file and ensures that no writes are reordered across this
1788 * request (acts as a barrier)
1789 *
1790 * Returns 0 on success, -errno in error cases.
1791 */
1792int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
1793 const void *buf, int count)
1794{
1795 int ret;
1796
1797 ret = bdrv_pwrite(bs, offset, buf, count);
1798 if (ret < 0) {
1799 return ret;
1800 }
1801
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02001802 /* No flush needed for cache modes that already do it */
1803 if (bs->enable_write_cache) {
Kevin Wolff08145f2010-06-16 16:38:15 +02001804 bdrv_flush(bs);
1805 }
1806
1807 return 0;
1808}
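
/*
 * Illustrative sketch (not part of the original file): updating a small
 * on-disk metadata field with bdrv_pwrite_sync() so the update is stable
 * before any later, dependent write is issued.  The offset and the 8-byte
 * big-endian layout are hypothetical.
 */
static int example_update_metadata(BlockDriverState *bs, uint64_t new_value)
{
    uint64_t be_value = cpu_to_be64(new_value);

    /* returns 0 on success; the flush is skipped for cache modes that
     * already write through (see bs->enable_write_cache) */
    return bdrv_pwrite_sync(bs, 0x38, &be_value, sizeof(be_value));
}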
1809
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001810static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
Stefan Hajnocziab185922011-11-17 13:40:31 +00001811 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1812{
1813 /* Perform I/O through a temporary buffer so that users who scribble over
1814 * their read buffer while the operation is in progress do not end up
1815 * modifying the image file. This is critical for zero-copy guest I/O
1816 * where anything might happen inside guest memory.
1817 */
1818 void *bounce_buffer;
1819
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001820 BlockDriver *drv = bs->drv;
Stefan Hajnocziab185922011-11-17 13:40:31 +00001821 struct iovec iov;
1822 QEMUIOVector bounce_qiov;
1823 int64_t cluster_sector_num;
1824 int cluster_nb_sectors;
1825 size_t skip_bytes;
1826 int ret;
1827
1828 /* Cover the entire cluster so that no additional backing file I/O is
1829 * required when allocating the cluster in the image file.
1830 */
1831 round_to_clusters(bs, sector_num, nb_sectors,
1832 &cluster_sector_num, &cluster_nb_sectors);
1833
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001834 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
1835 cluster_sector_num, cluster_nb_sectors);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001836
1837 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
1838 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
1839 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
1840
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001841 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
1842 &bounce_qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001843 if (ret < 0) {
1844 goto err;
1845 }
1846
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001847 if (drv->bdrv_co_write_zeroes &&
1848 buffer_is_zero(bounce_buffer, iov.iov_len)) {
Kevin Wolf621f0582012-03-20 15:12:58 +01001849 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
1850 cluster_nb_sectors);
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001851 } else {
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02001852 /* This does not change the data on the disk, so it is not necessary
1853 * to flush even in cache=writethrough mode.
1854 */
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001855 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
Stefan Hajnocziab185922011-11-17 13:40:31 +00001856 &bounce_qiov);
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001857 }
1858
Stefan Hajnocziab185922011-11-17 13:40:31 +00001859 if (ret < 0) {
1860 /* It might be okay to ignore write errors for guest requests. If this
1861 * is a deliberate copy-on-read then we don't want to ignore the error.
1862 * Simply report it in all cases.
1863 */
1864 goto err;
1865 }
1866
1867 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
Michael Tokarev03396142012-06-07 20:17:55 +04001868 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
1869 nb_sectors * BDRV_SECTOR_SIZE);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001870
1871err:
1872 qemu_vfree(bounce_buffer);
1873 return ret;
1874}
1875
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001876/*
1877 * Handle a read request in coroutine context
1878 */
1879static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001880 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1881 BdrvRequestFlags flags)
Kevin Wolfda1fa912011-07-14 17:27:13 +02001882{
1883 BlockDriver *drv = bs->drv;
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001884 BdrvTrackedRequest req;
1885 int ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02001886
Kevin Wolfda1fa912011-07-14 17:27:13 +02001887 if (!drv) {
1888 return -ENOMEDIUM;
1889 }
1890 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
1891 return -EIO;
1892 }
1893
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08001894 /* throttling disk read I/O */
1895 if (bs->io_limits_enabled) {
1896 bdrv_io_limits_intercept(bs, false, nb_sectors);
1897 }
1898
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001899 if (bs->copy_on_read) {
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001900 flags |= BDRV_REQ_COPY_ON_READ;
1901 }
1902 if (flags & BDRV_REQ_COPY_ON_READ) {
1903 bs->copy_on_read_in_flight++;
1904 }
1905
1906 if (bs->copy_on_read_in_flight) {
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001907 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
1908 }
1909
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001910 tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001911
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001912 if (flags & BDRV_REQ_COPY_ON_READ) {
Stefan Hajnocziab185922011-11-17 13:40:31 +00001913 int pnum;
1914
1915 ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
1916 if (ret < 0) {
1917 goto out;
1918 }
1919
1920 if (!ret || pnum != nb_sectors) {
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001921 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001922 goto out;
1923 }
1924 }
1925
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001926 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001927
1928out:
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001929 tracked_request_end(&req);
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001930
1931 if (flags & BDRV_REQ_COPY_ON_READ) {
1932 bs->copy_on_read_in_flight--;
1933 }
1934
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001935 return ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02001936}
1937
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001938int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
Kevin Wolfda1fa912011-07-14 17:27:13 +02001939 int nb_sectors, QEMUIOVector *qiov)
1940{
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001941 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
Kevin Wolfda1fa912011-07-14 17:27:13 +02001942
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001943 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
1944}
1945
1946int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
1947 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1948{
1949 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
1950
1951 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1952 BDRV_REQ_COPY_ON_READ);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001953}
1954
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001955static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
1956 int64_t sector_num, int nb_sectors)
1957{
1958 BlockDriver *drv = bs->drv;
1959 QEMUIOVector qiov;
1960 struct iovec iov;
1961 int ret;
1962
Kevin Wolf621f0582012-03-20 15:12:58 +01001963 /* TODO Emulate only part of misaligned requests instead of letting block
1964 * drivers return -ENOTSUP and emulate everything */
1965
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001966 /* First try the efficient write zeroes operation */
1967 if (drv->bdrv_co_write_zeroes) {
Kevin Wolf621f0582012-03-20 15:12:58 +01001968 ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
1969 if (ret != -ENOTSUP) {
1970 return ret;
1971 }
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001972 }
1973
1974 /* Fall back to bounce buffer if write zeroes is unsupported */
1975 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
1976 iov.iov_base = qemu_blockalign(bs, iov.iov_len);
1977 memset(iov.iov_base, 0, iov.iov_len);
1978 qemu_iovec_init_external(&qiov, &iov, 1);
1979
1980 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
1981
1982 qemu_vfree(iov.iov_base);
1983 return ret;
1984}
1985
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001986/*
1987 * Handle a write request in coroutine context
1988 */
1989static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001990 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1991 BdrvRequestFlags flags)
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001992{
1993 BlockDriver *drv = bs->drv;
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001994 BdrvTrackedRequest req;
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01001995 int ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02001996
1997 if (!bs->drv) {
1998 return -ENOMEDIUM;
1999 }
2000 if (bs->read_only) {
2001 return -EACCES;
2002 }
2003 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
2004 return -EIO;
2005 }
2006
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08002007 /* throttling disk write I/O */
2008 if (bs->io_limits_enabled) {
2009 bdrv_io_limits_intercept(bs, true, nb_sectors);
2010 }
2011
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00002012 if (bs->copy_on_read_in_flight) {
Stefan Hajnoczif4658282011-11-17 13:40:29 +00002013 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
2014 }
2015
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002016 tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
2017
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00002018 if (flags & BDRV_REQ_ZERO_WRITE) {
2019 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
2020 } else {
2021 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
2022 }
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01002023
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02002024 if (ret == 0 && !bs->enable_write_cache) {
2025 ret = bdrv_co_flush(bs);
2026 }
2027
Kevin Wolfda1fa912011-07-14 17:27:13 +02002028 if (bs->dirty_bitmap) {
2029 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
2030 }
2031
2032 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
2033 bs->wr_highest_sector = sector_num + nb_sectors - 1;
2034 }
2035
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00002036 tracked_request_end(&req);
2037
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01002038 return ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02002039}
2040
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01002041int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
2042 int nb_sectors, QEMUIOVector *qiov)
2043{
2044 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
2045
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00002046 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
2047}
2048
2049int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
2050 int64_t sector_num, int nb_sectors)
2051{
2052 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
2053
2054 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
2055 BDRV_REQ_ZERO_WRITE);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01002056}
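
/*
 * Illustrative sketch (not part of the original file): zeroing a range from
 * coroutine context.  bdrv_co_write_zeroes() goes through the same write path
 * as bdrv_co_writev(), so throttling, request tracking and dirty bitmap
 * updates still apply; the 1 MiB range below is arbitrary.
 */
static int coroutine_fn example_zero_first_mb(BlockDriverState *bs)
{
    /* zero 2048 sectors (1 MiB) starting at sector 0 */
    return bdrv_co_write_zeroes(bs, 0, 2048);
}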
2057
bellard83f64092006-08-01 16:21:11 +00002058/**
bellard83f64092006-08-01 16:21:11 +00002059 * Truncate file to 'offset' bytes (needed only for file protocols)
2060 */
2061int bdrv_truncate(BlockDriverState *bs, int64_t offset)
2062{
2063 BlockDriver *drv = bs->drv;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01002064 int ret;
bellard83f64092006-08-01 16:21:11 +00002065 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002066 return -ENOMEDIUM;
bellard83f64092006-08-01 16:21:11 +00002067 if (!drv->bdrv_truncate)
2068 return -ENOTSUP;
Naphtali Sprei59f26892009-10-26 16:25:16 +02002069 if (bs->read_only)
2070 return -EACCES;
Marcelo Tosatti85916752011-01-26 12:12:35 -02002071 if (bdrv_in_use(bs))
2072 return -EBUSY;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01002073 ret = drv->bdrv_truncate(bs, offset);
2074 if (ret == 0) {
2075 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
Markus Armbruster145feb12011-08-03 15:07:42 +02002076 bdrv_dev_resize_cb(bs);
Stefan Hajnoczi51762282010-04-19 16:56:41 +01002077 }
2078 return ret;
bellard83f64092006-08-01 16:21:11 +00002079}
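
/*
 * Illustrative sketch (not part of the original file): growing an image with
 * bdrv_truncate().  The 1 GiB target size is arbitrary; the call fails with
 * -EACCES on read-only devices and -EBUSY while the device is in use.
 */
static int example_grow_image(BlockDriverState *bs)
{
    return bdrv_truncate(bs, 1024LL * 1024 * 1024);
}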
2080
2081/**
Fam Zheng4a1d5e12011-07-12 19:56:39 +08002082 * Length of an allocated file in bytes. Sparse files are counted by actual
2083 * allocated space. Return < 0 if error or unknown.
2084 */
2085int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
2086{
2087 BlockDriver *drv = bs->drv;
2088 if (!drv) {
2089 return -ENOMEDIUM;
2090 }
2091 if (drv->bdrv_get_allocated_file_size) {
2092 return drv->bdrv_get_allocated_file_size(bs);
2093 }
2094 if (bs->file) {
2095 return bdrv_get_allocated_file_size(bs->file);
2096 }
2097 return -ENOTSUP;
2098}
2099
2100/**
bellard83f64092006-08-01 16:21:11 +00002101 * Length of a file in bytes. Return < 0 if error or unknown.
2102 */
2103int64_t bdrv_getlength(BlockDriverState *bs)
2104{
2105 BlockDriver *drv = bs->drv;
2106 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002107 return -ENOMEDIUM;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01002108
Markus Armbruster2c6942f2011-09-06 18:58:51 +02002109 if (bs->growable || bdrv_dev_has_removable_media(bs)) {
Stefan Hajnoczi46a4e4e2011-03-29 20:04:41 +01002110 if (drv->bdrv_getlength) {
2111 return drv->bdrv_getlength(bs);
2112 }
bellard83f64092006-08-01 16:21:11 +00002113 }
Stefan Hajnoczi46a4e4e2011-03-29 20:04:41 +01002114 return bs->total_sectors * BDRV_SECTOR_SIZE;
bellardfc01f7e2003-06-30 10:03:06 +00002115}
2116
bellard19cb3732006-08-19 11:45:59 +00002117/* Return 0 as the number of sectors if no device is present or on error */
ths96b8f132007-12-17 01:35:20 +00002118void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
bellardfc01f7e2003-06-30 10:03:06 +00002119{
bellard19cb3732006-08-19 11:45:59 +00002120 int64_t length;
2121 length = bdrv_getlength(bs);
2122 if (length < 0)
2123 length = 0;
2124 else
Jan Kiszka6ea44302009-11-30 18:21:19 +01002125 length = length >> BDRV_SECTOR_BITS;
bellard19cb3732006-08-19 11:45:59 +00002126 *nb_sectors_ptr = length;
bellardfc01f7e2003-06-30 10:03:06 +00002127}
bellardcf989512004-02-16 21:56:36 +00002128
Zhi Yong Wu0563e192011-11-03 16:57:25 +08002129/* throttling disk io limits */
2130void bdrv_set_io_limits(BlockDriverState *bs,
2131 BlockIOLimit *io_limits)
2132{
2133 bs->io_limits = *io_limits;
2134 bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
2135}
2136
Markus Armbrusterabd7f682010-06-02 18:55:17 +02002137void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
2138 BlockErrorAction on_write_error)
2139{
2140 bs->on_read_error = on_read_error;
2141 bs->on_write_error = on_write_error;
2142}
2143
2144BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
2145{
2146 return is_read ? bs->on_read_error : bs->on_write_error;
2147}
2148
bellardb3380822004-03-14 21:38:54 +00002149int bdrv_is_read_only(BlockDriverState *bs)
2150{
2151 return bs->read_only;
2152}
2153
ths985a03b2007-12-24 16:10:43 +00002154int bdrv_is_sg(BlockDriverState *bs)
2155{
2156 return bs->sg;
2157}
2158
Christoph Hellwige900a7b2009-09-04 19:01:15 +02002159int bdrv_enable_write_cache(BlockDriverState *bs)
2160{
2161 return bs->enable_write_cache;
2162}
2163
Paolo Bonzini425b0142012-06-06 00:04:52 +02002164void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
2165{
2166 bs->enable_write_cache = wce;
2167}
2168
bellardea2384d2004-08-01 21:59:26 +00002169int bdrv_is_encrypted(BlockDriverState *bs)
2170{
2171 if (bs->backing_hd && bs->backing_hd->encrypted)
2172 return 1;
2173 return bs->encrypted;
2174}
2175
aliguoric0f4ce72009-03-05 23:01:01 +00002176int bdrv_key_required(BlockDriverState *bs)
2177{
2178 BlockDriverState *backing_hd = bs->backing_hd;
2179
2180 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
2181 return 1;
2182 return (bs->encrypted && !bs->valid_key);
2183}
2184
bellardea2384d2004-08-01 21:59:26 +00002185int bdrv_set_key(BlockDriverState *bs, const char *key)
2186{
2187 int ret;
2188 if (bs->backing_hd && bs->backing_hd->encrypted) {
2189 ret = bdrv_set_key(bs->backing_hd, key);
2190 if (ret < 0)
2191 return ret;
2192 if (!bs->encrypted)
2193 return 0;
2194 }
Shahar Havivifd04a2a2010-03-06 00:26:13 +02002195 if (!bs->encrypted) {
2196 return -EINVAL;
2197 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
2198 return -ENOMEDIUM;
2199 }
aliguoric0f4ce72009-03-05 23:01:01 +00002200 ret = bs->drv->bdrv_set_key(bs, key);
aliguoribb5fc202009-03-05 23:01:15 +00002201 if (ret < 0) {
2202 bs->valid_key = 0;
2203 } else if (!bs->valid_key) {
2204 bs->valid_key = 1;
2205 /* call the change callback now, we skipped it on open */
Markus Armbruster7d4b4ba2011-09-06 18:58:59 +02002206 bdrv_dev_change_media_cb(bs, true);
aliguoribb5fc202009-03-05 23:01:15 +00002207 }
aliguoric0f4ce72009-03-05 23:01:01 +00002208 return ret;
bellardea2384d2004-08-01 21:59:26 +00002209}
2210
Markus Armbrusterf8d6bba2012-06-13 10:11:48 +02002211const char *bdrv_get_format_name(BlockDriverState *bs)
bellardea2384d2004-08-01 21:59:26 +00002212{
Markus Armbrusterf8d6bba2012-06-13 10:11:48 +02002213 return bs->drv ? bs->drv->format_name : NULL;
bellardea2384d2004-08-01 21:59:26 +00002214}
2215
ths5fafdf22007-09-16 21:08:06 +00002216void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
bellardea2384d2004-08-01 21:59:26 +00002217 void *opaque)
2218{
2219 BlockDriver *drv;
2220
Stefan Hajnoczi8a22f022010-04-13 10:29:33 +01002221 QLIST_FOREACH(drv, &bdrv_drivers, list) {
bellardea2384d2004-08-01 21:59:26 +00002222 it(opaque, drv->format_name);
2223 }
2224}
2225
bellardb3380822004-03-14 21:38:54 +00002226BlockDriverState *bdrv_find(const char *name)
2227{
2228 BlockDriverState *bs;
2229
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002230 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2231 if (!strcmp(name, bs->device_name)) {
bellardb3380822004-03-14 21:38:54 +00002232 return bs;
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002233 }
bellardb3380822004-03-14 21:38:54 +00002234 }
2235 return NULL;
2236}
2237
Markus Armbruster2f399b02010-06-02 18:55:20 +02002238BlockDriverState *bdrv_next(BlockDriverState *bs)
2239{
2240 if (!bs) {
2241 return QTAILQ_FIRST(&bdrv_states);
2242 }
2243 return QTAILQ_NEXT(bs, list);
2244}
2245
aliguori51de9762009-03-05 23:00:43 +00002246void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
bellard81d09122004-07-14 17:21:37 +00002247{
2248 BlockDriverState *bs;
2249
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002250 QTAILQ_FOREACH(bs, &bdrv_states, list) {
aliguori51de9762009-03-05 23:00:43 +00002251 it(opaque, bs);
bellard81d09122004-07-14 17:21:37 +00002252 }
2253}
2254
bellardea2384d2004-08-01 21:59:26 +00002255const char *bdrv_get_device_name(BlockDriverState *bs)
2256{
2257 return bs->device_name;
2258}
2259
Markus Armbrusterc8433282012-06-05 16:49:24 +02002260int bdrv_get_flags(BlockDriverState *bs)
2261{
2262 return bs->open_flags;
2263}
2264
aliguoric6ca28d2008-10-06 13:55:43 +00002265void bdrv_flush_all(void)
2266{
2267 BlockDriverState *bs;
2268
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002269 QTAILQ_FOREACH(bs, &bdrv_states, list) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01002270 bdrv_flush(bs);
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002271 }
aliguoric6ca28d2008-10-06 13:55:43 +00002272}
2273
Kevin Wolff2feebb2010-04-14 17:30:35 +02002274int bdrv_has_zero_init(BlockDriverState *bs)
2275{
2276 assert(bs->drv);
2277
Kevin Wolf336c1c12010-07-28 11:26:29 +02002278 if (bs->drv->bdrv_has_zero_init) {
2279 return bs->drv->bdrv_has_zero_init(bs);
Kevin Wolff2feebb2010-04-14 17:30:35 +02002280 }
2281
2282 return 1;
2283}
2284
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00002285typedef struct BdrvCoIsAllocatedData {
2286 BlockDriverState *bs;
2287 int64_t sector_num;
2288 int nb_sectors;
2289 int *pnum;
2290 int ret;
2291 bool done;
2292} BdrvCoIsAllocatedData;
2293
thsf58c7b32008-06-05 21:53:49 +00002294/*
2295 * Returns true iff the specified sector is present in the disk image. Drivers
2296 * not implementing the functionality are assumed to not support backing files,
2297 * hence all their sectors are reported as allocated.
2298 *
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00002299 * If 'sector_num' is beyond the end of the disk image the return value is 0
2300 * and 'pnum' is set to 0.
2301 *
thsf58c7b32008-06-05 21:53:49 +00002302 * 'pnum' is set to the number of sectors (including and immediately following
2303 * the specified sector) that are known to be in the same
2304 * allocated/unallocated state.
2305 *
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00002306 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
2307 * beyond the end of the disk image it will be clamped.
thsf58c7b32008-06-05 21:53:49 +00002308 */
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00002309int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
2310 int nb_sectors, int *pnum)
thsf58c7b32008-06-05 21:53:49 +00002311{
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00002312 int64_t n;
2313
2314 if (sector_num >= bs->total_sectors) {
2315 *pnum = 0;
2316 return 0;
2317 }
2318
2319 n = bs->total_sectors - sector_num;
2320 if (n < nb_sectors) {
2321 nb_sectors = n;
2322 }
2323
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00002324 if (!bs->drv->bdrv_co_is_allocated) {
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00002325 *pnum = nb_sectors;
thsf58c7b32008-06-05 21:53:49 +00002326 return 1;
2327 }
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00002328
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00002329 return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
2330}
2331
2332/* Coroutine wrapper for bdrv_is_allocated() */
2333static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
2334{
2335 BdrvCoIsAllocatedData *data = opaque;
2336 BlockDriverState *bs = data->bs;
2337
2338 data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
2339 data->pnum);
2340 data->done = true;
2341}
2342
2343/*
2344 * Synchronous wrapper around bdrv_co_is_allocated().
2345 *
2346 * See bdrv_co_is_allocated() for details.
2347 */
2348int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
2349 int *pnum)
2350{
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00002351 Coroutine *co;
2352 BdrvCoIsAllocatedData data = {
2353 .bs = bs,
2354 .sector_num = sector_num,
2355 .nb_sectors = nb_sectors,
2356 .pnum = pnum,
2357 .done = false,
2358 };
2359
2360 co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
2361 qemu_coroutine_enter(co, &data);
2362 while (!data.done) {
2363 qemu_aio_wait();
2364 }
2365 return data.ret;
thsf58c7b32008-06-05 21:53:49 +00002366}
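
/*
 * Illustrative sketch (not part of the original file): walking an image and
 * counting allocated sectors with the synchronous bdrv_is_allocated()
 * wrapper, in the same style as the bdrv_commit() loop above.
 */
static int64_t example_count_allocated_sectors(BlockDriverState *bs)
{
    int64_t total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    int64_t sector, allocated = 0;
    int n;

    for (sector = 0; sector < total_sectors; sector += n) {
        /* clamp the query so it fits the int nb_sectors parameter */
        int nb = MIN(total_sectors - sector, 1 << 20);
        int ret = bdrv_is_allocated(bs, sector, nb, &n);

        if (ret < 0) {
            return ret;          /* propagate driver errors */
        }
        if (ret) {
            allocated += n;
        }
        if (n == 0) {
            break;               /* defensive: do not loop forever */
        }
    }
    return allocated;
}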
2367
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02002368/*
2369 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2370 *
2371 * Return true if the given sector is allocated in any image between
2372 * BASE and TOP (inclusive). BASE can be NULL to check if the given
2373 * sector is allocated in any image of the chain. Return false otherwise.
2374 *
2375 * 'pnum' is set to the number of sectors (including and immediately following
2376 * the specified sector) that are known to be in the same
2377 * allocated/unallocated state.
2378 *
2379 */
2380int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
2381 BlockDriverState *base,
2382 int64_t sector_num,
2383 int nb_sectors, int *pnum)
2384{
2385 BlockDriverState *intermediate;
2386 int ret, n = nb_sectors;
2387
2388 intermediate = top;
2389 while (intermediate && intermediate != base) {
2390 int pnum_inter;
2391 ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
2392 &pnum_inter);
2393 if (ret < 0) {
2394 return ret;
2395 } else if (ret) {
2396 *pnum = pnum_inter;
2397 return 1;
2398 }
2399
2400 /*
2401 * [sector_num, nb_sectors] is unallocated on top but intermediate
2402 * might have
2403 *
2404 * [sector_num+x, nb_sectors] allocated.
2405 */
2406 if (n > pnum_inter) {
2407 n = pnum_inter;
2408 }
2409
2410 intermediate = intermediate->backing_hd;
2411 }
2412
2413 *pnum = n;
2414 return 0;
2415}
2416
Luiz Capitulinob2023812011-09-21 17:16:47 -03002417BlockInfoList *qmp_query_block(Error **errp)
bellardb3380822004-03-14 21:38:54 +00002418{
Luiz Capitulinob2023812011-09-21 17:16:47 -03002419 BlockInfoList *head = NULL, *cur_item = NULL;
bellardb3380822004-03-14 21:38:54 +00002420 BlockDriverState *bs;
2421
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002422 QTAILQ_FOREACH(bs, &bdrv_states, list) {
Luiz Capitulinob2023812011-09-21 17:16:47 -03002423 BlockInfoList *info = g_malloc0(sizeof(*info));
Luiz Capitulinod15e5462009-12-10 17:16:06 -02002424
Luiz Capitulinob2023812011-09-21 17:16:47 -03002425 info->value = g_malloc0(sizeof(*info->value));
2426 info->value->device = g_strdup(bs->device_name);
2427 info->value->type = g_strdup("unknown");
2428 info->value->locked = bdrv_dev_is_medium_locked(bs);
2429 info->value->removable = bdrv_dev_has_removable_media(bs);
Luiz Capitulinod15e5462009-12-10 17:16:06 -02002430
Markus Armbrustere4def802011-09-06 18:58:53 +02002431 if (bdrv_dev_has_removable_media(bs)) {
Luiz Capitulinob2023812011-09-21 17:16:47 -03002432 info->value->has_tray_open = true;
2433 info->value->tray_open = bdrv_dev_is_tray_open(bs);
Markus Armbrustere4def802011-09-06 18:58:53 +02002434 }
Luiz Capitulinof04ef602011-09-26 17:43:54 -03002435
2436 if (bdrv_iostatus_is_enabled(bs)) {
Luiz Capitulinob2023812011-09-21 17:16:47 -03002437 info->value->has_io_status = true;
2438 info->value->io_status = bs->iostatus;
Luiz Capitulinof04ef602011-09-26 17:43:54 -03002439 }
2440
bellard19cb3732006-08-19 11:45:59 +00002441 if (bs->drv) {
Luiz Capitulinob2023812011-09-21 17:16:47 -03002442 info->value->has_inserted = true;
2443 info->value->inserted = g_malloc0(sizeof(*info->value->inserted));
2444 info->value->inserted->file = g_strdup(bs->filename);
2445 info->value->inserted->ro = bs->read_only;
2446 info->value->inserted->drv = g_strdup(bs->drv->format_name);
2447 info->value->inserted->encrypted = bs->encrypted;
2448 if (bs->backing_file[0]) {
2449 info->value->inserted->has_backing_file = true;
2450 info->value->inserted->backing_file = g_strdup(bs->backing_file);
aliguori376253e2009-03-05 23:01:23 +00002451 }
Zhi Yong Wu727f0052011-11-08 13:00:31 +08002452
2453 if (bs->io_limits_enabled) {
2454 info->value->inserted->bps =
2455 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2456 info->value->inserted->bps_rd =
2457 bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
2458 info->value->inserted->bps_wr =
2459 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
2460 info->value->inserted->iops =
2461 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
2462 info->value->inserted->iops_rd =
2463 bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
2464 info->value->inserted->iops_wr =
2465 bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
2466 }
bellardb3380822004-03-14 21:38:54 +00002467 }
Luiz Capitulinob2023812011-09-21 17:16:47 -03002468
2469 /* XXX: waiting for the qapi to support GSList */
2470 if (!cur_item) {
2471 head = cur_item = info;
2472 } else {
2473 cur_item->next = info;
2474 cur_item = info;
2475 }
bellardb3380822004-03-14 21:38:54 +00002476 }
Luiz Capitulinod15e5462009-12-10 17:16:06 -02002477
Luiz Capitulinob2023812011-09-21 17:16:47 -03002478 return head;
bellardb3380822004-03-14 21:38:54 +00002479}
thsa36e69d2007-12-02 05:18:19 +00002480
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002481/* Consider exposing this as a full-fledged QMP command */
2482static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp)
thsa36e69d2007-12-02 05:18:19 +00002483{
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002484 BlockStats *s;
Luiz Capitulino218a5362009-12-10 17:16:07 -02002485
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002486 s = g_malloc0(sizeof(*s));
Luiz Capitulino218a5362009-12-10 17:16:07 -02002487
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002488 if (bs->device_name[0]) {
2489 s->has_device = true;
2490 s->device = g_strdup(bs->device_name);
Kevin Wolf294cc352010-04-28 14:34:01 +02002491 }
2492
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002493 s->stats = g_malloc0(sizeof(*s->stats));
2494 s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
2495 s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
2496 s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
2497 s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
2498 s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
2499 s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
2500 s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
2501 s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
2502 s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];
2503
Kevin Wolf294cc352010-04-28 14:34:01 +02002504 if (bs->file) {
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002505 s->has_parent = true;
2506 s->parent = qmp_query_blockstat(bs->file, NULL);
Kevin Wolf294cc352010-04-28 14:34:01 +02002507 }
2508
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002509 return s;
Kevin Wolf294cc352010-04-28 14:34:01 +02002510}
2511
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002512BlockStatsList *qmp_query_blockstats(Error **errp)
Luiz Capitulino218a5362009-12-10 17:16:07 -02002513{
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002514 BlockStatsList *head = NULL, *cur_item = NULL;
thsa36e69d2007-12-02 05:18:19 +00002515 BlockDriverState *bs;
2516
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002517 QTAILQ_FOREACH(bs, &bdrv_states, list) {
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002518 BlockStatsList *info = g_malloc0(sizeof(*info));
2519 info->value = qmp_query_blockstat(bs, NULL);
2520
2521 /* XXX: waiting for the qapi to support GSList */
2522 if (!cur_item) {
2523 head = cur_item = info;
2524 } else {
2525 cur_item->next = info;
2526 cur_item = info;
2527 }
thsa36e69d2007-12-02 05:18:19 +00002528 }
Luiz Capitulino218a5362009-12-10 17:16:07 -02002529
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002530 return head;
thsa36e69d2007-12-02 05:18:19 +00002531}
bellardea2384d2004-08-01 21:59:26 +00002532
aliguori045df332009-03-05 23:00:48 +00002533const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
2534{
2535 if (bs->backing_hd && bs->backing_hd->encrypted)
2536 return bs->backing_file;
2537 else if (bs->encrypted)
2538 return bs->filename;
2539 else
2540 return NULL;
2541}
2542
ths5fafdf22007-09-16 21:08:06 +00002543void bdrv_get_backing_filename(BlockDriverState *bs,
bellard83f64092006-08-01 16:21:11 +00002544 char *filename, int filename_size)
bellardea2384d2004-08-01 21:59:26 +00002545{
Kevin Wolf3574c602011-10-26 11:02:11 +02002546 pstrcpy(filename, filename_size, bs->backing_file);
bellardea2384d2004-08-01 21:59:26 +00002547}
2548
ths5fafdf22007-09-16 21:08:06 +00002549int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
bellardfaea38e2006-08-05 21:31:00 +00002550 const uint8_t *buf, int nb_sectors)
2551{
2552 BlockDriver *drv = bs->drv;
2553 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002554 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00002555 if (!drv->bdrv_write_compressed)
2556 return -ENOTSUP;
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02002557 if (bdrv_check_request(bs, sector_num, nb_sectors))
2558 return -EIO;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01002559
Jan Kiszkac6d22832009-11-30 18:21:20 +01002560 if (bs->dirty_bitmap) {
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02002561 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
2562 }
Jan Kiszkaa55eb922009-11-30 18:21:19 +01002563
bellardfaea38e2006-08-05 21:31:00 +00002564 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
2565}
ths3b46e622007-09-17 08:09:54 +00002566
bellardfaea38e2006-08-05 21:31:00 +00002567int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
2568{
2569 BlockDriver *drv = bs->drv;
2570 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002571 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00002572 if (!drv->bdrv_get_info)
2573 return -ENOTSUP;
2574 memset(bdi, 0, sizeof(*bdi));
2575 return drv->bdrv_get_info(bs, bdi);
2576}
2577
Christoph Hellwig45566e92009-07-10 23:11:57 +02002578int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2579 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00002580{
2581 BlockDriver *drv = bs->drv;
2582 if (!drv)
2583 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002584 if (drv->bdrv_save_vmstate)
2585 return drv->bdrv_save_vmstate(bs, buf, pos, size);
2586 if (bs->file)
2587 return bdrv_save_vmstate(bs->file, buf, pos, size);
2588 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00002589}
2590
Christoph Hellwig45566e92009-07-10 23:11:57 +02002591int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2592 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00002593{
2594 BlockDriver *drv = bs->drv;
2595 if (!drv)
2596 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002597 if (drv->bdrv_load_vmstate)
2598 return drv->bdrv_load_vmstate(bs, buf, pos, size);
2599 if (bs->file)
2600 return bdrv_load_vmstate(bs->file, buf, pos, size);
2601 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00002602}
2603
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01002604void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
2605{
2606 BlockDriver *drv = bs->drv;
2607
2608 if (!drv || !drv->bdrv_debug_event) {
2609 return;
2610 }
2611
Blue Swirl0ed8b6f2012-07-08 06:56:53 +00002612 drv->bdrv_debug_event(bs, event);
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01002613
2614}
2615
bellardfaea38e2006-08-05 21:31:00 +00002616/**************************************************************/
2617/* handling of snapshots */
2618
Miguel Di Ciurcio Filhofeeee5a2010-06-08 10:40:55 -03002619int bdrv_can_snapshot(BlockDriverState *bs)
2620{
2621 BlockDriver *drv = bs->drv;
Markus Armbruster07b70bf2011-08-03 15:08:11 +02002622 if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
Miguel Di Ciurcio Filhofeeee5a2010-06-08 10:40:55 -03002623 return 0;
2624 }
2625
2626 if (!drv->bdrv_snapshot_create) {
2627 if (bs->file != NULL) {
2628 return bdrv_can_snapshot(bs->file);
2629 }
2630 return 0;
2631 }
2632
2633 return 1;
2634}
2635
Blue Swirl199630b2010-07-25 20:49:34 +00002636int bdrv_is_snapshot(BlockDriverState *bs)
2637{
2638 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
2639}
2640
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002641BlockDriverState *bdrv_snapshots(void)
2642{
2643 BlockDriverState *bs;
2644
Markus Armbruster3ac906f2010-07-01 09:30:38 +02002645 if (bs_snapshots) {
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002646 return bs_snapshots;
Markus Armbruster3ac906f2010-07-01 09:30:38 +02002647 }
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002648
2649 bs = NULL;
2650 while ((bs = bdrv_next(bs))) {
2651 if (bdrv_can_snapshot(bs)) {
Markus Armbruster3ac906f2010-07-01 09:30:38 +02002652 bs_snapshots = bs;
2653 return bs;
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002654 }
2655 }
2656 return NULL;
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002657}
2658
ths5fafdf22007-09-16 21:08:06 +00002659int bdrv_snapshot_create(BlockDriverState *bs,
bellardfaea38e2006-08-05 21:31:00 +00002660 QEMUSnapshotInfo *sn_info)
2661{
2662 BlockDriver *drv = bs->drv;
2663 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002664 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002665 if (drv->bdrv_snapshot_create)
2666 return drv->bdrv_snapshot_create(bs, sn_info);
2667 if (bs->file)
2668 return bdrv_snapshot_create(bs->file, sn_info);
2669 return -ENOTSUP;
bellardfaea38e2006-08-05 21:31:00 +00002670}
2671
ths5fafdf22007-09-16 21:08:06 +00002672int bdrv_snapshot_goto(BlockDriverState *bs,
bellardfaea38e2006-08-05 21:31:00 +00002673 const char *snapshot_id)
2674{
2675 BlockDriver *drv = bs->drv;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002676 int ret, open_ret;
2677
bellardfaea38e2006-08-05 21:31:00 +00002678 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002679 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002680 if (drv->bdrv_snapshot_goto)
2681 return drv->bdrv_snapshot_goto(bs, snapshot_id);
2682
2683 if (bs->file) {
2684 drv->bdrv_close(bs);
2685 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
2686 open_ret = drv->bdrv_open(bs, bs->open_flags);
2687 if (open_ret < 0) {
2688 bdrv_delete(bs->file);
2689 bs->drv = NULL;
2690 return open_ret;
2691 }
2692 return ret;
2693 }
2694
2695 return -ENOTSUP;
bellardfaea38e2006-08-05 21:31:00 +00002696}
2697
2698int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2699{
2700 BlockDriver *drv = bs->drv;
2701 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002702 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002703 if (drv->bdrv_snapshot_delete)
2704 return drv->bdrv_snapshot_delete(bs, snapshot_id);
2705 if (bs->file)
2706 return bdrv_snapshot_delete(bs->file, snapshot_id);
2707 return -ENOTSUP;
bellardfaea38e2006-08-05 21:31:00 +00002708}
2709
ths5fafdf22007-09-16 21:08:06 +00002710int bdrv_snapshot_list(BlockDriverState *bs,
bellardfaea38e2006-08-05 21:31:00 +00002711 QEMUSnapshotInfo **psn_info)
2712{
2713 BlockDriver *drv = bs->drv;
2714 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002715 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002716 if (drv->bdrv_snapshot_list)
2717 return drv->bdrv_snapshot_list(bs, psn_info);
2718 if (bs->file)
2719 return bdrv_snapshot_list(bs->file, psn_info);
2720 return -ENOTSUP;
bellardfaea38e2006-08-05 21:31:00 +00002721}
2722
edison51ef6722010-09-21 19:58:41 -07002723int bdrv_snapshot_load_tmp(BlockDriverState *bs,
2724 const char *snapshot_name)
2725{
2726 BlockDriver *drv = bs->drv;
2727 if (!drv) {
2728 return -ENOMEDIUM;
2729 }
2730 if (!bs->read_only) {
2731 return -EINVAL;
2732 }
2733 if (drv->bdrv_snapshot_load_tmp) {
2734 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
2735 }
2736 return -ENOTSUP;
2737}
2738
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00002739BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
2740 const char *backing_file)
2741{
2742 if (!bs->drv) {
2743 return NULL;
2744 }
2745
2746 if (bs->backing_hd) {
2747 if (strcmp(bs->backing_file, backing_file) == 0) {
2748 return bs->backing_hd;
2749 } else {
2750 return bdrv_find_backing_image(bs->backing_hd, backing_file);
2751 }
2752 }
2753
2754 return NULL;
2755}
2756
Benoît Canetf198fd12012-08-02 10:22:47 +02002757int bdrv_get_backing_file_depth(BlockDriverState *bs)
2758{
2759 if (!bs->drv) {
2760 return 0;
2761 }
2762
2763 if (!bs->backing_hd) {
2764 return 0;
2765 }
2766
2767 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
2768}
2769
bellardfaea38e2006-08-05 21:31:00 +00002770#define NB_SUFFIXES 4
2771
2772char *get_human_readable_size(char *buf, int buf_size, int64_t size)
2773{
2774 static const char suffixes[NB_SUFFIXES] = "KMGT";
2775 int64_t base;
2776 int i;
2777
2778 if (size <= 999) {
2779 snprintf(buf, buf_size, "%" PRId64, size);
2780 } else {
2781 base = 1024;
2782 for(i = 0; i < NB_SUFFIXES; i++) {
2783 if (size < (10 * base)) {
ths5fafdf22007-09-16 21:08:06 +00002784 snprintf(buf, buf_size, "%0.1f%c",
bellardfaea38e2006-08-05 21:31:00 +00002785 (double)size / base,
2786 suffixes[i]);
2787 break;
2788 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
ths5fafdf22007-09-16 21:08:06 +00002789 snprintf(buf, buf_size, "%" PRId64 "%c",
bellardfaea38e2006-08-05 21:31:00 +00002790 ((size + (base >> 1)) / base),
2791 suffixes[i]);
2792 break;
2793 }
2794 base = base * 1024;
2795 }
2796 }
2797 return buf;
2798}
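
/*
 * Illustrative sketch (not part of the original file): sample outputs of
 * get_human_readable_size().  Sizes up to 999 are printed verbatim; larger
 * sizes are scaled by powers of 1024, with one decimal place while below ten
 * units of the current suffix and rounded to an integer otherwise.
 *
 *   get_human_readable_size(buf, sizeof(buf), 512)        -> "512"
 *   get_human_readable_size(buf, sizeof(buf), 1536)       -> "1.5K"
 *   get_human_readable_size(buf, sizeof(buf), 512000)     -> "500K"
 *   get_human_readable_size(buf, sizeof(buf), 1048576)    -> "1.0M"
 *   get_human_readable_size(buf, sizeof(buf), 1610612736) -> "1.5G"
 */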
2799
2800char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
2801{
2802 char buf1[128], date_buf[128], clock_buf[128];
bellard3b9f94e2007-01-07 17:27:07 +00002803#ifdef _WIN32
2804 struct tm *ptm;
2805#else
bellardfaea38e2006-08-05 21:31:00 +00002806 struct tm tm;
bellard3b9f94e2007-01-07 17:27:07 +00002807#endif
bellardfaea38e2006-08-05 21:31:00 +00002808 time_t ti;
2809 int64_t secs;
2810
2811 if (!sn) {
ths5fafdf22007-09-16 21:08:06 +00002812 snprintf(buf, buf_size,
2813 "%-10s%-20s%7s%20s%15s",
bellardfaea38e2006-08-05 21:31:00 +00002814 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
2815 } else {
2816 ti = sn->date_sec;
bellard3b9f94e2007-01-07 17:27:07 +00002817#ifdef _WIN32
2818 ptm = localtime(&ti);
2819 strftime(date_buf, sizeof(date_buf),
2820 "%Y-%m-%d %H:%M:%S", ptm);
2821#else
bellardfaea38e2006-08-05 21:31:00 +00002822 localtime_r(&ti, &tm);
2823 strftime(date_buf, sizeof(date_buf),
2824 "%Y-%m-%d %H:%M:%S", &tm);
bellard3b9f94e2007-01-07 17:27:07 +00002825#endif
bellardfaea38e2006-08-05 21:31:00 +00002826 secs = sn->vm_clock_nsec / 1000000000;
2827 snprintf(clock_buf, sizeof(clock_buf),
2828 "%02d:%02d:%02d.%03d",
2829 (int)(secs / 3600),
2830 (int)((secs / 60) % 60),
ths5fafdf22007-09-16 21:08:06 +00002831 (int)(secs % 60),
bellardfaea38e2006-08-05 21:31:00 +00002832 (int)((sn->vm_clock_nsec / 1000000) % 1000));
2833 snprintf(buf, buf_size,
ths5fafdf22007-09-16 21:08:06 +00002834 "%-10s%-20s%7s%20s%15s",
bellardfaea38e2006-08-05 21:31:00 +00002835 sn->id_str, sn->name,
2836 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
2837 date_buf,
2838 clock_buf);
2839 }
2840 return buf;
2841}
2842
bellard83f64092006-08-01 16:21:11 +00002843/**************************************************************/
2844/* async I/Os */
2845
aliguori3b69e4b2009-01-22 16:59:24 +00002846BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
aliguorif141eaf2009-04-07 18:43:24 +00002847 QEMUIOVector *qiov, int nb_sectors,
aliguori3b69e4b2009-01-22 16:59:24 +00002848 BlockDriverCompletionFunc *cb, void *opaque)
2849{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01002850 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
2851
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01002852 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01002853 cb, opaque, false);
bellard83f64092006-08-01 16:21:11 +00002854}
2855
aliguorif141eaf2009-04-07 18:43:24 +00002856BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
2857 QEMUIOVector *qiov, int nb_sectors,
2858 BlockDriverCompletionFunc *cb, void *opaque)
bellard83f64092006-08-01 16:21:11 +00002859{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01002860 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
2861
Stefan Hajnoczi1a6e1152011-10-13 13:08:25 +01002862 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01002863 cb, opaque, true);
bellard83f64092006-08-01 16:21:11 +00002864}
2865
Kevin Wolf40b4f532009-09-09 17:53:37 +02002866
2867typedef struct MultiwriteCB {
2868 int error;
2869 int num_requests;
2870 int num_callbacks;
2871 struct {
2872 BlockDriverCompletionFunc *cb;
2873 void *opaque;
2874 QEMUIOVector *free_qiov;
Kevin Wolf40b4f532009-09-09 17:53:37 +02002875 } callbacks[];
2876} MultiwriteCB;
2877
2878static void multiwrite_user_cb(MultiwriteCB *mcb)
2879{
2880 int i;
2881
2882 for (i = 0; i < mcb->num_callbacks; i++) {
2883 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
Stefan Hajnoczi1e1ea482010-04-21 20:35:45 +01002884 if (mcb->callbacks[i].free_qiov) {
2885 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
2886 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002887 g_free(mcb->callbacks[i].free_qiov);
Kevin Wolf40b4f532009-09-09 17:53:37 +02002888 }
2889}
2890
2891static void multiwrite_cb(void *opaque, int ret)
2892{
2893 MultiwriteCB *mcb = opaque;
2894
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01002895 trace_multiwrite_cb(mcb, ret);
2896
Kevin Wolfcb6d3ca2010-04-01 22:48:44 +02002897 if (ret < 0 && !mcb->error) {
Kevin Wolf40b4f532009-09-09 17:53:37 +02002898 mcb->error = ret;
Kevin Wolf40b4f532009-09-09 17:53:37 +02002899 }
2900
2901 mcb->num_requests--;
2902 if (mcb->num_requests == 0) {
Kevin Wolfde189a12010-07-01 16:08:51 +02002903 multiwrite_user_cb(mcb);
Anthony Liguori7267c092011-08-20 22:09:37 -05002904 g_free(mcb);
Kevin Wolf40b4f532009-09-09 17:53:37 +02002905 }
2906}
2907
2908static int multiwrite_req_compare(const void *a, const void *b)
2909{
Christoph Hellwig77be4362010-05-19 20:53:10 +02002910 const BlockRequest *req1 = a, *req2 = b;
2911
2912 /*
2913 * Note that we can't simply subtract req2->sector from req1->sector
2914 * here as that could overflow the return value.
2915 */
2916 if (req1->sector > req2->sector) {
2917 return 1;
2918 } else if (req1->sector < req2->sector) {
2919 return -1;
2920 } else {
2921 return 0;
2922 }
Kevin Wolf40b4f532009-09-09 17:53:37 +02002923}
2924
2925/*
2926 * Takes a bunch of requests and tries to merge them. Returns the number of
2927 * requests that remain after merging.
2928 */
2929static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
2930 int num_reqs, MultiwriteCB *mcb)
2931{
2932 int i, outidx;
2933
2934 // Sort requests by start sector
2935 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
2936
 2937 // Check if adjacent requests are exactly sequential or overlapping. If so,
 2938 // combine them, dropping any overlapping sectors from the first request.
2939 outidx = 0;
2940 for (i = 1; i < num_reqs; i++) {
2941 int merge = 0;
2942 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
2943
Paolo Bonzinib6a127a2012-02-21 16:43:52 +01002944 // Handle exactly sequential writes and overlapping writes.
Kevin Wolf40b4f532009-09-09 17:53:37 +02002945 if (reqs[i].sector <= oldreq_last) {
2946 merge = 1;
2947 }
2948
Christoph Hellwige2a305f2010-01-26 14:49:08 +01002949 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
2950 merge = 0;
2951 }
2952
Kevin Wolf40b4f532009-09-09 17:53:37 +02002953 if (merge) {
2954 size_t size;
Anthony Liguori7267c092011-08-20 22:09:37 -05002955 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
Kevin Wolf40b4f532009-09-09 17:53:37 +02002956 qemu_iovec_init(qiov,
2957 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
2958
2959 // Add the first request to the merged one. If the requests are
2960 // overlapping, drop the last sectors of the first request.
2961 size = (reqs[i].sector - reqs[outidx].sector) << 9;
Michael Tokarev1b093c42012-03-12 21:28:06 +04002962 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
Kevin Wolf40b4f532009-09-09 17:53:37 +02002963
Paolo Bonzinib6a127a2012-02-21 16:43:52 +01002964 // We should not need to add any zeros between the two requests
2965 assert (reqs[i].sector <= oldreq_last);
Kevin Wolf40b4f532009-09-09 17:53:37 +02002966
2967 // Add the second request
Michael Tokarev1b093c42012-03-12 21:28:06 +04002968 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
Kevin Wolf40b4f532009-09-09 17:53:37 +02002969
Kevin Wolfcbf1dff2010-05-21 11:09:42 +02002970 reqs[outidx].nb_sectors = qiov->size >> 9;
Kevin Wolf40b4f532009-09-09 17:53:37 +02002971 reqs[outidx].qiov = qiov;
2972
2973 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
2974 } else {
2975 outidx++;
2976 reqs[outidx].sector = reqs[i].sector;
2977 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
2978 reqs[outidx].qiov = reqs[i].qiov;
2979 }
2980 }
2981
2982 return outidx + 1;
2983}
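
/*
 * Illustrative example (not part of the original source): given two
 * overlapping write requests
 *
 *     reqs[0]: sector 0, nb_sectors 8   (sectors 0..7)
 *     reqs[1]: sector 4, nb_sectors 8   (sectors 4..11)
 *
 * multiwrite_merge() keeps the first 4 sectors (2048 bytes) of reqs[0]'s
 * qiov, appends all of reqs[1]'s qiov, and replaces the pair with a single
 * request covering sector 0, nb_sectors 12, using a freshly allocated qiov
 * that is freed later through mcb->callbacks[i].free_qiov.
 */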
2984
2985/*
2986 * Submit multiple AIO write requests at once.
2987 *
2988 * On success, the function returns 0 and all requests in the reqs array have
 2989 * been submitted. In the error case this function returns -1, and any of the
2990 * requests may or may not be submitted yet. In particular, this means that the
2991 * callback will be called for some of the requests, for others it won't. The
2992 * caller must check the error field of the BlockRequest to wait for the right
2993 * callbacks (if error != 0, no callback will be called).
2994 *
2995 * The implementation may modify the contents of the reqs array, e.g. to merge
2996 * requests. However, the fields opaque and error are left unmodified as they
2997 * are used to signal failure for a single request to the caller.
2998 */
2999int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
3000{
Kevin Wolf40b4f532009-09-09 17:53:37 +02003001 MultiwriteCB *mcb;
3002 int i;
3003
Ryan Harper301db7c2011-03-07 10:01:04 -06003004 /* don't submit writes if we don't have a medium */
3005 if (bs->drv == NULL) {
3006 for (i = 0; i < num_reqs; i++) {
3007 reqs[i].error = -ENOMEDIUM;
3008 }
3009 return -1;
3010 }
3011
Kevin Wolf40b4f532009-09-09 17:53:37 +02003012 if (num_reqs == 0) {
3013 return 0;
3014 }
3015
3016 // Create MultiwriteCB structure
Anthony Liguori7267c092011-08-20 22:09:37 -05003017 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
Kevin Wolf40b4f532009-09-09 17:53:37 +02003018 mcb->num_requests = 0;
3019 mcb->num_callbacks = num_reqs;
3020
3021 for (i = 0; i < num_reqs; i++) {
3022 mcb->callbacks[i].cb = reqs[i].cb;
3023 mcb->callbacks[i].opaque = reqs[i].opaque;
3024 }
3025
 3026 // Check for mergeable requests
3027 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
3028
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01003029 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
3030
Paolo Bonzinidf9309f2011-11-14 17:50:50 +01003031 /* Run the aio requests. */
3032 mcb->num_requests = num_reqs;
Kevin Wolf40b4f532009-09-09 17:53:37 +02003033 for (i = 0; i < num_reqs; i++) {
Paolo Bonziniad54ae82011-11-30 09:12:30 +01003034 bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
Kevin Wolf40b4f532009-09-09 17:53:37 +02003035 reqs[i].nb_sectors, multiwrite_cb, mcb);
Kevin Wolf40b4f532009-09-09 17:53:37 +02003036 }
3037
3038 return 0;
Kevin Wolf40b4f532009-09-09 17:53:37 +02003039}
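
/*
 * Usage sketch (illustrative only; my_cb, my_opaque, qiov0 and qiov1 are
 * hypothetical caller-provided names, not part of this file):
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_cb, .opaque = my_opaque },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_cb, .opaque = my_opaque },
 *     };
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         (requests with reqs[i].error != 0 will never see their callback)
 *     }
 */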
3040
bellard83f64092006-08-01 16:21:11 +00003041void bdrv_aio_cancel(BlockDriverAIOCB *acb)
pbrookce1a14d2006-08-07 02:38:06 +00003042{
aliguori6bbff9a2009-03-20 18:25:59 +00003043 acb->pool->cancel(acb);
bellard83f64092006-08-01 16:21:11 +00003044}
3045
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08003046/* block I/O throttling */
3047static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
3048 bool is_write, double elapsed_time, uint64_t *wait)
3049{
3050 uint64_t bps_limit = 0;
3051 double bytes_limit, bytes_base, bytes_res;
3052 double slice_time, wait_time;
3053
3054 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
3055 bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
3056 } else if (bs->io_limits.bps[is_write]) {
3057 bps_limit = bs->io_limits.bps[is_write];
3058 } else {
3059 if (wait) {
3060 *wait = 0;
3061 }
3062
3063 return false;
3064 }
3065
3066 slice_time = bs->slice_end - bs->slice_start;
3067 slice_time /= (NANOSECONDS_PER_SECOND);
3068 bytes_limit = bps_limit * slice_time;
3069 bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
3070 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
3071 bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
3072 }
3073
 3074 /* bytes_base: the bytes of data that have already been read/written in
 3075 * this slice, obtained from the history statistics.
 3076 * bytes_res: the remaining bytes of data that still need to be read/written.
 3077 * (bytes_base + bytes_res) / bps_limit: used to calculate the total time
 3078 * for completing the reading/writing of all the data.
 3079 */
3080 bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
3081
3082 if (bytes_base + bytes_res <= bytes_limit) {
3083 if (wait) {
3084 *wait = 0;
3085 }
3086
3087 return false;
3088 }
3089
3090 /* Calc approx time to dispatch */
3091 wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
3092
3093 /* When the I/O rate at runtime exceeds the limits,
 3094 * bs->slice_end needs to be extended so that the current statistics
 3095 * are kept until the timer fires; the extension is increased and tuned
 3096 * based on experimental results.
3097 */
3098 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3099 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3100 if (wait) {
3101 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3102 }
3103
3104 return true;
3105}
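
/*
 * Worked example (illustrative numbers only): with bps_limit set to
 * 1000000 bytes/s and a 0.1 s slice, bytes_limit is 100000.  If 90000
 * bytes have already been transferred in this slice and a new 64-sector
 * request arrives (bytes_res = 64 * 512 = 32768), then
 * 90000 + 32768 > 100000, so the request is throttled and
 * wait_time = 122768 / 1000000 - elapsed_time seconds must pass before
 * it may be dispatched.  The iops path below follows the same pattern
 * with request counts instead of byte counts.
 */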
3106
3107static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
3108 double elapsed_time, uint64_t *wait)
3109{
3110 uint64_t iops_limit = 0;
3111 double ios_limit, ios_base;
3112 double slice_time, wait_time;
3113
3114 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3115 iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
3116 } else if (bs->io_limits.iops[is_write]) {
3117 iops_limit = bs->io_limits.iops[is_write];
3118 } else {
3119 if (wait) {
3120 *wait = 0;
3121 }
3122
3123 return false;
3124 }
3125
3126 slice_time = bs->slice_end - bs->slice_start;
3127 slice_time /= (NANOSECONDS_PER_SECOND);
3128 ios_limit = iops_limit * slice_time;
3129 ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
3130 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3131 ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
3132 }
3133
3134 if (ios_base + 1 <= ios_limit) {
3135 if (wait) {
3136 *wait = 0;
3137 }
3138
3139 return false;
3140 }
3141
3142 /* Calc approx time to dispatch */
3143 wait_time = (ios_base + 1) / iops_limit;
3144 if (wait_time > elapsed_time) {
3145 wait_time = wait_time - elapsed_time;
3146 } else {
3147 wait_time = 0;
3148 }
3149
3150 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3151 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3152 if (wait) {
3153 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3154 }
3155
3156 return true;
3157}
3158
3159static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
3160 bool is_write, int64_t *wait)
3161{
3162 int64_t now, max_wait;
3163 uint64_t bps_wait = 0, iops_wait = 0;
3164 double elapsed_time;
3165 int bps_ret, iops_ret;
3166
3167 now = qemu_get_clock_ns(vm_clock);
3168 if ((bs->slice_start < now)
3169 && (bs->slice_end > now)) {
3170 bs->slice_end = now + bs->slice_time;
3171 } else {
3172 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
3173 bs->slice_start = now;
3174 bs->slice_end = now + bs->slice_time;
3175
3176 bs->io_base.bytes[is_write] = bs->nr_bytes[is_write];
3177 bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
3178
3179 bs->io_base.ios[is_write] = bs->nr_ops[is_write];
3180 bs->io_base.ios[!is_write] = bs->nr_ops[!is_write];
3181 }
3182
3183 elapsed_time = now - bs->slice_start;
3184 elapsed_time /= (NANOSECONDS_PER_SECOND);
3185
3186 bps_ret = bdrv_exceed_bps_limits(bs, nb_sectors,
3187 is_write, elapsed_time, &bps_wait);
3188 iops_ret = bdrv_exceed_iops_limits(bs, is_write,
3189 elapsed_time, &iops_wait);
3190 if (bps_ret || iops_ret) {
3191 max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
3192 if (wait) {
3193 *wait = max_wait;
3194 }
3195
3196 now = qemu_get_clock_ns(vm_clock);
3197 if (bs->slice_end < now + max_wait) {
3198 bs->slice_end = now + max_wait;
3199 }
3200
3201 return true;
3202 }
3203
3204 if (wait) {
3205 *wait = 0;
3206 }
3207
3208 return false;
3209}
pbrookce1a14d2006-08-07 02:38:06 +00003210
bellard83f64092006-08-01 16:21:11 +00003211/**************************************************************/
3212/* async block device emulation */
3213
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02003214typedef struct BlockDriverAIOCBSync {
3215 BlockDriverAIOCB common;
3216 QEMUBH *bh;
3217 int ret;
3218 /* vector translation state */
3219 QEMUIOVector *qiov;
3220 uint8_t *bounce;
3221 int is_write;
3222} BlockDriverAIOCBSync;
3223
3224static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
3225{
Kevin Wolfb666d232010-05-05 11:44:39 +02003226 BlockDriverAIOCBSync *acb =
3227 container_of(blockacb, BlockDriverAIOCBSync, common);
Dor Laor6a7ad292009-06-01 12:07:23 +03003228 qemu_bh_delete(acb->bh);
Avi Kivity36afc452009-06-23 16:20:36 +03003229 acb->bh = NULL;
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02003230 qemu_aio_release(acb);
3231}
3232
3233static AIOPool bdrv_em_aio_pool = {
3234 .aiocb_size = sizeof(BlockDriverAIOCBSync),
3235 .cancel = bdrv_aio_cancel_em,
3236};
3237
bellard83f64092006-08-01 16:21:11 +00003238static void bdrv_aio_bh_cb(void *opaque)
bellardbeac80c2006-06-26 20:08:57 +00003239{
pbrookce1a14d2006-08-07 02:38:06 +00003240 BlockDriverAIOCBSync *acb = opaque;
aliguorif141eaf2009-04-07 18:43:24 +00003241
aliguorif141eaf2009-04-07 18:43:24 +00003242 if (!acb->is_write)
Michael Tokarev03396142012-06-07 20:17:55 +04003243 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
aliguoriceb42de2009-04-07 18:43:28 +00003244 qemu_vfree(acb->bounce);
pbrookce1a14d2006-08-07 02:38:06 +00003245 acb->common.cb(acb->common.opaque, acb->ret);
Dor Laor6a7ad292009-06-01 12:07:23 +03003246 qemu_bh_delete(acb->bh);
Avi Kivity36afc452009-06-23 16:20:36 +03003247 acb->bh = NULL;
pbrookce1a14d2006-08-07 02:38:06 +00003248 qemu_aio_release(acb);
bellardbeac80c2006-06-26 20:08:57 +00003249}
bellardbeac80c2006-06-26 20:08:57 +00003250
aliguorif141eaf2009-04-07 18:43:24 +00003251static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
3252 int64_t sector_num,
3253 QEMUIOVector *qiov,
3254 int nb_sectors,
3255 BlockDriverCompletionFunc *cb,
3256 void *opaque,
3257 int is_write)
3258
bellardea2384d2004-08-01 21:59:26 +00003259{
pbrookce1a14d2006-08-07 02:38:06 +00003260 BlockDriverAIOCBSync *acb;
pbrookce1a14d2006-08-07 02:38:06 +00003261
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02003262 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
aliguorif141eaf2009-04-07 18:43:24 +00003263 acb->is_write = is_write;
3264 acb->qiov = qiov;
aliguorie268ca52009-04-22 20:20:00 +00003265 acb->bounce = qemu_blockalign(bs, qiov->size);
Paolo Bonzini3f3aace2011-11-14 17:50:54 +01003266 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
aliguorif141eaf2009-04-07 18:43:24 +00003267
3268 if (is_write) {
Michael Tokarevd5e6b162012-06-07 20:21:06 +04003269 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
Stefan Hajnoczi1ed20ac2011-10-13 13:08:21 +01003270 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
aliguorif141eaf2009-04-07 18:43:24 +00003271 } else {
Stefan Hajnoczi1ed20ac2011-10-13 13:08:21 +01003272 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
aliguorif141eaf2009-04-07 18:43:24 +00003273 }
3274
pbrookce1a14d2006-08-07 02:38:06 +00003275 qemu_bh_schedule(acb->bh);
aliguorif141eaf2009-04-07 18:43:24 +00003276
pbrookce1a14d2006-08-07 02:38:06 +00003277 return &acb->common;
pbrook7a6cba62006-06-04 11:39:07 +00003278}
3279
aliguorif141eaf2009-04-07 18:43:24 +00003280static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
3281 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
pbrookce1a14d2006-08-07 02:38:06 +00003282 BlockDriverCompletionFunc *cb, void *opaque)
bellard83f64092006-08-01 16:21:11 +00003283{
aliguorif141eaf2009-04-07 18:43:24 +00003284 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
bellard83f64092006-08-01 16:21:11 +00003285}
3286
aliguorif141eaf2009-04-07 18:43:24 +00003287static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
3288 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3289 BlockDriverCompletionFunc *cb, void *opaque)
3290{
3291 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
3292}
3293
Kevin Wolf68485422011-06-30 10:05:46 +02003294
3295typedef struct BlockDriverAIOCBCoroutine {
3296 BlockDriverAIOCB common;
3297 BlockRequest req;
3298 bool is_write;
3299 QEMUBH* bh;
3300} BlockDriverAIOCBCoroutine;
3301
3302static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
3303{
3304 qemu_aio_flush();
3305}
3306
3307static AIOPool bdrv_em_co_aio_pool = {
3308 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
3309 .cancel = bdrv_aio_co_cancel_em,
3310};
3311
Paolo Bonzini35246a62011-10-14 10:41:29 +02003312static void bdrv_co_em_bh(void *opaque)
Kevin Wolf68485422011-06-30 10:05:46 +02003313{
3314 BlockDriverAIOCBCoroutine *acb = opaque;
3315
3316 acb->common.cb(acb->common.opaque, acb->req.error);
3317 qemu_bh_delete(acb->bh);
3318 qemu_aio_release(acb);
3319}
3320
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01003321/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
3322static void coroutine_fn bdrv_co_do_rw(void *opaque)
3323{
3324 BlockDriverAIOCBCoroutine *acb = opaque;
3325 BlockDriverState *bs = acb->common.bs;
3326
3327 if (!acb->is_write) {
3328 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003329 acb->req.nb_sectors, acb->req.qiov, 0);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01003330 } else {
3331 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003332 acb->req.nb_sectors, acb->req.qiov, 0);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01003333 }
3334
Paolo Bonzini35246a62011-10-14 10:41:29 +02003335 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01003336 qemu_bh_schedule(acb->bh);
3337}
3338
Kevin Wolf68485422011-06-30 10:05:46 +02003339static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
3340 int64_t sector_num,
3341 QEMUIOVector *qiov,
3342 int nb_sectors,
3343 BlockDriverCompletionFunc *cb,
3344 void *opaque,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01003345 bool is_write)
Kevin Wolf68485422011-06-30 10:05:46 +02003346{
3347 Coroutine *co;
3348 BlockDriverAIOCBCoroutine *acb;
3349
3350 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3351 acb->req.sector = sector_num;
3352 acb->req.nb_sectors = nb_sectors;
3353 acb->req.qiov = qiov;
3354 acb->is_write = is_write;
3355
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01003356 co = qemu_coroutine_create(bdrv_co_do_rw);
Kevin Wolf68485422011-06-30 10:05:46 +02003357 qemu_coroutine_enter(co, acb);
3358
3359 return &acb->common;
3360}
3361
Paolo Bonzini07f07612011-10-17 12:32:12 +02003362static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02003363{
Paolo Bonzini07f07612011-10-17 12:32:12 +02003364 BlockDriverAIOCBCoroutine *acb = opaque;
3365 BlockDriverState *bs = acb->common.bs;
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02003366
Paolo Bonzini07f07612011-10-17 12:32:12 +02003367 acb->req.error = bdrv_co_flush(bs);
3368 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02003369 qemu_bh_schedule(acb->bh);
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02003370}
3371
Paolo Bonzini07f07612011-10-17 12:32:12 +02003372BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
Alexander Graf016f5cf2010-05-26 17:51:49 +02003373 BlockDriverCompletionFunc *cb, void *opaque)
3374{
Paolo Bonzini07f07612011-10-17 12:32:12 +02003375 trace_bdrv_aio_flush(bs, opaque);
Alexander Graf016f5cf2010-05-26 17:51:49 +02003376
Paolo Bonzini07f07612011-10-17 12:32:12 +02003377 Coroutine *co;
3378 BlockDriverAIOCBCoroutine *acb;
Alexander Graf016f5cf2010-05-26 17:51:49 +02003379
Paolo Bonzini07f07612011-10-17 12:32:12 +02003380 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3381 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
3382 qemu_coroutine_enter(co, acb);
Alexander Graf016f5cf2010-05-26 17:51:49 +02003383
Alexander Graf016f5cf2010-05-26 17:51:49 +02003384 return &acb->common;
3385}
3386
Paolo Bonzini4265d622011-10-17 12:32:14 +02003387static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
3388{
3389 BlockDriverAIOCBCoroutine *acb = opaque;
3390 BlockDriverState *bs = acb->common.bs;
3391
3392 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
3393 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3394 qemu_bh_schedule(acb->bh);
3395}
3396
3397BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
3398 int64_t sector_num, int nb_sectors,
3399 BlockDriverCompletionFunc *cb, void *opaque)
3400{
3401 Coroutine *co;
3402 BlockDriverAIOCBCoroutine *acb;
3403
3404 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
3405
3406 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3407 acb->req.sector = sector_num;
3408 acb->req.nb_sectors = nb_sectors;
3409 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
3410 qemu_coroutine_enter(co, acb);
3411
3412 return &acb->common;
3413}
3414
bellardea2384d2004-08-01 21:59:26 +00003415void bdrv_init(void)
3416{
Anthony Liguori5efa9d52009-05-09 17:03:42 -05003417 module_call_init(MODULE_INIT_BLOCK);
bellardea2384d2004-08-01 21:59:26 +00003418}
pbrookce1a14d2006-08-07 02:38:06 +00003419
Markus Armbrustereb852012009-10-27 18:41:44 +01003420void bdrv_init_with_whitelist(void)
3421{
3422 use_bdrv_whitelist = 1;
3423 bdrv_init();
3424}
3425
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02003426void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
3427 BlockDriverCompletionFunc *cb, void *opaque)
aliguori6bbff9a2009-03-20 18:25:59 +00003428{
pbrookce1a14d2006-08-07 02:38:06 +00003429 BlockDriverAIOCB *acb;
3430
aliguori6bbff9a2009-03-20 18:25:59 +00003431 if (pool->free_aiocb) {
3432 acb = pool->free_aiocb;
3433 pool->free_aiocb = acb->next;
pbrookce1a14d2006-08-07 02:38:06 +00003434 } else {
Anthony Liguori7267c092011-08-20 22:09:37 -05003435 acb = g_malloc0(pool->aiocb_size);
aliguori6bbff9a2009-03-20 18:25:59 +00003436 acb->pool = pool;
pbrookce1a14d2006-08-07 02:38:06 +00003437 }
3438 acb->bs = bs;
3439 acb->cb = cb;
3440 acb->opaque = opaque;
3441 return acb;
3442}
3443
3444void qemu_aio_release(void *p)
3445{
aliguori6bbff9a2009-03-20 18:25:59 +00003446 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
3447 AIOPool *pool = acb->pool;
3448 acb->next = pool->free_aiocb;
3449 pool->free_aiocb = acb;
pbrookce1a14d2006-08-07 02:38:06 +00003450}
bellard19cb3732006-08-19 11:45:59 +00003451
3452/**************************************************************/
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003453/* Coroutine block device emulation */
3454
3455typedef struct CoroutineIOCompletion {
3456 Coroutine *coroutine;
3457 int ret;
3458} CoroutineIOCompletion;
3459
3460static void bdrv_co_io_em_complete(void *opaque, int ret)
3461{
3462 CoroutineIOCompletion *co = opaque;
3463
3464 co->ret = ret;
3465 qemu_coroutine_enter(co->coroutine, NULL);
3466}
3467
3468static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
3469 int nb_sectors, QEMUIOVector *iov,
3470 bool is_write)
3471{
3472 CoroutineIOCompletion co = {
3473 .coroutine = qemu_coroutine_self(),
3474 };
3475 BlockDriverAIOCB *acb;
3476
3477 if (is_write) {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01003478 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
3479 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003480 } else {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01003481 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
3482 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003483 }
3484
Stefan Hajnoczi59370aa2011-09-30 17:34:58 +01003485 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003486 if (!acb) {
3487 return -EIO;
3488 }
3489 qemu_coroutine_yield();
3490
3491 return co.ret;
3492}
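
/*
 * Control-flow sketch (descriptive only) of how bdrv_co_io_em() bridges
 * the callback-based AIO interface to coroutines:
 *
 *     coroutine                              event loop / AIO completion
 *     ---------                              ----------------------------
 *     bs->drv->bdrv_aio_readv/writev(...,
 *         bdrv_co_io_em_complete, &co)
 *     qemu_coroutine_yield()
 *                                            bdrv_co_io_em_complete(&co, ret)
 *                                                co->ret = ret
 *                                                qemu_coroutine_enter(coroutine)
 *     return co.ret
 */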
3493
3494static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
3495 int64_t sector_num, int nb_sectors,
3496 QEMUIOVector *iov)
3497{
3498 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
3499}
3500
3501static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
3502 int64_t sector_num, int nb_sectors,
3503 QEMUIOVector *iov)
3504{
3505 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
3506}
3507
Paolo Bonzini07f07612011-10-17 12:32:12 +02003508static void coroutine_fn bdrv_flush_co_entry(void *opaque)
Kevin Wolfe7a8a782011-07-15 16:05:00 +02003509{
Paolo Bonzini07f07612011-10-17 12:32:12 +02003510 RwCo *rwco = opaque;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02003511
Paolo Bonzini07f07612011-10-17 12:32:12 +02003512 rwco->ret = bdrv_co_flush(rwco->bs);
3513}
3514
3515int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
3516{
Kevin Wolfeb489bb2011-11-10 18:10:11 +01003517 int ret;
3518
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003519 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
Paolo Bonzini07f07612011-10-17 12:32:12 +02003520 return 0;
Kevin Wolfeb489bb2011-11-10 18:10:11 +01003521 }
3522
Kevin Wolfca716362011-11-10 18:13:59 +01003523 /* Write back cached data to the OS even with cache=unsafe */
Kevin Wolfeb489bb2011-11-10 18:10:11 +01003524 if (bs->drv->bdrv_co_flush_to_os) {
3525 ret = bs->drv->bdrv_co_flush_to_os(bs);
3526 if (ret < 0) {
3527 return ret;
3528 }
3529 }
3530
Kevin Wolfca716362011-11-10 18:13:59 +01003531 /* But don't actually force it to the disk with cache=unsafe */
3532 if (bs->open_flags & BDRV_O_NO_FLUSH) {
3533 return 0;
3534 }
3535
Kevin Wolfeb489bb2011-11-10 18:10:11 +01003536 if (bs->drv->bdrv_co_flush_to_disk) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003537 ret = bs->drv->bdrv_co_flush_to_disk(bs);
Paolo Bonzini07f07612011-10-17 12:32:12 +02003538 } else if (bs->drv->bdrv_aio_flush) {
3539 BlockDriverAIOCB *acb;
3540 CoroutineIOCompletion co = {
3541 .coroutine = qemu_coroutine_self(),
3542 };
3543
3544 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3545 if (acb == NULL) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003546 ret = -EIO;
Paolo Bonzini07f07612011-10-17 12:32:12 +02003547 } else {
3548 qemu_coroutine_yield();
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003549 ret = co.ret;
Paolo Bonzini07f07612011-10-17 12:32:12 +02003550 }
Paolo Bonzini07f07612011-10-17 12:32:12 +02003551 } else {
3552 /*
3553 * Some block drivers always operate in either writethrough or unsafe
 3554 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3555 * know how the server works (because the behaviour is hardcoded or
3556 * depends on server-side configuration), so we can't ensure that
3557 * everything is safe on disk. Returning an error doesn't work because
3558 * that would break guests even if the server operates in writethrough
3559 * mode.
3560 *
3561 * Let's hope the user knows what he's doing.
3562 */
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003563 ret = 0;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02003564 }
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003565 if (ret < 0) {
3566 return ret;
3567 }
3568
3569 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
3570 * in the case of cache=unsafe, so there are no useless flushes.
3571 */
3572 return bdrv_co_flush(bs->file);
Paolo Bonzini07f07612011-10-17 12:32:12 +02003573}
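
/*
 * Summary of the cascade above (descriptive only): bdrv_co_flush() first
 * pushes cached data from qemu to the OS (bdrv_co_flush_to_os).  With
 * BDRV_O_NO_FLUSH (cache=unsafe) it stops there; otherwise it forces the
 * data out to the disk (bdrv_co_flush_to_disk, bdrv_aio_flush, or a no-op
 * fallback) and then repeats the whole sequence on the underlying protocol
 * via bdrv_co_flush(bs->file).
 */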
3574
Anthony Liguori0f154232011-11-14 15:09:45 -06003575void bdrv_invalidate_cache(BlockDriverState *bs)
3576{
3577 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
3578 bs->drv->bdrv_invalidate_cache(bs);
3579 }
3580}
3581
3582void bdrv_invalidate_cache_all(void)
3583{
3584 BlockDriverState *bs;
3585
3586 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3587 bdrv_invalidate_cache(bs);
3588 }
3589}
3590
Benoît Canet07789262012-03-23 08:36:49 +01003591void bdrv_clear_incoming_migration_all(void)
3592{
3593 BlockDriverState *bs;
3594
3595 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3596 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
3597 }
3598}
3599
Paolo Bonzini07f07612011-10-17 12:32:12 +02003600int bdrv_flush(BlockDriverState *bs)
3601{
3602 Coroutine *co;
3603 RwCo rwco = {
3604 .bs = bs,
3605 .ret = NOT_DONE,
3606 };
3607
3608 if (qemu_in_coroutine()) {
3609 /* Fast-path if already in coroutine context */
3610 bdrv_flush_co_entry(&rwco);
3611 } else {
3612 co = qemu_coroutine_create(bdrv_flush_co_entry);
3613 qemu_coroutine_enter(co, &rwco);
3614 while (rwco.ret == NOT_DONE) {
3615 qemu_aio_wait();
3616 }
3617 }
3618
3619 return rwco.ret;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02003620}
3621
Paolo Bonzini4265d622011-10-17 12:32:14 +02003622static void coroutine_fn bdrv_discard_co_entry(void *opaque)
3623{
3624 RwCo *rwco = opaque;
3625
3626 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
3627}
3628
3629int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
3630 int nb_sectors)
3631{
3632 if (!bs->drv) {
3633 return -ENOMEDIUM;
3634 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
3635 return -EIO;
3636 } else if (bs->read_only) {
3637 return -EROFS;
3638 } else if (bs->drv->bdrv_co_discard) {
3639 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
3640 } else if (bs->drv->bdrv_aio_discard) {
3641 BlockDriverAIOCB *acb;
3642 CoroutineIOCompletion co = {
3643 .coroutine = qemu_coroutine_self(),
3644 };
3645
3646 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
3647 bdrv_co_io_em_complete, &co);
3648 if (acb == NULL) {
3649 return -EIO;
3650 } else {
3651 qemu_coroutine_yield();
3652 return co.ret;
3653 }
Paolo Bonzini4265d622011-10-17 12:32:14 +02003654 } else {
3655 return 0;
3656 }
3657}
3658
3659int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
3660{
3661 Coroutine *co;
3662 RwCo rwco = {
3663 .bs = bs,
3664 .sector_num = sector_num,
3665 .nb_sectors = nb_sectors,
3666 .ret = NOT_DONE,
3667 };
3668
3669 if (qemu_in_coroutine()) {
3670 /* Fast-path if already in coroutine context */
3671 bdrv_discard_co_entry(&rwco);
3672 } else {
3673 co = qemu_coroutine_create(bdrv_discard_co_entry);
3674 qemu_coroutine_enter(co, &rwco);
3675 while (rwco.ret == NOT_DONE) {
3676 qemu_aio_wait();
3677 }
3678 }
3679
3680 return rwco.ret;
3681}
3682
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003683/**************************************************************/
bellard19cb3732006-08-19 11:45:59 +00003684/* removable device support */
3685
3686/**
3687 * Return TRUE if the media is present
3688 */
3689int bdrv_is_inserted(BlockDriverState *bs)
3690{
3691 BlockDriver *drv = bs->drv;
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02003692
bellard19cb3732006-08-19 11:45:59 +00003693 if (!drv)
3694 return 0;
3695 if (!drv->bdrv_is_inserted)
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02003696 return 1;
3697 return drv->bdrv_is_inserted(bs);
bellard19cb3732006-08-19 11:45:59 +00003698}
3699
3700/**
Markus Armbruster8e49ca42011-08-03 15:08:08 +02003701 * Return whether the media changed since the last call to this
3702 * function, or -ENOTSUP if we don't know. Most drivers don't know.
bellard19cb3732006-08-19 11:45:59 +00003703 */
3704int bdrv_media_changed(BlockDriverState *bs)
3705{
3706 BlockDriver *drv = bs->drv;
bellard19cb3732006-08-19 11:45:59 +00003707
Markus Armbruster8e49ca42011-08-03 15:08:08 +02003708 if (drv && drv->bdrv_media_changed) {
3709 return drv->bdrv_media_changed(bs);
3710 }
3711 return -ENOTSUP;
bellard19cb3732006-08-19 11:45:59 +00003712}
3713
3714/**
3715 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
3716 */
Luiz Capitulinof36f3942012-02-03 16:24:53 -02003717void bdrv_eject(BlockDriverState *bs, bool eject_flag)
bellard19cb3732006-08-19 11:45:59 +00003718{
3719 BlockDriver *drv = bs->drv;
bellard19cb3732006-08-19 11:45:59 +00003720
Markus Armbruster822e1cd2011-07-20 18:23:42 +02003721 if (drv && drv->bdrv_eject) {
3722 drv->bdrv_eject(bs, eject_flag);
bellard19cb3732006-08-19 11:45:59 +00003723 }
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02003724
3725 if (bs->device_name[0] != '\0') {
3726 bdrv_emit_qmp_eject_event(bs, eject_flag);
3727 }
bellard19cb3732006-08-19 11:45:59 +00003728}
3729
bellard19cb3732006-08-19 11:45:59 +00003730/**
3731 * Lock or unlock the media (if it is locked, the user won't be able
3732 * to eject it manually).
3733 */
Markus Armbruster025e8492011-09-06 18:58:47 +02003734void bdrv_lock_medium(BlockDriverState *bs, bool locked)
bellard19cb3732006-08-19 11:45:59 +00003735{
3736 BlockDriver *drv = bs->drv;
3737
Markus Armbruster025e8492011-09-06 18:58:47 +02003738 trace_bdrv_lock_medium(bs, locked);
Stefan Hajnoczib8c6d092011-03-29 20:04:40 +01003739
Markus Armbruster025e8492011-09-06 18:58:47 +02003740 if (drv && drv->bdrv_lock_medium) {
3741 drv->bdrv_lock_medium(bs, locked);
bellard19cb3732006-08-19 11:45:59 +00003742 }
3743}
ths985a03b2007-12-24 16:10:43 +00003744
3745/* needed for generic scsi interface */
3746
3747int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
3748{
3749 BlockDriver *drv = bs->drv;
3750
3751 if (drv && drv->bdrv_ioctl)
3752 return drv->bdrv_ioctl(bs, req, buf);
3753 return -ENOTSUP;
3754}
aliguori7d780662009-03-12 19:57:08 +00003755
aliguori221f7152009-03-28 17:28:41 +00003756BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
3757 unsigned long int req, void *buf,
3758 BlockDriverCompletionFunc *cb, void *opaque)
aliguori7d780662009-03-12 19:57:08 +00003759{
aliguori221f7152009-03-28 17:28:41 +00003760 BlockDriver *drv = bs->drv;
aliguori7d780662009-03-12 19:57:08 +00003761
aliguori221f7152009-03-28 17:28:41 +00003762 if (drv && drv->bdrv_aio_ioctl)
3763 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
3764 return NULL;
aliguori7d780662009-03-12 19:57:08 +00003765}
aliguorie268ca52009-04-22 20:20:00 +00003766
Markus Armbruster7b6f9302011-09-06 18:58:56 +02003767void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
3768{
3769 bs->buffer_alignment = align;
3770}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02003771
aliguorie268ca52009-04-22 20:20:00 +00003772void *qemu_blockalign(BlockDriverState *bs, size_t size)
3773{
3774 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
3775}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02003776
3777void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
3778{
3779 int64_t bitmap_size;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003780
Liran Schouraaa0eb72010-01-26 10:31:48 +02003781 bs->dirty_count = 0;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003782 if (enable) {
Jan Kiszkac6d22832009-11-30 18:21:20 +01003783 if (!bs->dirty_bitmap) {
3784 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
Paolo Bonzini71df14f2012-04-12 14:01:04 +02003785 BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG - 1;
3786 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003787
Paolo Bonzini71df14f2012-04-12 14:01:04 +02003788 bs->dirty_bitmap = g_new0(unsigned long, bitmap_size);
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003789 }
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02003790 } else {
Jan Kiszkac6d22832009-11-30 18:21:20 +01003791 if (bs->dirty_bitmap) {
Anthony Liguori7267c092011-08-20 22:09:37 -05003792 g_free(bs->dirty_bitmap);
Jan Kiszkac6d22832009-11-30 18:21:20 +01003793 bs->dirty_bitmap = NULL;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003794 }
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02003795 }
3796}
3797
3798int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
3799{
Jan Kiszka6ea44302009-11-30 18:21:19 +01003800 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003801
Jan Kiszkac6d22832009-11-30 18:21:20 +01003802 if (bs->dirty_bitmap &&
3803 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02003804 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
3805 (1UL << (chunk % (sizeof(unsigned long) * 8))));
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02003806 } else {
3807 return 0;
3808 }
3809}
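
/*
 * Illustrative example (assuming BDRV_SECTORS_PER_DIRTY_CHUNK is 2048 and
 * unsigned long is 64 bits wide; both are defined outside this function):
 * sector 1000000 belongs to chunk 1000000 / 2048 = 488, which is tracked
 * by bit 488 % 64 = 40 of bs->dirty_bitmap[488 / 64], i.e. word 7 of the
 * bitmap.
 */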
3810
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003811void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
3812 int nr_sectors)
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02003813{
3814 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
3815}
Liran Schouraaa0eb72010-01-26 10:31:48 +02003816
3817int64_t bdrv_get_dirty_count(BlockDriverState *bs)
3818{
3819 return bs->dirty_count;
3820}
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003821
Marcelo Tosattidb593f22011-01-26 12:12:34 -02003822void bdrv_set_in_use(BlockDriverState *bs, int in_use)
3823{
3824 assert(bs->in_use != in_use);
3825 bs->in_use = in_use;
3826}
3827
3828int bdrv_in_use(BlockDriverState *bs)
3829{
3830 return bs->in_use;
3831}
3832
Luiz Capitulino28a72822011-09-26 17:43:50 -03003833void bdrv_iostatus_enable(BlockDriverState *bs)
3834{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03003835 bs->iostatus_enabled = true;
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03003836 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
Luiz Capitulino28a72822011-09-26 17:43:50 -03003837}
3838
3839/* The I/O status is only enabled if the drive explicitly
3840 * enables it _and_ the VM is configured to stop on errors */
3841bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
3842{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03003843 return (bs->iostatus_enabled &&
Luiz Capitulino28a72822011-09-26 17:43:50 -03003844 (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
3845 bs->on_write_error == BLOCK_ERR_STOP_ANY ||
3846 bs->on_read_error == BLOCK_ERR_STOP_ANY));
3847}
3848
3849void bdrv_iostatus_disable(BlockDriverState *bs)
3850{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03003851 bs->iostatus_enabled = false;
Luiz Capitulino28a72822011-09-26 17:43:50 -03003852}
3853
3854void bdrv_iostatus_reset(BlockDriverState *bs)
3855{
3856 if (bdrv_iostatus_is_enabled(bs)) {
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03003857 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
Luiz Capitulino28a72822011-09-26 17:43:50 -03003858 }
3859}
3860
3861/* XXX: Today this is set by device models because it makes the implementation
3862 quite simple. However, the block layer knows about the error, so it's
3863 possible to implement this without device models being involved */
3864void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
3865{
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03003866 if (bdrv_iostatus_is_enabled(bs) &&
3867 bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
Luiz Capitulino28a72822011-09-26 17:43:50 -03003868 assert(error >= 0);
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03003869 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
3870 BLOCK_DEVICE_IO_STATUS_FAILED;
Luiz Capitulino28a72822011-09-26 17:43:50 -03003871 }
3872}
3873
Christoph Hellwiga597e792011-08-25 08:26:01 +02003874void
3875bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
3876 enum BlockAcctType type)
3877{
3878 assert(type < BDRV_MAX_IOTYPE);
3879
3880 cookie->bytes = bytes;
Christoph Hellwigc488c7f2011-08-25 08:26:10 +02003881 cookie->start_time_ns = get_clock();
Christoph Hellwiga597e792011-08-25 08:26:01 +02003882 cookie->type = type;
3883}
3884
3885void
3886bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
3887{
3888 assert(cookie->type < BDRV_MAX_IOTYPE);
3889
3890 bs->nr_bytes[cookie->type] += cookie->bytes;
3891 bs->nr_ops[cookie->type]++;
Christoph Hellwigc488c7f2011-08-25 08:26:10 +02003892 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
Christoph Hellwiga597e792011-08-25 08:26:01 +02003893}
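
/*
 * Usage sketch (illustrative only): a device model brackets each request
 * with these helpers, e.g. for a read of nb_sectors sectors (BDRV_ACCT_READ
 * being the read member of the BlockAcctType enum declared with them):
 *
 *     BlockAcctCookie cookie;
 *
 *     bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
 *                     BDRV_ACCT_READ);
 *     ... submit the request ...
 *     bdrv_acct_done(bs, &cookie);    (in the completion path)
 */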
3894
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003895int bdrv_img_create(const char *filename, const char *fmt,
3896 const char *base_filename, const char *base_fmt,
3897 char *options, uint64_t img_size, int flags)
3898{
3899 QEMUOptionParameter *param = NULL, *create_options = NULL;
Kevin Wolfd2208942011-06-01 14:03:31 +02003900 QEMUOptionParameter *backing_fmt, *backing_file, *size;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003901 BlockDriverState *bs = NULL;
3902 BlockDriver *drv, *proto_drv;
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00003903 BlockDriver *backing_drv = NULL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003904 int ret = 0;
3905
3906 /* Find driver and parse its options */
3907 drv = bdrv_find_format(fmt);
3908 if (!drv) {
3909 error_report("Unknown file format '%s'", fmt);
Jes Sorensen4f70f242010-12-16 13:52:18 +01003910 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003911 goto out;
3912 }
3913
3914 proto_drv = bdrv_find_protocol(filename);
3915 if (!proto_drv) {
3916 error_report("Unknown protocol '%s'", filename);
Jes Sorensen4f70f242010-12-16 13:52:18 +01003917 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003918 goto out;
3919 }
3920
3921 create_options = append_option_parameters(create_options,
3922 drv->create_options);
3923 create_options = append_option_parameters(create_options,
3924 proto_drv->create_options);
3925
3926 /* Create parameter list with default values */
3927 param = parse_option_parameters("", create_options, param);
3928
3929 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
3930
3931 /* Parse -o options */
3932 if (options) {
3933 param = parse_option_parameters(options, create_options, param);
3934 if (param == NULL) {
3935 error_report("Invalid options for file format '%s'.", fmt);
Jes Sorensen4f70f242010-12-16 13:52:18 +01003936 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003937 goto out;
3938 }
3939 }
3940
3941 if (base_filename) {
3942 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
3943 base_filename)) {
3944 error_report("Backing file not supported for file format '%s'",
3945 fmt);
Jes Sorensen4f70f242010-12-16 13:52:18 +01003946 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003947 goto out;
3948 }
3949 }
3950
3951 if (base_fmt) {
3952 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
3953 error_report("Backing file format not supported for file "
3954 "format '%s'", fmt);
Jes Sorensen4f70f242010-12-16 13:52:18 +01003955 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003956 goto out;
3957 }
3958 }
3959
Jes Sorensen792da932010-12-16 13:52:17 +01003960 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
3961 if (backing_file && backing_file->value.s) {
3962 if (!strcmp(filename, backing_file->value.s)) {
3963 error_report("Error: Trying to create an image with the "
3964 "same filename as the backing file");
Jes Sorensen4f70f242010-12-16 13:52:18 +01003965 ret = -EINVAL;
Jes Sorensen792da932010-12-16 13:52:17 +01003966 goto out;
3967 }
3968 }
3969
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003970 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
3971 if (backing_fmt && backing_fmt->value.s) {
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00003972 backing_drv = bdrv_find_format(backing_fmt->value.s);
3973 if (!backing_drv) {
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003974 error_report("Unknown backing file format '%s'",
3975 backing_fmt->value.s);
Jes Sorensen4f70f242010-12-16 13:52:18 +01003976 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003977 goto out;
3978 }
3979 }
3980
3981 // The size for the image must always be specified, with one exception:
3982 // If we are using a backing file, we can obtain the size from there
Kevin Wolfd2208942011-06-01 14:03:31 +02003983 size = get_option_parameter(param, BLOCK_OPT_SIZE);
3984 if (size && size->value.n == -1) {
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003985 if (backing_file && backing_file->value.s) {
3986 uint64_t size;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003987 char buf[32];
Paolo Bonzini63090da2012-04-12 14:01:03 +02003988 int back_flags;
3989
3990 /* backing files always opened read-only */
3991 back_flags =
3992 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003993
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003994 bs = bdrv_new("");
3995
Paolo Bonzini63090da2012-04-12 14:01:03 +02003996 ret = bdrv_open(bs, backing_file->value.s, back_flags, backing_drv);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003997 if (ret < 0) {
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00003998 error_report("Could not open '%s'", backing_file->value.s);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01003999 goto out;
4000 }
4001 bdrv_get_geometry(bs, &size);
4002 size *= 512;
4003
4004 snprintf(buf, sizeof(buf), "%" PRId64, size);
4005 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
4006 } else {
4007 error_report("Image creation needs a size parameter");
Jes Sorensen4f70f242010-12-16 13:52:18 +01004008 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004009 goto out;
4010 }
4011 }
4012
4013 printf("Formatting '%s', fmt=%s ", filename, fmt);
4014 print_option_parameters(param);
4015 puts("");
4016
4017 ret = bdrv_create(drv, filename, param);
4018
4019 if (ret < 0) {
4020 if (ret == -ENOTSUP) {
4021 error_report("Formatting or formatting option not supported for "
4022 "file format '%s'", fmt);
4023 } else if (ret == -EFBIG) {
4024 error_report("The image size is too large for file format '%s'",
4025 fmt);
4026 } else {
4027 error_report("%s: error while creating %s: %s", filename, fmt,
4028 strerror(-ret));
4029 }
4030 }
4031
4032out:
4033 free_option_parameters(create_options);
4034 free_option_parameters(param);
4035
4036 if (bs) {
4037 bdrv_delete(bs);
4038 }
Jes Sorensen4f70f242010-12-16 13:52:18 +01004039
4040 return ret;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004041}
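
/*
 * Usage sketch (illustrative only; the file name and size are arbitrary):
 * creating an 8 GB qcow2 image with default options roughly corresponds to
 *
 *     ret = bdrv_img_create("test.qcow2", "qcow2", NULL, NULL, NULL,
 *                           8 * 1024 * 1024 * 1024ULL, 0);
 */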
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004042
4043void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
Stefan Hajnoczic83c66c2012-04-25 16:51:03 +01004044 int64_t speed, BlockDriverCompletionFunc *cb,
4045 void *opaque, Error **errp)
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004046{
4047 BlockJob *job;
4048
4049 if (bs->job || bdrv_in_use(bs)) {
Stefan Hajnoczifd7f8c62012-04-25 16:51:00 +01004050 error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004051 return NULL;
4052 }
4053 bdrv_set_in_use(bs, 1);
4054
4055 job = g_malloc0(job_type->instance_size);
4056 job->job_type = job_type;
4057 job->bs = bs;
4058 job->cb = cb;
4059 job->opaque = opaque;
Paolo Bonzini4513eaf2012-05-08 16:51:45 +02004060 job->busy = true;
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004061 bs->job = job;
Stefan Hajnoczic83c66c2012-04-25 16:51:03 +01004062
4063 /* Only set speed when necessary to avoid NotSupported error */
4064 if (speed != 0) {
4065 Error *local_err = NULL;
4066
4067 block_job_set_speed(job, speed, &local_err);
4068 if (error_is_set(&local_err)) {
4069 bs->job = NULL;
4070 g_free(job);
4071 bdrv_set_in_use(bs, 0);
4072 error_propagate(errp, local_err);
4073 return NULL;
4074 }
4075 }
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004076 return job;
4077}
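
/*
 * Usage sketch (illustrative only; my_job_type and my_job_cb are
 * hypothetical names for a backend-defined BlockJobType and completion
 * callback):
 *
 *     BlockJob *job;
 *
 *     job = block_job_create(&my_job_type, bs, speed, my_job_cb,
 *                            opaque, errp);
 *     if (!job) {
 *         return;    (bs is busy or the speed was rejected; errp is set)
 *     }
 *
 * The job's coroutine is expected to poll block_job_is_cancelled() and
 * call block_job_sleep_ns() between chunks of work, finishing with
 * block_job_complete().
 */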
4078
4079void block_job_complete(BlockJob *job, int ret)
4080{
4081 BlockDriverState *bs = job->bs;
4082
4083 assert(bs->job == job);
4084 job->cb(job->opaque, ret);
4085 bs->job = NULL;
4086 g_free(job);
4087 bdrv_set_in_use(bs, 0);
4088}
4089
Stefan Hajnoczi882ec7c2012-04-25 16:51:02 +01004090void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004091{
Stefan Hajnoczi9e6636c2012-04-25 16:51:01 +01004092 Error *local_err = NULL;
Paolo Bonzini9f25ecc2012-03-30 13:17:12 +02004093
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004094 if (!job->job_type->set_speed) {
Stefan Hajnoczi9e6636c2012-04-25 16:51:01 +01004095 error_set(errp, QERR_NOT_SUPPORTED);
4096 return;
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004097 }
Stefan Hajnoczi882ec7c2012-04-25 16:51:02 +01004098 job->job_type->set_speed(job, speed, &local_err);
Stefan Hajnoczi9e6636c2012-04-25 16:51:01 +01004099 if (error_is_set(&local_err)) {
4100 error_propagate(errp, local_err);
4101 return;
Paolo Bonzini9f25ecc2012-03-30 13:17:12 +02004102 }
Stefan Hajnoczi9e6636c2012-04-25 16:51:01 +01004103
Stefan Hajnoczi882ec7c2012-04-25 16:51:02 +01004104 job->speed = speed;
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004105}
4106
4107void block_job_cancel(BlockJob *job)
4108{
4109 job->cancelled = true;
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004110 if (job->co && !job->busy) {
4111 qemu_coroutine_enter(job->co, NULL);
4112 }
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004113}
4114
4115bool block_job_is_cancelled(BlockJob *job)
4116{
4117 return job->cancelled;
4118}
Paolo Bonzini3e914652012-03-30 13:17:11 +02004119
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004120struct BlockCancelData {
4121 BlockJob *job;
4122 BlockDriverCompletionFunc *cb;
4123 void *opaque;
4124 bool cancelled;
4125 int ret;
4126};
4127
4128static void block_job_cancel_cb(void *opaque, int ret)
Paolo Bonzini3e914652012-03-30 13:17:11 +02004129{
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004130 struct BlockCancelData *data = opaque;
4131
4132 data->cancelled = block_job_is_cancelled(data->job);
4133 data->ret = ret;
4134 data->cb(data->opaque, ret);
4135}
4136
4137int block_job_cancel_sync(BlockJob *job)
4138{
4139 struct BlockCancelData data;
Paolo Bonzini3e914652012-03-30 13:17:11 +02004140 BlockDriverState *bs = job->bs;
4141
4142 assert(bs->job == job);
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004143
4144 /* Set up our own callback to store the result and chain to
4145 * the original callback.
4146 */
4147 data.job = job;
4148 data.cb = job->cb;
4149 data.opaque = job->opaque;
4150 data.ret = -EINPROGRESS;
4151 job->cb = block_job_cancel_cb;
4152 job->opaque = &data;
Paolo Bonzini3e914652012-03-30 13:17:11 +02004153 block_job_cancel(job);
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004154 while (data.ret == -EINPROGRESS) {
Paolo Bonzini3e914652012-03-30 13:17:11 +02004155 qemu_aio_wait();
4156 }
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004157 return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
Paolo Bonzini3e914652012-03-30 13:17:11 +02004158}
Paolo Bonzini4513eaf2012-05-08 16:51:45 +02004159
4160void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
4161{
4162 /* Check cancellation *before* setting busy = false, too! */
4163 if (!block_job_is_cancelled(job)) {
4164 job->busy = false;
4165 co_sleep_ns(clock, ns);
4166 job->busy = true;
4167 }
4168}