/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qemu/timer.h"

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;

static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */

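/* Iterate the global job list: return the job following @job, or the first
 * job when @job is NULL. */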
BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

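/* Look up a job by its user-visible ID.  Internal jobs have no ID and are
 * never returned; NULL is returned when no job matches. */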
BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

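/* Allocate an empty transaction with an initial reference count of one. */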
BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}

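/* Pausing is reference-counted: each block_job_pause() call must be paired
 * with a block_job_resume(), and the job only runs again once the count
 * drops back to zero. */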
static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

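/* Release a reference.  When the last reference is dropped, the job is
 * removed from the global list, detached from its nodes and its
 * BlockBackend, and freed. */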
void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        QLIST_REMOVE(job, job_list);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        assert(!timer_pending(&job->sleep_timer));
        g_free(job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If job is !job->busy this kicks it into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_str(job->driver->job_type),
                           job->id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_pause(job);
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_resume(job);
}

static const BdrvChildRole child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .drained_begin   = child_job_drained_begin,
    .drained_end     = child_job_drained_end,
    .stay_at_node    = true,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

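/* Attach @bs to @job as a child named @name with the given permissions and
 * block all operations on it for the lifetime of the job.  Returns 0 on
 * success or -EPERM if the permissions cannot be granted. */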
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

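/* A job counts as started once block_job_start() has created its coroutine. */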
static bool block_job_started(BlockJob *job)
{
    return job->co;
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}

void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    job->status = BLOCK_JOB_STATUS_RUNNING;
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}

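/* Finalize one job: run the driver's commit or abort callback depending on
 * the result, then clean, invoke the completion callback, emit the QMP event
 * and drop the transaction and job references. */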
static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    QLIST_REMOVE(job, txn_list);
    block_job_txn_unref(job->txn);
    block_job_unref(job);
}

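/* Mark the job as cancelled and undo a user pause without entering the
 * coroutine; the caller is responsible for kicking the job afterwards. */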
static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}

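/* Run @finish and then wait for the job to complete, draining and polling as
 * needed.  Returns -EBUSY if @finish fails, -ECANCELED if the job was
 * cancelled without reporting an error, and the job's return value
 * otherwise. */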
static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;
    int64_t old_speed = job->speed;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_user_pause(BlockJob *job)
{
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job)
{
    if (job && job->user_paused && job->pause_count > 0) {
        block_job_iostatus_reset(job);
        job->user_paused = false;
        block_job_resume(job);
    }
}

void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_str(job->driver->job_type));
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = atomic_read(&job->busy);
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    info->status    = job->status;
    return info;
}

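/* Record the first I/O error in the job's iostatus; later errors do not
 * overwrite it. */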
static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->refcnt        = 1;
    job->status        = BLOCK_JOB_STATUS_CREATED;
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_str(driver->job_type));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    /* Single jobs are modeled as single-job transactions for sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = block_job_txn_new();
        block_job_txn_add_job(txn, job);
        block_job_txn_unref(txn);
    } else {
        block_job_txn_add_job(txn, job);
    }

    return job;
}

void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_ref(job);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}

void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}

void block_job_completed(BlockJob *job, int ret)
{
    assert(job && job->txn && !job->completed);
    assert(blk_bs(job->blk)->job == job);
    job->completed = true;
    job->ret = ret;
    if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine.  */
    assert(job->busy);
}

void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        BlockJobStatus status = job->status;
        job->status = status == BLOCK_JOB_STATUS_READY ? \
                        BLOCK_JOB_STATUS_STANDBY : \
                        BLOCK_JOB_STATUS_PAUSED;
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
        job->status = status;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume_all(void)
{
    BlockJob *job, *next;

    QLIST_FOREACH_SAFE(job, &block_jobs, job_list, next) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        block_job_unref(job);
        aio_context_release(aio_context);
    }
}

/*
 * Conditionally enter a block_job pending a call to fn() while
 * under the block_job_lock critical section.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}

void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    job->status = BLOCK_JOB_STATUS_READY;
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* make the pause user visible, which will be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}

typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}