/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;     /* set by qemu_bh_schedule*, cleared by aio_bh_poll */
    bool idle;          /* idle BHs only need to run every ~10 ms */
    bool deleted;       /* the BH is freed lazily by aio_bh_poll */
};

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}

/* Multiple invocations of aio_bh_poll must not run concurrently */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs and the notify BH don't count as progress */
            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                ret = 1;
            }
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}

/* This function is async: the cancellation takes effect only the next time
 * aio_bh_poll checks bh->scheduled, so a bottom half that is already running
 * is not interrupted.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is async.  The bottom half is only marked for deletion here;
 * aio_bh_poll frees it at the end of its run, once nothing is walking the
 * list any more.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
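
/* A minimal sketch of the bottom-half lifecycle, assuming the caller already
 * owns an AioContext.  The callback and state below are hypothetical and for
 * illustration only, which is why the block is not compiled.
 */
#if 0
typedef struct ExampleState {
    QEMUBH *bh;
    int counter;
} ExampleState;

static void example_bh_cb(void *opaque)
{
    ExampleState *s = opaque;
    s->counter++;               /* runs in the context's event-loop thread */
    qemu_bh_delete(s->bh);      /* the actual free happens in aio_bh_poll */
}

static void example_arm(AioContext *ctx, ExampleState *s)
{
    s->bh = aio_bh_new(ctx, example_bh_cb, s);
    qemu_bh_schedule(s->bh);    /* wakes the event loop via aio_notify */
}
#endif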

int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    qemu_bh_delete(ctx->notify_dummy_bh);
    thread_pool_free(ctx->thread_pool);

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}
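
/* A minimal sketch of attaching the AioContext to a GLib main loop; using the
 * default GMainContext (NULL) is an assumption made for illustration, so the
 * block is not compiled.
 */
#if 0
static void example_attach(AioContext *ctx)
{
    GSource *source = aio_get_g_source(ctx);  /* takes a reference */
    g_source_attach(source, NULL);            /* default GMainContext */
    g_source_unref(source);                   /* the main context owns it now */
}
#endif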

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
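
/* A sketch of offloading blocking work to the per-context thread pool.  The
 * worker function is hypothetical, and thread_pool_submit() is assumed to
 * have the signature declared in block/thread-pool.h; the block is therefore
 * not compiled.
 */
#if 0
static int example_blocking_work(void *opaque)
{
    /* Runs in a worker thread; may block without stalling the event loop. */
    return 0;
}

static void example_offload(AioContext *ctx)
{
    ThreadPool *pool = aio_get_thread_pool(ctx);
    thread_pool_submit(pool, example_blocking_work, NULL);
}
#endif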

void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}
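
/* A sketch of the notify_me/notified protocol as a custom poll iteration
 * would follow it, mirroring aio_ctx_prepare and aio_ctx_check above.  The
 * blocking step is elided, so the block is not compiled.
 */
#if 0
static void example_poll_iteration(AioContext *ctx)
{
    atomic_or(&ctx->notify_me, 1);    /* tell aio_notify to kick us */
    /* ... block on the context's file descriptors ... */
    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);           /* consume the pending notification */
}
#endif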

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void aio_rfifolock_cb(void *opaque)
{
    AioContext *ctx = opaque;

    /* Kick owner thread in case they are blocked in aio_poll() */
    qemu_bh_schedule(ctx->notify_dummy_bh);
}

static void notify_dummy_bh(void *opaque)
{
    /* Do nothing, we were invoked just to force the event loop to iterate */
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        g_source_destroy(&ctx->source);
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        return NULL;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);

    return ctx;
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}
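
/* A sketch of cross-thread use: a thread that does not run this context's
 * event loop takes the lock before poking at its state.  The work itself is
 * elided, so the block is not compiled.
 */
#if 0
static void example_cross_thread(AioContext *ctx)
{
    aio_context_acquire(ctx);   /* recursive FIFO lock; if the owner is
                                 * blocked in aio_poll(), aio_rfifolock_cb
                                 * above kicks it awake */
    /* ... operate on objects owned by this AioContext ... */
    aio_context_release(ctx);
}
#endif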