/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

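/* Bottom halves are kept in a singly-linked list hanging off ctx->first_bh.
 * Writers (aio_bh_new and the removal of deleted BHs in aio_bh_poll) hold
 * ctx->bh_lock; readers walk the list without the lock and rely on the
 * memory barriers below instead.
 */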
struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_malloc0(sizeof(QEMUBH));
    bh->ctx = ctx;
    bh->cb = cb;
    bh->opaque = opaque;
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}

/* aio_bh_poll cannot run concurrently with itself on the same AioContext */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        if (!bh->deleted && bh->scheduled) {
            bh->scheduled = 0;
            /* Paired with the write barrier in bh schedule so that bh->idle and
             * the callback's data are read only after bh->scheduled is set.
             */
            smp_rmb();
            if (!bh->idle)
                ret = 1;
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

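/* Schedule a low-priority ("idle") bottom half.  Idle BHs are only polled
 * about once every 10 ms (see aio_ctx_prepare) and no aio_notify() is issued,
 * so an event loop blocked in poll is not woken up immediately.
 */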
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    if (bh->scheduled)
        return;
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in aio_bh_poll().
     */
    smp_wmb();
    bh->scheduled = 1;
}

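/* Schedule a bottom half for the next event loop iteration and wake the loop
 * up with aio_notify() in case it is currently blocked in poll.
 */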
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    if (bh->scheduled)
        return;
    ctx = bh->ctx;
    bh->idle = 0;
    /* Make sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in aio_bh_poll().
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    smp_mb();
    bh->scheduled = 1;
    aio_notify(ctx);
}
/* This function is asynchronous: the callback may still run once if
 * aio_bh_poll has already picked up this bottom half.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is asynchronous: the bottom half is not freed here but later,
 * by aio_bh_poll, once no caller is walking the list any more.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

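/* GSourceFuncs callbacks: they let an AioContext be attached to a glib main
 * loop as a GSource.  prepare computes the poll timeout, check reports
 * whether any work is pending, and dispatch runs one aio_poll() iteration.
 */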
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    int deadline;

    /* We assume there is no timeout already supplied */
    *timeout = -1;
    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                *timeout = 10;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                *timeout = 0;
                return true;
            }
        }
    }

    deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg));
    if (deadline == 0) {
        *timeout = 0;
        return true;
    } else {
        *timeout = qemu_soonest_timeout(*timeout, deadline);
    }

    return false;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_poll(ctx, false);
    return true;
}

static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);
    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    g_array_free(ctx->pollfds, TRUE);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

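/* Return the context's ThreadPool, creating it lazily on first use. */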
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

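/* Wake up an event loop that may be blocked in poll by setting the context's
 * EventNotifier.  The handler installed in aio_context_new (a cast of
 * event_notifier_test_and_clear) simply clears the notifier again.
 */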
void aio_notify(AioContext *ctx)
{
    event_notifier_set(&ctx->notifier);
}

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void aio_rfifolock_cb(void *opaque)
{
    /* Kick owner thread in case they are blocked in aio_poll() */
    aio_notify(opaque);
}

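/* Allocate a new AioContext.  The context doubles as a GSource, so it can be
 * attached to a glib main loop via aio_get_g_source() and its lifetime is
 * managed with aio_context_ref/unref.
 */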
AioContext *aio_context_new(void)
{
    AioContext *ctx;
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    event_notifier_init(&ctx->notifier, false);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           (EventNotifierHandler *)
                           event_notifier_test_and_clear);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    return ctx;
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

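/* aio_context_acquire/release take and drop the context's recursive FIFO
 * lock, giving the calling thread temporary exclusive use of the AioContext.
 */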
void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}