/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif

struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

#ifdef CONFIG_EPOLL

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64

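/*
 * Give up on epoll for this AioContext: mark it unavailable so that
 * aio_epoll_check_poll() will not try to enable it again, and if it was
 * already active, close the epoll fd.  Subsequent aio_poll() calls fall
 * back to the plain qemu_poll_ns() path.
 */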
static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_available = false;
    if (!ctx->epoll_enabled) {
        return;
    }
    ctx->epoll_enabled = false;
    close(ctx->epollfd);
}

static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}

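/*
 * Register every live handler with the epoll instance.  If any
 * epoll_ctl(EPOLL_CTL_ADD) call fails, report failure so the caller can
 * disable epoll; on success the context switches to the epoll fast path.
 */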
static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}

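/*
 * Keep the epoll interest set in sync with a handler that was just added,
 * modified or removed by aio_set_fd_handler().  Any epoll_ctl() failure
 * disables epoll for the whole context rather than risking a stale set.
 */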
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;

    if (!ctx->epoll_enabled) {
        return;
    }
    if (!node->pfd.events) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, node->pfd.fd, &event);
        if (r) {
            aio_epoll_disable(ctx);
        }
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        if (is_new) {
            r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
            if (r) {
                aio_epoll_disable(ctx);
            }
        } else {
            r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, node->pfd.fd, &event);
            if (r) {
                aio_epoll_disable(ctx);
            }
        }
    }
}

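/*
 * Wait for events via the context's epoll instance.  The caller passes a
 * single GPollFD referring to ctx->epollfd: for a finite timeout,
 * qemu_poll_ns() performs the nanosecond-granularity wait on that fd, and
 * epoll_wait() then collects the ready events (at most 128 per call) and
 * translates them back into GLib G_IO_* flags in each handler's
 * pfd.revents.
 */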
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         sizeof(events) / sizeof(events[0]),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}

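/*
 * Decide whether this aio_poll() iteration should use epoll.  Once the
 * number of polled fds reaches EPOLL_ENABLE_THRESHOLD, try to switch the
 * context over; if that fails, epoll is disabled for good.
 */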
static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}

#else

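/* Stub implementations used when the host lacks epoll support; aio_poll()
 * then always takes the qemu_poll_ns() path and aio_epoll() must never be
 * reached.
 */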
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif

static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}

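/*
 * Register, update or remove the read/write callbacks for an fd on this
 * AioContext.  Passing NULL for both io_read and io_write unregisters the
 * fd; if the handler list is currently being walked, the node is only
 * marked as deleted and freed later by aio_dispatch().
 *
 * A minimal usage sketch (hypothetical caller, names are illustrative):
 *
 *     static void my_read_cb(void *opaque) { ... }
 *
 *     aio_set_fd_handler(ctx, fd, true, my_read_cb, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL);   // unregister
 */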
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    AioHandler *node;
    bool is_new = false;
    bool deleted = false;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node == NULL) {
            return;
        }

        g_source_remove_poll(&ctx->source, &node->pfd);

        /* If the lock is held, just mark the node as deleted */
        if (ctx->walking_handlers) {
            node->deleted = 1;
            node->pfd.revents = 0;
        } else {
            /* Otherwise, delete it for real.  We can't just mark it as
             * deleted because deleted nodes are only cleaned up after
             * releasing the walking_handlers lock.  Defer the g_free()
             * until after aio_epoll_update() so that the epoll bookkeeping
             * never touches freed memory.
             */
            QLIST_REMOVE(node, node);
            deleted = true;
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_epoll_update(ctx, node, is_new);
    aio_notify(ctx);
    if (deleted) {
        g_free(node);
    }
}

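/*
 * An EventNotifier is read through a plain file descriptor on POSIX hosts,
 * so wiring up its callback is a thin wrapper around aio_set_fd_handler().
 */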
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       is_external, (IOHandler *)io_read, NULL, notifier);
}

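/*
 * The POSIX implementation has no work to do before polling, so
 * aio_prepare() never reports pending events.
 */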
bool aio_prepare(AioContext *ctx)
{
    return false;
}

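/*
 * Return true if any registered handler has events ready to dispatch,
 * based on the revents recorded by the most recent poll.
 */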
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            return true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            return true;
        }
    }

    return false;
}

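/*
 * Run pending bottom halves, invoke the read/write callbacks of handlers
 * whose revents were filled in by the last poll, and run expired timers.
 * Returns true if any callback made progress.
 */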
bool aio_dispatch(AioContext *ctx)
{
    AioHandler *node;
    bool progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call poll() in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    if (aio_bh_poll(ctx)) {
        progress = true;
    }

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    /* Run our timers */
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

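/*
 * Poll the fds registered on this AioContext and dispatch any ready
 * handlers, bottom halves and timers.  If @blocking is true, the call may
 * sleep until work arrives; ctx->notify_me is raised around the sleep so
 * that aio_notify() knows it must wake us up.  Returns true if progress
 * was made.
 */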
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i, ret;
    bool progress;
    int64_t timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    ctx->walking_handlers++;

    assert(npfd == 0);

    /* fill pollfds */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->pfd.events
            && !aio_epoll_enabled(ctx)
            && aio_node_check(ctx, node->is_external)) {
            add_pollfd(node);
        }
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;

    /* wait until next event */
    if (timeout) {
        aio_context_release(ctx);
    }

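    /* When the epoll fast path is active, wait on the single epoll fd
     * instead of the whole pollfds array.  A stack-allocated AioHandler
     * stands in for the epoll fd so that add_pollfd() and aio_epoll() can
     * reuse the same plumbing as the ppoll path.
     */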
    if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
        AioHandler epoll_handler;

        epoll_handler.pfd.fd = ctx->epollfd;
        epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
        npfd = 0;
        add_pollfd(&epoll_handler);
        ret = aio_epoll(ctx, pollfds, npfd, timeout);
    } else {
        ret = qemu_poll_ns(pollfds, npfd, timeout);
    }
    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
    }
    if (timeout) {
        aio_context_acquire(ctx);
    }

    aio_notify_accept(ctx);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;
    ctx->walking_handlers--;

    /* Run dispatch even if there were no readable fds to run timers */
    if (aio_dispatch(ctx)) {
        progress = true;
    }

    aio_context_release(ctx);

    return progress;
}

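/*
 * One-time per-context initialisation: create the epoll instance eagerly
 * when the host supports it.  Whether epoll is actually used is decided
 * later, once the number of polled fds crosses EPOLL_ENABLE_THRESHOLD.
 */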
void aio_context_setup(AioContext *ctx, Error **errp)
{
#ifdef CONFIG_EPOLL
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}