Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 1 | /* |
| 2 | * QEMU System Emulator |
| 3 | * |
| 4 | * Copyright (c) 2003-2008 Fabrice Bellard |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 7 | * of this software and associated documentation files (the "Software"), to deal |
| 8 | * in the Software without restriction, including without limitation the rights |
| 9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
| 10 | * copies of the Software, and to permit persons to whom the Software is |
| 11 | * furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in |
| 14 | * all copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
| 22 | * THE SOFTWARE. |
| 23 | */ |
| 24 | |
Peter Maydell | d38ea87 | 2016-01-29 17:50:05 +0000 | [diff] [blame] | 25 | #include "qemu/osdep.h" |
Peter Maydell | 1ac0206 | 2015-01-20 16:16:40 +0000 | [diff] [blame] | 26 | #include "qemu/main-loop.h" |
Paolo Bonzini | 1de7afc | 2012-12-17 18:20:00 +0100 | [diff] [blame] | 27 | #include "qemu/timer.h" |
Pavel Dovgalyuk | 8eda206 | 2015-09-17 19:24:28 +0300 | [diff] [blame] | 28 | #include "sysemu/replay.h" |
Paolo Bonzini | d2528bd | 2017-03-03 12:01:16 +0100 | [diff] [blame] | 29 | #include "sysemu/cpus.h" |
Peter Maydell | 1ac0206 | 2015-01-20 16:16:40 +0000 | [diff] [blame] | 30 | |
Anthony Liguori | 30ea833 | 2012-11-02 16:12:53 -0500 | [diff] [blame] | 31 | #ifdef CONFIG_POSIX |
| 32 | #include <pthread.h> |
| 33 | #endif |
Stefan Weil | bff9f8b | 2012-04-20 10:27:06 +0200 | [diff] [blame] | 34 | |
Alex Bligh | 4e0c652 | 2013-08-21 16:02:43 +0100 | [diff] [blame] | 35 | #ifdef CONFIG_PPOLL |
| 36 | #include <poll.h> |
| 37 | #endif |
| 38 | |
Alex Bligh | cd758dd | 2013-08-21 16:02:44 +0100 | [diff] [blame] | 39 | #ifdef CONFIG_PRCTL_PR_SET_TIMERSLACK |
| 40 | #include <sys/prctl.h> |
| 41 | #endif |
| 42 | |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 43 | /***********************************************************/ |
| 44 | /* timers */ |
| 45 | |
/* A clock source.  Timers do not hang off the clock directly; they live
 * on QEMUTimerLists, and the clock tracks every list created against it
 * so that e.g. re-enabling the clock can notify all of them.
 */
typedef struct QEMUClock {
    /* We rely on BQL to protect the timerlists */
    QLIST_HEAD(, QEMUTimerList) timerlists;

    QEMUClockType type;   /* which QEMU_CLOCK_* this instance represents */
    bool enabled;         /* when false, deadline queries return -1 */
} QEMUClock;
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 53 | |
/* The main loop's timer lists: one QEMUTimerList per clock type. */
QEMUTimerListGroup main_loop_tlg;
/* The clock objects themselves, indexed by QEMUClockType. */
static QEMUClock qemu_clocks[QEMU_CLOCK_MAX];
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 56 | |
/* A QEMUTimerList is a list of timers attached to a clock. More
 * than one QEMUTimerList can be attached to each clock, for instance
 * used by different AioContexts / threads. Each clock also has
 * a list of the QEMUTimerLists associated with it, in order that
 * reenabling the clock can call all the notifiers.
 */

struct QEMUTimerList {
    QEMUClock *clock;                  /* owning clock; timerlist_free tolerates NULL */
    QemuMutex active_timers_lock;      /* protects the active_timers chain */
    QEMUTimer *active_timers;          /* singly-linked, kept sorted by expire_time */
    QLIST_ENTRY(QEMUTimerList) list;   /* membership in clock->timerlists */
    QEMUTimerListNotifyCB *notify_cb;  /* invoked when the earliest deadline changes */
    void *notify_opaque;               /* opaque argument passed to notify_cb */

    /* lightweight method to mark the end of timerlist's running */
    QemuEvent timers_done_ev;
};
| 75 | |
Alex Bligh | 7bf8fbd | 2013-08-21 16:03:03 +0100 | [diff] [blame] | 76 | /** |
| 77 | * qemu_clock_ptr: |
| 78 | * @type: type of clock |
| 79 | * |
| 80 | * Translate a clock type into a pointer to QEMUClock object. |
| 81 | * |
| 82 | * Returns: a pointer to the QEMUClock object |
| 83 | */ |
Alex Bligh | b4049b7 | 2013-08-21 16:03:09 +0100 | [diff] [blame] | 84 | static inline QEMUClock *qemu_clock_ptr(QEMUClockType type) |
Alex Bligh | 7bf8fbd | 2013-08-21 16:03:03 +0100 | [diff] [blame] | 85 | { |
| 86 | return &qemu_clocks[type]; |
| 87 | } |
| 88 | |
Alex Bligh | e93379b | 2013-08-21 16:02:39 +0100 | [diff] [blame] | 89 | static bool timer_expired_ns(QEMUTimer *timer_head, int64_t current_time) |
Stefan Weil | 45c7b37 | 2011-03-24 21:31:24 +0100 | [diff] [blame] | 90 | { |
| 91 | return timer_head && (timer_head->expire_time <= current_time); |
| 92 | } |
| 93 | |
Alex Bligh | 7bf8fbd | 2013-08-21 16:03:03 +0100 | [diff] [blame] | 94 | QEMUTimerList *timerlist_new(QEMUClockType type, |
| 95 | QEMUTimerListNotifyCB *cb, |
| 96 | void *opaque) |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 97 | { |
| 98 | QEMUTimerList *timer_list; |
Alex Bligh | 7bf8fbd | 2013-08-21 16:03:03 +0100 | [diff] [blame] | 99 | QEMUClock *clock = qemu_clock_ptr(type); |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 100 | |
| 101 | timer_list = g_malloc0(sizeof(QEMUTimerList)); |
Paolo Bonzini | e4efd8a | 2015-07-21 16:07:48 +0200 | [diff] [blame] | 102 | qemu_event_init(&timer_list->timers_done_ev, true); |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 103 | timer_list->clock = clock; |
Alex Bligh | d5541d8 | 2013-08-21 16:02:50 +0100 | [diff] [blame] | 104 | timer_list->notify_cb = cb; |
| 105 | timer_list->notify_opaque = opaque; |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 106 | qemu_mutex_init(&timer_list->active_timers_lock); |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 107 | QLIST_INSERT_HEAD(&clock->timerlists, timer_list, list); |
| 108 | return timer_list; |
| 109 | } |
| 110 | |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 111 | void timerlist_free(QEMUTimerList *timer_list) |
| 112 | { |
| 113 | assert(!timerlist_has_timers(timer_list)); |
| 114 | if (timer_list->clock) { |
| 115 | QLIST_REMOVE(timer_list, list); |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 116 | } |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 117 | qemu_mutex_destroy(&timer_list->active_timers_lock); |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 118 | g_free(timer_list); |
| 119 | } |
| 120 | |
Paolo Bonzini | 3f53bc6 | 2017-03-03 11:50:29 +0100 | [diff] [blame] | 121 | static void qemu_clock_init(QEMUClockType type, QEMUTimerListNotifyCB *notify_cb) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 122 | { |
Alex Bligh | 7bf8fbd | 2013-08-21 16:03:03 +0100 | [diff] [blame] | 123 | QEMUClock *clock = qemu_clock_ptr(type); |
Jan Kiszka | 691a0c9 | 2011-06-20 14:06:27 +0200 | [diff] [blame] | 124 | |
Kirill Batuzov | 02ce232 | 2014-05-06 16:59:53 +0400 | [diff] [blame] | 125 | /* Assert that the clock of type TYPE has not been initialized yet. */ |
| 126 | assert(main_loop_tlg.tl[type] == NULL); |
| 127 | |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 128 | clock->type = type; |
Gonglei | 3fdd0ee | 2016-08-09 15:49:15 +0800 | [diff] [blame] | 129 | clock->enabled = (type == QEMU_CLOCK_VIRTUAL ? false : true); |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 130 | QLIST_INIT(&clock->timerlists); |
Paolo Bonzini | 3f53bc6 | 2017-03-03 11:50:29 +0100 | [diff] [blame] | 131 | main_loop_tlg.tl[type] = timerlist_new(type, notify_cb, NULL); |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 132 | } |
| 133 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 134 | bool qemu_clock_use_for_deadline(QEMUClockType type) |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 135 | { |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 136 | return !(use_icount && (type == QEMU_CLOCK_VIRTUAL)); |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 137 | } |
| 138 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 139 | void qemu_clock_notify(QEMUClockType type) |
Alex Bligh | b1bbfe7 | 2013-08-21 16:02:55 +0100 | [diff] [blame] | 140 | { |
| 141 | QEMUTimerList *timer_list; |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 142 | QEMUClock *clock = qemu_clock_ptr(type); |
Alex Bligh | b1bbfe7 | 2013-08-21 16:02:55 +0100 | [diff] [blame] | 143 | QLIST_FOREACH(timer_list, &clock->timerlists, list) { |
| 144 | timerlist_notify(timer_list); |
| 145 | } |
| 146 | } |
| 147 | |
Liu Ping Fan | 3c05341 | 2013-09-25 14:21:00 +0800 | [diff] [blame] | 148 | /* Disabling the clock will wait for related timerlists to stop |
| 149 | * executing qemu_run_timers. Thus, this functions should not |
| 150 | * be used from the callback of a timer that is based on @clock. |
| 151 | * Doing so would cause a deadlock. |
| 152 | * |
| 153 | * Caller should hold BQL. |
| 154 | */ |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 155 | void qemu_clock_enable(QEMUClockType type, bool enabled) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 156 | { |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 157 | QEMUClock *clock = qemu_clock_ptr(type); |
Liu Ping Fan | 3c05341 | 2013-09-25 14:21:00 +0800 | [diff] [blame] | 158 | QEMUTimerList *tl; |
Paolo Bonzini | fbdc14e | 2011-09-27 18:23:14 +0200 | [diff] [blame] | 159 | bool old = clock->enabled; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 160 | clock->enabled = enabled; |
Paolo Bonzini | fbdc14e | 2011-09-27 18:23:14 +0200 | [diff] [blame] | 161 | if (enabled && !old) { |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 162 | qemu_clock_notify(type); |
Liu Ping Fan | 3c05341 | 2013-09-25 14:21:00 +0800 | [diff] [blame] | 163 | } else if (!enabled && old) { |
| 164 | QLIST_FOREACH(tl, &clock->timerlists, list) { |
| 165 | qemu_event_wait(&tl->timers_done_ev); |
| 166 | } |
Paolo Bonzini | fbdc14e | 2011-09-27 18:23:14 +0200 | [diff] [blame] | 167 | } |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 168 | } |
| 169 | |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 170 | bool timerlist_has_timers(QEMUTimerList *timer_list) |
Paolo Bonzini | dc2dfcf | 2011-09-12 15:50:16 +0200 | [diff] [blame] | 171 | { |
Paolo Bonzini | 8caa05d | 2016-12-01 09:58:02 +0100 | [diff] [blame] | 172 | return !!atomic_read(&timer_list->active_timers); |
Paolo Bonzini | dc2dfcf | 2011-09-12 15:50:16 +0200 | [diff] [blame] | 173 | } |
| 174 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 175 | bool qemu_clock_has_timers(QEMUClockType type) |
Paolo Bonzini | dc2dfcf | 2011-09-12 15:50:16 +0200 | [diff] [blame] | 176 | { |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 177 | return timerlist_has_timers( |
Alex Bligh | 7bf8fbd | 2013-08-21 16:03:03 +0100 | [diff] [blame] | 178 | main_loop_tlg.tl[type]); |
Paolo Bonzini | dc2dfcf | 2011-09-12 15:50:16 +0200 | [diff] [blame] | 179 | } |
| 180 | |
/* True if the earliest timer on @timer_list has reached its deadline on
 * the owning clock.
 */
bool timerlist_expired(QEMUTimerList *timer_list)
{
    int64_t expire_time;

    /* Cheap lock-free fast path: an empty list has nothing expired. */
    if (!atomic_read(&timer_list->active_timers)) {
        return false;
    }

    /* Re-check under the lock; the head may have been removed meanwhile. */
    qemu_mutex_lock(&timer_list->active_timers_lock);
    if (!timer_list->active_timers) {
        qemu_mutex_unlock(&timer_list->active_timers_lock);
        return false;
    }
    /* The list is sorted, so the head carries the soonest deadline. */
    expire_time = timer_list->active_timers->expire_time;
    qemu_mutex_unlock(&timer_list->active_timers_lock);

    return expire_time <= qemu_clock_get_ns(timer_list->clock->type);
}
| 199 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 200 | bool qemu_clock_expired(QEMUClockType type) |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 201 | { |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 202 | return timerlist_expired( |
Alex Bligh | 7bf8fbd | 2013-08-21 16:03:03 +0100 | [diff] [blame] | 203 | main_loop_tlg.tl[type]); |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 204 | } |
| 205 | |
/*
 * As above, but return -1 for no deadline, and do not cap to 2^32
 * as we know the result is always positive.
 */

/* Nanoseconds until the next timer on @timer_list fires: 0 if one is
 * already due, -1 if the list is empty or its clock is disabled.
 */
int64_t timerlist_deadline_ns(QEMUTimerList *timer_list)
{
    int64_t delta;
    int64_t expire_time;

    /* Lock-free fast path: no timers means no deadline. */
    if (!atomic_read(&timer_list->active_timers)) {
        return -1;
    }

    /* A disabled clock exposes no deadline at all. */
    if (!timer_list->clock->enabled) {
        return -1;
    }

    /* The active timers list may be modified before the caller uses our return
     * value but ->notify_cb() is called when the deadline changes.  Therefore
     * the caller should notice the change and there is no race condition.
     */
    qemu_mutex_lock(&timer_list->active_timers_lock);
    if (!timer_list->active_timers) {
        qemu_mutex_unlock(&timer_list->active_timers_lock);
        return -1;
    }
    /* Sorted list: the head holds the soonest expiry. */
    expire_time = timer_list->active_timers->expire_time;
    qemu_mutex_unlock(&timer_list->active_timers_lock);

    delta = expire_time - qemu_clock_get_ns(timer_list->clock->type);

    /* Already past due: report a zero (not negative) deadline. */
    if (delta <= 0) {
        return 0;
    }

    return delta;
}
| 244 | |
/* Calculate the soonest deadline across all timerlists attached
 * to the clock. This is used for the icount timeout so we
 * ignore whether or not the clock should be used in deadline
 * calculations.
 *
 * Timers carrying any attribute bit outside @attr_mask are skipped.
 * Returns -1 when the clock is disabled or no eligible timer exists.
 */
int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask)
{
    int64_t deadline = -1;
    int64_t delta;
    int64_t expire_time;
    QEMUTimer *ts;
    QEMUTimerList *timer_list;
    QEMUClock *clock = qemu_clock_ptr(type);

    if (!clock->enabled) {
        return -1;
    }

    QLIST_FOREACH(timer_list, &clock->timerlists, list) {
        qemu_mutex_lock(&timer_list->active_timers_lock);
        ts = timer_list->active_timers;
        /* Skip all external timers */
        while (ts && (ts->attributes & ~attr_mask)) {
            ts = ts->next;
        }
        if (!ts) {
            /* No eligible timer on this list; try the next one. */
            qemu_mutex_unlock(&timer_list->active_timers_lock);
            continue;
        }
        /* Copy the expiry out before dropping the lock. */
        expire_time = ts->expire_time;
        qemu_mutex_unlock(&timer_list->active_timers_lock);

        delta = expire_time - qemu_clock_get_ns(type);
        if (delta <= 0) {
            delta = 0;
        }
        /* Keep the minimum across all lists (-1 means "none yet"). */
        deadline = qemu_soonest_timeout(deadline, delta);
    }
    return deadline;
}
| 285 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 286 | QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list) |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 287 | { |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 288 | return timer_list->clock->type; |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 289 | } |
| 290 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 291 | QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type) |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 292 | { |
Alex Bligh | 7bf8fbd | 2013-08-21 16:03:03 +0100 | [diff] [blame] | 293 | return main_loop_tlg.tl[type]; |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 294 | } |
| 295 | |
Alex Bligh | d5541d8 | 2013-08-21 16:02:50 +0100 | [diff] [blame] | 296 | void timerlist_notify(QEMUTimerList *timer_list) |
| 297 | { |
| 298 | if (timer_list->notify_cb) { |
Paolo Bonzini | 3f53bc6 | 2017-03-03 11:50:29 +0100 | [diff] [blame] | 299 | timer_list->notify_cb(timer_list->notify_opaque, timer_list->clock->type); |
Alex Bligh | d5541d8 | 2013-08-21 16:02:50 +0100 | [diff] [blame] | 300 | } else { |
| 301 | qemu_notify_event(); |
| 302 | } |
| 303 | } |
| 304 | |
Alex Bligh | 02a03a9 | 2013-08-21 16:02:41 +0100 | [diff] [blame] | 305 | /* Transition function to convert a nanosecond timeout to ms |
| 306 | * This is used where a system does not support ppoll |
| 307 | */ |
| 308 | int qemu_timeout_ns_to_ms(int64_t ns) |
| 309 | { |
| 310 | int64_t ms; |
| 311 | if (ns < 0) { |
| 312 | return -1; |
| 313 | } |
| 314 | |
| 315 | if (!ns) { |
| 316 | return 0; |
| 317 | } |
| 318 | |
| 319 | /* Always round up, because it's better to wait too long than to wait too |
| 320 | * little and effectively busy-wait |
| 321 | */ |
Laurent Vivier | 5029b96 | 2016-05-31 18:36:04 +0200 | [diff] [blame] | 322 | ms = DIV_ROUND_UP(ns, SCALE_MS); |
Alex Bligh | 02a03a9 | 2013-08-21 16:02:41 +0100 | [diff] [blame] | 323 | |
| 324 | /* To avoid overflow problems, limit this to 2^31, i.e. approx 25 days */ |
| 325 | if (ms > (int64_t) INT32_MAX) { |
| 326 | ms = INT32_MAX; |
| 327 | } |
| 328 | |
| 329 | return (int) ms; |
| 330 | } |
| 331 | |
| 332 | |
/* qemu implementation of g_poll which uses a nanosecond timeout but is
 * otherwise identical to g_poll
 */
int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout)
{
#ifdef CONFIG_PPOLL
    /* NOTE(review): the cast assumes GPollFD is layout-compatible with
     * struct pollfd — presumably guaranteed by glib on POSIX; confirm.
     */
    /* Negative timeout blocks indefinitely, as with poll(2). */
    if (timeout < 0) {
        return ppoll((struct pollfd *)fds, nfds, NULL, NULL);
    } else {
        struct timespec ts;
        int64_t tvsec = timeout / 1000000000LL;
        /* Avoid possibly overflowing and specifying a negative number of
         * seconds, which would turn a very long timeout into a busy-wait.
         */
        if (tvsec > (int64_t)INT32_MAX) {
            tvsec = INT32_MAX;
        }
        ts.tv_sec = tvsec;
        ts.tv_nsec = timeout % 1000000000LL;
        return ppoll((struct pollfd *)fds, nfds, &ts, NULL);
    }
#else
    /* No ppoll: fall back to g_poll at millisecond granularity. */
    return g_poll(fds, nfds, qemu_timeout_ns_to_ms(timeout));
#endif
}
| 358 | |
| 359 | |
Artem Pisarenko | 89a603a | 2018-10-17 14:24:19 +0600 | [diff] [blame] | 360 | void timer_init_full(QEMUTimer *ts, |
| 361 | QEMUTimerListGroup *timer_list_group, QEMUClockType type, |
| 362 | int scale, int attributes, |
| 363 | QEMUTimerCB *cb, void *opaque) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 364 | { |
Artem Pisarenko | 89a603a | 2018-10-17 14:24:19 +0600 | [diff] [blame] | 365 | if (!timer_list_group) { |
| 366 | timer_list_group = &main_loop_tlg; |
| 367 | } |
| 368 | ts->timer_list = timer_list_group->tl[type]; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 369 | ts->cb = cb; |
| 370 | ts->opaque = opaque; |
Paolo Bonzini | 4a99874 | 2011-03-11 16:33:58 +0100 | [diff] [blame] | 371 | ts->scale = scale; |
Artem Pisarenko | 89a603a | 2018-10-17 14:24:19 +0600 | [diff] [blame] | 372 | ts->attributes = attributes; |
Paolo Bonzini | 3db1ee7 | 2013-09-12 11:02:20 +0200 | [diff] [blame] | 373 | ts->expire_time = -1; |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 374 | } |
| 375 | |
Paolo Bonzini | cd1bd53 | 2014-12-24 10:57:04 +0100 | [diff] [blame] | 376 | void timer_deinit(QEMUTimer *ts) |
| 377 | { |
| 378 | assert(ts->expire_time == -1); |
| 379 | ts->timer_list = NULL; |
| 380 | } |
| 381 | |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 382 | static void timer_del_locked(QEMUTimerList *timer_list, QEMUTimer *ts) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 383 | { |
| 384 | QEMUTimer **pt, *t; |
| 385 | |
Paolo Bonzini | 3db1ee7 | 2013-09-12 11:02:20 +0200 | [diff] [blame] | 386 | ts->expire_time = -1; |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 387 | pt = &timer_list->active_timers; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 388 | for(;;) { |
| 389 | t = *pt; |
| 390 | if (!t) |
| 391 | break; |
| 392 | if (t == ts) { |
Paolo Bonzini | 8caa05d | 2016-12-01 09:58:02 +0100 | [diff] [blame] | 393 | atomic_set(pt, t->next); |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 394 | break; |
| 395 | } |
| 396 | pt = &t->next; |
| 397 | } |
| 398 | } |
| 399 | |
/* Insert @ts into @timer_list keeping the list sorted by expire_time.
 * Caller holds active_timers_lock.
 *
 * Returns true when @ts became the new list head, i.e. the earliest
 * deadline changed and the caller must notify/rearm.
 */
static bool timer_mod_ns_locked(QEMUTimerList *timer_list,
                                QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimer **pt, *t;

    /* add the timer in the sorted list */
    /* Walk past every entry expiring at or before @expire_time. */
    pt = &timer_list->active_timers;
    for (;;) {
        t = *pt;
        if (!timer_expired_ns(t, expire_time)) {
            break;
        }
        pt = &t->next;
    }
    /* Clamp negative expiry to 0; -1 is reserved for "not pending". */
    ts->expire_time = MAX(expire_time, 0);
    ts->next = *pt;
    /* Publish last: atomic_set pairs with lock-free readers of the head. */
    atomic_set(pt, ts);

    return pt == &timer_list->active_timers;
}
| 420 | |
| 421 | static void timerlist_rearm(QEMUTimerList *timer_list) |
| 422 | { |
| 423 | /* Interrupt execution to force deadline recalculation. */ |
Pavel Dovgalyuk | e76d179 | 2016-03-10 14:56:09 +0300 | [diff] [blame] | 424 | if (timer_list->clock->type == QEMU_CLOCK_VIRTUAL) { |
| 425 | qemu_start_warp_timer(); |
| 426 | } |
Paolo Bonzini | 0f809e5 | 2013-10-03 15:06:39 +0200 | [diff] [blame] | 427 | timerlist_notify(timer_list); |
| 428 | } |
| 429 | |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 430 | /* stop a timer, but do not dealloc it */ |
| 431 | void timer_del(QEMUTimer *ts) |
| 432 | { |
| 433 | QEMUTimerList *timer_list = ts->timer_list; |
| 434 | |
Paolo Bonzini | cd1bd53 | 2014-12-24 10:57:04 +0100 | [diff] [blame] | 435 | if (timer_list) { |
| 436 | qemu_mutex_lock(&timer_list->active_timers_lock); |
| 437 | timer_del_locked(timer_list, ts); |
| 438 | qemu_mutex_unlock(&timer_list->active_timers_lock); |
| 439 | } |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 440 | } |
| 441 | |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 442 | /* modify the current timer so that it will be fired when current_time |
| 443 | >= expire_time. The corresponding callback will be called. */ |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 444 | void timer_mod_ns(QEMUTimer *ts, int64_t expire_time) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 445 | { |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 446 | QEMUTimerList *timer_list = ts->timer_list; |
Paolo Bonzini | 0f809e5 | 2013-10-03 15:06:39 +0200 | [diff] [blame] | 447 | bool rearm; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 448 | |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 449 | qemu_mutex_lock(&timer_list->active_timers_lock); |
| 450 | timer_del_locked(timer_list, ts); |
Paolo Bonzini | 0f809e5 | 2013-10-03 15:06:39 +0200 | [diff] [blame] | 451 | rearm = timer_mod_ns_locked(timer_list, ts, expire_time); |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 452 | qemu_mutex_unlock(&timer_list->active_timers_lock); |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 453 | |
Paolo Bonzini | 0f809e5 | 2013-10-03 15:06:39 +0200 | [diff] [blame] | 454 | if (rearm) { |
| 455 | timerlist_rearm(timer_list); |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 456 | } |
| 457 | } |
| 458 | |
/* modify the current timer so that it will be fired when current_time
   >= expire_time or the current deadline, whichever comes earlier.
   The corresponding callback will be called. */
void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimerList *timer_list = ts->timer_list;
    bool rearm;

    qemu_mutex_lock(&timer_list->active_timers_lock);
    /* Only move the deadline earlier; -1 means "not currently pending". */
    if (ts->expire_time == -1 || ts->expire_time > expire_time) {
        if (ts->expire_time != -1) {
            timer_del_locked(timer_list, ts);
        }
        rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
    } else {
        /* Existing deadline is already sooner; leave the timer alone. */
        rearm = false;
    }
    qemu_mutex_unlock(&timer_list->active_timers_lock);

    /* A new earliest deadline requires waking the event loop. */
    if (rearm) {
        timerlist_rearm(timer_list);
    }
}
| 482 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 483 | void timer_mod(QEMUTimer *ts, int64_t expire_time) |
Paolo Bonzini | 4a99874 | 2011-03-11 16:33:58 +0100 | [diff] [blame] | 484 | { |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 485 | timer_mod_ns(ts, expire_time * ts->scale); |
Paolo Bonzini | 4a99874 | 2011-03-11 16:33:58 +0100 | [diff] [blame] | 486 | } |
| 487 | |
Paolo Bonzini | add40e9 | 2013-10-03 15:11:43 +0200 | [diff] [blame] | 488 | void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time) |
| 489 | { |
| 490 | timer_mod_anticipate_ns(ts, expire_time * ts->scale); |
| 491 | } |
| 492 | |
Alex Bligh | e93379b | 2013-08-21 16:02:39 +0100 | [diff] [blame] | 493 | bool timer_pending(QEMUTimer *ts) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 494 | { |
Paolo Bonzini | 3db1ee7 | 2013-09-12 11:02:20 +0200 | [diff] [blame] | 495 | return ts->expire_time >= 0; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 496 | } |
| 497 | |
Alex Bligh | e93379b | 2013-08-21 16:02:39 +0100 | [diff] [blame] | 498 | bool timer_expired(QEMUTimer *timer_head, int64_t current_time) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 499 | { |
Alex Bligh | e93379b | 2013-08-21 16:02:39 +0100 | [diff] [blame] | 500 | return timer_expired_ns(timer_head, current_time * timer_head->scale); |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 501 | } |
| 502 | |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 503 | bool timerlist_run_timers(QEMUTimerList *timer_list) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 504 | { |
Paolo Bonzini | 144b97c | 2012-09-19 15:52:44 +0200 | [diff] [blame] | 505 | QEMUTimer *ts; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 506 | int64_t current_time; |
Alex Bligh | f9a976b | 2013-08-21 16:02:45 +0100 | [diff] [blame] | 507 | bool progress = false; |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 508 | QEMUTimerCB *cb; |
| 509 | void *opaque; |
Artem Pisarenko | e81f867 | 2018-10-17 14:24:20 +0600 | [diff] [blame] | 510 | bool need_replay_checkpoint = false; |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 511 | |
Paolo Bonzini | 8caa05d | 2016-12-01 09:58:02 +0100 | [diff] [blame] | 512 | if (!atomic_read(&timer_list->active_timers)) { |
| 513 | return false; |
| 514 | } |
| 515 | |
Liu Ping Fan | 3c05341 | 2013-09-25 14:21:00 +0800 | [diff] [blame] | 516 | qemu_event_reset(&timer_list->timers_done_ev); |
Paolo Bonzini | 8caa05d | 2016-12-01 09:58:02 +0100 | [diff] [blame] | 517 | if (!timer_list->clock->enabled) { |
Liu Ping Fan | 3c05341 | 2013-09-25 14:21:00 +0800 | [diff] [blame] | 518 | goto out; |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 519 | } |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 520 | |
Pavel Dovgalyuk | 8bd7f71 | 2015-09-17 19:24:44 +0300 | [diff] [blame] | 521 | switch (timer_list->clock->type) { |
| 522 | case QEMU_CLOCK_REALTIME: |
| 523 | break; |
| 524 | default: |
| 525 | case QEMU_CLOCK_VIRTUAL: |
Artem Pisarenko | e81f867 | 2018-10-17 14:24:20 +0600 | [diff] [blame] | 526 | if (replay_mode != REPLAY_MODE_NONE) { |
| 527 | /* Checkpoint for virtual clock is redundant in cases where |
| 528 | * it's being triggered with only non-EXTERNAL timers, because |
| 529 | * these timers don't change guest state directly. |
| 530 | * Since it has conditional dependence on specific timers, it is |
| 531 | * subject to race conditions and requires special handling. |
| 532 | * See below. |
| 533 | */ |
| 534 | need_replay_checkpoint = true; |
Pavel Dovgalyuk | 8bd7f71 | 2015-09-17 19:24:44 +0300 | [diff] [blame] | 535 | } |
| 536 | break; |
| 537 | case QEMU_CLOCK_HOST: |
| 538 | if (!replay_checkpoint(CHECKPOINT_CLOCK_HOST)) { |
| 539 | goto out; |
| 540 | } |
| 541 | break; |
| 542 | case QEMU_CLOCK_VIRTUAL_RT: |
| 543 | if (!replay_checkpoint(CHECKPOINT_CLOCK_VIRTUAL_RT)) { |
| 544 | goto out; |
| 545 | } |
| 546 | break; |
| 547 | } |
| 548 | |
Artem Pisarenko | e81f867 | 2018-10-17 14:24:20 +0600 | [diff] [blame] | 549 | /* |
| 550 | * Extract expired timers from active timers list and and process them. |
| 551 | * |
| 552 | * In rr mode we need "filtered" checkpointing for virtual clock. The |
| 553 | * checkpoint must be recorded/replayed before processing any non-EXTERNAL timer, |
| 554 | * and that must only be done once since the clock value stays the same. Because |
| 555 | * non-EXTERNAL timers may appear in the timers list while it being processed, |
| 556 | * the checkpoint can be issued at a time until no timers are left and we are |
| 557 | * done". |
| 558 | */ |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 559 | current_time = qemu_clock_get_ns(timer_list->clock->type); |
Artem Pisarenko | e81f867 | 2018-10-17 14:24:20 +0600 | [diff] [blame] | 560 | qemu_mutex_lock(&timer_list->active_timers_lock); |
| 561 | while ((ts = timer_list->active_timers)) { |
Alex Bligh | e93379b | 2013-08-21 16:02:39 +0100 | [diff] [blame] | 562 | if (!timer_expired_ns(ts, current_time)) { |
Artem Pisarenko | e81f867 | 2018-10-17 14:24:20 +0600 | [diff] [blame] | 563 | /* No expired timers left. The checkpoint can be skipped |
| 564 | * if no timers fired or they were all external. |
| 565 | */ |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 566 | break; |
Stefan Weil | 45c7b37 | 2011-03-24 21:31:24 +0100 | [diff] [blame] | 567 | } |
Artem Pisarenko | e81f867 | 2018-10-17 14:24:20 +0600 | [diff] [blame] | 568 | if (need_replay_checkpoint |
| 569 | && !(ts->attributes & QEMU_TIMER_ATTR_EXTERNAL)) { |
| 570 | /* once we got here, checkpoint clock only once */ |
| 571 | need_replay_checkpoint = false; |
| 572 | qemu_mutex_unlock(&timer_list->active_timers_lock); |
| 573 | if (!replay_checkpoint(CHECKPOINT_CLOCK_VIRTUAL)) { |
| 574 | goto out; |
| 575 | } |
| 576 | qemu_mutex_lock(&timer_list->active_timers_lock); |
| 577 | /* The lock was released; start over again in case the list was |
| 578 | * modified. |
| 579 | */ |
| 580 | continue; |
| 581 | } |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 582 | |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 583 | /* remove timer from the list before calling the callback */ |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 584 | timer_list->active_timers = ts->next; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 585 | ts->next = NULL; |
Paolo Bonzini | 3db1ee7 | 2013-09-12 11:02:20 +0200 | [diff] [blame] | 586 | ts->expire_time = -1; |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 587 | cb = ts->cb; |
| 588 | opaque = ts->opaque; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 589 | |
| 590 | /* run the callback (the timer list can be modified) */ |
Artem Pisarenko | e81f867 | 2018-10-17 14:24:20 +0600 | [diff] [blame] | 591 | qemu_mutex_unlock(&timer_list->active_timers_lock); |
Stefan Hajnoczi | 978f220 | 2013-09-12 11:02:19 +0200 | [diff] [blame] | 592 | cb(opaque); |
Artem Pisarenko | e81f867 | 2018-10-17 14:24:20 +0600 | [diff] [blame] | 593 | qemu_mutex_lock(&timer_list->active_timers_lock); |
| 594 | |
Alex Bligh | f9a976b | 2013-08-21 16:02:45 +0100 | [diff] [blame] | 595 | progress = true; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 596 | } |
Artem Pisarenko | e81f867 | 2018-10-17 14:24:20 +0600 | [diff] [blame] | 597 | qemu_mutex_unlock(&timer_list->active_timers_lock); |
Liu Ping Fan | 3c05341 | 2013-09-25 14:21:00 +0800 | [diff] [blame] | 598 | |
| 599 | out: |
| 600 | qemu_event_set(&timer_list->timers_done_ev); |
Alex Bligh | f9a976b | 2013-08-21 16:02:45 +0100 | [diff] [blame] | 601 | return progress; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 602 | } |
| 603 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 604 | bool qemu_clock_run_timers(QEMUClockType type) |
| 605 | { |
Alex Bligh | 7bf8fbd | 2013-08-21 16:03:03 +0100 | [diff] [blame] | 606 | return timerlist_run_timers(main_loop_tlg.tl[type]); |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 607 | } |
| 608 | |
Alex Bligh | d5541d8 | 2013-08-21 16:02:50 +0100 | [diff] [blame] | 609 | void timerlistgroup_init(QEMUTimerListGroup *tlg, |
| 610 | QEMUTimerListNotifyCB *cb, void *opaque) |
Alex Bligh | 754d6a5 | 2013-08-21 16:02:48 +0100 | [diff] [blame] | 611 | { |
| 612 | QEMUClockType type; |
| 613 | for (type = 0; type < QEMU_CLOCK_MAX; type++) { |
Alex Bligh | d5541d8 | 2013-08-21 16:02:50 +0100 | [diff] [blame] | 614 | tlg->tl[type] = timerlist_new(type, cb, opaque); |
Alex Bligh | 754d6a5 | 2013-08-21 16:02:48 +0100 | [diff] [blame] | 615 | } |
| 616 | } |
| 617 | |
| 618 | void timerlistgroup_deinit(QEMUTimerListGroup *tlg) |
| 619 | { |
| 620 | QEMUClockType type; |
| 621 | for (type = 0; type < QEMU_CLOCK_MAX; type++) { |
| 622 | timerlist_free(tlg->tl[type]); |
| 623 | } |
| 624 | } |
| 625 | |
| 626 | bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg) |
| 627 | { |
| 628 | QEMUClockType type; |
| 629 | bool progress = false; |
| 630 | for (type = 0; type < QEMU_CLOCK_MAX; type++) { |
| 631 | progress |= timerlist_run_timers(tlg->tl[type]); |
| 632 | } |
| 633 | return progress; |
| 634 | } |
| 635 | |
| 636 | int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg) |
| 637 | { |
| 638 | int64_t deadline = -1; |
| 639 | QEMUClockType type; |
| 640 | for (type = 0; type < QEMU_CLOCK_MAX; type++) { |
Pavel Dovgalyuk | 8bd7f71 | 2015-09-17 19:24:44 +0300 | [diff] [blame] | 641 | if (qemu_clock_use_for_deadline(type)) { |
Pavel Dovgalyuk | e4dab94 | 2018-07-25 15:15:26 +0300 | [diff] [blame] | 642 | deadline = qemu_soonest_timeout(deadline, |
| 643 | timerlist_deadline_ns(tlg->tl[type])); |
Alex Bligh | 754d6a5 | 2013-08-21 16:02:48 +0100 | [diff] [blame] | 644 | } |
| 645 | } |
| 646 | return deadline; |
| 647 | } |
| 648 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 649 | int64_t qemu_clock_get_ns(QEMUClockType type) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 650 | { |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 651 | switch (type) { |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 652 | case QEMU_CLOCK_REALTIME: |
| 653 | return get_clock(); |
| 654 | default: |
| 655 | case QEMU_CLOCK_VIRTUAL: |
| 656 | if (use_icount) { |
| 657 | return cpu_get_icount(); |
| 658 | } else { |
| 659 | return cpu_get_clock(); |
| 660 | } |
| 661 | case QEMU_CLOCK_HOST: |
Dr. David Alan Gilbert | 3c2d4c8 | 2019-07-24 12:58:23 +0100 | [diff] [blame] | 662 | return REPLAY_CLOCK(REPLAY_CLOCK_HOST, get_clock_realtime()); |
Pavel Dovgalyuk | 4e7fa73 | 2014-11-26 13:40:50 +0300 | [diff] [blame] | 663 | case QEMU_CLOCK_VIRTUAL_RT: |
Pavel Dovgalyuk | 8eda206 | 2015-09-17 19:24:28 +0300 | [diff] [blame] | 664 | return REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT, cpu_get_clock()); |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 665 | } |
| 666 | } |
| 667 | |
Paolo Bonzini | 3f53bc6 | 2017-03-03 11:50:29 +0100 | [diff] [blame] | 668 | void init_clocks(QEMUTimerListNotifyCB *notify_cb) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 669 | { |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 670 | QEMUClockType type; |
| 671 | for (type = 0; type < QEMU_CLOCK_MAX; type++) { |
Paolo Bonzini | 3f53bc6 | 2017-03-03 11:50:29 +0100 | [diff] [blame] | 672 | qemu_clock_init(type, notify_cb); |
Paolo Bonzini | 744ca8e | 2012-10-29 15:26:28 +0100 | [diff] [blame] | 673 | } |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 674 | |
Alex Bligh | cd758dd | 2013-08-21 16:02:44 +0100 | [diff] [blame] | 675 | #ifdef CONFIG_PRCTL_PR_SET_TIMERSLACK |
| 676 | prctl(PR_SET_TIMERSLACK, 1, 0, 0, 0); |
| 677 | #endif |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 678 | } |
| 679 | |
Alex Bligh | e93379b | 2013-08-21 16:02:39 +0100 | [diff] [blame] | 680 | uint64_t timer_expire_time_ns(QEMUTimer *ts) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 681 | { |
Alex Bligh | e93379b | 2013-08-21 16:02:39 +0100 | [diff] [blame] | 682 | return timer_pending(ts) ? ts->expire_time : -1; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 683 | } |
| 684 | |
Alex Bligh | 40daca5 | 2013-08-21 16:03:02 +0100 | [diff] [blame] | 685 | bool qemu_clock_run_all_timers(void) |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 686 | { |
Alex Bligh | f9a976b | 2013-08-21 16:02:45 +0100 | [diff] [blame] | 687 | bool progress = false; |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 688 | QEMUClockType type; |
Alex Bligh | 6d32717 | 2013-08-21 16:02:59 +0100 | [diff] [blame] | 689 | |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 690 | for (type = 0; type < QEMU_CLOCK_MAX; type++) { |
Paolo Bonzini | 6b8f018 | 2017-03-02 19:56:40 +0100 | [diff] [blame] | 691 | if (qemu_clock_use_for_deadline(type)) { |
| 692 | progress |= qemu_clock_run_timers(type); |
| 693 | } |
Alex Bligh | ff83c66 | 2013-08-21 16:02:46 +0100 | [diff] [blame] | 694 | } |
Peter Portante | 158fd3c | 2012-04-05 11:00:45 -0400 | [diff] [blame] | 695 | |
Alex Bligh | f9a976b | 2013-08-21 16:02:45 +0100 | [diff] [blame] | 696 | return progress; |
Paolo Bonzini | db1a497 | 2010-03-10 11:38:55 +0100 | [diff] [blame] | 697 | } |