/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif

#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
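
/*
 * Each page record starts with a be64 word built as (offset | flags):
 * offset is always TARGET_PAGE_SIZE aligned, so its low bits are free to
 * carry the RAM_SAVE_FLAG_* values above.  save_block_hdr() packs the
 * word and ram_load() unpacks it with "addr & ~TARGET_PAGE_MASK".
 */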

static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

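/*
 * A range is all zeroes iff buffer_find_nonzero_offset() finds no nonzero
 * byte before running off the end, i.e. returns 'size' itself.
 */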
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* struct containing the XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE pages */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .cache = NULL,
};
/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

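/*
 * Resize the XBZRLE page cache to roughly 'new_size' bytes.  The cache
 * holds whole target pages, and the page count is rounded down to a
 * power of two (cf. the pow2floor() fallback below), so the byte size
 * actually in effect is returned to the caller.
 */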
int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

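/*
 * save_block_hdr: write the header for one page record
 *
 * Wire layout: <be64: offset|cont|flag> [<byte: idstr len> <idstr>]
 * The block id string is sent only when 'cont' is clear, i.e. when this
 * page does not belong to the same block as the previous one.  Returns
 * the number of header bytes written.
 */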
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}

#define ENCODING_FLAG_XBZRLE 0x1

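/*
 * save_xbzrle_page: send the current page as an XBZRLE delta against the
 * copy cached from a previous pass
 *
 * Record layout: <block hdr> <byte: ENCODING_FLAG_XBZRLE>
 *                <be16: encoded len> <encoded bytes>
 *
 * Returns bytes sent, 0 for an unmodified page, or -1 when the page must
 * be sent in full (cache miss or encoding overflow).
 */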
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr, current_data);
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* Update the cache copy so the next delta is computed against the
       data the destination now holds */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}

/* This is the last block that we have visited searching for dirty pages */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

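/*
 * Find the next dirty page at or after 'start' (an offset inside 'mr'),
 * clear its bit in the migration bitmap, and return its offset.  During
 * the bulk stage every page is known dirty, so the bitmap walk is skipped
 * and pages are handed out sequentially.
 */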
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

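/*
 * Fold the DIRTY_MEMORY_MIGRATION bits for [start, start + length) into
 * the migration bitmap.  When 'start' is word aligned, whole bitmap words
 * are OR-ed in and the newly set bits counted with ctpopl(); otherwise we
 * fall back to testing and resetting one page at a time.
 */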
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Is the start address aligned to the start of a bitmap word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}

/* Needs iothread lock! */

static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later.  For now:
               check whether the bytes dirtied in this period exceed half of
               the bytes actually transferred since the last time we were in
               this routine.  If that happens more than N times (for now
               N == 4) we turn on the throttling logic. */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}

/*
 * ram_save_block: write one page of memory to the stream f
 *
 * Returns:  the number of bytes written;
 *           0 means no dirty pages remained
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            int ret;
            uint8_t *p;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as a normal page */
            bytes_sent = -1;
            ret = ram_control_save_page(f, block->offset,
                                        offset, TARGET_PAGE_SIZE, &bytes_sent);

            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                if (ret != RAM_SAVE_CONTROL_DELAYED) {
                    if (bytes_sent > 0) {
                        acct_info.norm_pages++;
                    } else if (bytes_sent == 0) {
                        acct_info.dup_pages++;
                    }
                }
            } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
                acct_info.dup_pages++;
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, 0);
                bytes_sent++;
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* Stop once a page was actually sent; if it was unmodified,
               continue to the next one */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

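/*
 * Live RAM migration runs in three phases: ram_save_setup() transmits the
 * RAM block list, ram_save_iterate() streams dirty pages while the guest
 * keeps running, and ram_save_complete() flushes whatever is still dirty
 * once the guest is stopped.  ram_save_pending() lets the migration core
 * decide when the remainder is small enough to switch over.
 */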
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;
    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            DPRINTF("Error allocating encoded_buf\n");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            DPRINTF("Error allocating current_buf\n");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st
           time and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check once
           every few iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
                1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

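/*
 * load_xbzrle: decode one XBZRLE record into the page at 'host'
 *
 * Reads back the <flags byte> <be16 len> <encoded bytes> sequence written
 * by save_xbzrle_page() and applies the delta on top of the destination's
 * current copy of the page.  Returns 0 on success, -1 on a bad record.
 */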
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

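/*
 * host_from_stream_offset: turn a (block id, offset) pair from the stream
 * into a host pointer.  When RAM_SAVE_FLAG_CONTINUE is set the id was
 * omitted on the wire and the block from the previous lookup (kept in a
 * static) is reused.
 */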
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

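/*
 * ram_load: consume a version-4 RAM stream
 *
 * The stream is a sequence of be64 header words with the flags in the low
 * bits: MEM_SIZE carries the block list, COMPRESS a single fill byte for a
 * whole page, PAGE a full page, XBZRLE a compressed delta, HOOK a callout
 * to the transport (RDMA), and EOS ends the stream.
 */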
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            /* Synchronize RAM block list */
            char id[256];
            ram_addr_t length;
            ram_addr_t total_ram_bytes = addr;

            while (total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (block->length != length) {
                            fprintf(stderr,
                                    "Length mismatch: %s: " RAM_ADDR_FMT
                                    " in != " RAM_ADDR_FMT "\n", id, length,
                                    block->length);
                            ret = -EINVAL;
                            goto done;
                        }
                        break;
                    }
                }

                if (!block) {
                    fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                            "accept migration\n", id);
                    ret = -EINVAL;
                    goto done;
                }

                total_ram_bytes -= length;
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        } else if (flags & RAM_SAVE_FLAG_HOOK) {
            ram_control_load_hook(f, flags);
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

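/*
 * A minimal sketch of how these handlers get hooked up (the actual call
 * lives outside this file, in the early startup code):
 *
 *   register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
 *
 * The version number 4 matches the check at the top of ram_load().
 */
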
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}

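/*
 * Parse the -soundhw command line argument: a comma-separated list of
 * card names, "all" to enable every registered card, or "help" to print
 * the list of valid names and exit.
 */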
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                } else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    fprintf(stderr, "ISA bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    fprintf(stderr, "PCI bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}

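/*
 * Parse the canonical 36-character UUID form, e.g.
 * "550e8400-e29b-41d4-a716-446655440000", into 16 raw bytes using
 * sscanf() with UUID_FMT.  Returns 0 on success, -1 on malformed input.
 */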
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}

void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}

TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}

/* Stub function that gets run on the vcpu when it is brought out of the
   VM to run inside qemu via async_run_on_cpu() */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30 * 1000);
    qemu_mutex_lock_iothread();
}

/* To reduce the dirty rate, explicitly disallow the VCPUs from spending
   much time in the VM.  The migration thread will try to catch up.
   The workload will experience a performance drop.
*/
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}

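/*
 * Each mig_sleep_cpu() call parks a vcpu for 30 ms and
 * check_guest_throttling() re-queues the sleep roughly every 40 ms, so a
 * throttled guest spends on the order of three quarters of its time
 * asleep.
 */
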
static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1 - t0) / 1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}