blob: 809a7e8389fcd3add34bb01a8e824a229199df95 [file] [log] [blame]
Paolo Bonzini0ce265f2016-11-22 11:34:02 +01001/*
2 * Physical memory access templates
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2015 Linaro, Inc.
6 * Copyright (c) 2016 Red Hat, Inc.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22/* warning: addr must be aligned */
23static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
24 hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
25 enum device_endian endian)
26{
27 uint8_t *ptr;
28 uint64_t val;
29 MemoryRegion *mr;
30 hwaddr l = 4;
31 hwaddr addr1;
32 MemTxResult r;
33 bool release_lock = false;
34
35 RCU_READ_LOCK();
Peter Maydellbc6b1ce2018-05-31 14:50:52 +010036 mr = TRANSLATE(addr, &addr1, &l, false, attrs);
Eric Augera99761d2018-06-13 15:19:06 +020037 if (l < 4 || !memory_access_is_direct(mr, false)) {
Paolo Bonzini0ce265f2016-11-22 11:34:02 +010038 release_lock |= prepare_mmio_access(mr);
39
40 /* I/O case */
Tony Nguyend5d680c2019-08-24 04:36:52 +100041 /* TODO: Merge bswap32 into memory_region_dispatch_read. */
42 r = memory_region_dispatch_read(mr, addr1, &val,
43 MO_32 | devend_memop(endian), attrs);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +010044#if defined(TARGET_WORDS_BIGENDIAN)
45 if (endian == DEVICE_LITTLE_ENDIAN) {
46 val = bswap32(val);
47 }
48#else
49 if (endian == DEVICE_BIG_ENDIAN) {
50 val = bswap32(val);
51 }
52#endif
53 } else {
54 /* RAM case */
Eric Augera99761d2018-06-13 15:19:06 +020055 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +010056 switch (endian) {
57 case DEVICE_LITTLE_ENDIAN:
58 val = ldl_le_p(ptr);
59 break;
60 case DEVICE_BIG_ENDIAN:
61 val = ldl_be_p(ptr);
62 break;
63 default:
64 val = ldl_p(ptr);
65 break;
66 }
67 r = MEMTX_OK;
68 }
69 if (result) {
70 *result = r;
71 }
72 if (release_lock) {
73 qemu_mutex_unlock_iothread();
74 }
75 RCU_READ_UNLOCK();
76 return val;
77}
78
79uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
80 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
81{
82 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
83 DEVICE_NATIVE_ENDIAN);
84}
85
86uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
87 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
88{
89 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
90 DEVICE_LITTLE_ENDIAN);
91}
92
93uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
94 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
95{
96 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
97 DEVICE_BIG_ENDIAN);
98}
99
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100100/* warning: addr must be aligned */
101static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
102 hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
103 enum device_endian endian)
104{
105 uint8_t *ptr;
106 uint64_t val;
107 MemoryRegion *mr;
108 hwaddr l = 8;
109 hwaddr addr1;
110 MemTxResult r;
111 bool release_lock = false;
112
113 RCU_READ_LOCK();
Peter Maydellbc6b1ce2018-05-31 14:50:52 +0100114 mr = TRANSLATE(addr, &addr1, &l, false, attrs);
Eric Augera99761d2018-06-13 15:19:06 +0200115 if (l < 8 || !memory_access_is_direct(mr, false)) {
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100116 release_lock |= prepare_mmio_access(mr);
117
118 /* I/O case */
Tony Nguyend5d680c2019-08-24 04:36:52 +1000119 /* TODO: Merge bswap64 into memory_region_dispatch_read. */
120 r = memory_region_dispatch_read(mr, addr1, &val,
121 MO_64 | devend_memop(endian), attrs);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100122#if defined(TARGET_WORDS_BIGENDIAN)
123 if (endian == DEVICE_LITTLE_ENDIAN) {
124 val = bswap64(val);
125 }
126#else
127 if (endian == DEVICE_BIG_ENDIAN) {
128 val = bswap64(val);
129 }
130#endif
131 } else {
132 /* RAM case */
Eric Augera99761d2018-06-13 15:19:06 +0200133 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100134 switch (endian) {
135 case DEVICE_LITTLE_ENDIAN:
136 val = ldq_le_p(ptr);
137 break;
138 case DEVICE_BIG_ENDIAN:
139 val = ldq_be_p(ptr);
140 break;
141 default:
142 val = ldq_p(ptr);
143 break;
144 }
145 r = MEMTX_OK;
146 }
147 if (result) {
148 *result = r;
149 }
150 if (release_lock) {
151 qemu_mutex_unlock_iothread();
152 }
153 RCU_READ_UNLOCK();
154 return val;
155}
156
157uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
158 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
159{
160 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
161 DEVICE_NATIVE_ENDIAN);
162}
163
164uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
165 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
166{
167 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
168 DEVICE_LITTLE_ENDIAN);
169}
170
171uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
172 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
173{
174 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
175 DEVICE_BIG_ENDIAN);
176}
177
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100178uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
179 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
180{
181 uint8_t *ptr;
182 uint64_t val;
183 MemoryRegion *mr;
184 hwaddr l = 1;
185 hwaddr addr1;
186 MemTxResult r;
187 bool release_lock = false;
188
189 RCU_READ_LOCK();
Peter Maydellbc6b1ce2018-05-31 14:50:52 +0100190 mr = TRANSLATE(addr, &addr1, &l, false, attrs);
Eric Augera99761d2018-06-13 15:19:06 +0200191 if (!memory_access_is_direct(mr, false)) {
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100192 release_lock |= prepare_mmio_access(mr);
193
194 /* I/O case */
Tony Nguyen07f08342019-08-24 04:36:51 +1000195 r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100196 } else {
197 /* RAM case */
Eric Augera99761d2018-06-13 15:19:06 +0200198 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100199 val = ldub_p(ptr);
200 r = MEMTX_OK;
201 }
202 if (result) {
203 *result = r;
204 }
205 if (release_lock) {
206 qemu_mutex_unlock_iothread();
207 }
208 RCU_READ_UNLOCK();
209 return val;
210}
211
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100212/* warning: addr must be aligned */
213static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
214 hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
215 enum device_endian endian)
216{
217 uint8_t *ptr;
218 uint64_t val;
219 MemoryRegion *mr;
220 hwaddr l = 2;
221 hwaddr addr1;
222 MemTxResult r;
223 bool release_lock = false;
224
225 RCU_READ_LOCK();
Peter Maydellbc6b1ce2018-05-31 14:50:52 +0100226 mr = TRANSLATE(addr, &addr1, &l, false, attrs);
Eric Augera99761d2018-06-13 15:19:06 +0200227 if (l < 2 || !memory_access_is_direct(mr, false)) {
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100228 release_lock |= prepare_mmio_access(mr);
229
230 /* I/O case */
Tony Nguyend5d680c2019-08-24 04:36:52 +1000231 /* TODO: Merge bswap16 into memory_region_dispatch_read. */
232 r = memory_region_dispatch_read(mr, addr1, &val,
233 MO_16 | devend_memop(endian), attrs);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100234#if defined(TARGET_WORDS_BIGENDIAN)
235 if (endian == DEVICE_LITTLE_ENDIAN) {
236 val = bswap16(val);
237 }
238#else
239 if (endian == DEVICE_BIG_ENDIAN) {
240 val = bswap16(val);
241 }
242#endif
243 } else {
244 /* RAM case */
Eric Augera99761d2018-06-13 15:19:06 +0200245 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100246 switch (endian) {
247 case DEVICE_LITTLE_ENDIAN:
248 val = lduw_le_p(ptr);
249 break;
250 case DEVICE_BIG_ENDIAN:
251 val = lduw_be_p(ptr);
252 break;
253 default:
254 val = lduw_p(ptr);
255 break;
256 }
257 r = MEMTX_OK;
258 }
259 if (result) {
260 *result = r;
261 }
262 if (release_lock) {
263 qemu_mutex_unlock_iothread();
264 }
265 RCU_READ_UNLOCK();
266 return val;
267}
268
269uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
270 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
271{
272 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
273 DEVICE_NATIVE_ENDIAN);
274}
275
276uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
277 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
278{
279 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
280 DEVICE_LITTLE_ENDIAN);
281}
282
283uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
284 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
285{
286 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
287 DEVICE_BIG_ENDIAN);
288}
289
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100290/* warning: addr must be aligned. The ram page is not masked as dirty
291 and the code inside is not invalidated. It is useful if the dirty
292 bits are used to track modified PTEs */
293void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
294 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
295{
296 uint8_t *ptr;
297 MemoryRegion *mr;
298 hwaddr l = 4;
299 hwaddr addr1;
300 MemTxResult r;
301 uint8_t dirty_log_mask;
302 bool release_lock = false;
303
304 RCU_READ_LOCK();
Peter Maydellbc6b1ce2018-05-31 14:50:52 +0100305 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
Eric Augera99761d2018-06-13 15:19:06 +0200306 if (l < 4 || !memory_access_is_direct(mr, true)) {
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100307 release_lock |= prepare_mmio_access(mr);
308
Tony Nguyen07f08342019-08-24 04:36:51 +1000309 r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100310 } else {
Eric Augera99761d2018-06-13 15:19:06 +0200311 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100312 stl_p(ptr, val);
313
314 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
315 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
316 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
317 4, dirty_log_mask);
318 r = MEMTX_OK;
319 }
320 if (result) {
321 *result = r;
322 }
323 if (release_lock) {
324 qemu_mutex_unlock_iothread();
325 }
326 RCU_READ_UNLOCK();
327}
328
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100329/* warning: addr must be aligned */
330static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
331 hwaddr addr, uint32_t val, MemTxAttrs attrs,
332 MemTxResult *result, enum device_endian endian)
333{
334 uint8_t *ptr;
335 MemoryRegion *mr;
336 hwaddr l = 4;
337 hwaddr addr1;
338 MemTxResult r;
339 bool release_lock = false;
340
341 RCU_READ_LOCK();
Peter Maydellbc6b1ce2018-05-31 14:50:52 +0100342 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
Eric Augera99761d2018-06-13 15:19:06 +0200343 if (l < 4 || !memory_access_is_direct(mr, true)) {
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100344 release_lock |= prepare_mmio_access(mr);
345
346#if defined(TARGET_WORDS_BIGENDIAN)
347 if (endian == DEVICE_LITTLE_ENDIAN) {
348 val = bswap32(val);
349 }
350#else
351 if (endian == DEVICE_BIG_ENDIAN) {
352 val = bswap32(val);
353 }
354#endif
Tony Nguyend5d680c2019-08-24 04:36:52 +1000355 /* TODO: Merge bswap32 into memory_region_dispatch_write. */
356 r = memory_region_dispatch_write(mr, addr1, val,
357 MO_32 | devend_memop(endian), attrs);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100358 } else {
359 /* RAM case */
Eric Augera99761d2018-06-13 15:19:06 +0200360 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100361 switch (endian) {
362 case DEVICE_LITTLE_ENDIAN:
363 stl_le_p(ptr, val);
364 break;
365 case DEVICE_BIG_ENDIAN:
366 stl_be_p(ptr, val);
367 break;
368 default:
369 stl_p(ptr, val);
370 break;
371 }
Eric Augera99761d2018-06-13 15:19:06 +0200372 invalidate_and_set_dirty(mr, addr1, 4);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100373 r = MEMTX_OK;
374 }
375 if (result) {
376 *result = r;
377 }
378 if (release_lock) {
379 qemu_mutex_unlock_iothread();
380 }
381 RCU_READ_UNLOCK();
382}
383
384void glue(address_space_stl, SUFFIX)(ARG1_DECL,
385 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
386{
387 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
388 result, DEVICE_NATIVE_ENDIAN);
389}
390
391void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
392 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
393{
394 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
395 result, DEVICE_LITTLE_ENDIAN);
396}
397
398void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
399 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
400{
401 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
402 result, DEVICE_BIG_ENDIAN);
403}
404
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100405void glue(address_space_stb, SUFFIX)(ARG1_DECL,
406 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
407{
408 uint8_t *ptr;
409 MemoryRegion *mr;
410 hwaddr l = 1;
411 hwaddr addr1;
412 MemTxResult r;
413 bool release_lock = false;
414
415 RCU_READ_LOCK();
Peter Maydellbc6b1ce2018-05-31 14:50:52 +0100416 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
Eric Augera99761d2018-06-13 15:19:06 +0200417 if (!memory_access_is_direct(mr, true)) {
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100418 release_lock |= prepare_mmio_access(mr);
Tony Nguyen07f08342019-08-24 04:36:51 +1000419 r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100420 } else {
421 /* RAM case */
Eric Augera99761d2018-06-13 15:19:06 +0200422 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100423 stb_p(ptr, val);
Eric Augera99761d2018-06-13 15:19:06 +0200424 invalidate_and_set_dirty(mr, addr1, 1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100425 r = MEMTX_OK;
426 }
427 if (result) {
428 *result = r;
429 }
430 if (release_lock) {
431 qemu_mutex_unlock_iothread();
432 }
433 RCU_READ_UNLOCK();
434}
435
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100436/* warning: addr must be aligned */
437static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
438 hwaddr addr, uint32_t val, MemTxAttrs attrs,
439 MemTxResult *result, enum device_endian endian)
440{
441 uint8_t *ptr;
442 MemoryRegion *mr;
443 hwaddr l = 2;
444 hwaddr addr1;
445 MemTxResult r;
446 bool release_lock = false;
447
448 RCU_READ_LOCK();
Peter Maydellbc6b1ce2018-05-31 14:50:52 +0100449 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
Eric Augera99761d2018-06-13 15:19:06 +0200450 if (l < 2 || !memory_access_is_direct(mr, true)) {
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100451 release_lock |= prepare_mmio_access(mr);
452
453#if defined(TARGET_WORDS_BIGENDIAN)
454 if (endian == DEVICE_LITTLE_ENDIAN) {
455 val = bswap16(val);
456 }
457#else
458 if (endian == DEVICE_BIG_ENDIAN) {
459 val = bswap16(val);
460 }
461#endif
Tony Nguyend5d680c2019-08-24 04:36:52 +1000462 /* TODO: Merge bswap16 into memory_region_dispatch_write. */
463 r = memory_region_dispatch_write(mr, addr1, val,
464 MO_16 | devend_memop(endian), attrs);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100465 } else {
466 /* RAM case */
Eric Augera99761d2018-06-13 15:19:06 +0200467 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100468 switch (endian) {
469 case DEVICE_LITTLE_ENDIAN:
470 stw_le_p(ptr, val);
471 break;
472 case DEVICE_BIG_ENDIAN:
473 stw_be_p(ptr, val);
474 break;
475 default:
476 stw_p(ptr, val);
477 break;
478 }
Eric Augera99761d2018-06-13 15:19:06 +0200479 invalidate_and_set_dirty(mr, addr1, 2);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100480 r = MEMTX_OK;
481 }
482 if (result) {
483 *result = r;
484 }
485 if (release_lock) {
486 qemu_mutex_unlock_iothread();
487 }
488 RCU_READ_UNLOCK();
489}
490
491void glue(address_space_stw, SUFFIX)(ARG1_DECL,
492 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
493{
494 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
495 DEVICE_NATIVE_ENDIAN);
496}
497
498void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
499 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
500{
501 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
502 DEVICE_LITTLE_ENDIAN);
503}
504
505void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
506 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
507{
508 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
509 DEVICE_BIG_ENDIAN);
510}
511
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100512static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
513 hwaddr addr, uint64_t val, MemTxAttrs attrs,
514 MemTxResult *result, enum device_endian endian)
515{
516 uint8_t *ptr;
517 MemoryRegion *mr;
518 hwaddr l = 8;
519 hwaddr addr1;
520 MemTxResult r;
521 bool release_lock = false;
522
523 RCU_READ_LOCK();
Peter Maydellbc6b1ce2018-05-31 14:50:52 +0100524 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
Eric Augera99761d2018-06-13 15:19:06 +0200525 if (l < 8 || !memory_access_is_direct(mr, true)) {
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100526 release_lock |= prepare_mmio_access(mr);
527
528#if defined(TARGET_WORDS_BIGENDIAN)
529 if (endian == DEVICE_LITTLE_ENDIAN) {
530 val = bswap64(val);
531 }
532#else
533 if (endian == DEVICE_BIG_ENDIAN) {
534 val = bswap64(val);
535 }
536#endif
Tony Nguyend5d680c2019-08-24 04:36:52 +1000537 /* TODO: Merge bswap64 into memory_region_dispatch_write. */
538 r = memory_region_dispatch_write(mr, addr1, val,
539 MO_64 | devend_memop(endian), attrs);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100540 } else {
541 /* RAM case */
Eric Augera99761d2018-06-13 15:19:06 +0200542 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100543 switch (endian) {
544 case DEVICE_LITTLE_ENDIAN:
545 stq_le_p(ptr, val);
546 break;
547 case DEVICE_BIG_ENDIAN:
548 stq_be_p(ptr, val);
549 break;
550 default:
551 stq_p(ptr, val);
552 break;
553 }
Eric Augera99761d2018-06-13 15:19:06 +0200554 invalidate_and_set_dirty(mr, addr1, 8);
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100555 r = MEMTX_OK;
556 }
557 if (result) {
558 *result = r;
559 }
560 if (release_lock) {
561 qemu_mutex_unlock_iothread();
562 }
563 RCU_READ_UNLOCK();
564}
565
566void glue(address_space_stq, SUFFIX)(ARG1_DECL,
567 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
568{
569 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
570 DEVICE_NATIVE_ENDIAN);
571}
572
573void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
574 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
575{
576 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
577 DEVICE_LITTLE_ENDIAN);
578}
579
580void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
581 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
582{
583 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
584 DEVICE_BIG_ENDIAN);
585}
586
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100587#undef ARG1_DECL
588#undef ARG1
589#undef SUFFIX
590#undef TRANSLATE
Paolo Bonzini0ce265f2016-11-22 11:34:02 +0100591#undef RCU_READ_LOCK
592#undef RCU_READ_UNLOCK