/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "trace.h"
#include "signal-common.h"

struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

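/* Translate a signal number between host and guest numbering; values
 * outside the table range are returned unchanged.
 */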
int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

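/* Equivalents of sigaddset()/sigismember() that operate directly on the
 * guest's target_sigset_t layout (TARGET_NSIG_BPW bits per word).
 */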
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

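/* Conversions for the single-word "old" sigset representation used by the
 * non-RT signal interfaces; only the first word is significant.
 */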
void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}

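/* Block all host signals for this thread and mark signal_pending.
 * Returns the previous value of signal_pending, so a non-zero result tells
 * the caller that a signal arrived first and the guest syscall should be
 * restarted (see do_sigprocmask() and do_sigaction()).
 */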
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
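
/* Illustrative sketch only (not code from this file): a syscall-emulation
 * path is expected to convert the guest mask, call do_sigprocmask() and
 * handle the restart case, roughly:
 *
 *     sigset_t host_set, host_old;
 *     target_to_host_sigset(&host_set, &guest_set);
 *     ret = do_sigprocmask(how, &host_set, &host_old);
 *     if (ret == -TARGET_ERESTARTSYS) {
 *         (return the restart error to the main loop)
 *     }
 *
 * Here guest_set, how and ret are placeholder names.
 */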

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

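/* Return non-zero if sp lies within the currently registered alternate
 * signal stack.
 */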
int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    __put_user(target_sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &uss->ss_size);
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support the case where POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default. */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals. */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}

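/* Build the host<->guest signal translation tables, record the host's
 * current signal mask for this thread, and install host_signal_handler()
 * for every signal whose default action is fatal.
 */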
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef TARGET_GPROF
        if (i == SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals. */
        if (fatal_signal(i))
            sigaction(host_sig, &act, NULL);
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }
598
Stefan Weil0c587512011-04-28 17:20:32 +0200599 /* The proper exit code for dying from an uncaught signal is
aurel32603e4fd2009-04-15 16:18:38 +0000600 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
601 * a negative value. To get the proper exit code we need to
602 * actually die from an uncaught signal. Here the default signal
603 * handler is installed, we send ourself a signal and we wait for
604 * it to arrive. */
605 sigfillset(&act.sa_mask);
606 act.sa_handler = SIG_DFL;
Peter Maydell3a5d30b2014-02-17 18:55:32 +0000607 act.sa_flags = 0;
aurel32603e4fd2009-04-15 16:18:38 +0000608 sigaction(host_sig, &act, NULL);
609
610 /* For some reason raise(host_sig) doesn't send the signal when
611 * statically linked on x86-64. */
612 kill(getpid(), host_sig);
613
614 /* Make sure the signal isn't masked (just reuse the mask inside
615 of act) */
616 sigdelset(&act.sa_mask, host_sig);
617 sigsuspend(&act.sa_mask);
618
619 /* unreachable */
Blue Swirla6c6f762010-03-13 14:18:50 +0000620 abort();
bellard66fb9762003-03-23 01:06:05 +0000621}
622
/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

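/* Host-side handler installed for all trapped signals: CPU exceptions
 * (SIGSEGV/SIGBUS with a positive si_code) are forwarded to
 * cpu_signal_handler(); everything else is recorded as pending for the
 * guest and the virtual CPU is kicked out of the translated code.
 */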
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

661 /* the CPU emulator uses some host signals to detect exceptions,
aurel32eaa449b2009-01-03 13:14:52 +0000662 we forward to it some signals */
aurel32ca587a82008-12-18 22:44:13 +0000663 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
aurel32eaa449b2009-01-03 13:14:52 +0000664 && info->si_code > 0) {
bellardb346ff42003-06-15 20:05:50 +0000665 if (cpu_signal_handler(host_signum, info, puc))
bellard9de5e442003-03-23 16:49:39 +0000666 return;
667 }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe. */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

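/* Deliver one queued signal to the guest: either apply the default
 * action here, or set up the guest signal frame and adjust the emulated
 * signal mask for the duration of the handler.
 */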
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
878 } else if (handler == TARGET_SIG_IGN) {
879 /* ignore sig */
880 } else if (handler == TARGET_SIG_ERR) {
Peter Maydellc599d4d2016-07-28 16:44:49 +0100881 dump_core_and_abort(sig);
bellard66fb9762003-03-23 01:06:05 +0000882 } else {
bellard9de5e442003-03-23 16:49:39 +0000883 /* compute the blocked signals during the handler execution */
Peter Maydell3d3efba2016-05-27 15:51:49 +0100884 sigset_t *blocked_set;
885
pbrook624f7972008-05-31 16:11:38 +0000886 target_to_host_sigset(&set, &sa->sa_mask);
bellard9de5e442003-03-23 16:49:39 +0000887 /* SA_NODEFER indicates that the current signal should not be
888 blocked during the handler */
pbrook624f7972008-05-31 16:11:38 +0000889 if (!(sa->sa_flags & TARGET_SA_NODEFER))
bellard9de5e442003-03-23 16:49:39 +0000890 sigaddset(&set, target_to_host_signal(sig));
ths3b46e622007-09-17 08:09:54 +0000891
bellard9de5e442003-03-23 16:49:39 +0000892 /* save the previous blocked signal state to restore it at the
893 end of the signal execution (see do_sigreturn) */
Peter Maydell3d3efba2016-05-27 15:51:49 +0100894 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
895
896 /* block signals in the handler */
897 blocked_set = ts->in_sigsuspend ?
898 &ts->sigsuspend_mask : &ts->signal_mask;
899 sigorset(&ts->signal_mask, blocked_set, &set);
900 ts->in_sigsuspend = 0;
bellard9de5e442003-03-23 16:49:39 +0000901
bellardbc8a22c2003-03-30 21:02:40 +0000902 /* if the CPU is in VM86 mode, we restore the 32 bit values */
j_mayer84409dd2007-04-06 08:56:50 +0000903#if defined(TARGET_I386) && !defined(TARGET_X86_64)
bellardbc8a22c2003-03-30 21:02:40 +0000904 {
905 CPUX86State *env = cpu_env;
906 if (env->eflags & VM_MASK)
907 save_v86_state(env);
908 }
909#endif
bellard9de5e442003-03-23 16:49:39 +0000910 /* prepare the stack frame of the virtual CPU */
Laurent Viviercb6ac802018-04-24 21:26:35 +0200911#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
912 if (sa->sa_flags & TARGET_SA_SIGINFO) {
913 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
914 } else {
915 setup_frame(sig, sa, &target_old_set, cpu_env);
916 }
917#else
Richard Hendersonff970902013-02-10 10:30:42 -0800918 /* These targets do not have traditional signals. */
Timothy E Baldwin907f5fd2016-05-27 15:51:52 +0100919 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
Richard Hendersonff970902013-02-10 10:30:42 -0800920#endif
Peter Maydell7ec87e02016-05-27 15:51:45 +0100921 if (sa->sa_flags & TARGET_SA_RESETHAND) {
pbrook624f7972008-05-31 16:11:38 +0000922 sa->_sa_handler = TARGET_SIG_DFL;
Peter Maydell7ec87e02016-05-27 15:51:45 +0100923 }
bellard31e31b82003-02-18 22:55:36 +0000924 }
bellard31e31b82003-02-18 22:55:36 +0000925}
Peter Maydelle902d582016-05-27 15:51:44 +0100926
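/* Deliver every signal recorded as pending: the synchronous signal first,
 * then any unblocked asynchronous ones. Host signals are blocked while
 * each guest frame is set up and unblocked again once nothing is pending.
 */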
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}