Fix restore_rt() getter's inline asm not to clobber the x86-64 red zone
Using "call" in x86-64 inline asm without adjusting the stack pointer
for the 128 byte red zone isn't valid, because it would clobber the
red zone.
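
For reference, a minimal sketch of what keeping "call" would require
(this is not what the change does, and the function name is made up
for illustration):

  static void *get_label_with_call(void) {
    void *addr;
    /* "call 1f" pushes the address of label 1 (the next instruction);
     * stepping %rsp past the 128-byte red zone first keeps that push
     * off any locals the compiler may have placed there. */
    __asm__ __volatile__("subq $128,%%rsp\n"
                         "call 1f\n"
                         "1:popq %0\n"
                         "addq $128,%%rsp\n"
                         : "=r" (addr));
    return addr;
  }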
This use of "call" comes from the x86-32 version, but it's not
necessary on x86-64 where we can use %rip-relative addressing.
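
A minimal sketch of that %rip-relative pattern, with a nop standing in
for the real sigreturn stub (name and body are illustrative only):

  static void *get_label_with_lea(void) {
    void *addr;
    /* "leaq 1b(%rip),reg" computes the label's address directly;
     * nothing is pushed, so the red zone is left untouched. */
    __asm__ __volatile__("jmp 2f\n"
                         "1:nop\n"
                         "2:leaq 1b(%%rip),%0\n"
                         : "=r" (addr));
    return addr;
  }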
Also relax the output constraint from %rax ('=a') to any register
('=r'); the stricter constraint wasn't necessary even before this
change.
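
As a side note (an illustrative example, not from the patch): '=a'
pins the output to %rax, while '=r' lets the register allocator pick
any general-purpose register; popq and leaq both accept any register,
so nothing here ever required %rax.

  static long constraint_demo(void) {
    long v;
    /* '=r': the compiler chooses the output register. With '=a' the
     * same asm would be forced to use %rax. */
    __asm__ __volatile__("movq $42,%0" : "=r" (v));
    return v;
  }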
BUG=none
TEST=none
Review URL: https://codereview.chromium.org/23072035
git-svn-id: http://linux-syscall-support.googlecode.com/svn/trunk/lss@23 829466d3-f3f5-3ae4-62ad-de35cf9bba21
diff --git a/linux_syscall_support.h b/linux_syscall_support.h
index f2eaa3e..26bfe2c 100644
--- a/linux_syscall_support.h
+++ b/linux_syscall_support.h
@@ -2028,13 +2028,12 @@
* function, as glibc goes out of its way to make it inaccessible.
*/
long long res;
- __asm__ __volatile__("call 2f\n"
- "0:.align 16\n"
+ __asm__ __volatile__("jmp 2f\n"
+ ".align 16\n"
"1:movq %1,%%rax\n"
LSS_ENTRYPOINT
- "2:popq %0\n"
- "addq $(1b-0b),%0\n"
- : "=a" (res)
+ "2:leaq 1b(%%rip),%0\n"
+ : "=r" (res)
: "i" (__NR_rt_sigreturn));
return (void (*)(void))(uintptr_t)res;
}