// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ExecutableMemory.hpp"

#include "Debug.hpp"

#if defined(_WIN32)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#include <intrin.h>
#elif defined(__Fuchsia__)
#include <unistd.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
#else
#include <errno.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>
#endif

#include <memory.h>

#undef allocate
#undef deallocate

#if(defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)) && !defined(__x86__)
#define __x86__
#endif

namespace rr {
namespace {

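// In the fallback allocator below (when LINUX_ENABLE_NAMED_MMAP is not defined),
// this record is stored immediately before the aligned pointer returned by
// allocateRaw(), so that deallocate() can recover the original new[] block.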
struct Allocation
{
	// size_t bytes;
	unsigned char *block;
};

void *allocateRaw(size_t bytes, size_t alignment)
{
	ASSERT((alignment & (alignment - 1)) == 0); // Power of 2 alignment.

#if defined(LINUX_ENABLE_NAMED_MMAP)
	if(alignment < sizeof(void*))
	{
		return malloc(bytes);
	}
	else
	{
		void *allocation;
		int result = posix_memalign(&allocation, alignment, bytes);
		if(result != 0)
		{
			errno = result;
			allocation = nullptr;
		}
		return allocation;
	}
#else
	unsigned char *block = new unsigned char[bytes + sizeof(Allocation) + alignment];
	unsigned char *aligned = nullptr;

	if(block)
	{
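		// Round the address just past where the Allocation record will be stored up to
		// the requested alignment. Illustrative example (assuming sizeof(Allocation) == 8
		// and alignment == 16): block == 0x1000 yields aligned == 0x1010, with the
		// Allocation record occupying 0x1008..0x100F.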
		aligned = (unsigned char*)((uintptr_t)(block + sizeof(Allocation) + alignment - 1) & -(intptr_t)alignment);
		Allocation *allocation = (Allocation*)(aligned - sizeof(Allocation));

		// allocation->bytes = bytes;
		allocation->block = block;
	}

	return aligned;
#endif
}

#if defined(_WIN32)
DWORD permissionsToProtectMode(int permissions)
{
	switch(permissions)
	{
	case PERMISSION_READ:
		return PAGE_READONLY;
	case PERMISSION_EXECUTE:
		return PAGE_EXECUTE;
	case PERMISSION_READ | PERMISSION_WRITE:
		return PAGE_READWRITE;
	case PERMISSION_READ | PERMISSION_EXECUTE:
		return PAGE_EXECUTE_READ;
	case PERMISSION_READ | PERMISSION_WRITE | PERMISSION_EXECUTE:
		return PAGE_EXECUTE_READWRITE;
	}
	return PAGE_NOACCESS;
}
#endif

#if !defined(_WIN32) && !defined(__Fuchsia__)
int permissionsToMmapProt(int permissions)
{
	int result = 0;
	if(permissions & PERMISSION_READ)
	{
		result |= PROT_READ;
	}
	if(permissions & PERMISSION_WRITE)
	{
		result |= PROT_WRITE;
	}
	if(permissions & PERMISSION_EXECUTE)
	{
		result |= PROT_EXEC;
	}
	return result;
}
#endif // !defined(_WIN32) && !defined(__Fuchsia__)

#if defined(LINUX_ENABLE_NAMED_MMAP)
// Create a file descriptor for anonymous memory with the given
// name. Returns -1 on failure.
// TODO: remove once libc wrapper exists.
int memfd_create(const char* name, unsigned int flags)
{
#if __aarch64__
#define __NR_memfd_create 279
#elif __arm__
#define __NR_memfd_create 385
#elif __powerpc64__
#define __NR_memfd_create 360
#elif __i386__
#define __NR_memfd_create 356
#elif __x86_64__
#define __NR_memfd_create 319
#endif /* __NR_memfd_create */
#ifdef __NR_memfd_create
	// If the kernel does not support the system call, this returns -1
	// with errno set to ENOSYS.
	return syscall(__NR_memfd_create, name, flags);
#else
	return -1;
#endif
}

// Returns a file descriptor for use with an anonymous mmap. If
// memfd_create() fails, -1 is returned. Note that the mappings should be
// MAP_PRIVATE so that underlying pages aren't shared.
int anonymousFd()
{
	static int fd = memfd_create("SwiftShader JIT", 0);
	return fd;
}

// Ensure there is enough space in the "anonymous" fd for |length| bytes.
void ensureAnonFileSize(int anonFd, size_t length)
{
	static size_t fileSize = 0;
	if(length > fileSize)
	{
		ftruncate(anonFd, length);
		fileSize = length;
	}
}
#endif // defined(LINUX_ENABLE_NAMED_MMAP)

#if defined(__Fuchsia__)
zx_vm_option_t permissionsToZxVmOptions(int permissions)
{
	zx_vm_option_t result = 0;
	if(permissions & PERMISSION_READ)
	{
		result |= ZX_VM_PERM_READ;
	}
	if(permissions & PERMISSION_WRITE)
	{
		result |= ZX_VM_PERM_WRITE;
	}
	if(permissions & PERMISSION_EXECUTE)
	{
		result |= ZX_VM_PERM_EXECUTE;
	}
	return result;
}
#endif // defined(__Fuchsia__)

} // anonymous namespace

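// Note: the page size is typically 4096 bytes, but not universally so (for example,
// Apple Silicon macOS uses 16 KB pages), so it is queried from the OS rather than
// hard-coded.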
size_t memoryPageSize()
{
	static int pageSize = 0;

	if(pageSize == 0)
	{
#if defined(_WIN32)
		SYSTEM_INFO systemInfo;
		GetSystemInfo(&systemInfo);
		pageSize = systemInfo.dwPageSize;
#else
		pageSize = sysconf(_SC_PAGESIZE);
#endif
	}

	return pageSize;
}

void *allocate(size_t bytes, size_t alignment)
{
	void *memory = allocateRaw(bytes, alignment);

	if(memory)
	{
		memset(memory, 0, bytes);
	}

	return memory;
}

void deallocate(void *memory)
{
#if defined(LINUX_ENABLE_NAMED_MMAP)
	free(memory);
#else
	if(memory)
	{
		unsigned char *aligned = (unsigned char*)memory;
		Allocation *allocation = (Allocation*)(aligned - sizeof(Allocation));

		delete[] allocation->block;
	}
#endif
}

// Rounds |x| up to a multiple of |m|, where |m| is a power of 2.
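// For example, roundUp(5000, 4096) == 8192 and roundUp(4096, 4096) == 4096.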
inline uintptr_t roundUp(uintptr_t x, uintptr_t m)
{
	ASSERT(m > 0 && (m & (m - 1)) == 0); // |m| must be a power of 2.
	return (x + m - 1) & ~(m - 1);
}

void *allocateMemoryPages(size_t bytes, int permissions, bool need_exec)
{
	size_t pageSize = memoryPageSize();
	size_t length = roundUp(bytes, pageSize);
	void *mapping = nullptr;

#if defined(LINUX_ENABLE_NAMED_MMAP)
	int flags = MAP_PRIVATE;

	// Try to name the memory region for the executable code,
	// to aid profilers.
	int anonFd = anonymousFd();
	if(anonFd == -1)
	{
		flags |= MAP_ANONYMOUS;
	}
	else
	{
		ensureAnonFileSize(anonFd, length);
	}

	mapping = mmap(
	    nullptr, length, permissionsToMmapProt(permissions), flags, anonFd, 0);

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#elif defined(__Fuchsia__)
	zx_handle_t vmo;
	if(zx_vmo_create(length, 0, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	if(need_exec &&
	   zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	zx_vaddr_t reservation;
	zx_status_t status = zx_vmar_map(
	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions), 0, vmo,
	    0, length, &reservation);
	zx_handle_close(vmo);
	if(status != ZX_OK)
	{
		return nullptr;
	}

	// zx_vmar_map() returns a page-aligned address.
	ASSERT(roundUp(reservation, pageSize) == reservation);

	mapping = reinterpret_cast<void*>(reservation);
#elif defined(__APPLE__)
	int prot = permissionsToMmapProt(permissions);
	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
	// On macOS 10.14 and higher, executables that are code signed with the
	// "runtime" option cannot execute writable memory by default. They can opt
	// into this capability by specifying the "com.apple.security.cs.allow-jit"
	// code signing entitlement and allocating the region with the MAP_JIT flag.
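	// For illustration (not part of this code), the entitlement is declared in the
	// entitlements plist passed to codesign:
	//   <key>com.apple.security.cs.allow-jit</key>
	//   <true/>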
	mapping = mmap(nullptr, length, prot, flags | MAP_JIT, -1, 0);

	if(mapping == MAP_FAILED)
	{
		// Retry without MAP_JIT (for older macOS versions).
		mapping = mmap(nullptr, length, prot, flags, -1, 0);
	}

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#else
	mapping = allocate(length, pageSize);
	protectMemoryPages(mapping, length, permissions);
#endif

	return mapping;
}
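
// Illustrative usage sketch (not code from this file): a JIT typically allocates
// pages as read/write, copies the generated code into them, and then uses
// protectMemoryPages() to make them executable and non-writable. Assuming a code
// buffer `buffer` of `size` bytes:
//
//   void *memory = allocateMemoryPages(size, PERMISSION_READ | PERMISSION_WRITE, true);
//   memcpy(memory, buffer, size);
//   protectMemoryPages(memory, size, PERMISSION_READ | PERMISSION_EXECUTE);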

void protectMemoryPages(void *memory, size_t bytes, int permissions)
{
	if(bytes == 0)
	{
		return;
	}

	bytes = roundUp(bytes, memoryPageSize());

#if defined(_WIN32)
	unsigned long oldProtection;
	BOOL result =
	    VirtualProtect(memory, bytes, permissionsToProtectMode(permissions),
	                   &oldProtection);
	ASSERT(result);
#elif defined(__Fuchsia__)
	zx_status_t status = zx_vmar_protect(
	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions),
	    reinterpret_cast<zx_vaddr_t>(memory), bytes);
	ASSERT(status == ZX_OK);
#else
	int result =
	    mprotect(memory, bytes, permissionsToMmapProt(permissions));
	ASSERT(result == 0);
#endif
}

void deallocateMemoryPages(void *memory, size_t bytes)
{
#if defined(_WIN32)
	unsigned long oldProtection;
	BOOL result =
	    VirtualProtect(memory, bytes, PAGE_READWRITE, &oldProtection);
	ASSERT(result);
	deallocate(memory);
#elif defined(LINUX_ENABLE_NAMED_MMAP) || defined(__APPLE__)
	size_t pageSize = memoryPageSize();
	size_t length = roundUp(bytes, pageSize);
	int result = munmap(memory, length);
	ASSERT(result == 0);
#elif defined(__Fuchsia__)
	size_t pageSize = memoryPageSize();
	size_t length = roundUp(bytes, pageSize);
	zx_status_t status = zx_vmar_unmap(
	    zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), length);
	ASSERT(status == ZX_OK);
#else
	int result = mprotect(memory, bytes, PROT_READ | PROT_WRITE);
	ASSERT(result == 0);
	deallocate(memory);
#endif
}

} // namespace rr