From 136b2b60fcc02a3644c94b311b3472f3a5b8b1a3 Mon Sep 17 00:00:00 2001
From: Gareth Rees
Date: Thu, 22 Dec 2022 19:12:53 +0000
Subject: [PATCH 1/3] Handle errors from mmap and mprotect due to Apple Hardened Runtime.

On Apple Silicon, when Hardened Runtime is in force and the application
lacks the "Allow Unsigned Executable Memory Entitlement", mmap and
mprotect return EACCES when an attempt is made to create or modify
memory so that it is simultaneously writable and executable. This
commit handles these cases by retrying the operation without the
PROT_EXEC flag, and setting global variables so that future operations
omit the flag.
---
 code/config.h | 17 +++++++++++++++++
 code/protix.c | 22 +++++++++++++++++++---
 code/vmix.c   | 25 ++++++++++++++++++++-----
 3 files changed, 56 insertions(+), 8 deletions(-)

diff --git a/code/config.h b/code/config.h
index aa9b70c7cc..987f4a8034 100644
--- a/code/config.h
+++ b/code/config.h
@@ -710,6 +710,23 @@
 #define WB_DEFER_HIT 1 /* boring scans after barrier hit */
 
 
+/* Apple Hardened Runtime
+ *
+ * .hardened-runtime: On Apple Silicon, applications may be compiled
+ * with Hardened Runtime enabled. These applications have restricted
+ * capabilities: in particular, unless the "Allow Unsigned Executable
+ * Memory Entitlement" is enabled, these applications cannot create
+ * memory that is simultaneously writable and executable. Attempts to
+ * do so using mmap() and mprotect() fail with EACCES.
+ *
+ * See
+ */
+#if defined(MPS_OS_XC) && defined(MPS_ARCH_A6)
+#define MAYBE_HARDENED_RUNTIME 1
+#else
+#define MAYBE_HARDENED_RUNTIME 0
+#endif
+
 
 #endif /* config_h */
 
diff --git a/code/protix.c b/code/protix.c
index 3c9f520398..2f1ad1f62e 100644
--- a/code/protix.c
+++ b/code/protix.c
@@ -41,13 +41,21 @@
 
 #include "vm.h"
 
+#include
 #include
+#include <signal.h> /* sig_atomic_t */
 #include
 #include
 #include
 
 SRCID(protix, "$Id$");
 
+
+/* Value for memory protection corresponding to AccessSetEMPTY. */
+
+static sig_atomic_t prot_all = PROT_READ | PROT_WRITE | PROT_EXEC;
+
+
 /* ProtSet -- set protection
  *
  * This is just a thin veneer on top of mprotect(2).
@@ -55,7 +63,7 @@ SRCID(protix, "$Id$");
 
 void ProtSet(Addr base, Addr limit, AccessSet mode)
 {
-  int flags;
+  int flags, result;
 
   AVER(sizeof(size_t) == sizeof(Addr));
   AVER(base < limit);
@@ -82,7 +90,7 @@ void ProtSet(Addr base, Addr limit, AccessSet mode)
     flags = PROT_READ | PROT_EXEC;
     break;
   case AccessSetEMPTY:
-    flags = PROT_READ | PROT_WRITE | PROT_EXEC;
+    flags = prot_all;
     break;
   default:
     NOTREACHED;
@@ -90,7 +98,15 @@ void ProtSet(Addr base, Addr limit, AccessSet mode)
   }
 
   /* .assume.mprotect.base */
-  if(mprotect((void *)base, (size_t)AddrOffset(base, limit), flags) != 0)
+  result = mprotect((void *)base, (size_t)AddrOffset(base, limit), flags);
+  if (MAYBE_HARDENED_RUNTIME && result != 0 && errno == EACCES
+      && (flags & PROT_WRITE) && (flags & PROT_EXEC))
+  {
+    /* See . */
+    prot_all = PROT_READ | PROT_WRITE;
+    result = mprotect((void *)base, (size_t)AddrOffset(base, limit), flags & prot_all);
+  }
+  if (result != 0)
     NOTREACHED;
 }
 
diff --git a/code/vmix.c b/code/vmix.c
index 418c26f700..941939f170 100644
--- a/code/vmix.c
+++ b/code/vmix.c
@@ -47,6 +47,7 @@
 #include "vm.h"
 
 #include <errno.h> /* errno */
+#include <signal.h> /* sig_atomic_t */
 #include  /* see .feature.li in config.h */
 #include <sys/mman.h> /* mmap, munmap */
 #include <unistd.h> /* getpagesize */
@@ -156,11 +157,17 @@ void VMFinish(VM vm)
 }
 
 
+/* Value to use for protection of newly allocated pages. */
+
+static sig_atomic_t vm_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
+
+
 /* VMMap -- map the given range of memory */
 
 Res VMMap(VM vm, Addr base, Addr limit)
 {
   Size size;
+  void *result;
 
   AVERT(VM, vm);
   AVER(sizeof(void *) == sizeof(Addr));
@@ -172,11 +179,19 @@ Res VMMap(VM vm, Addr base, Addr limit)
 
   size = AddrOffset(base, limit);
 
-  if(mmap((void *)base, (size_t)size,
-          PROT_READ | PROT_WRITE | PROT_EXEC,
-          MAP_ANON | MAP_PRIVATE | MAP_FIXED,
-          -1, 0)
-     == MAP_FAILED) {
+  result = mmap((void *)base, (size_t)size, vm_prot,
+                MAP_ANON | MAP_PRIVATE | MAP_FIXED,
+                -1, 0);
+  if (MAYBE_HARDENED_RUNTIME && result == MAP_FAILED && errno == EACCES
+      && (vm_prot & PROT_WRITE) && (vm_prot & PROT_EXEC))
+  {
+    /* See . */
+    vm_prot = PROT_READ | PROT_WRITE;
+    result = mmap((void *)base, (size_t)size, vm_prot,
+                  MAP_ANON | MAP_PRIVATE | MAP_FIXED,
+                  -1, 0);
+  }
+  if (result == MAP_FAILED) {
     AVER(errno == ENOMEM); /* .assume.mmap.err */
     return ResMEMORY;
   }

From b71e18c5551eaaa6009b3268c4430fdb1cfd02e5 Mon Sep 17 00:00:00 2001
From: Gareth Rees
Date: Sat, 24 Dec 2022 14:16:10 +0000
Subject: [PATCH 2/3] Fix some race conditions in xca6ll/hot/amcssth.

* Use memory-model-aware builtins in GCC and Clang when a memory
  location may be written by one thread and read by another, avoiding
  race conditions due to out-of-order updates on ARM.

* Call dylan_make_wrappers while the test is still single-threaded,
  preventing multiple threads from racing to call it.

* Prevent dylan_init from creating a padding object, as we must not
  have an exact root pointing at a padding object.
---
 code/amcssth.c  | 38 ++++++++++++++++++++++++--------------
 code/fmtdytst.c |  9 +++------
 code/testlib.h  | 19 +++++++++++++++++++
 3 files changed, 46 insertions(+), 20 deletions(-)

diff --git a/code/amcssth.c b/code/amcssth.c
index 35bd6a14ed..56011acfc4 100644
--- a/code/amcssth.c
+++ b/code/amcssth.c
@@ -108,19 +108,22 @@ static mps_res_t area_scan(mps_ss_t ss, void *base, void *limit, void *closure)
 
 static void churn(mps_ap_t ap, size_t roots_count)
 {
-  size_t i;
-  size_t r;
+  size_t i, j, r;
 
   ++objs;
   r = (size_t)rnd();
   if (r & 1) {
+    mps_addr_t root;
     i = (r >> 1) % exactRootsCOUNT;
-    if (exactRoots[i] != objNULL)
-      cdie(dylan_check(exactRoots[i]), "dying root check");
-    exactRoots[i] = make(ap, roots_count);
-    if (exactRoots[(exactRootsCOUNT-1) - i] != objNULL)
-      dylan_write(exactRoots[(exactRootsCOUNT-1) - i],
-                  exactRoots, exactRootsCOUNT);
+    atomic_load(&exactRoots[i], &root);
+    if (root != objNULL)
+      cdie(dylan_check(root), "dying root check");
+    root = make(ap, roots_count);
+    atomic_store(&exactRoots[i], &root);
+    j = exactRootsCOUNT - i - 1;
+    atomic_load(&exactRoots[j], &root);
+    if (root != objNULL)
+      dylan_write(root, exactRoots, exactRootsCOUNT);
   } else {
     i = (r >> 1) % ambigRootsCOUNT;
     ambigRoots[(ambigRootsCOUNT-1) - i] = make(ap, roots_count);
@@ -221,9 +224,11 @@ static void test_pool(const char *name, mps_pool_t pool, size_t roots_count)
              (unsigned long)collections, objs,
              (unsigned long)mps_arena_committed(arena));
 
-      for (i = 0; i < exactRootsCOUNT; ++i)
-        cdie(exactRoots[i] == objNULL || dylan_check(exactRoots[i]),
-             "all roots check");
+      for (i = 0; i < exactRootsCOUNT; ++i) {
+        mps_addr_t root;
+        atomic_load(&exactRoots[i], &root);
+        cdie(root == objNULL || dylan_check(root), "all roots check");
+      }
 
       if (collections >= collectionsCOUNT / 2 && !walked)
       {
@@ -248,9 +253,12 @@ static void test_pool(const char *name, mps_pool_t pool, size_t roots_count)
         ramping = 0;
         /* kill half of the roots */
         for(i = 0; i < exactRootsCOUNT; i += 2) {
-          if (exactRoots[i] != objNULL) {
-            cdie(dylan_check(exactRoots[i]), "ramp kill check");
-            exactRoots[i] = objNULL;
+          mps_addr_t root;
+          atomic_load(&exactRoots[i], &root);
+          if (root != objNULL) {
+            cdie(dylan_check(root), "ramp kill check");
+            root = objNULL;
+            atomic_store(&exactRoots[i], &root);
           }
         }
       }
@@ -298,6 +306,8 @@ static void test_arena(void)
   mps_pool_t amc_pool, amcz_pool;
   void *marker = &marker;
 
+  die(dylan_make_wrappers(), "make wrappers");
+
   MPS_ARGS_BEGIN(args) {
     MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
     MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(testArenaSIZE));
diff --git a/code/fmtdytst.c b/code/fmtdytst.c
index 6a48860fe9..97b1f30e8e 100644
--- a/code/fmtdytst.c
+++ b/code/fmtdytst.c
@@ -68,7 +68,7 @@ mps_res_t dylan_make_wrappers(void)
   return MPS_RES_OK;
 }
 
-/* dylan_init -- turn raw memory into initialised dylan-vector (or pad)
+/* dylan_init -- turn raw memory into initialised dylan-vector
  *
  * If the raw memory is large enough, initialises it to a dylan-vector,
  * whose slots are initialised to either dylan-ints, or valid refs, at
@@ -78,8 +78,6 @@ mps_res_t dylan_make_wrappers(void)
  * and "nr_refs" arguments. If "nr_refs" is 0, all slots are
  * initialized to dylan-ints: this may be useful for making leaf
  * objects.
- *
- * (Makes a pad if the raw memory is too small to hold a dylan-vector)
  */
 
 mps_res_t dylan_init(mps_addr_t addr, size_t size,
@@ -93,8 +91,7 @@ mps_res_t dylan_init(mps_addr_t addr, size_t size,
   if (res != MPS_RES_OK)
     return res;
 
-  /* If there is enough room, make a vector, otherwise just */
-  /* make a padding object. */
+  /* If there is enough room, make a vector. */
   if(size >= sizeof(mps_word_t) * 2) {
     mps_word_t *p = (mps_word_t *)addr;
     mps_word_t i, t = (size / sizeof(mps_word_t)) - 2;
@@ -110,7 +107,7 @@ mps_res_t dylan_init(mps_addr_t addr, size_t size,
         p[2+i] = (mps_word_t)refs[(r >> 1) % nr_refs]; /* random ptr */
     }
   } else {
-    dylan_pad(addr, size);
+    return MPS_RES_UNIMPL;
   }
 
   return MPS_RES_OK;
diff --git a/code/testlib.h b/code/testlib.h
index f140b0a9e1..f86635bc71 100644
--- a/code/testlib.h
+++ b/code/testlib.h
@@ -292,6 +292,25 @@ extern void randomize(int argc, char *argv[]);
 extern void testlib_init(int argc, char *argv[]);
 
 
+/* Memory-model-aware operations */
+
+#if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL)
+
+/* See
+ * and */
+#define atomic_load(SRC, DEST) __atomic_load(SRC, DEST, __ATOMIC_ACQUIRE)
+#define atomic_store(DEST, SRC) __atomic_store(DEST, SRC, __ATOMIC_RELEASE)
+
+#elif defined(MPS_BUILD_MV)
+
+/* Microsoft Visual C/C++ does not need memory-model-aware load and store as
+ * loads and stores of register-sized values are atomic on Intel. */
+#define atomic_load(SRC, DEST) (*(DEST) = *(SRC))
+#define atomic_store(DEST, SRC) (*(DEST) = *(SRC))
+
+#endif
+
+
 #endif /* testlib_h */
 
 
From 7a61d37937cdfca95c346b932f59a906d95f7ac0 Mon Sep 17 00:00:00 2001
From: Gareth Rees
Date: Thu, 29 Dec 2022 15:18:33 +0000
Subject: [PATCH 3/3] Report sizes in grains in MMQA test function/150.c.

This makes the test portable to XCA6LL, where the page size (and so
the default arena grain size) can be 16 kB.
--- test/function/150.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/test/function/150.c b/test/function/150.c index f1d19b3402..d5a7c4d94e 100644 --- a/test/function/150.c +++ b/test/function/150.c @@ -8,7 +8,7 @@ OUTPUT_SPEC count1 < 50 count2 < 50 collect = true - collect_not_condemned <= 4096 + collect_not_condemned_grains <= 1 result = pass END_HEADER */ @@ -30,6 +30,7 @@ mps_arena_t arena; int final_count = 0; +size_t grain_size = 0; enum { FINAL_DISCARD, @@ -115,12 +116,12 @@ static void qpoll(mycell **ref, int faction) static void process_stats(mps_message_t message) { report("collect", "true"); - report("collect_live", "%ld", - mps_message_gc_live_size(arena, message)); - report("collect_condemned", "%ld", - mps_message_gc_condemned_size(arena, message)); - report("collect_not_condemned", "%ld", - mps_message_gc_not_condemned_size(arena, message)); + report("collect_live_grains", "%ld", + mps_message_gc_live_size(arena, message) / grain_size); + report("collect_condemned_grains", "%ld", + mps_message_gc_condemned_size(arena, message) / grain_size); + report("collect_not_condemned_grains", "%ld", + mps_message_gc_not_condemned_size(arena, message) / grain_size); mps_message_discard(arena, message); } @@ -191,6 +192,9 @@ static void test(void *stack_pointer) a = allocone(apamc, 2, 1); + /* Deduce arena's grain size from the total size of the AMC pool. */ + grain_size = mps_pool_total_size(poolamc); + for (j=0; j<1000; j++) { b = allocone(apamc, 2, mps_rank_exact()); c = allocone(apawl, 2, mps_rank_weak());