From 54b93517c0f256366234c60e2d3bcc4cdd150686 Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Thu, 30 Mar 2023 19:06:45 +0100
Subject: [PATCH 01/15] Break indefinite recursion in deep checking via AVERT
 on arena in ChunkOfAddr.

---
 code/tract.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/code/tract.c b/code/tract.c
index baa01ef611..979916aa4b 100644
--- a/code/tract.c
+++ b/code/tract.c
@@ -324,7 +324,11 @@ Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr)
   Tree tree;

   AVER_CRITICAL(chunkReturn != NULL);
-  AVERT_CRITICAL(Arena, arena);
+
+  /* Avoid AVERT on arena because ChunkOfAddr is used by ArenaCheck
+     and it causes indefinite recursion in deep checking. */
+  AVER_CRITICAL(TESTT(Arena, arena));
+
   /* addr is arbitrary */

   if (TreeFind(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr),

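
Note on [PATCH 01/15]: ChunkOfAddr is called from ArenaCheck, so an AVERT
on the arena there would re-enter ArenaCheck and recurse indefinitely under
deep checking; TESTT tests only the signature. A minimal self-contained
sketch of the distinction, using simplified stand-in types and a
hypothetical signature value, not the MPS's real Sig machinery:

    #include <assert.h>

    typedef unsigned long Sig;
    #define ArenaSig ((Sig)0x519A7E4A)  /* hypothetical signature value */

    typedef struct ArenaStruct {
      Sig sig;   /* design.mps.sig.field */
      /* ... chunk tree, pools, etc. ... */
    } ArenaStruct, *Arena;

    /* Shallow test, like TESTT: signature only, never recurses. */
    static int arenaTest(Arena arena)
    {
      return arena != NULL && arena->sig == ArenaSig;
    }

    /* Deep check, like AVERT in deep-checking varieties: may call
       functions such as ChunkOfAddr, which therefore must not check
       the arena deeply again, on pain of indefinite recursion. */
    static void arenaCheckDeep(Arena arena)
    {
      assert(arenaTest(arena));   /* terminates: no recursion */
      /* ... deep checks of substructures would go here ... */
    }

    int main(void)
    {
      ArenaStruct arenaStruct;
      arenaStruct.sig = ArenaSig;
      arenaCheckDeep(&arenaStruct);
      return 0;
    }
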
From 01422138ed9699f93147b7b0927cb7f063f95a1b Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Thu, 30 Mar 2023 19:07:12 +0100
Subject: [PATCH 02/15] Fix premature finishing of trace genRing causing
 assertion failures in deep checking.

---
 code/trace.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/code/trace.c b/code/trace.c
index 9507eb8f72..f6825b7a21 100644
--- a/code/trace.c
+++ b/code/trace.c
@@ -814,7 +814,7 @@ static void traceDestroyCommon(Trace trace)
     GenDesc gen = GenDescOfTraceRing(node, trace);
     GenDescEndTrace(gen, trace);
   }
-  RingFinish(&trace->genRing);
+  AVER(RingIsSingle(&trace->genRing));

   /* Ensure that address space is returned to the operating system for
    * traces that don't have any condemned objects (there might be
@@ -828,6 +828,8 @@ static void traceDestroyCommon(Trace trace)
    * violating . */
   ArenaSetEmergency(trace->arena, FALSE);

+  RingFinish(&trace->genRing);
+
   trace->sig = SigInvalid;
   trace->arena->busyTraces = TraceSetDel(trace->arena->busyTraces, trace);
   trace->arena->flippedTraces = TraceSetDel(trace->arena->flippedTraces, trace);

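
Note on [PATCH 02/15]: the fix is an ordering one. The genRing must stay a
valid (empty) ring while later teardown code may still check the trace, and
be finished only once nothing can observe it. A small self-contained sketch
of that pattern, with a simplified ring rather than the MPS one:

    #include <assert.h>
    #include <stddef.h>

    typedef struct RingStruct { struct RingStruct *next, *prev; } RingStruct, *Ring;

    static void RingInit(Ring ring) { ring->next = ring->prev = ring; }
    static int RingIsSingle(Ring ring) { return ring->next == ring; }
    static void RingFinish(Ring ring) { ring->next = ring->prev = NULL; }

    static void teardownSketch(Ring genRing)
    {
      /* Assert emptiness early, while the ring is still valid... */
      assert(RingIsSingle(genRing));

      /* ... keep it valid across intervening cleanup that might still
         run checks over the owning structure ... */

      /* ... and only invalidate it at the very end. */
      RingFinish(genRing);
    }

    int main(void)
    {
      RingStruct genRing;
      RingInit(&genRing);
      teardownSketch(&genRing);
      return 0;
    }
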
From 0d56581f0d64d9f925fdc12c3288306d5dfbf689 Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Thu, 30 Mar 2023 20:21:40 +0100
Subject: [PATCH 03/15] Correcting false assertion about shield queue length
 and enforcing exact check by better accounting. Fixes job004026. Mitigating
 GitHub issue #205 with extra shield type checks.

---
 code/mpmst.h  |  3 +-
 code/shield.c | 85 +++++++++++++++++++++++++++++++++++++++++++--------
 2 files changed, 74 insertions(+), 14 deletions(-)

diff --git a/code/mpmst.h b/code/mpmst.h
index e03033ae99..0e8800ffb3 100644
--- a/code/mpmst.h
+++ b/code/mpmst.h
@@ -669,13 +669,14 @@ typedef struct ShieldStruct {
   Sig sig;                /* design.mps.sig.field */
   BOOLFIELD(inside);      /* */
   BOOLFIELD(suspended);   /* mutator suspended? */
-  BOOLFIELD(queuePending); /* queue insertion pending? */
   Seg *queue;             /* queue of unsynced segs */
   Count length;           /* number of elements in shield queue */
   Index next;             /* next free element in shield queue */
   Index limit;            /* high water mark for cache usage */
   Count depth;            /* sum of depths of all segs */
   Count unsynced;         /* number of unsynced segments */
+  Count unqueued;         /* number of unsynced unqueued segments */
+  Count synced;           /* number of synced queued segments */
   Count holds;            /* number of holds */
   SortStruct sortStruct;  /* workspace for queue sort */
 } ShieldStruct;

diff --git a/code/shield.c b/code/shield.c
index 180374cfb8..dcb7550db3 100644
--- a/code/shield.c
+++ b/code/shield.c
@@ -19,13 +19,14 @@ void ShieldInit(Shield shield)
 {
   shield->inside = FALSE;
   shield->suspended = FALSE;
-  shield->queuePending = FALSE;
   shield->queue = NULL;
   shield->length = 0;
   shield->next = 0;
   shield->limit = 0;
   shield->depth = 0;
   shield->unsynced = 0;
+  shield->unqueued = 0;
+  shield->synced = 0;
   shield->holds = 0;
   shield->sig = ShieldSig;
 }
@@ -55,6 +56,8 @@ void ShieldFinish(Shield shield)

   AVER(shield->depth == 0);
   AVER(shield->unsynced == 0);
+  AVER(shield->unqueued == 0);
+  AVER(shield->synced == 0);
   AVER(shield->holds == 0);
   shield->sig = SigInvalid;
 }
@@ -69,6 +72,8 @@ Bool ShieldCheck(Shield shield)
   CHECKL(shield->queue == NULL || shield->length > 0);
   CHECKL(shield->limit <= shield->length);
   CHECKL(shield->next <= shield->limit);
+  CHECKL(shield->unqueued <= shield->unsynced);
+  CHECKL(shield->synced <= shield->limit);

   /* The mutator is not suspended while outside the shield . */
@@ -85,11 +90,11 @@ Bool ShieldCheck(Shield shield)
   /* There are no unsynced segments when we're outside the shield. */
   CHECKL(shield->inside || shield->unsynced == 0);

-  /* Every unsynced segment should be on the queue, because we have to
-     remember to sync it before we return to the mutator. */
-  CHECKL(shield->limit + shield->queuePending >= shield->unsynced);
+  /* The queue contains exactly all the unsynced segments that aren't
+     unqueued, and the queued synced segments. */
+  CHECKL(shield->limit == shield->unsynced - shield->unqueued + shield->synced);

-  /* The mutator is suspeneded if there are any holds. */
+  /* The mutator is suspended if there are any holds. */
   CHECKL(shield->holds == 0 || shield->suspended);

   /* This is too expensive to check all the time since we have an
@@ -105,7 +110,8 @@ Bool ShieldCheck(Shield shield)
       if (!SegIsSynced(seg))
         ++unsynced;
     }
-    CHECKL(unsynced + shield->queuePending == shield->unsynced);
+    CHECKL(unsynced == shield->limit - shield->synced);
+    CHECKL(unsynced + shield->unqueued == shield->unsynced);
   }
 #endif
@@ -125,6 +131,8 @@ Res ShieldDescribe(Shield shield, mps_lib_FILE *stream, Count depth)
                "  next $U\n", (WriteFU)shield->next,
                "  length $U\n", (WriteFU)shield->length,
                "  unsynced $U\n", (WriteFU)shield->unsynced,
+               "  unqueued $U\n", (WriteFU)shield->unqueued,
+               "  synced $U\n", (WriteFU)shield->synced,
                "  holds $U\n", (WriteFU)shield->holds,
                "} Shield $P\n", (WriteFP)shield,
                NULL);
@@ -168,11 +176,22 @@ static void shieldSetSM(Shield shield, Seg seg, AccessSet mode)
   if (SegIsSynced(seg)) {
     SegSetSM(seg, mode);
     ++shield->unsynced;
+    if (seg->queued) {
+      AVER(shield->synced > 0);
+      --shield->synced;
+    } else
+      ++shield->unqueued;
   } else {
     SegSetSM(seg, mode);
     if (SegIsSynced(seg)) {
       AVER(shield->unsynced > 0);
       --shield->unsynced;
+      if (seg->queued)
+        ++shield->synced;
+      else {
+        AVER(shield->unqueued > 0);
+        --shield->unqueued;
+      }
     }
   }
 }
@@ -187,11 +206,22 @@ static void shieldSetPM(Shield shield, Seg seg, AccessSet mode)
   if (SegIsSynced(seg)) {
     SegSetPM(seg, mode);
     ++shield->unsynced;
+    if (seg->queued) {
+      AVER(shield->synced > 0);
+      --shield->synced;
+    } else
+      ++shield->unqueued;
   } else {
     SegSetPM(seg, mode);
     if (SegIsSynced(seg)) {
       AVER(shield->unsynced > 0);
       --shield->unsynced;
+      if (seg->queued)
+        ++shield->synced;
+      else {
+        AVER(shield->unqueued > 0);
+        --shield->unqueued;
+      }
     }
   }
 }
@@ -220,9 +250,11 @@ static void shieldSync(Shield shield, Seg seg)
   SHIELD_AVERT_CRITICAL(Seg, seg);

   if (!SegIsSynced(seg)) {
+    /* TODO: Could assert something about the unsync count going down. */
     shieldSetPM(shield, seg, SegSM(seg));
     ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg));
   }
+  AVER_CRITICAL(SegIsSynced(seg));
 }
@@ -239,6 +271,7 @@ static void shieldSuspend(Arena arena)

   AVERT(Arena, arena);
   shield = ArenaShield(arena);
+  AVERT(Shield, shield);
   AVER(shield->inside);

   if (!shield->suspended) {
@@ -275,6 +308,7 @@ void (ShieldRelease)(Arena arena)

   AVERT(Arena, arena);
   shield = ArenaShield(arena);
+  AVERT(Shield, shield);
   AVER(shield->inside);
   AVER(shield->suspended);
@@ -318,6 +352,11 @@ static Seg shieldDequeue(Shield shield, Index i)
   AVER(seg->queued);
   shield->queue[i] = NULL; /* to ensure it can't get re-used */
   seg->queued = FALSE;
+  if (SegIsSynced(seg)) {
+    AVER(shield->synced > 0);
+    --shield->synced;
+  } else
+    ++shield->unqueued;
   return seg;
 }
@@ -444,11 +483,15 @@ static void shieldQueue(Arena arena, Seg seg)
   /* */
   AVERT_CRITICAL(Arena, arena);
   shield = ArenaShield(arena);
+  AVERT_CRITICAL(Shield, shield);
   SHIELD_AVERT_CRITICAL(Seg, seg);

   if (SegIsSynced(seg) || seg->queued)
     return;

+  /* This segment is unsynced and not queued. */
+  AVER(shield->unqueued > 0);
+
   if (SegIsExposed(seg)) {
     /* This can occur if the mutator isn't suspended, we expose a
        segment, then raise the shield on it.  In this case, the
@@ -519,6 +562,7 @@ static void shieldQueue(Arena arena, Seg seg)
   shield->queue[shield->next] = seg;
   ++shield->next;
   seg->queued = TRUE;
+  --shield->unqueued;

   if (shield->next >= shield->limit)
     shield->limit = shield->next;
@@ -539,8 +583,7 @@ void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode)
   SHIELD_AVERT(Seg, seg);
   AVERT(AccessSet, mode);
   shield = ArenaShield(arena);
-  AVER(!shield->queuePending);
-  shield->queuePending = TRUE;
+  AVERT(Shield, shield);

   /* preserved */
   shieldSetSM(ArenaShield(arena), seg, BS_UNION(SegSM(seg), mode));
@@ -548,7 +591,6 @@ void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode)

   /* Ensure and */
   shieldQueue(arena, seg);
-  shield->queuePending = FALSE;

   /* Check queue and segment consistency. */
   AVERT(Arena, arena);
@@ -591,6 +633,7 @@ void (ShieldEnter)(Arena arena)

   AVERT(Arena, arena);
   shield = ArenaShield(arena);
+  AVERT(Shield, shield);
   AVER(!shield->inside);
   AVER(shield->depth == 0);
   AVER(!shield->suspended);
@@ -620,9 +663,13 @@ static void shieldDebugCheck(Arena arena)
   Seg seg;
   Count queued = 0;
   Count depth = 0;
+  Count unqueued = 0;
+  Count unsynced = 0;
+  Count synced = 0;

   AVERT(Arena, arena);
   shield = ArenaShield(arena);
+  AVERT(Shield, shield);
   AVER(shield->inside || shield->limit == 0);

   if (SegFirst(&seg, arena))
@@ -630,18 +677,26 @@ static void shieldDebugCheck(Arena arena)
       depth += SegDepth(seg);
       if (shield->limit == 0) {
         AVER(!seg->queued);
-        AVER(SegIsSynced(seg));
+        AVER(shield->unsynced > 0 || SegIsSynced(seg));
         /* You can directly set protections here to see if it makes a
            difference. */
         /* ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg)); */
-      } else {
-        if (seg->queued)
-          ++queued;
       }
+      if (seg->queued)
+        ++queued;
+      if (!SegIsSynced(seg))
+        ++unsynced;
+      if (seg->queued && SegIsSynced(seg))
+        ++synced;
+      if (!seg->queued && !SegIsSynced(seg))
+        ++unqueued;
     } while(SegNext(&seg, arena, seg));

   AVER(depth == shield->depth);
   AVER(queued == shield->limit);
+  AVER(unsynced == shield->unsynced);
+  AVER(unqueued == shield->unqueued);
+  AVER(synced == shield->synced);
 }
 #endif
@@ -668,6 +723,7 @@ void (ShieldFlush)(Arena arena)

   AVERT(Arena, arena);
   shield = ArenaShield(arena);
+  AVERT(Shield, shield);
 #ifdef SHIELD_DEBUG
   shieldDebugCheck(arena);
 #endif
@@ -687,6 +743,7 @@ void (ShieldLeave)(Arena arena)

   AVERT(Arena, arena);
   shield = ArenaShield(arena);
+  AVERT(Shield, shield);
   AVER(shield->inside);
   AVER(shield->depth == 0); /* no pending covers */
   AVER(shield->holds == 0);
@@ -721,6 +778,7 @@ void (ShieldExpose)(Arena arena, Seg seg)
   /* */
   AVERT_CRITICAL(Arena, arena);
   shield = ArenaShield(arena);
+  AVERT(Shield, shield);
   AVER_CRITICAL(shield->inside);

   SegSetDepth(seg, SegDepth(seg) + 1);
@@ -747,6 +805,7 @@ void (ShieldCover)(Arena arena, Seg seg)
   /* */
   AVERT_CRITICAL(Arena, arena);
   shield = ArenaShield(arena);
+  AVERT_CRITICAL(Shield, shield);
   AVERT_CRITICAL(Seg, seg);
   AVER_CRITICAL(SegPM(seg) == AccessSetEMPTY);

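
Note on [PATCH 03/15]: the new counters make the old inequality an exact
invariant: limit == unsynced - unqueued + synced, i.e. the queue holds
exactly the unsynced-but-queued segments plus the synced segments still
sitting on it. A hypothetical model of that accounting, with the state
transitions reduced to plain counters (not the MPS code):

    #include <assert.h>

    typedef struct {
      unsigned limit;     /* number of segments on the queue */
      unsigned unsynced;  /* segments whose protection lags their shield mode */
      unsigned unqueued;  /* unsynced segments not (yet) on the queue */
      unsigned synced;    /* synced segments still sitting on the queue */
    } ShieldCounts;

    static void checkCounts(const ShieldCounts *s)
    {
      assert(s->unqueued <= s->unsynced);
      assert(s->synced <= s->limit);
      assert(s->limit == s->unsynced - s->unqueued + s->synced);
    }

    /* A synced, unqueued segment becomes unsynced (as in ShieldRaise). */
    static void raiseUnqueued(ShieldCounts *s)
    {
      ++s->unsynced; ++s->unqueued; checkCounts(s);
    }

    /* shieldQueue then moves it from "unqueued" onto the queue. */
    static void queueSeg(ShieldCounts *s)
    {
      assert(s->unqueued > 0);
      --s->unqueued; ++s->limit; checkCounts(s);
    }

    int main(void)
    {
      ShieldCounts s = {0, 0, 0, 0};
      raiseUnqueued(&s);
      queueSeg(&s);
      return 0;
    }
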
From a8e2fc45c4a5d49f1dc1110e89c0b5b9d4457006 Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Thu, 30 Mar 2023 20:46:06 +0100
Subject: [PATCH 04/15] Adding GitHub CI configuration to test MPS with deep
 checking.

---
 .github/workflows/deep-check.yml | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 .github/workflows/deep-check.yml

diff --git a/.github/workflows/deep-check.yml b/.github/workflows/deep-check.yml
new file mode 100644
index 0000000000..f33fa0823c
--- /dev/null
+++ b/.github/workflows/deep-check.yml
@@ -0,0 +1,25 @@
+# .github/workflows/deep-check.yml -- GitHub CI deep check configuration for the MPS
+#
+# Copyright (c) 2023 Ravenbrook Limited. See end of file for license.
+#
+# FIXME: Document the design for this.
+#
+# Part of a fix for GitHub issue #208
+# to ensure deep
+# checking does not rot.
+
+name: deep check
+
+on:
+  - push
+  - pull_request
+  # Also run when triggered manually, e.g. by tool/github-ci-kick
+  #
+  - workflow_dispatch
+
+jobs:
+  deep-check:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: make -C code -f lii6ll.gmk VARIETY=cool CFLAGS='-DCHECKLEVEL=CheckLevelDEEP' testci

From b4f52cb15c4708cf23973e41205942fb49aefedf Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Thu, 30 Mar 2023 21:46:10 +0100
Subject: [PATCH 05/15] Put the root walk trace into state FLIPPED when we add
 it to the flipped traces, fixing an assertion in deep checking.

---
 code/walk.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/code/walk.c b/code/walk.c
index c1738d91a0..e91176db7e 100644
--- a/code/walk.c
+++ b/code/walk.c
@@ -328,6 +328,9 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
   /* Make the roots grey so that they are scanned */
   res = RootsIterate(arenaGlobals, rootWalkGrey, trace);
   /* Make this trace look like any other trace. */
+  /* FIXME: In traceFlip the roots are scanned *before* the trace
+     state is set to flipped.  Why is this different? */
+  trace->state = TraceFLIPPED;
   arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace);

   rootsStepClosureInit(rsc, arenaGlobals, trace, RootsWalkFix, f, p, s);

From 223ab07aa8eb6b2b6d9d48ec00d515952362d11d Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Thu, 30 Mar 2023 22:14:01 +0100
Subject: [PATCH 06/15] Making the lifecycle of a roots walk trace consistent
 with a normal trace.

---
 code/walk.c | 38 +++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/code/walk.c b/code/walk.c
index e91176db7e..853198303e 100644
--- a/code/walk.c
+++ b/code/walk.c
@@ -305,9 +305,12 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
   /* operations of its own. */
   res = TraceCreate(&trace, arena, TraceStartWhyWALK);
-  /* Have to fail if no trace available. Unlikely due to .assume.parked. */
-  if (res != ResOK)
+  if (res != ResOK) {
+    NOTREACHED; /* .assume.parked means a trace should be available */
     return res;
+  }
+
+  /* Whiten everything. This step is equivalent to TraceCondemn. */

   /* .roots-walk.first-stage: In order to fool MPS_FIX12 into calling
      _mps_fix2 for a reference in a root, the reference must pass the
@@ -325,17 +328,17 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
     } while (SegNext(&seg, arena, seg));
   }

+  /* Start the trace. This step is equivalent to TraceStart. */
+
   /* Make the roots grey so that they are scanned */
   res = RootsIterate(arenaGlobals, rootWalkGrey, trace);
-  /* Make this trace look like any other trace. */
-  /* FIXME: In traceFlip the roots are scanned *before* the trace
-     state is set to flipped.  Why is this different? */
-  trace->state = TraceFLIPPED;
-  arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace);
+  trace->state = TraceUNFLIPPED;
+
+  /* Scan the roots. This step is equivalent to traceFlip. */

   rootsStepClosureInit(rsc, arenaGlobals, trace, RootsWalkFix, f, p, s);
   ss = rootsStepClosure2ScanState(rsc);
-
   for(rank = RankMIN; rank < RankLIMIT; ++rank) {
     ss->rank = rank;
     AVERT(ScanState, ss);
@@ -343,16 +346,24 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
     if (res != ResOK)
       break;
   }
+  rootsStepClosureFinish(rsc);
+
+  trace->state = TraceFLIPPED;
+  arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace);

-  /* Turn segments black again. */
+  /* At this point in a normal trace we'd search and scan grey
+     segments, but there aren't any, so we can proceed directly to
+     reclaim. */
+  trace->state = TraceRECLAIM;
+
+  /* Turn segments black again. This is equivalent to traceReclaim,
+     but in this case nothing is reclaimed. */
   if (SegFirst(&seg, arena)) {
     do {
       SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
     } while (SegNext(&seg, arena, seg));
   }

-  rootsStepClosureFinish(rsc);
-  /* Make this trace look like any other finished trace. */
+  trace->state = TraceFINISHED;
   TraceDestroyFinished(trace);
   AVER(!ArenaEmergency(arena)); /* There was no allocation. */
@@ -441,9 +452,10 @@ static Res poolWalk(Arena arena, Pool pool, mps_area_scan_t area_scan, void *clo
    * white set means that the MPS_FIX1 test will always fail and
    * _mps_fix2 will never be called. */
   res = TraceCreate(&trace, arena, TraceStartWhyWALK);
-  /* Fail if no trace available. Unlikely due to .assume.parked. */
-  if (res != ResOK)
+  if (res != ResOK) {
+    NOTREACHED; /* .assume.parked means a trace should be available */
     return res;
+  }
   trace->white = ZoneSetEMPTY;
   trace->state = TraceFLIPPED;
   arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace);

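
Note on [PATCH 06/15]: the walk now passes through the same states as a
real collection. A minimal sketch of that lifecycle as a transition check;
the state names match the TraceState constants used in the patch, but the
checking function is illustrative only, not MPS code:

    #include <assert.h>

    typedef enum {
      TraceINIT, TraceUNFLIPPED, TraceFLIPPED, TraceRECLAIM, TraceFINISHED
    } TraceState;

    /* Legal forward transitions: INIT -> UNFLIPPED -> FLIPPED ->
       RECLAIM -> FINISHED, as the reworked ArenaRootsWalk performs. */
    static int traceStateNextOK(TraceState from, TraceState to)
    {
      switch (from) {
      case TraceINIT:      return to == TraceUNFLIPPED;
      case TraceUNFLIPPED: return to == TraceFLIPPED;
      case TraceFLIPPED:   return to == TraceRECLAIM;
      case TraceRECLAIM:   return to == TraceFINISHED;
      default:             return 0;
      }
    }

    int main(void)
    {
      assert(traceStateNextOK(TraceINIT, TraceUNFLIPPED));
      assert(!traceStateNextOK(TraceINIT, TraceFLIPPED));
      return 0;
    }
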
From 8a01218c4e516c0fae6cf78025a6d95acee3d9ce Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Thu, 30 Mar 2023 22:15:47 +0100
Subject: [PATCH 07/15] Disabling assertions about trace generations that are
 false for walks and transforms.

---
 code/trace.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/code/trace.c b/code/trace.c
index f6825b7a21..27799698da 100644
--- a/code/trace.c
+++ b/code/trace.c
@@ -191,20 +191,20 @@ Bool TraceCheck(Trace trace)
     break;

   case TraceUNFLIPPED:
-    CHECKL(!RingIsSingle(&trace->genRing));
+    /* CHECKL(!RingIsSingle(&trace->genRing)); FIXME: Not true for walks and probably transforms */
     CHECKL(!TraceSetIsMember(trace->arena->flippedTraces, trace));
     /* @@@@ Assert that mutator is grey for trace. */
     break;

   case TraceFLIPPED:
-    CHECKL(!RingIsSingle(&trace->genRing));
+    /* CHECKL(!RingIsSingle(&trace->genRing)); FIXME: Not true for walks and probably transforms */
     CHECKL(TraceSetIsMember(trace->arena->flippedTraces, trace));
     CHECKL(RankCheck(trace->band));
     /* @@@@ Assert that mutator is black for trace. */
     break;

   case TraceRECLAIM:
-    CHECKL(!RingIsSingle(&trace->genRing));
+    /* CHECKL(!RingIsSingle(&trace->genRing)); FIXME: Not true for walks and probably transforms */
     CHECKL(TraceSetIsMember(trace->arena->flippedTraces, trace));
     /* @@@@ Assert that grey set is empty for trace. */
     break;

From 8421f4f05a2b1acac4f5d69864a7f9348d85df1f Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Fri, 31 Mar 2023 07:37:08 +0100
Subject: [PATCH 08/15] Adding clarifying comments suggested during
 walkthrough with JPH.

---
 code/shield.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/code/shield.c b/code/shield.c
index dcb7550db3..86d0a4cf3f 100644
--- a/code/shield.c
+++ b/code/shield.c
@@ -450,6 +450,9 @@ static void shieldFlushEntries(Shield shield)
     Seg seg = shieldDequeue(shield, i);
     if (!SegIsSynced(seg)) {
       shieldSetPM(shield, seg, SegSM(seg));
+      /* If the segment has a different mode from the previous run, or
+         it's not contiguous with the previous run, then set the OS
+         protection on the previous run, and start a new run. */
       if (SegSM(seg) != mode || SegBase(seg) != limit) {
         if (base != NULL) {
           AVER(base < limit);
@@ -461,6 +464,7 @@ static void shieldFlushEntries(Shield shield)
       limit = SegLimit(seg);
     }
   }
+  /* Set the OS protection on the last run, if there is one. */
  if (base != NULL) {
     AVER(base < limit);
     ProtSet(base, limit, mode);

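
Note on [PATCH 08/15]: the comments describe run coalescing: after the
queue is sorted by address, adjacent segments needing the same protection
mode are batched into a single protection call. A self-contained sketch of
that loop, assuming the sort has already happened; Addr, AccessSet,
SegEntry and protSet are simplified stand-ins, not MPS types:

    #include <stddef.h>

    typedef char *Addr;
    typedef unsigned AccessSet;
    typedef struct { Addr base, limit; AccessSet mode; } SegEntry;

    /* Stub standing in for the OS protection call (ProtSet). */
    static void protSet(Addr base, Addr limit, AccessSet mode)
    {
      (void)base; (void)limit; (void)mode;
    }

    static void flushRuns(SegEntry *queue, size_t n)
    {
      Addr base = NULL, limit = NULL;
      AccessSet mode = 0;
      size_t i;
      for (i = 0; i < n; ++i) {
        /* Mode change or address gap ends the current run. */
        if (queue[i].mode != mode || queue[i].base != limit) {
          if (base != NULL)
            protSet(base, limit, mode);  /* protect the finished run */
          base = queue[i].base;
          mode = queue[i].mode;
        }
        limit = queue[i].limit;
      }
      if (base != NULL)
        protSet(base, limit, mode);      /* protect the last run */
    }

    int main(void)
    {
      char memory[64];
      SegEntry queue[3] = {
        { memory +  0, memory + 16, 1 },  /* run 1 */
        { memory + 16, memory + 32, 1 },  /* contiguous, same mode: coalesced */
        { memory + 32, memory + 48, 2 }   /* mode change: new run */
      };
      flushRuns(queue, 3);                /* two protSet calls, not three */
      return 0;
    }
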
From d8c9f43b891733b711b78ad037cc0d8da8008e50 Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Fri, 31 Mar 2023 07:36:43 +0100
Subject: [PATCH 09/15] Passing a shield to shield functions, rather than an
 arena, to ensure the shield gets checked. Also makes the shield code smaller
 and more natural. Fixes GitHub issue #205.

---
 code/dbgpool.c |   4 +-
 code/global.c  |  14 +++----
 code/ld.c      |   4 +-
 code/mpm.h     |  45 ++++++++++----------
 code/poolamc.c |  28 ++++++-------
 code/poolams.c |   5 ++-
 code/poolawl.c |   4 +-
 code/poolmrg.c |   4 +-
 code/poolsnc.c |   8 ++--
 code/seg.c     |  28 ++++++-------
 code/shield.c  | 112 +++++++++++++++++--------------------------------
 code/trace.c   |  20 ++++-----
 code/walk.c    |   8 ++--
 13 files changed, 125 insertions(+), 159 deletions(-)

diff --git a/code/dbgpool.c b/code/dbgpool.c
index 54ab6864bb..306bcf8177 100644
--- a/code/dbgpool.c
+++ b/code/dbgpool.c
@@ -356,12 +356,12 @@ static void debugPoolSegIterate(Arena arena, Addr base, Addr limit,

 static void debugPoolShieldExpose(Arena arena, Seg seg)
 {
-  ShieldExpose(arena, seg);
+  ShieldExpose(ArenaShield(arena), seg);
 }

 static void debugPoolShieldCover(Arena arena, Seg seg)
 {
-  ShieldCover(arena, seg);
+  ShieldCover(ArenaShield(arena), seg);
 }

diff --git a/code/global.c b/code/global.c
index 6c7cd39b94..12223f342a 100644
--- a/code/global.c
+++ b/code/global.c
@@ -78,7 +78,7 @@ void GlobalsReleaseAll(void)
 static void arenaReinitLock(Arena arena)
 {
   AVERT(Arena, arena);
-  ShieldLeave(arena);
+  ShieldLeave(ArenaShield(arena));
   LockInit(ArenaGlobals(arena)->lock);
 }

@@ -579,7 +579,7 @@ void ArenaEnterLock(Arena arena, Bool recursive)
   if(recursive) {
     /* already in shield */
   } else {
-    ShieldEnter(arena);
+    ShieldEnter(ArenaShield(arena));
   }
 }

@@ -611,7 +611,7 @@ void ArenaLeaveLock(Arena arena, Bool recursive)
   if(recursive) {
     /* no need to leave shield */
   } else {
-    ShieldLeave(arena);
+    ShieldLeave(ArenaShield(arena));
   }
   ProtSync(arena); /* */
   if(recursive) {
@@ -917,9 +917,9 @@ Ref ArenaPeekSeg(Arena arena, Seg seg, Ref *p)

   /* We don't need to update the Seg Summary as in PoolSingleAccess
    * because we are not changing it after it has been scanned. */
-  ShieldExpose(arena, seg);
+  ShieldExpose(ArenaShield(arena), seg);
   ref = *p;
-  ShieldCover(arena, seg);
+  ShieldCover(ArenaShield(arena), seg);
   return ref;
 }

@@ -953,12 +953,12 @@ void ArenaPokeSeg(Arena arena, Seg seg, Ref *p, Ref ref)
   /* TODO: Consider checking p's alignment using seg->pool->alignment */
   /* ref is arbitrary and can't be checked */

-  ShieldExpose(arena, seg);
+  ShieldExpose(ArenaShield(arena), seg);
   *p = ref;
   summary = SegSummary(seg);
   summary = RefSetAdd(arena, summary, (Addr)ref);
   SegSetSummary(seg, summary);
-  ShieldCover(arena, seg);
+  ShieldCover(ArenaShield(arena), seg);
 }

 /* ArenaRead -- like ArenaPeek, but reference known to be owned by arena */

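
Note on the global.c changes above: every call site follows the same
expose/cover bracket, now taking the shield (and therefore checking it) in
place of the arena. A sketch of the bracket's shape; the typedefs and stub
definitions here are stand-ins so the sketch compiles on its own:

    typedef void *Ref;
    typedef struct SegStruct *Seg;
    typedef struct ShieldStruct *Shield;

    /* Stubs standing in for the real shield operations in the diff. */
    static void ShieldExpose(Shield shield, Seg seg) { (void)shield; (void)seg; }
    static void ShieldCover(Shield shield, Seg seg) { (void)shield; (void)seg; }

    /* Read one reference from a possibly-protected segment: the same
       bracket as ArenaPeekSeg above. */
    static Ref peekSketch(Shield shield, Seg seg, Ref *p)
    {
      Ref ref;
      ShieldExpose(shield, seg);  /* lift hardware protection for the MPS */
      ref = *p;
      ShieldCover(shield, seg);   /* queue the segment for re-protection */
      return ref;
    }

    int main(void)
    {
      Ref slot = (Ref)0;
      return peekSketch((Shield)0, (Seg)0, &slot) == (Ref)0 ? 0 : 1;
    }
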
diff --git a/code/ld.c b/code/ld.c
index 483c2f90cb..51342e786f 100644
--- a/code/ld.c
+++ b/code/ld.c
@@ -149,11 +149,11 @@ void LDReset(mps_ld_t ld, Arena arena)

   b = SegOfAddr(&seg, arena, (Addr)ld);
   if (b)
-    ShieldExpose(arena, seg);       /* .ld.access */
+    ShieldExpose(ArenaShield(arena), seg);       /* .ld.access */
   ld->_epoch = ArenaHistory(arena)->epoch;
   ld->_rs = RefSetEMPTY;
   if (b)
-    ShieldCover(arena, seg);
+    ShieldCover(ArenaShield(arena), seg);
 }

diff --git a/code/mpm.h b/code/mpm.h
index 9979d20180..a5941ec4c7 100644
--- a/code/mpm.h
+++ b/code/mpm.h
@@ -872,37 +872,38 @@ extern ZoneSet ZoneSetBlacklist(Arena arena);

 /* Shield Interface -- see */

+#define ShieldArena(shield) PARENT(ArenaStruct, shieldStruct, shield)
 extern void ShieldInit(Shield shield);
 extern void ShieldFinish(Shield shield);
 extern Bool ShieldCheck(Shield shield);
 extern Res ShieldDescribe(Shield shield, mps_lib_FILE *stream, Count depth);
 extern void ShieldDestroyQueue(Shield shield, Arena arena);
-extern void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode);
-extern void (ShieldLower)(Arena arena, Seg seg, AccessSet mode);
-extern void (ShieldEnter)(Arena arena);
-extern void (ShieldLeave)(Arena arena);
-extern void (ShieldExpose)(Arena arena, Seg seg);
-extern void (ShieldCover)(Arena arena, Seg seg);
-extern void (ShieldHold)(Arena arena);
-extern void (ShieldRelease)(Arena arena);
-extern void (ShieldFlush)(Arena arena);
+extern void (ShieldRaise)(Shield shield, Seg seg, AccessSet mode);
+extern void (ShieldLower)(Shield shield, Seg seg, AccessSet mode);
+extern void (ShieldEnter)(Shield shield);
+extern void (ShieldLeave)(Shield shield);
+extern void (ShieldExpose)(Shield shield, Seg seg);
+extern void (ShieldCover)(Shield shield, Seg seg);
+extern void (ShieldHold)(Shield shield);
+extern void (ShieldRelease)(Shield shield);
+extern void (ShieldFlush)(Shield shield);

 #if defined(SHIELD)
 /* Nothing to do: functions declared in all shield configurations. */
 #elif defined(SHIELD_NONE)
-#define ShieldRaise(arena, seg, mode) \
-  BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END
-#define ShieldLower(arena, seg, mode) \
-  BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END
-#define ShieldEnter(arena) BEGIN UNUSED(arena); END
-#define ShieldLeave(arena) AVER(arena->busyTraces == TraceSetEMPTY)
-#define ShieldExpose(arena, seg) \
-  BEGIN UNUSED(arena); UNUSED(seg); END
-#define ShieldCover(arena, seg) \
-  BEGIN UNUSED(arena); UNUSED(seg); END
-#define ShieldHold(arena) BEGIN UNUSED(arena); END
-#define ShieldRelease(arena) BEGIN UNUSED(arena); END
-#define ShieldFlush(arena) BEGIN UNUSED(arena); END
+#define ShieldRaise(shield, seg, mode) \
+  BEGIN UNUSED(shield); UNUSED(seg); UNUSED(mode); END
+#define ShieldLower(shield, seg, mode) \
+  BEGIN UNUSED(shield); UNUSED(seg); UNUSED(mode); END
+#define ShieldEnter(shield) BEGIN UNUSED(shield); END
+#define ShieldLeave(shield) AVER(ShieldArena(shield)->busyTraces == TraceSetEMPTY)
+#define ShieldExpose(shield, seg) \
+  BEGIN UNUSED(shield); UNUSED(seg); END
+#define ShieldCover(shield, seg) \
+  BEGIN UNUSED(shield); UNUSED(seg); END
+#define ShieldHold(shield) BEGIN UNUSED(shield); END
+#define ShieldRelease(shield) BEGIN UNUSED(shield); END
+#define ShieldFlush(shield) BEGIN UNUSED(shield); END
 #else
 #error "No shield configuration."
 #endif /* SHIELD */

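
Note on the mpm.h interface above: ShieldArena recovers the owning arena
from its embedded shield via PARENT, the MPS's container-of construction.
A self-contained sketch of how it works, with simplified structs and a
hypothetical field layout:

    #include <assert.h>
    #include <stddef.h>

    /* offsetof does the work: subtract the field's offset from the
       field's address to get back to the containing struct. */
    #define PARENT(type, field, p) \
      ((type *)((char *)(p) - offsetof(type, field)))

    typedef struct ShieldStruct { int depth; } ShieldStruct, *Shield;

    typedef struct ArenaStruct {
      int other;                  /* fields before the shield */
      ShieldStruct shieldStruct;  /* shield embedded in the arena */
    } ArenaStruct, *Arena;

    #define ShieldArena(shield) PARENT(ArenaStruct, shieldStruct, shield)

    int main(void)
    {
      ArenaStruct arenaStruct;
      Shield shield = &arenaStruct.shieldStruct;
      Arena arena = ShieldArena(shield);
      assert(arena == &arenaStruct);
      return 0;
    }
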
diff --git a/code/poolamc.c b/code/poolamc.c
index 8a29aee552..a22ef0e2e6 100644
--- a/code/poolamc.c
+++ b/code/poolamc.c
@@ -985,9 +985,9 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
     AVER(SizeIsAligned(padSize, PoolAlignment(pool)));
     AVER(AddrAdd(limit, padSize) == SegLimit(seg));
     if(padSize > 0) {
-      ShieldExpose(arena, seg);
+      ShieldExpose(ArenaShield(arena), seg);
       (*pool->format->pad)(limit, padSize);
-      ShieldCover(arena, seg);
+      ShieldCover(ArenaShield(arena), seg);
     }
   }

@@ -1032,9 +1032,9 @@ static void amcSegBufferEmpty(Seg seg, Buffer buffer)

   /* */
   if (init < limit) {
-    ShieldExpose(arena, seg);
+    ShieldExpose(ArenaShield(arena), seg);
     (*pool->format->pad)(init, AddrOffset(init, limit));
-    ShieldCover(arena, seg);
+    ShieldCover(ArenaShield(arena), seg);
   }

   /* Any allocation in the buffer (including the padding object just
@@ -1508,9 +1508,9 @@ static Res amcSegFixEmergency(Seg seg, ScanState ss, Ref *refIO)
   if(ss->rank == RankAMBIG)
     goto fixInPlace;

-  ShieldExpose(arena, seg);
+  ShieldExpose(ArenaShield(arena), seg);
   newRef = (*pool->format->isMoved)(*refIO);
-  ShieldCover(arena, seg);
+  ShieldCover(ArenaShield(arena), seg);
   if(newRef != (Addr)0) {
     /* Object has been forwarded already, so snap-out pointer. */
     /* TODO: Implement weak pointer semantics in emergency fixing. This
@@ -1592,7 +1592,7 @@ static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)

   /* .exposed.seg: Statements tagged ".exposed.seg" below require */
   /* that "seg" (that is: the 'from' seg) has been ShieldExposed. */
-  ShieldExpose(arena, seg);
+  ShieldExpose(ArenaShield(arena), seg);

   newRef = (*format->isMoved)(ref);  /* .exposed.seg */
   if(newRef == (Addr)0) {
@@ -1637,7 +1637,7 @@ static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)
       newRef = AddrAdd(newBase, headerSize);

       toSeg = BufferSeg(buffer);
-      ShieldExpose(arena, toSeg);
+      ShieldExpose(ArenaShield(arena), toSeg);

       /* Since we're moving an object from one segment to another, */
       /* union the greyness and the summaries together. */
@@ -1653,7 +1653,7 @@ static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)
       /* */
       (void)AddrCopy(newBase, base, length);  /* .exposed.seg */

-      ShieldCover(arena, toSeg);
+      ShieldCover(ArenaShield(arena), toSeg);
     } while (!BUFFER_COMMIT(buffer, newBase, length));
     STATISTIC(ss->copiedSize += length);

@@ -1675,7 +1675,7 @@ static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)
   res = ResOK;

 returnRes:
-  ShieldCover(arena, seg);  /* .exposed.seg */
+  ShieldCover(ArenaShield(arena), seg);  /* .exposed.seg */
   return res;
 }

@@ -1706,7 +1706,7 @@ static void amcSegReclaimNailed(Pool pool, Trace trace, Seg seg)
   /* see for improvements */
   headerSize = format->headerSize;
-  ShieldExpose(arena, seg);
+  ShieldExpose(ArenaShield(arena), seg);
   p = SegBase(seg);
   limit = SegBufferScanLimit(seg);
   padBase = p;
@@ -1753,7 +1753,7 @@ static void amcSegReclaimNailed(Pool pool, Trace trace, Seg seg)
     (*format->pad)(padBase, padLength);
     STATISTIC(bytesReclaimed += padLength);
   }
-  ShieldCover(arena, seg);
+  ShieldCover(ArenaShield(arena), seg);

   SegSetNailed(seg, TraceSetDel(SegNailed(seg), trace));
   SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
@@ -1887,9 +1887,9 @@ static void amcWalkAll(Pool pool, FormattedObjectsVisitor f, void *p, size_t s)

   RING_FOR(node, ring, next) {
     Seg seg = SegOfPoolRing(node);

-    ShieldExpose(arena, seg);
+    ShieldExpose(ArenaShield(arena), seg);
     amcSegWalk(seg, format, f, p, s);
-    ShieldCover(arena, seg);
+    ShieldCover(ArenaShield(arena), seg);
   }
 }

diff --git a/code/poolams.c b/code/poolams.c
index a76c5a86d8..7a24da0fca 100644
--- a/code/poolams.c
+++ b/code/poolams.c
@@ -1491,10 +1491,11 @@ static Res amsSegFix(Seg seg, ScanState ss, Ref *refIO)
     if (SegRankSet(seg) == RankSetEMPTY && ss->rank != RankAMBIG) {
       /* */
       Addr clientNext, next;
+      Shield shield = ArenaShield(PoolArena(pool));

-      ShieldExpose(PoolArena(pool), seg);
+      ShieldExpose(shield, seg);
       clientNext = (*pool->format->skip)(clientRef);
-      ShieldCover(PoolArena(pool), seg);
+      ShieldCover(shield, seg);
       next = AddrSub(clientNext, format->headerSize);
       /* Part of the object might be grey, because of ambiguous */
       /* fixes, but that's OK, because scan will ignore that. */

diff --git a/code/poolawl.c b/code/poolawl.c
index 17347d7af5..4e60d1bc9c 100644
--- a/code/poolawl.c
+++ b/code/poolawl.c
@@ -869,7 +869,7 @@ static Res awlScanObject(Arena arena, AWL awl, ScanState ss,
   dependent = SegOfAddr(&dependentSeg, arena, dependentObject);
   if (dependent) {
     /* */
-    ShieldExpose(arena, dependentSeg);
+    ShieldExpose(ArenaShield(arena), dependentSeg);
     /* */
     SegSetSummary(dependentSeg, RefSetUNIV);
   }

   res = TraceScanFormat(ss, base, limit);

   if (dependent)
-    ShieldCover(arena, dependentSeg);
+    ShieldCover(ArenaShield(arena), dependentSeg);

   return res;
 }

diff --git a/code/poolmrg.c b/code/poolmrg.c
index 426aaf8667..631fd64a07 100644
--- a/code/poolmrg.c
+++ b/code/poolmrg.c
@@ -829,13 +829,13 @@ static Res MRGDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
     Bool outsideShield = !ArenaShield(arena)->inside;
     refPart = MRGRefPartOfLink(linkOfRing(node), arena);
     if (outsideShield) {
-      ShieldEnter(arena);
+      ShieldEnter(ArenaShield(arena));
     }
     res = WriteF(stream, depth + 2, "at $A Ref $A\n",
                  (WriteFA)refPart, (WriteFA)MRGRefPartRef(arena, refPart),
                  NULL);
     if (outsideShield) {
-      ShieldLeave(arena);
+      ShieldLeave(ArenaShield(arena));
     }
     if (res != ResOK)
       return res;

diff --git a/code/poolsnc.c b/code/poolsnc.c
index 6b9d87f21e..75b75b7063 100644
--- a/code/poolsnc.c
+++ b/code/poolsnc.c
@@ -287,9 +287,9 @@ static void sncRecordFreeSeg(Arena arena, SNC snc, Seg seg)
   SegSetRankAndSummary(seg, RankSetEMPTY, RefSetEMPTY);

   /* Pad the whole segment so we don't try to walk it. */
-  ShieldExpose(arena, seg);
+  ShieldExpose(ArenaShield(arena), seg);
   (*SNCPool(snc)->format->pad)(SegBase(seg), SegSize(seg));
-  ShieldCover(arena, seg);
+  ShieldCover(ArenaShield(arena), seg);

   sncSegSetNext(seg, snc->freeSegs);
   snc->freeSegs = seg;
@@ -498,9 +498,9 @@ static void sncSegBufferEmpty(Seg seg, Buffer buffer)

   /* Pad the unused space at the end of the segment */
   if (init < limit) {
-    ShieldExpose(arena, seg);
+    ShieldExpose(ArenaShield(arena), seg);
     (*pool->format->pad)(init, AddrOffset(init, limit));
-    ShieldCover(arena, seg);
+    ShieldCover(ArenaShield(arena), seg);
   }
 }

diff --git a/code/seg.c b/code/seg.c
index 9782e43b3c..e139379116 100644
--- a/code/seg.c
+++ b/code/seg.c
@@ -194,7 +194,7 @@ static void segAbsFinish(Inst inst)
      unmapped. This would require tracking of protection independent
      of the existence of a SegStruct. */
   if (seg->sm != AccessSetEMPTY) {
-    ShieldLower(arena, seg, seg->sm);
+    ShieldLower(ArenaShield(arena), seg, seg->sm);
   }

   seg->rankSet = RankSetEMPTY;

   /* See */
   AVER(seg->depth == 0);
   if (seg->queued)
-    ShieldFlush(PoolArena(SegPool(seg)));
+    ShieldFlush(ArenaShield(PoolArena(SegPool(seg))));
   AVER(!seg->queued);

   limit = SegLimit(seg);
@@ -624,7 +624,7 @@ Res SegMerge(Seg *mergedSegReturn, Seg segLo, Seg segHi)
   arena = PoolArena(SegPool(segLo));
   if (segLo->queued || segHi->queued)
-    ShieldFlush(arena);  /* see */
+    ShieldFlush(ArenaShield(arena));  /* see */

   /* Invoke class-specific methods to do the merge */
   res = Method(Seg, segLo, merge)(segLo, segHi, base, mid, limit);
@@ -678,7 +678,7 @@ Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at)
   AVER(!SegBuffer(&buffer, seg) || BufferLimit(buffer) <= at);

   if (seg->queued)
-    ShieldFlush(arena);  /* see */
+    ShieldFlush(ArenaShield(arena));  /* see */

   AVER(SegSM(seg) == SegPM(seg));

   /* Allocate the new segment object from the control pool */
@@ -1297,7 +1297,7 @@ Res SegSingleAccess(Seg seg, Arena arena, Addr addr,
     Ref ref;
     Res res;

-    ShieldExpose(arena, seg);
+    ShieldExpose(ArenaShield(arena), seg);

     if(mode & SegSM(seg) & AccessREAD) {
       /* Read access. */
@@ -1325,7 +1325,7 @@ Res SegSingleAccess(Seg seg, Arena arena, Addr addr,
      * this is conservative. */
     SegSetSummary(seg, RefSetAdd(arena, SegSummary(seg), ref));

-    ShieldCover(arena, seg);
+    ShieldCover(ArenaShield(arena), seg);

     return ResOK;
   } else {
@@ -1608,10 +1608,10 @@ static void mutatorSegSetGrey(Seg seg, TraceSet grey)
   flippedTraces = arena->flippedTraces;
   if (TraceSetInter(oldGrey, flippedTraces) == TraceSetEMPTY) {
     if (TraceSetInter(grey, flippedTraces) != TraceSetEMPTY)
-      ShieldRaise(arena, seg, AccessREAD);
+      ShieldRaise(ArenaShield(arena), seg, AccessREAD);
   } else {
     if (TraceSetInter(grey, flippedTraces) == TraceSetEMPTY)
-      ShieldLower(arena, seg, AccessREAD);
+      ShieldLower(ArenaShield(arena), seg, AccessREAD);
   }
 }

@@ -1631,7 +1631,7 @@ static void mutatorSegFlip(Seg seg, Trace trace)
   /* Raise the read barrier if the segment was not grey for any
      currently flipped trace. */
   if (TraceSetInter(SegGrey(seg), flippedTraces) == TraceSetEMPTY) {
-    ShieldRaise(arena, seg, AccessREAD);
+    ShieldRaise(ArenaShield(arena), seg, AccessREAD);
   } else {
     /* If the segment is grey for some currently flipped trace then
        the read barrier must already have been raised, either in this
@@ -1702,12 +1702,12 @@ static void mutatorSegSetRankSet(Seg seg, RankSet rankSet)
   if (oldRankSet == RankSetEMPTY) {
     if (rankSet != RankSetEMPTY) {
       AVER_CRITICAL(SegGCSeg(seg)->summary == RefSetEMPTY);
-      ShieldRaise(PoolArena(SegPool(seg)), seg, AccessWRITE);
+      ShieldRaise(ArenaShield(PoolArena(SegPool(seg))), seg, AccessWRITE);
     }
   } else {
     if (rankSet == RankSetEMPTY) {
       AVER_CRITICAL(SegGCSeg(seg)->summary == RefSetEMPTY);
-      ShieldLower(PoolArena(SegPool(seg)), seg, AccessWRITE);
+      ShieldLower(ArenaShield(PoolArena(SegPool(seg))), seg, AccessWRITE);
     }
   }
 }

@@ -1727,9 +1727,9 @@ static void mutatorSegSyncWriteBarrier(Seg seg)
   Arena arena = PoolArena(SegPool(seg));
   /* Can't check seg -- this function enforces invariants tested by
      SegCheck. */
   if (SegSummary(seg) == RefSetUNIV)
-    ShieldLower(arena, seg, AccessWRITE);
+    ShieldLower(ArenaShield(arena), seg, AccessWRITE);
   else
-    ShieldRaise(arena, seg, AccessWRITE);
+    ShieldRaise(ArenaShield(arena), seg, AccessWRITE);
 }

@@ -1888,7 +1888,7 @@ static Res gcSegMerge(Seg seg, Seg segHi,
     /* This shield won't cope with a partially-protected segment, so
        flush the shield queue to bring both halves in sync. See also
       . */
-    ShieldFlush(PoolArena(SegPool(seg)));
+    ShieldFlush(ArenaShield(PoolArena(SegPool(seg))));
   }

   /* Merge the superclass fields via next-method call */

diff --git a/code/shield.c b/code/shield.c
index 86d0a4cf3f..dc1a4595eb 100644
--- a/code/shield.c
+++ b/code/shield.c
@@ -265,16 +265,13 @@ static void shieldSync(Shield shield, Seg seg)
  * .inv.unsynced.suspended.
  */

-static void shieldSuspend(Arena arena)
+static void shieldSuspend(Shield shield)
 {
-  Shield shield;
-
-  AVERT(Arena, arena);
-  shield = ArenaShield(arena);
   AVERT(Shield, shield);
   AVER(shield->inside);

   if (!shield->suspended) {
+    Arena arena = ShieldArena(shield);
     ThreadRingSuspend(ArenaThreadRing(arena), ArenaDeadRing(arena));
     shield->suspended = TRUE;
   }
@@ -288,11 +285,11 @@ static void shieldSuspend(Arena arena)
  * when we must scan all thread registers at once.
  */

-void (ShieldHold)(Arena arena)
+void (ShieldHold)(Shield shield)
 {
-  AVERT(Arena, arena);
-  shieldSuspend(arena);
-  ++ArenaShield(arena)->holds;
+  AVERT(Shield, shield);
+  shieldSuspend(shield);
+  ++shield->holds;
 }

@@ -302,16 +299,11 @@ void (ShieldHold)(Arena arena)
  * this marks the earliest point at which we could resume.
  */

-void (ShieldRelease)(Arena arena)
+void (ShieldRelease)(Shield shield)
 {
-  Shield shield;
-
-  AVERT(Arena, arena);
-  shield = ArenaShield(arena);
   AVERT(Shield, shield);
   AVER(shield->inside);
   AVER(shield->suspended);
-
   AVER(shield->holds > 0);
   --shield->holds;
@@ -444,6 +436,7 @@ static void shieldFlushEntries(Shield shield)
                 shieldQueueEntryCompare, UNUSED_POINTER,
                 &shield->sortStruct);

+  /* Coalesce runs of segments that need the same protection mode. */
   mode = AccessSetEMPTY;
   limit = NULL;
   for (i = 0; i < shield->limit; ++i) {
@@ -480,13 +473,9 @@ static void shieldFlushEntries(Shield shield)
  * queued and the mutator is suspended.
  */

-static void shieldQueue(Arena arena, Seg seg)
+static void shieldQueue(Shield shield, Seg seg)
 {
-  Shield shield;
-
   /* */
-  AVERT_CRITICAL(Arena, arena);
-  shield = ArenaShield(arena);
   AVERT_CRITICAL(Shield, shield);
   SHIELD_AVERT_CRITICAL(Seg, seg);

@@ -501,7 +490,7 @@ static void shieldQueue(Arena arena, Seg seg)
        segment, then raise the shield on it.  In this case, the
        mutator isn't allowed to see the segment, but we don't need to
       queue it until its covered. */
-    shieldSuspend(arena);
+    shieldSuspend(shield);
     return;
   }

@@ -518,7 +507,7 @@ static void shieldQueue(Arena arena, Seg seg)
     else
       length = shield->length * 2;

-    res = ControlAlloc(&p, arena, length * sizeof shield->queue[0]);
+    res = ControlAlloc(&p, ShieldArena(shield), length * sizeof shield->queue[0]);
     if (res != ResOK) {
       AVER(ResIsAllocFailure(res));
       /* Carry on with the existing queue. */
@@ -527,7 +516,7 @@ static void shieldQueue(Arena arena, Seg seg)
     } else {
       Size oldSize = shield->length * sizeof shield->queue[0];
       AVER(shield->queue != NULL);
       mps_lib_memcpy(p, shield->queue, oldSize);
-      ControlFree(arena, shield->queue, oldSize);
+      ControlFree(ShieldArena(shield), shield->queue, oldSize);
     }
     shield->queue = p;
     shield->length = length;
@@ -575,23 +564,19 @@ static void shieldQueue(Arena arena, Seg seg)
  * covered and the shield queue is unavailable.
  */

-void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode)
+void (ShieldRaise)(Shield shield, Seg seg, AccessSet mode)
 {
-  Shield shield;
-
-  SHIELD_AVERT(Arena, arena);
+  SHIELD_AVERT(Shield, shield);
   SHIELD_AVERT(Seg, seg);
   AVERT(AccessSet, mode);
-  shield = ArenaShield(arena);
-  AVERT(Shield, shield);

   /* preserved */
-  shieldSetSM(ArenaShield(arena), seg, BS_UNION(SegSM(seg), mode));
+  shieldSetSM(shield, seg, BS_UNION(SegSM(seg), mode));

   /* Ensure and */
-  shieldQueue(arena, seg);
+  shieldQueue(shield, seg);

   /* Check queue and segment consistency. */
-  AVERT(Arena, arena);
+  AVERT(Shield, shield); /* FIXME: Does this achieve the above comment? */
   AVERT(Seg, seg);
 }

 /* ShieldLower -- declare segment may be accessed by mutator */

-void (ShieldLower)(Arena arena, Seg seg, AccessSet mode)
+void (ShieldLower)(Shield shield, Seg seg, AccessSet mode)
 {
-  Shield shield;
-
-  AVERT(Arena, arena);
-  shield = ArenaShield(arena);
+  AVERT(Shield, shield);
   SHIELD_AVERT(Seg, seg);
   AVERT(AccessSet, mode);

   /* TODO: Do we need to promptly call shieldProtLower here?  It
      loses the opportunity to coalesce the protection call.  It
      would violate . */
-  /* shieldQueue(arena, seg); */
+  /* shieldQueue(shield, seg); */
   shieldProtLower(shield, seg, mode);

   /* Check queue and segment consistency. */
-  AVERT(Arena, arena);
+  AVERT(Shield, shield); /* FIXME: Does this achieve the above comment? */
   AVERT(Seg, seg);
 }

 /* ShieldEnter -- enter the shield, allowing exposes */

-void (ShieldEnter)(Arena arena)
+void (ShieldEnter)(Shield shield)
 {
-  Shield shield;
-
-  AVERT(Arena, arena);
-  shield = ArenaShield(arena);
   AVERT(Shield, shield);
   AVER(!shield->inside);
   AVER(shield->depth == 0);
   AVER(!shield->suspended);

 #if defined(SHIELD_DEBUG)
-static void shieldDebugCheck(Arena arena)
+static void shieldDebugCheck(Shield shield)
 {
-  Shield shield;
+  Arena arena;
   Seg seg;
   Count queued = 0;
   Count depth = 0;
   Count unqueued = 0;
   Count unsynced = 0;
   Count synced = 0;

-  AVERT(Arena, arena);
-  shield = ArenaShield(arena);
   AVERT(Shield, shield);
   AVER(shield->inside || shield->limit == 0);

-  if (SegFirst(&seg, arena))
+  arena = ShieldArena(shield);
+  if (SegFirst(&seg, arena)) {
     do {
       depth += SegDepth(seg);
       if (shield->limit == 0) {
         AVER(!seg->queued);
         AVER(shield->unsynced > 0 || SegIsSynced(seg));
         /* You can directly set protections here to see if it makes a
            difference. */
         /* ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg)); */
       }
       if (seg->queued)
         ++queued;
       if (!SegIsSynced(seg))
         ++unsynced;
       if (seg->queued && SegIsSynced(seg))
         ++synced;
       if (!seg->queued && !SegIsSynced(seg))
         ++unqueued;
     } while(SegNext(&seg, arena, seg));
+  }

   AVER(depth == shield->depth);
   AVER(queued == shield->limit);
   AVER(unsynced == shield->unsynced);
   AVER(unqueued == shield->unqueued);
   AVER(synced == shield->synced);
 }
 #endif

  * . */

-void (ShieldFlush)(Arena arena)
+void (ShieldFlush)(Shield shield)
 {
-  Shield shield;
-
-  AVERT(Arena, arena);
-  shield = ArenaShield(arena);
   AVERT(Shield, shield);
 #ifdef SHIELD_DEBUG
-  shieldDebugCheck(arena);
+  shieldDebugCheck(shield);
 #endif
   shieldFlushEntries(shield);
   AVER(shield->unsynced == 0); /* everything back in sync */
 #ifdef SHIELD_DEBUG
-  shieldDebugCheck(arena);
+  shieldDebugCheck(shield);
 #endif
 }

 /* ShieldLeave -- leave the shield, protect segs from mutator */

-void (ShieldLeave)(Arena arena)
+void (ShieldLeave)(Shield shield)
 {
-  Shield shield;
-
-  AVERT(Arena, arena);
-  shield = ArenaShield(arena);
   AVERT(Shield, shield);
   AVER(shield->inside);
   AVER(shield->depth == 0); /* no pending covers */
   AVER(shield->holds == 0);

-  ShieldFlush(arena);
+  ShieldFlush(shield);
   AVER(shield->unsynced == 0); /* everything back in sync */

   /* Ensuring the mutator is running at this point guarantees
      .inv.outside.running */
   if (shield->suspended) {
+    Arena arena = ShieldArena(shield);
     ThreadRingResume(ArenaThreadRing(arena), ArenaDeadRing(arena));
     shield->suspended = FALSE;
   }

-void (ShieldExpose)(Arena arena, Seg seg)
+void (ShieldExpose)(Shield shield, Seg seg)
 {
-  Shield shield;
   AccessSet mode = AccessREAD | AccessWRITE;

   /* */
-  AVERT_CRITICAL(Arena, arena);
-  shield = ArenaShield(arena);
-  AVERT(Shield, shield);
+  AVERT_CRITICAL(Shield, shield);
   AVER_CRITICAL(shield->inside);

   SegSetDepth(seg, SegDepth(seg) + 1);
   AVER_CRITICAL(shield->depth > 0); /* overflow */

   if (BS_INTER(SegPM(seg), mode) != AccessSetEMPTY)
-    shieldSuspend(arena);
+    shieldSuspend(shield);

   /* Ensure . */
   /* TODO: Mass exposure -- see

 /* ShieldCover -- declare MPS no longer needs access to seg */

-void (ShieldCover)(Arena arena, Seg seg)
+void (ShieldCover)(Shield shield, Seg seg)
 {
-  Shield shield;
-
   /* */
-  AVERT_CRITICAL(Arena, arena);
-  shield = ArenaShield(arena);
   AVERT_CRITICAL(Shield, shield);
   AVERT_CRITICAL(Seg, seg);
   AVER_CRITICAL(SegPM(seg) == AccessSetEMPTY);

   --shield->depth;

   /* Ensure . */
-  shieldQueue(arena, seg);
+  shieldQueue(shield, seg);
 }

diff --git a/code/trace.c b/code/trace.c
index 27799698da..faccb5b727 100644
--- a/code/trace.c
+++ b/code/trace.c
@@ -454,7 +454,7 @@ Res TraceCondemnEnd(double *mortalityReturn, Trace trace)
   AVER(trace->state == TraceINIT);
   AVER(trace->white == ZoneSetEMPTY);

-  ShieldHold(trace->arena);
+  ShieldHold(ArenaShield(trace->arena));
   RING_FOR(genNode, &trace->genRing, genNext) {
     Size condemnedBefore, condemnedGen;
     Ring segNode, segNext;
@@ -472,7 +472,7 @@ Res TraceCondemnEnd(double *mortalityReturn, Trace trace)
     condemnedGen = trace->condemned - condemnedBefore;
     casualtySize += (Size)((double)condemnedGen * gen->mortality);
   }
-  ShieldRelease(trace->arena);
+  ShieldRelease(ArenaShield(trace->arena));

   if (TraceIsEmpty(trace))
     return ResFAIL;
@@ -489,7 +489,7 @@ Res TraceCondemnEnd(double *mortalityReturn, Trace trace)
      triggered.  In that case, we'll have to recover here by
      blackening the segments again. */
   AVER(TraceIsEmpty(trace));
-  ShieldRelease(trace->arena);
+  ShieldRelease(ArenaShield(trace->arena));
   return res;
 }
@@ -624,7 +624,7 @@ static Res traceFlip(Trace trace)
   arena = trace->arena;
   rfc.arena = arena;

-  ShieldHold(arena);
+  ShieldHold(ArenaShield(arena));
   AVER(trace->state == TraceUNFLIPPED);
   AVER(!TraceSetIsMember(arena->flippedTraces, trace));
@@ -681,11 +681,11 @@ static Res traceFlip(Trace trace)

   EVENT2(TraceFlipEnd, trace, arena);

-  ShieldRelease(arena);
+  ShieldRelease(ArenaShield(arena));

   return ResOK;

 failRootFlip:
-  ShieldRelease(arena);
+  ShieldRelease(ArenaShield(arena));
   return res;
 }
@@ -1202,10 +1202,10 @@ static Res traceScanSegRes(TraceSet ts, Rank rank, Arena arena, Seg seg)
     ScanStateInitSeg(ss, ts, arena, rank, white, seg);

     /* Expose the segment to make sure we can scan it. */
-    ShieldExpose(arena, seg);
+    ShieldExpose(ArenaShield(arena), seg);
     res = SegScan(&wasTotal, seg, ss);
     /* Cover, regardless of result */
-    ShieldCover(arena, seg);
+    ShieldCover(ArenaShield(arena), seg);

     traceSetUpdateCounts(ts, arena, ss, traceAccountingPhaseSegScan);
     /* Count segments scanned pointlessly */
@@ -1473,7 +1473,7 @@ static Res traceScanSingleRefRes(TraceSet ts, Rank rank, Arena arena,
   }

   ScanStateInit(&ss, ts, arena, rank, white);
-  ShieldExpose(arena, seg);
+  ShieldExpose(ArenaShield(arena), seg);

   TRACE_SCAN_BEGIN(&ss) {
     res = TRACE_FIX12(&ss, refIO);
@@ -1483,7 +1483,7 @@ static Res traceScanSingleRefRes(TraceSet ts, Rank rank, Arena arena,
   summary = SegSummary(seg);
   summary = RefSetAdd(arena, summary, *refIO);
   SegSetSummary(seg, summary);
-  ShieldCover(arena, seg);
+  ShieldCover(ArenaShield(arena), seg);

   traceSetUpdateCounts(ts, arena, &ss, traceAccountingPhaseSingleScan);
   ScanStateFinish(&ss);

diff --git a/code/walk.c b/code/walk.c
index 853198303e..2af6d56f24 100644
--- a/code/walk.c
+++ b/code/walk.c
@@ -75,9 +75,9 @@ static void ArenaFormattedObjectsWalk(Arena arena, FormattedObjectsVisitor f,
   if (SegFirst(&seg, arena)) {
     do {
       if (PoolFormat(&format, SegPool(seg))) {
-        ShieldExpose(arena, seg);
+        ShieldExpose(ArenaShield(arena), seg);
         SegWalk(seg, format, f, p, s);
-        ShieldCover(arena, seg);
+        ShieldCover(ArenaShield(arena), seg);
       }
     } while(SegNext(&seg, arena, seg));
   }
@@ -476,9 +476,9 @@ static Res poolWalk(Arena arena, Pool pool, mps_area_scan_t area_scan, void *clo
     ScanStateSetSummary(&ss, RefSetEMPTY);

     /* Expose the segment to make sure we can scan it. */
-    ShieldExpose(arena, seg);
+    ShieldExpose(ArenaShield(arena), seg);
     res = SegScan(&wasTotal, seg, &ss);
-    ShieldCover(arena, seg);
+    ShieldCover(ArenaShield(arena), seg);

     if (needSummary)
       ScanStateUpdateSummary(&ss, seg, res == ResOK && wasTotal);

From 20c014d5b35f92469f63a363976ae7b52e126ad8 Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Fri, 31 Mar 2023 07:55:51 +0100
Subject: [PATCH 10/15] Don't remove segment from pool ring until it is
 flushed from the shield queue, so that shieldDebugCheck can find and check
 it. Fixes GitHub issue #206.

---
 code/seg.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/code/seg.c b/code/seg.c
index e139379116..3169b2aa54 100644
--- a/code/seg.c
+++ b/code/seg.c
@@ -185,8 +185,6 @@ static void segAbsFinish(Inst inst)

   AVERT(Seg, seg);

-  RingRemove(SegPoolRing(seg));
-
   arena = PoolArena(SegPool(seg));

   /* TODO: It would be good to avoid deprotecting segments eagerly
@@ -205,8 +203,9 @@ static void segAbsFinish(Inst inst)
     ShieldFlush(ArenaShield(PoolArena(SegPool(seg))));
   AVER(!seg->queued);

-  limit = SegLimit(seg);
+  RingRemove(SegPoolRing(seg));

+  limit = SegLimit(seg);
   TRACT_TRACT_FOR(tract, addr, arena, seg->firstTract, limit) {
     AVERT(Tract, tract);
     TRACT_UNSET_SEG(tract);

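
Note on [PATCH 10/15]: the point is ordering: ShieldFlush may traverse
every segment (in SHIELD_DEBUG builds), so the dying segment must remain on
its pool ring until after the flush. A hypothetical, self-contained sketch
of that order; the struct and helper names are stand-ins, not MPS code:

    #include <assert.h>

    /* Simplified segment: an exposure depth, a shield-queue flag, and
       whether it is still linked on its pool ring. */
    typedef struct SegStruct {
      unsigned depth;
      int queued;
      int onRing;
    } SegStruct, *Seg;

    static void shieldFlushSketch(Seg seg)
    {
      /* The real ShieldFlush may walk all segments, so the segment
         must still be discoverable via its pool ring here. */
      assert(seg->onRing);
      seg->queued = 0;
    }

    static void segFinishOrderSketch(Seg seg)
    {
      assert(seg->depth == 0);    /* not exposed */
      if (seg->queued)
        shieldFlushSketch(seg);   /* flush while still on the ring */
      assert(!seg->queued);
      seg->onRing = 0;            /* only now unlink from the pool ring */
    }

    int main(void)
    {
      SegStruct seg = { 0, 1, 1 };
      segFinishOrderSketch(&seg);
      assert(!seg.onRing);
      return 0;
    }
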
From f33795d1b1b564fe966d19d0058c8bc396a26454 Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Fri, 31 Mar 2023 08:05:20 +0100
Subject: [PATCH 11/15] Fixing comments with design references and promptness
 of assertions in segAbsFinish.

---
 code/seg.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/code/seg.c b/code/seg.c
index 3169b2aa54..441fc6020d 100644
--- a/code/seg.c
+++ b/code/seg.c
@@ -187,6 +187,9 @@ static void segAbsFinish(Inst inst)

   arena = PoolArena(SegPool(seg));

+  /* Don't finish exposed segments (design.mps.shield.def.depth). */
+  AVER(seg->depth == 0);
+
   /* TODO: It would be good to avoid deprotecting segments eagerly
      when we free them, especially if they're going to be
      unmapped. This would require tracking of protection independent
@@ -203,6 +206,11 @@ static void segAbsFinish(Inst inst)
     ShieldFlush(ArenaShield(PoolArena(SegPool(seg))));
   AVER(!seg->queued);

+  /* Ensure spare committed pages are not protected
+     (design.mps.arena.spare-committed). */
+  AVER(seg->sm == AccessSetEMPTY);
+  AVER(seg->pm == AccessSetEMPTY);
+
   RingRemove(SegPoolRing(seg));

   limit = SegLimit(seg);
@@ -214,14 +222,6 @@ static void segAbsFinish(Inst inst)

   RingFinish(SegPoolRing(seg));

-  /* Check that the segment is not exposed, or in the shield */
-  /* cache . */
-  AVER(seg->depth == 0);
-  /* Check not shielded or protected (so that pages in hysteresis */
-  /* fund are not protected) */
-  AVER(seg->sm == AccessSetEMPTY);
-  AVER(seg->pm == AccessSetEMPTY);
-
   seg->sig = SigInvalid;
   InstFinish(CouldBeA(Inst, seg));
 }

From 6db32e5efc098154fdc78137559251aa83cb84ab Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Fri, 31 Mar 2023 08:20:11 +0100
Subject: [PATCH 12/15] Oops. Fixing shield function calls from ANSI platform
 to go with d8c9f43b8.

---
 code/protan.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/code/protan.c b/code/protan.c
index c83a25197c..c2335c320d 100644
--- a/code/protan.c
+++ b/code/protan.c
@@ -60,9 +60,9 @@ void ProtSync(Arena arena)
     if (SegFirst(&seg, arena)) {
       do {
         if (SegPM(seg) != AccessSetEMPTY) { /* */
-          ShieldEnter(arena);
+          ShieldEnter(ArenaShield(arena));
           TraceSegAccess(arena, seg, SegPM(seg));
-          ShieldLeave(arena);
+          ShieldLeave(ArenaShield(arena));
           synced = FALSE;
         }
       } while(SegNext(&seg, arena, seg));

From dc401a3a2bee7550b39447602edfb5d43d444d0a Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Fri, 31 Mar 2023 08:45:35 +0100
Subject: [PATCH 13/15] Removing FIXMEs that I believe are satisfied. Updating
 copyright dates.

---
 code/shield.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/code/shield.c b/code/shield.c
index dc1a4595eb..8cf0edc812 100644
--- a/code/shield.c
+++ b/code/shield.c
@@ -1,7 +1,7 @@
 /* shield.c: SHIELD IMPLEMENTATION
  *
  * $Id$
- * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license.
+ * Copyright (c) 2001-2023 Ravenbrook Limited. See end of file for license.
  *
  * See: idea.shield, .
  *
@@ -582,7 +582,7 @@ void (ShieldRaise)(Shield shield, Seg seg, AccessSet mode)
   shieldQueue(shield, seg);

   /* Check queue and segment consistency. */
-  AVERT(Shield, shield); /* FIXME: Does this achieve the above comment? */
+  AVERT(Shield, shield);
   AVERT(Seg, seg);
 }

@@ -606,7 +606,7 @@ void (ShieldLower)(Shield shield, Seg seg, AccessSet mode)
   shieldProtLower(shield, seg, mode);

   /* Check queue and segment consistency. */
-  AVERT(Shield, shield); /* FIXME: Does this achieve the above comment? */
+  AVERT(Shield, shield);
   AVERT(Seg, seg);
 }

@@ -789,7 +789,7 @@ void (ShieldCover)(Shield shield, Seg seg)

 /* C. COPYRIGHT AND LICENSE
  *
- * Copyright (C) 2001-2020 Ravenbrook Limited .
+ * Copyright (C) 2001-2023 Ravenbrook Limited .
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are

From 166b06668a76a56e579d7245c66bf6afd742c92f Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Tue, 4 Apr 2023 08:49:20 +0100
Subject: [PATCH 14/15] Including deep checking in build-and-test CI
 configuration, and documenting it in design.mps.tests.ci.run.other.deep.

---
 .github/workflows/build-and-test.yml | 10 ++++++++++
 .github/workflows/deep-check.yml     | 25 -------------------------
 Makefile.in                          |  5 ++++-
 design/tests.txt                     |  6 ++++++
 4 files changed, 20 insertions(+), 26 deletions(-)
 delete mode 100644 .github/workflows/deep-check.yml

diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index 1ab63c1159..f90e3e0dcf 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -66,6 +66,16 @@ jobs:
         nmake /f w3i6mv.nmk all testci testansi testpollnone
       shell: cmd

+  # See design.mps.tests.ci.run.other.deep.
+  deep-check:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+    - run: ./configure
+    - run: make test-make-build-deep
+

 # A. REFERENCES
 #
diff --git a/.github/workflows/deep-check.yml b/.github/workflows/deep-check.yml
deleted file mode 100644
index f33fa0823c..0000000000
--- a/.github/workflows/deep-check.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-# .github/workflows/deep-check.yml -- GitHub CI deep check configuration for the MPS
-#
-# Copyright (c) 2023 Ravenbrook Limited. See end of file for license.
-#
-# FIXME: Document the design for this.
-#
-# Part of a fix for GitHub issue #208
-# to ensure deep
-# checking does not rot.
-
-name: deep check
-
-on:
-  - push
-  - pull_request
-  # Also run when triggered manually, e.g. by tool/github-ci-kick
-  #
-  - workflow_dispatch
-
-jobs:
-  deep-check:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - run: make -C code -f lii6ll.gmk VARIETY=cool CFLAGS='-DCHECKLEVEL=CheckLevelDEEP' testci
diff --git a/Makefile.in b/Makefile.in
index a1e8759406..a8b468fa00 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -79,6 +79,9 @@ test-make-build:
	$(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool clean testansi
	$(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool CFLAGS="-DCONFIG_POLL_NONE" clean testpollnone

+test-make-build-deep:
+	$(MAKE) $(TARGET_OPTS) VARIETY=cool CFLAGS='-DCHECKLEVEL=CheckLevelDEEP' testci
+
 test-xcode-build:
	$(XCODEBUILD) -config Debug   -target testci
	$(XCODEBUILD) -config Release -target testci
@@ -88,7 +91,7 @@ test: @TEST_TARGET@

 # C. COPYRIGHT AND LICENSE
 #
-# Copyright (C) 2012-2020 Ravenbrook Limited .
+# Copyright (C) 2012-2023 Ravenbrook Limited .
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
diff --git a/design/tests.txt b/design/tests.txt
index eed0c4fc85..114ce3d981 100644
--- a/design/tests.txt
+++ b/design/tests.txt
@@ -311,6 +311,12 @@ least::

 as defined by the `.ci.github.config`_.

+_`.ci.run.other.deep`: CI services also run the test-make-build-deep
+target (defined in `Makefile.in`_) in order to exercise
+`design.mps.check.level.deep`_ and ensure that it stays usable. This
+target is only run on GitHub's "ubuntu-latest" platform in order to
+save resources, but could be extended.
+
 _`.ci.run.other.targets`: On some platforms we arrange to run the
 testansi, testpollnone, testratio, and testscheme targets. [Need to
 explain why, where, etc. RB 2023-01-15]

From 7f18ffb6e2ee3160fda9c4a3d972c543975d9112 Mon Sep 17 00:00:00 2001
From: Richard Brooksby
Date: Tue, 4 Apr 2023 09:08:34 +0100
Subject: [PATCH 15/15] Oops. Fixing missing target link in design.mps.tests.

---
 design/tests.txt | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/design/tests.txt b/design/tests.txt
index 114ce3d981..0c860702d0 100644
--- a/design/tests.txt
+++ b/design/tests.txt
@@ -317,6 +317,8 @@ target (defined in `Makefile.in`_) in order to exercise
 target is only run on GitHub's "ubuntu-latest" platform in order to
 save resources, but could be extended.

+.. _design.mps.check.level.deep: check.txt#level-deep
+
 _`.ci.run.other.targets`: On some platforms we arrange to run the
 testansi, testpollnone, testratio, and testscheme targets. [Need to
 explain why, where, etc. RB 2023-01-15]

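
Note on the deep-checking CI target: building with
-DCHECKLEVEL=CheckLevelDEEP selects check macros that follow pointers into
substructures instead of merely testing them. A hypothetical sketch of how
such a compile-time dispatch can work; the real MPS machinery lives in
check.h and config.h and differs in detail, so names and values here are
illustrative:

    #include <assert.h>
    #include <stddef.h>

    /* Check levels as numeric macros so they work in #if. */
    #define CheckLevelMINIMAL 1
    #define CheckLevelSHALLOW 2
    #define CheckLevelDEEP    3

    #ifndef CHECKLEVEL
    #define CHECKLEVEL CheckLevelSHALLOW
    #endif

    #define CHECKL(cond) assert(cond)

    /* Follow pointers into substructures only when deep checking is
       enabled; otherwise settle for a non-NULL test. */
    #if CHECKLEVEL >= CheckLevelDEEP
    #define CHECKD(typeCheck, val) CHECKL((val) != NULL && typeCheck(val))
    #else
    #define CHECKD(typeCheck, val) CHECKL((val) != NULL)
    #endif

    typedef struct Node { struct Node *child; } Node;

    static int NodeCheck(Node *node)
    {
      CHECKL(node != NULL);
      if (node->child != NULL)
        CHECKD(NodeCheck, node->child);  /* recurses only when deep */
      return 1;
    }

    int main(void)
    {
      Node leaf = { NULL }, root = { &leaf };
      return NodeCheck(&root) ? 0 : 1;
    }

Compiling the sketch with -DCHECKLEVEL=CheckLevelDEEP (as the
test-make-build-deep target does for the MPS proper) turns the shallow
non-NULL tests into recursive structural checks, which is exactly the mode
of operation this patch series keeps working.
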