diff --git a/test/lucas-noob-work/clh_lock.rav b/test/lucas-noob-work/clh_lock.rav new file mode 100644 index 0000000..bd04623 --- /dev/null +++ b/test/lucas-noob-work/clh_lock.rav @@ -0,0 +1,116 @@ +// REF: https://dada.cs.washington.edu/research/tr/1993/02/UW-CSE-93-02-02.pdf +// REF: diaframe_heap_lang_examples/comparison/clh_lock.v (MPI-SWS gitlab) + +module CLHRequest { + field state: Bool; // Whether request is PENDING (true) or GRANTED (false) + field watch: Ref; // Pointer to predecessor in queue --- we wait on its state + + pred request_rep(r: Ref, b: Bool) + { + exists predreq: Ref :: own(r, watch, predreq, 1.0) && own(r, state, b, 1.0) + // TODO: Is this enough? Can we get additional assurance that predreq + // points to a valid CLHRequest? I guess we could recursively call + // request_rep here on predreq, but not sure if this supported in Raven. + } + + // NOTE: Seems like is_request would just be redundant with request_rep here. + // I can't think of a reason right now to have a separate is_request predicate + // here (like with locks) --- locks have an additional resource field that + // they're protecting, which is why having an additional predicate makes + // sense, but requests here don't have the same structure. + // pred is_request() {} + + proc create() + returns (req: Ref) + ensures request_rep(req, false) + { + req := new (state: false, watch: null); + fold request_rep(req, false); + } +} + +interface CLHLock { + module R: Library.Type + module Agree = Library.Agree[R] + + ghost field agr: Agree + + // Resource protected by the lock + pred resource(r: R) + + field tail: Ref; // Pointer to latest (tail) request in queue + // (or: request to be watched by next requester) + + pred lock_rep(l: Ref; b: Bool) + { + // What do we do with b: Bool here? I think the policy is basically: if the + // lock's request is GRANTED/false, the lock is unlocked. 
Otherwise, it is + // locked some other process and possibly has other competing acquisition + // requests in the queue. + exists req: Ref :: + own(l, tail, req, 1.0) && + CLHRequest.request_rep(req, b) + } + + // NOTE: Ripped directly from the lock.rav higher-order module for locks. + pred is_lock(l: Ref; r: R, b: Bool) { + own(l, agr, Agree.agree(r)) + && lock_rep(l, b) + && (b ? true : resource(r)) + } + + // According to Craig, need to allocate a Request (node in Diaframe) for the lock + // and mark the Request as GRANTED (i.e. the lock is free). In Diaframe, the + // state field is false/true vs. GRANTED/PENDING in Craig's paper. + // It actually seems that in Diaframe, we combine the lock and the lock node + // initialization, i.e. why the allocation looks like ref (ref #false). + proc create(r: R) + returns (lk: Ref) + requires resource(r) + ensures is_lock(lk, false) + { + // NOTE: This won't work --- procedure calls are only allowed as top-level + // statements (for simplicity). We can't nest them inside an allocation. + // This should be fixed now, but previously we had: + // new (tail: CLHRequest.create(), agr: Agree.agree(r)) + // NOTE: Agree.agree(r) is fine because agree, I think, is a *function*, + // i.e. it has no side-effects. + var lkreq: Ref := CLHRequest.create(); + lk := new (tail: lkreq, agr: Agree.agree(r)); + } + + // acquire takes two arguments: a Ref to the lock and a Ref to a node that is + // associated with the acquiring process. + // acquire needs to set its own node's (aka request in Craig's parlance) + // "locked" ("state" in Craig's parlance) to True. Then it needs to perform + // an (atomic) fetch-and-store that replaces the lock's tail with a pointer + // to the acquiring node and the acquiring node's watching field with the lock's + // current tail. + proc acquire(lk: Ref, req: Ref) + requires is_lock(lk) && request_rep(req) + { + req.state := true; + // TODO: The "fetch-and-store" here needs to be atomic --- I think? 
+ // NOTE: This won't work! Only 1 logically atomic step per statement. This + // is 2 steps: a read and a write. Should be fixed below, but it was + // originally: + // req.watch := lk.tail + currtail := lk.tail; + req.watch := currtail; + lk.tail := req; + } + + // release takes one argument: a Ref to the node that is associated with the + // releasing process. It returns a Ref that represents the Request/node + // granted to it by its predecessor --- this is now the Request/node that the + // current process owns. + // TODO: How do we ensure that only the process holding the lock can release it? + proc release(req: Ref) + returns (predreq: Ref) + requires request_rep(req, false) + ensures request_rep(predreq) + { + req.state := false; + predreq := req.watch; + } +} diff --git a/test/lucas-noob-work/clh_lock.v b/test/lucas-noob-work/clh_lock.v new file mode 100644 index 0000000..823e1ca --- /dev/null +++ b/test/lucas-noob-work/clh_lock.v @@ -0,0 +1,106 @@ +(* This is material copied from the Diaframe proof of the CLH lock. I add some +annotations here that help tie it to the canonical description of the lock in +Craig's paper "Bulding FIFO and Priority-Queuing Spin Locks from Atomic Swap". *) + +From diaframe.heap_lang Require Import proof_automation wp_auto_lob. +From diaframe.lib Require Import own_hints. +From iris.algebra Require Import agree frac excl. + +Definition new_lock : val := + λ: "_", ref (ref #false). + +Definition new_node : val := + λ: "_", + let: "ret" := AllocN #2 NONE in (* NOTE: Nodes in Diaframe, it seems, are tuples --- hence, the AllocN #2. We set both fields here to NONE, then we set the first field to false in the following line. It helps to notice that we are directly referencing memory locations here --- not logical structures at the programming language level --- so when we set the value at "ret" to be ref #false, we are setting the first memory unit (doesn't really matter, but i.e. 
would be a byte in a byte-addressable system), or the first field (logically speaking), to false. The second field remains NONE. *) + "ret" <- ref #false;; + "ret". + +Definition wait_on : val := (* NOTE: This is the spin part of the spinlock. *) + rec: "wait_on" "l" := + if: ! "l" then + "wait_on" "l" + else + #(). + +Definition acquire : val := + λ: "lk" "node", + let: "l" := ! "node" in + "l" <- #true;; + let: "pred" := Xchg "lk" "l" in (* also called fetch_and_store sometimes *) + ("node" +ₗ #1) <- "pred";; (* NOTE: Here, we are setting our Request/node state to PENDING (true in Diaframe code). Then, we set the lock's tail (lk) to the pointer to the acquiring Request/node's reference and the acquiring Request/node's predecessor pointer (i.e. the pointer we're watching to determine our place in line) to the lock's tail (the one from before we swapped it out/set it to something else). *) + wait_on "pred". + +Definition release : val := + λ: "node", + ! "node" <- #false;; (* NOTE: false == GRANTED and true == PENDING in Craig's paper. We can interpret false here as false to the statement "waiting to acquire" or "is pending". *) + "node" <- ! ("node" +ₗ #1). (* NOTE: this step corresponds to the step in Craig's description of grant_lock where the releaser takes ownership of the Request/node granted to it by its predecessor --- in the Diaframe code here, ("node" + #1) is the second field in the node tuple that stores the pointer to the predecessor reference (see line 30 above for the associated operation in acquire) *) + +(* NOTE: I guess in this implementation, each calling thread must keep track of its own associated node/Request that it currently owns. It's returned by release though, for example, so it can be reused as long as we save that returned value. *) + +(* NOTE: Below, we have the specifications and proofs of the implementation above. *) + +Definition clh_lockR := prodR (agreeR $ leibnizO loc) fracR. 
+Class clh_lockG Σ := CLHLockG { + #[local] clh_lock_tokG :: inG Σ clh_lockR; + #[local] clh_lock_exclG :: inG Σ $ exclR unitO +}. +Definition clh_lockΣ : gFunctors := #[GFunctor clh_lockR; GFunctor $ exclR unitO]. + +#[local] Obligation Tactic := program_verify. +Global Program Instance subG_clh_lockΣ {Σ} : subG clh_lockΣ Σ → clh_lockG Σ. + +Section spec. + Context `{!heapGS Σ, clh_lockG Σ}. + + Let N := nroot .@ "clhlock". + Definition N_node := N .@ "node". + + Definition queued_loc γe l γ R : iProp Σ := + own γ (to_agree l, 1%Qp) ∨ (∃ (b : bool), l ↦{#1/2} #b ∗ (⌜b = true⌝ ∨ (⌜b = false⌝ ∗ R ∗ own γe (Excl ()) ∗ own γ (to_agree l, 1/2)%Qp ∗ l ↦{#1/2} #b))). + (* NOTE: Lemma for a queued location? Not sure what the arguments here mean. Either gamma owns 1.0 of l. Or there is some boolean b s.t. the value at l is b and we own 1/2 of l. Either b is true, which should mean the location is attempting to acquire the lock (?) or b is false (we hold the lock) AND we own resource R, we have exclusive ownership of γe, which I guess is the watched node, l owns 1/2 of γ, which I guess is the owned node, and l also owns half of the state bool of the owned node? *) + (* NOTE: From the spin_lock.v example, it seems like gamma and gammae should indicate ghost state, particularly a ghost name. How does this make sense here? In spin_lock.v, own gamma (Excl ()) indicates locked gamma, which means that the lock is currently locked/held (as the name suggests lol). In fact, queued_loc looks *a lot* like lock_inv in the spin lock verification --- I believe it serves the same purpose. BTW: each node's first field is a bool for its state and its second field is a pointer to the node it's watching. l should be that first field, i.e. the bool for the state. R is the resource and indicates whether the process that owns the node currently has access to that resource. γe is the ghost name representing the lock itself. γ is the ghost name representing the node itself (the one that's queued). 
*) + (* NOTE: I gotta figure out what the left disjoint here means. The right disjoint makes more sense given our deduced interpretation above. When a process queues a location/node, it has 1/2 ownership over it (the points to just means that the first field of the node is the state boolean). If b is true, i.e. we are still pending/in the process of acquiring the lock, that's the whole spec --- the predecessor node's process holds the other 1/2 ownership over our node. If b is false, i.e. we now have the lock, we own R, we have exclusive lock access (own gammae (Excl ())), we also own the other 1/2 of our current node now (since there's no more predecessor node). The question remains: what is the own gammae (to_agree l, ...) stuff doing, both in the left disjunct and in the right disjunct in the b = false branch. *) + (* NOTE: Also, for this lock, shouldn't we be watching the predecessor's state? The above interpretation makes it seem like we're just looking at our own node's state. *) + + Definition free_node n : iProp Σ := + ∃ (l : loc) v, n ↦ #l ∗ (n +ₗ 1) ↦ v ∗ l ↦ #false. + (* NOTE: l is a location, not sure what v is. n is the node. The first field of the node points to the address of a location l that holds the state value. The second field of the node points to some v...this would generally be the watched node, but it doesn't seem to matter here. the value at l should be false, since this is the default value when a node is first created. *) + (* NOTE: Being free is the opposite of being queued (i.e. this predicate should be the converse of queued_loc above). *) + + Definition acquired_node γe n R : iProp Σ := + ∃ (l p : loc), n ↦ #l ∗ (∃ γ, inv N_node (queued_loc γe l γ R) ∗ own γ (to_agree l, 1/2)%Qp) ∗ l ↦{#1/2} #true ∗ + (n +ₗ 1) ↦ #p ∗ p ↦ #false ∗ own γe (Excl ()). + (* NOTE: l and p are both locations. p is the predecessor. n needs to be queued (can't be free?). Like above, first field of n points to address of state location. 
*) + + Definition lock_inv γe lk R : iProp Σ := + ∃ (l : loc), lk ↦ #l ∗ ∃ γ, own γ (to_agree l, 1/2)%Qp ∗ inv N_node (queued_loc γe l γ R). + (* NOTE: Lock points to the address of location l. There is some gamma, a ghost name representing a node, that we own 1/2 of and that has value l (note that lk points to the address of l, which would also then be the address of this node). I think thinking through and relating lock_inv to the definition of queued_loc will help me understand what the own γ (to_agree l, 1/2)%Qp stuff is doing. Specifically, the own here makes it so that we cannot take the left disjunct of queued_loc...*) + (* NOTE: BTW, the fact that we have γe here strongly supports the fact that it is a ghost name representing the lock itself. *) + + Definition is_lock γe R v : iProp Σ := + ∃ (lk : loc), ⌜v = #lk⌝ ∗ inv N (lock_inv γe lk R). + + (* These are the specifications for each of the procedure definitions at the top of this file. They are just Hoare triples, using the lemmas we defined just above this. *) + Global Program Instance new_lock_spec : + SPEC {{ True }} + new_lock #() + {{ (lk : val), RET lk; (∀ R, R ={⊤}=∗ ∃ γe, is_lock γe R lk) }}. + + Global Program Instance new_node_spec : + SPEC {{ True }} + new_node #() + {{ (n : loc), RET #n; free_node n }}. + + Global Program Instance acquire_spec R (n : loc) (lk : val) γe : + SPEC {{ is_lock γe R lk ∗ free_node n }} + acquire lk #n + {{ RET #(); R ∗ acquired_node γe n R }}. + + Global Program Instance release_spec R (n : loc) γe : + SPEC {{ R ∗ acquired_node γe n R }} + release #n + {{ RET #(); free_node n }}. + + Definition acquired_mutual_exclusion γe n m R : acquired_node γe n R ∗ acquired_node γe m R ⊢ False + := verify. (* Cannot do Lemma _ : _ := verify unfortunately. If proper reuse is wanted this should be a Mergable instance *) +End spec. 
diff --git a/test/lucas-noob-work/clh_lock_clean.rav b/test/lucas-noob-work/clh_lock_clean.rav new file mode 100644 index 0000000..58f4629 --- /dev/null +++ b/test/lucas-noob-work/clh_lock_clean.rav @@ -0,0 +1,170 @@ +// REF: https://dada.cs.washington.edu/research/tr/1993/02/UW-CSE-93-02-02.pdf +// REF: diaframe_heap_lang_examples/comparison/clh_lock.v (MPI-SWS gitlab) + +interface CLHLock { + module R: Library.Type + module Agree = Library.Agree[R] + module BoolType : Library.Type { + rep type T = Bool + } + module FracBool = Library.Frac[BoolType] + + import FracBool.frac_chunk + ghost field agr: Agree + + // =========================================================================== + // Section defining loc (just a flag for storing node "locked" state) + // =========================================================================== + field locked: Bool; + field l_agr: Bool; // QUESTION: How do I use a unit type? Seems like it's + // been deprecated for an empty tuple, but how do I define a tuple type? For + // now, I guess I'll just have a bool that's always set to true. 
+ + // =========================================================================== + // Section defining node fields, predicates, and procedures + // =========================================================================== + field loc: Ref; + field predecessor: Ref; + + pred free_node(node: Ref) { + exists ln: Ref, pn: Ref :: + own(node, loc, ln, 1.0) && + own(ln, locked, false, 1.0) && + own(node, predecessor, pn, 1.0) && + own(ln, l_agr, true, 1.0) + } + + proc create_node() + returns (node: Ref) + ensures free_node(node) + { + var newloc: Ref; + newloc := new (locked: false, l_agr: true); + node := new (loc: newloc, predecessor: null); + // NOTE: Might need to be false + fold free_node(node) [b_disj := true]; + } + + // =========================================================================== + // Lock definition + // =========================================================================== + // Resource protected by the lock + pred resource(r: R) + + // Structurally, the lock is a pointer to the tail loc in our implicit queue + field tail: Ref + + // =========================================================================== + // Predicates and invariants used for verification + // =========================================================================== + inv queued_loc(l: Ref, r: R) { + exists b_disj: Bool, b: Bool :: + (b_disj ? 
own(l, l_agr, true, 1.0) : own(l, locked, b, 0.5) && + (!b ==> resource(r) && own(l, l_agr, true, 0.5) && own(l, locked, b, 0.5))) + } + + inv lock_inv(lk: Ref, r: R) { + exists l: Ref :: + own(lk, tail, l, 1.0) && + own(l, l_agr, true, 0.5) && + queued_loc(l, r) + } + + pred acquired_node(node: Ref, r: R) { + exists ln: Ref, pn: Ref :: + own(node, loc, ln, 1.0) && + queued_loc(ln, r) && own(ln, l_agr, true, 0.5) && + own(ln, locked, true, 0.5) && + own(node, predecessor, pn, 1.0) && + own(pn, locked, false, 0.5) && + resource(r) + } + + // =========================================================================== + // Actual procedures (with specifications) on locks + // =========================================================================== + proc create_lock(r: R) + returns (lk: Ref) + requires resource(r) + ensures lock_inv(lk, r) + { + var l: Ref; + l := new (locked: false, l_agr: true); + lk := new (tail: l, agr: Agree.agree(r)); + fold queued_loc(l, r) [b_disj := false]; + fold lock_inv(lk, r); + } + + proc wait_on(lk: Ref, pn: Ref, ln: Ref, node: Ref, implicit ghost r: R) + requires lock_inv(lk, r) && queued_loc(ln, r) && + own(node, loc, ln, 1.0) && + own(node, predecessor, pn, 1.0) && + own(ln, l_agr, true, 0.5) && + own(ln, locked, true, 0.5) && + queued_loc(pn, r) && own(pn, l_agr, true, 0.5) + ensures acquired_node(node, r) + { + ghost var branch: Bool; + unfold queued_loc(pn, r) {branch :| b_disj}; + var b: Bool := pn.locked; + // assert (!b ==> resource(r) && own(pn, l_agr, true, 0.5) && + // own(pn, locked, b, 0.5)); + + {! + if (!b) { + // TODO: think this through a bit more + fold acquired_node(node, r); + // NOTE: This line currently causes crash...won't dig further for now. 
+ // fpu(pn, full_agr, frac_chunk(false, 1.0), frac_chunk(true, 1.0)); + assert ln != pn; + // assert own(pn, full_agr, false, 1.0); + assert own(pn, locked, b, 0.5); + fold queued_loc(pn, r) [b := b, b_disj := true]; + } else { + fold queued_loc(pn, r) [b := b, b_disj := branch]; + // TODO: Updates to ghost fields in ghost blocks are not allowed (when + // wrapped in a Frac) -- message Ekansh about this + } + !} + + if (b) { + wait_on(lk, pn, ln, node, r); + } + } + + proc xchg(lk: Ref, ln: Ref, implicit ghost t: Ref) + returns (pn: Ref) + atomic requires own(lk, tail, t, 1.0) + atomic ensures own(lk, tail, ln, 1.0) && pn == t + + proc acquire(lk: Ref, node: Ref, implicit ghost r: R) + requires lock_inv(lk, r) && free_node(node) + ensures acquired_node(node, r) + { + unfold free_node(node); + var ln: Ref := node.loc; + ln.locked := true; + + unfold lock_inv(lk, r); + var pn: Ref := xchg(lk, ln); + fold queued_loc(ln, r) [b_disj := false]; + fold lock_inv(lk, r); + + // Set new predecessor + node.predecessor := pn; + // Wait on the predecessor + wait_on(lk, pn, ln, node, r); + } + + proc release(node: Ref, implicit ghost r: R) + requires acquired_node(node, r) + ensures free_node(node) + { + unfold acquired_node(node, r); + var ln: Ref := node.loc; + ln.locked := false; + var pn: Ref := node.predecessor; + node.loc := pn; + fold free_node(node); + } +} diff --git a/test/lucas-noob-work/clh_lock_clean.v b/test/lucas-noob-work/clh_lock_clean.v new file mode 100644 index 0000000..b7f7cb5 --- /dev/null +++ b/test/lucas-noob-work/clh_lock_clean.v @@ -0,0 +1,90 @@ +From diaframe.heap_lang Require Import proof_automation wp_auto_lob. +From diaframe.lib Require Import own_hints. +From iris.algebra Require Import agree frac excl. + +Definition new_lock : val := + λ: "_", ref (ref #false). + +Definition new_node : val := + λ: "_", + let: "ret" := AllocN #2 NONE in + "ret" <- ref #false;; + "ret". + +Definition wait_on : val := + rec: "wait_on" "l" := + if: ! 
"l" then + "wait_on" "l" + else + #(). + +Definition acquire : val := + λ: "lk" "node", + let: "l" := ! "node" in + "l" <- #true;; + let: "pred" := Xchg "lk" "l" in (* also called fetch_and_store sometimes *) + ("node" +ₗ #1) <- "pred";; + wait_on "pred". + +Definition release : val := + λ: "node", + ! "node" <- #false;; + "node" <- ! ("node" +ₗ #1). + +Definition clh_lockR := prodR (agreeR $ leibnizO loc) fracR. +Class clh_lockG Σ := CLHLockG { + #[local] clh_lock_tokG :: inG Σ clh_lockR; + #[local] clh_lock_exclG :: inG Σ $ exclR unitO +}. +Definition clh_lockΣ : gFunctors := #[GFunctor clh_lockR; GFunctor $ exclR unitO]. + +#[local] Obligation Tactic := program_verify. + +Global Program Instance subG_clh_lockΣ {Σ} : subG clh_lockΣ Σ → clh_lockG Σ. + +Section spec. + Context `{!heapGS Σ, clh_lockG Σ}. + + Let N := nroot .@ "clhlock". + Definition N_node := N .@ "node". + + Definition queued_loc γe l γ R : iProp Σ := + own γ (to_agree l, 1%Qp) ∨ (∃ (b : bool), l ↦{#1/2} #b ∗ (⌜b = true⌝ ∨ (⌜b = false⌝ ∗ R ∗ own γe (Excl ()) ∗ own γ (to_agree l, 1/2)%Qp ∗ l ↦{#1/2} #b))). + (* a unique pair that owns the node *) + + Definition free_node n : iProp Σ := + ∃ (l : loc) v, n ↦ #l ∗ (n +ₗ 1) ↦ v ∗ l ↦ #false. + + Definition acquired_node γe n R : iProp Σ := + ∃ (l p : loc), n ↦ #l ∗ (∃ γ, inv N_node (queued_loc γe l γ R) ∗ own γ (to_agree l, 1/2)%Qp) ∗ l ↦{#1/2} #true ∗ + (n +ₗ 1) ↦ #p ∗ p ↦ #false ∗ own γe (Excl ()). + + Definition lock_inv γe lk R : iProp Σ := + ∃ (l : loc), lk ↦ #l ∗ ∃ γ, own γ (to_agree l, 1/2)%Qp ∗ inv N_node (queued_loc γe l γ R). + + Definition is_lock γe R v : iProp Σ := + ∃ (lk : loc), ⌜v = #lk⌝ ∗ inv N (lock_inv γe lk R). + + Global Program Instance new_lock_spec : + SPEC {{ True }} + new_lock #() + {{ (lk : val), RET lk; (∀ R, R ={⊤}=∗ ∃ γe, is_lock γe R lk) }}. + + Global Program Instance new_node_spec : + SPEC {{ True }} + new_node #() + {{ (n : loc), RET #n; free_node n }}. 
+ + Global Program Instance acquire_spec R (n : loc) (lk : val) γe : + SPEC {{ is_lock γe R lk ∗ free_node n }} + acquire lk #n + {{ RET #(); R ∗ acquired_node γe n R }}. + + Global Program Instance release_spec R (n : loc) γe : + SPEC {{ R ∗ acquired_node γe n R }} + release #n + {{ RET #(); free_node n }}. + + Definition acquired_mutual_exclusion γe n m R : acquired_node γe n R ∗ acquired_node γe m R ⊢ False + := verify. (* Cannot do Lemma _ : _ := verify unfortunately. If proper reuse is wanted this should be a Mergable instance *) +End spec. diff --git a/test/lucas-noob-work/clh_lock_clean_debug.rav b/test/lucas-noob-work/clh_lock_clean_debug.rav new file mode 100644 index 0000000..6877e50 --- /dev/null +++ b/test/lucas-noob-work/clh_lock_clean_debug.rav @@ -0,0 +1,174 @@ +// REF: https://dada.cs.washington.edu/research/tr/1993/02/UW-CSE-93-02-02.pdf +// REF: diaframe_heap_lang_examples/comparison/clh_lock.v (MPI-SWS gitlab) + +interface CLHLock { + module R: Library.Type + module Agree = Library.Agree[R] + module BoolType : Library.Type { + rep type T = Bool + } + module FracBool = Library.Frac[BoolType] + + import FracBool.frac_chunk + ghost field agr: Agree + + // =========================================================================== + // Section defining loc (just a flag for storing node "locked" state) + // =========================================================================== + field locked: Bool; + field l_agr: Bool; // QUESTION: How do I use a unit type? Seems like it's + // been deprecated for an empty tuple, but how do I define a tuple type? For + // now, I guess I'll just have a bool that's always set to true. + ghost field full_agr: FracBool; // NOTE: Another hack, this time to get around the + // problem with disjunctions in predicate/invariant definitions. We will use + // this in a conditional in queued_loc to tell us which disjunct to look at. 
+ + // =========================================================================== + // Section defining node fields, predicates, and procedures + // =========================================================================== + field loc: Ref; + field predecessor: Ref; + + pred free_node(node: Ref) { + exists ln: Ref, pn: Ref :: + own(node, loc, ln, 1.0) && + own(ln, locked, false, 1.0) && + own(node, predecessor, pn, 1.0) && + own(ln, l_agr, true, 1.0) && + // TODO: Note to self -- perhaps this should really be false, but not + // currently convinced that it matters. + own(ln, full_agr, frac_chunk(true, 1.0)) // NOTE: Extra perm for disjunction hack. + } + + proc create_node() + returns (node: Ref) + ensures free_node(node) + { + var newloc: Ref; + newloc := new (locked: false, l_agr: true, full_agr: frac_chunk(true, 1.0)); + node := new (loc: newloc, predecessor: null); + fold free_node(node); + } + + // =========================================================================== + // Lock definition + // =========================================================================== + // Resource protected by the lock + pred resource(r: R) + + // Structurally, the lock is a pointer to the tail loc in our implicit queue + field tail: Ref + + // =========================================================================== + // Predicates and invariants used for verification + // =========================================================================== + inv queued_loc(l: Ref, r: R) { + exists b_disj: Bool, b: Bool :: + own(l, full_agr, frac_chunk(b_disj, 1.0)) && + (b_disj ? 
own(l, l_agr, true, 1.0) : own(l, locked, b, 0.5) && + (!b ==> resource(r) && own(l, l_agr, true, 0.5) && own(l, locked, b, 0.5))) + } + + inv lock_inv(lk: Ref, r: R) { + exists l: Ref :: + own(lk, tail, l, 1.0) && + own(l, l_agr, true, 0.5) && + queued_loc(l, r) + } + + pred acquired_node(node: Ref, r: R) { + exists ln: Ref, pn: Ref :: + own(node, loc, ln, 1.0) && + queued_loc(ln, r) && own(ln, l_agr, true, 0.5) && + own(ln, locked, true, 0.5) && + own(node, predecessor, pn, 1.0) && + own(pn, locked, false, 0.5) && + resource(r) + } + + // =========================================================================== + // Actual procedures (with specifications) on locks + // =========================================================================== + proc create_lock(r: R) + returns (lk: Ref) + requires resource(r) + ensures lock_inv(lk, r) + { + var l: Ref; + l := new (locked: false, l_agr: true, full_agr: frac_chunk(false, 1.0)); + lk := new (tail: l, agr: Agree.agree(r)); + fold queued_loc(l, r); + fold lock_inv(lk, r); + } + + proc wait_on(lk: Ref, pn: Ref, ln: Ref, node: Ref, implicit ghost r: R) + requires lock_inv(lk, r) && queued_loc(ln, r) && + own(node, loc, ln, 1.0) && + own(node, predecessor, pn, 1.0) && + own(ln, l_agr, true, 0.5) && + own(ln, locked, true, 0.5) && + queued_loc(pn, r) && own(pn, l_agr, true, 0.5) + ensures acquired_node(node, r) + { + unfold queued_loc(pn, r); + var b: Bool := pn.locked; + // assert (!b ==> resource(r) && own(pn, l_agr, true, 0.5) && own(pn, locked, b, 0.5)); + + {! + if (!b) { + // TODO: think this through a bit more + fold acquired_node(node, r); + // NOTE: This line currently causes crash...won't dig further for now. 
+ // fpu(pn, full_agr, frac_chunk(false, 1.0), frac_chunk(true, 1.0)); + assert ln != pn; + // assert own(pn, full_agr, false, 1.0); + assert own(pn, locked, b, 0.5); + fold queued_loc(pn, r) [b := b]; + } else { + fold queued_loc(pn, r) [b := b]; + // TODO: Updates to ghost fields in ghost blocks are not allowed (when + // wrapped in a Frac) -- message Ekansh about this + } + !} + + if (b) { + wait_on(lk, pn, ln, node, r); + } + } + + proc xchg(lk: Ref, ln: Ref, implicit ghost t: Ref) + returns (pn: Ref) + atomic requires own(lk, tail, t, 1.0) + atomic ensures own(lk, tail, ln, 1.0) && pn == t + + proc acquire(lk: Ref, node: Ref, implicit ghost r: R) + requires lock_inv(lk, r) && free_node(node) + ensures acquired_node(node, r) + { + unfold free_node(node); + var ln: Ref := node.loc; + ln.locked := true; + + unfold lock_inv(lk, r); + var pn: Ref := xchg(lk, ln); + fold queued_loc(ln, r); + fold lock_inv(lk, r); + + // Set new predecessor + node.predecessor := pn; + // Wait on the predecessor + wait_on(lk, pn, ln, node, r); + } + + proc release(node: Ref, implicit ghost r: R) + requires acquired_node(node, r) + ensures free_node(node) + { + unfold acquired_node(node, r); + var ln: Ref := node.loc; + ln.locked := false; + var pn: Ref := node.predecessor; + node.loc := pn; + fold free_node(node); + } +} diff --git a/test/lucas-noob-work/clh_lock_tmp.v b/test/lucas-noob-work/clh_lock_tmp.v new file mode 100644 index 0000000..2c83b8c --- /dev/null +++ b/test/lucas-noob-work/clh_lock_tmp.v @@ -0,0 +1,17 @@ + Definition queued_loc γe l γ R : iProp Σ := + own γ (to_agree l, 1%Qp) ∨ (∃ (b : bool), l ↦{#1/2} #b ∗ (⌜b = true⌝ ∨ (⌜b = false⌝ ∗ R ∗ own γe (Excl ()) ∗ own γ (to_agree l, 1/2)%Qp ∗ l ↦{#1/2} #b))). + (* If own γ (to_agree l, 1%Qp) is true, this is the very end/head of the queue, where our process will own the entirety of the node (?). I think this is the crux of a lot of this --- how exactly does queued_loc express the fact that a location/node is queued? 
A queued location is either inside the queue (waiting to be the head), or at the head. It looks like the first left disjunct is when there is only the lock location in the queue, the b=true branch of the right disjunct is when we are inside the queue, and the b=false branch of the right disjunct is when we are at the head. *) + (* NOTE: The lock is just the linked list of nodes. *) + + Definition free_node n : iProp Σ := + ∃ (l : loc) v, n ↦ #l ∗ (n +ₗ 1) ↦ v ∗ l ↦ #false. + + Definition acquired_node γe n R : iProp Σ := + ∃ (l p : loc), n ↦ #l ∗ (∃ γ, inv N_node (queued_loc γe l γ R) ∗ own γ (to_agree l, 1/2)%Qp) ∗ l ↦{#1/2} #true ∗ + (n +ₗ 1) ↦ #p ∗ p ↦ #false ∗ own γe (Excl ()). + (* This formula forces queued_loc to take the b=true path, since the b=false asserts that l ↦{#1/2} #false, while our statement here asserts that l ↦{#1/2} #true. *) + + Definition lock_inv γe lk R : iProp Σ := + ∃ (l : loc), lk ↦ #l ∗ ∃ γ, own γ (to_agree l, 1/2)%Qp ∗ inv N_node (queued_loc γe l γ R). + + (* lock --> l, lock owns half of ghost representation of l, l is a queued location. *) diff --git a/test/lucas-noob-work/daily-work-log.md b/test/lucas-noob-work/daily-work-log.md new file mode 100644 index 0000000..9cb07e5 --- /dev/null +++ b/test/lucas-noob-work/daily-work-log.md @@ -0,0 +1,8 @@ +# additional polish, small bug reports +- If we have more bindings on the left than are available, i.e: +lk, p, r := openAU(phi) + +we get this error: [Error] length mismatch in fold2_exn: 1 <> 3, which is a bit +confusing. Error would be nicer if it directly pointed to the openAU call +instead of fold2_exn, which I assume is used somewhere in the underlying layers +of the openAU implementation. 
diff --git a/test/lucas-noob-work/lclist-notes.md b/test/lucas-noob-work/lclist-notes.md new file mode 100644 index 0000000..35d3354 --- /dev/null +++ b/test/lucas-noob-work/lclist-notes.md @@ -0,0 +1,55 @@ +# lclist notes + +NOTE: OK, here's a new, hopefully more consolidated idea of what should be done: +- predicate that defines the general structure of the linked list, probably +also the sortedness requirement +- predicates that define the concurrent locking behavior of the list --- +how threads must hold locks, how different threads interact with the locks + - this might require me to define a new resource algebra...but this part + is unclear + - we'll probably need to specify linearizability with ghost state, possibly + as part of a resource algebra +- each operation must be a FPU (if we need to use a resource algebra) that +preserves the expected invariants (both concrete and ghost) + +IMPORTANT NOTE: Raven uses resource algebra types to model ghost state (just +like concrete types---i.e. Int, Bool, Ref---are used to model concrete state) +the default resource algebra is Frac + +NOTE: Raven models the heap through typed fields---all separation logic +statements about the heap are done using typed fields + +NOTE: Raven invariants are maintained *by every atomic computation step* +this makes them duplicable (and also easier to reason about, but less +expressive) + +NOTE: own(x, c, v, 1.0) means we have ownership over field c of location x, +the value at x.c is v, and we own 1.0 fractional permission over the field +x.c + +NOTE: It appears that a big point of the ARC resource algebra is to connect +global state to local state --- i.e. ensuring that any increase/decrease +in the local readers/permissions is reflected in the global readers/permissions. +Updates that satisfy this are the frame-preserving updates allowed in the +resource algebra. + +QUESTION: What is a shareable predicate in Raven? 
+ANSWER: Seems like it's just what the name implies: a predicate that is +shareable (permission-wise) between multiple threads. It's an example of +a higher order module. + +--- + +NOTE: need a specification and proof of lock behavior (we already have this with +spin-lock.rav) +NOTE: each node in the lclist has 3 fields --- a location with a value, +a tail pointer to the next node, and a lock + +an invariant to describe the structure of the list, should be preserved +after every operation +I guess the whole locking situation and concurrency invariants will need +to be encoded in ghost state somehow --- generally, ghost state allows us +to connect concrete heap details with protocol adherence +resource algebras in Raven as an abstract way to define specifications --- +I guess they are the building blocks with which we can construct our proofs? +resource algebras as a way to constrain global behavior in a system diff --git a/test/lucas-noob-work/lclist.rav b/test/lucas-noob-work/lclist.rav new file mode 100644 index 0000000..fe493da --- /dev/null +++ b/test/lucas-noob-work/lclist.rav @@ -0,0 +1,79 @@ +include "spin-lock.rav" + +module Node { + +} + +module LCList { + module SpinLk: SpinLock; + field value: Int; + field next: Ref; + field l: SpinLk; + + // NOTE: Does this preclude cycles? How? + pred lseg(x: Ref, y: Ref) { + x == y ? true : + (exists z: Ref :: own(x, next, z, 1.0) && lseg(z, y)) + } + + // TODO: This is probably not going to work... + pred sorted(x: Ref, y: Ref) + requires lseg(x, y) + { + forall a: Ref, b: Ref :: lseg(x, a) && lseg(a, b) && lseg(b, y) && (a.value <= b.value) + } + + pred is_list(x: Ref, y: Ref) { + lseg(x, y) && sorted(x, y) + } + + pred is_node(x: Ref) { + + } + + proc create_node(v: Int) + returns (ret: Ref) + ensures is_node(ret) + { + // TODO: How do we create a new Ref in Raven? 
+ var x: Ref; + x := SpinLk.create(new(value: v, next: null)); + return x; + } + + // TODO: Lemmas that do the work of actually moving pointers around to add or + // remove a node from the linked list + lemma insert_node(prevNode: Node, newNode: Node) + requires is_locked(prevNode) + // TODO: Ensure proper locking through ghost state + { + var next := prevNode.next; + prevNode.next := newNode; + newNode.next := next; + } + + lemma remove_node(prevNode: Node, currNode: Node) + requires is_locked(currNode) && is_locked(prevNode) + // TODO: Again, ensure proper locking (both prev and curr nodes must be locked) + { + var nextNode := currNode.next; + prevNode.next := nextNode; + } + + + proc create_list() {} + + + proc locate() { + // TODO + } + + proc remove() { + // TODO + // TODO: How do we handle garbage collection (i.e. deallocating currNode above) + } + + proc add() { + // TODO + } +}