diff --git a/proof/crefine/AARCH64/ADT_C.thy b/proof/crefine/AARCH64/ADT_C.thy
index 96ce1bd238..d51bff8bec 100644
--- a/proof/crefine/AARCH64/ADT_C.thy
+++ b/proof/crefine/AARCH64/ADT_C.thy
@@ -787,6 +787,7 @@ lemma map_to_ctes_tcb_ctes:
   "ctes_of s' = ctes_of s \<Longrightarrow> ko_at' tcb p s \<Longrightarrow> ko_at' tcb' p s' \<Longrightarrow>
    \<forall>x\<in>ran tcb_cte_cases. fst x tcb' = fst x tcb"
+  supply raw_tcb_cte_cases_simps[simp] (* FIXME arch-split: legacy, try use tcb_cte_cases_neqs *)
   apply (clarsimp simp add: ran_tcb_cte_cases)
   apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKO_opt_tcb
                  split: kernel_object.splits)
diff --git a/proof/crefine/AARCH64/ArchMove_C.thy b/proof/crefine/AARCH64/ArchMove_C.thy
index 3c1fe7bb54..dd8bb34863 100644
--- a/proof/crefine/AARCH64/ArchMove_C.thy
+++ b/proof/crefine/AARCH64/ArchMove_C.thy
@@ -34,15 +34,6 @@ lemma ps_clear_is_aligned_ksPSpace_None:
                     power_overflow)
   by assumption
 
-lemma ps_clear_is_aligned_ctes_None:
-  assumes "ps_clear p tcbBlockSizeBits s"
-  and "is_aligned p tcbBlockSizeBits"
-  shows "ksPSpace s (p + 2*2^cteSizeBits) = None"
-  and "ksPSpace s (p + 3*2^cteSizeBits) = None"
-  and "ksPSpace s (p + 4*2^cteSizeBits) = None"
-  by (auto intro: assms ps_clear_is_aligned_ksPSpace_None
-           simp: objBits_defs mask_def)+
-
 lemma word_shift_by_3:
   "x * 8 = (x::'a::len word) << 3"
   by (simp add: shiftl_t2n)
@@ -89,6 +80,15 @@ lemmas unat64_eq_of_nat = unat_eq_of_nat[where 'a=64, folded word_bits_def]
 
 context begin interpretation Arch .
 
+lemma ps_clear_is_aligned_ctes_None:
+  assumes "ps_clear p tcbBlockSizeBits s"
+  and "is_aligned p tcbBlockSizeBits"
+  shows "ksPSpace s (p + 2*2^cteSizeBits) = None"
+  and "ksPSpace s (p + 3*2^cteSizeBits) = None"
+  and "ksPSpace s (p + 4*2^cteSizeBits) = None"
+  by (auto intro: assms ps_clear_is_aligned_ksPSpace_None
+           simp: objBits_defs mask_def)+
+
 crunch archThreadGet
   for inv'[wp]: P
 
diff --git a/proof/crefine/AARCH64/Finalise_C.thy b/proof/crefine/AARCH64/Finalise_C.thy
index 939f62ed25..2ad3bfcce7 100644
--- a/proof/crefine/AARCH64/Finalise_C.thy
+++ b/proof/crefine/AARCH64/Finalise_C.thy
@@ -2391,7 +2391,7 @@ lemma associateVCPUTCB_ccorres:
           apply vcg
          apply wpsimp
         apply (vcg exspec=dissociateVCPUTCB_modifies)
-       apply ((wpsimp wp: hoare_vcg_all_lift hoare_drop_imps
+       apply ((wpsimp wp: hoare_vcg_all_lift hoare_drop_imps valid_arch_tcb_lift'
               | strengthen invs_valid_objs' invs_arch_state')+)[1]
       apply (vcg exspec=dissociateVCPUTCB_modifies)
      apply (rule_tac Q'="\<lambda>_.
                           invs' and vcpu_at' vcpuptr and tcb_at' tptr" in hoare_post_imp)
diff --git a/proof/crefine/AARCH64/IsolatedThreadAction.thy b/proof/crefine/AARCH64/IsolatedThreadAction.thy
index b11ec605d4..ea26491a83 100644
--- a/proof/crefine/AARCH64/IsolatedThreadAction.thy
+++ b/proof/crefine/AARCH64/IsolatedThreadAction.thy
@@ -142,11 +142,11 @@ end
 
 lemmas getObject_return_tcb
     = getObject_return[OF meta_eq_to_obj_eq, OF loadObject_tcb,
-                       unfolded objBits_simps, simplified]
+                       unfolded gen_objBits_simps, simplified]
 
 lemmas setObject_modify_tcb
     = setObject_modify[OF _ meta_eq_to_obj_eq, OF _ updateObject_tcb,
-                       unfolded objBits_simps, simplified]
+                       unfolded gen_objBits_simps, simplified]
 
 lemma partial_overwrite_fun_upd:
   "inj idx \<Longrightarrow>
diff --git a/proof/crefine/AARCH64/Recycle_C.thy b/proof/crefine/AARCH64/Recycle_C.thy
index ec167b9566..b3c098b535 100644
--- a/proof/crefine/AARCH64/Recycle_C.thy
+++ b/proof/crefine/AARCH64/Recycle_C.thy
@@ -502,10 +502,10 @@ lemma heap_to_user_data_in_user_mem'[simp]:
    apply (subst is_aligned_add_helper[THEN conjunct2,where n1 = 3])
     apply (erule aligned_add_aligned)
      apply (simp add: is_aligned_mult_triv2[where n = 3,simplified])
-    apply (clarsimp simp: objBits_simps AARCH64.pageBits_def)
+    apply (clarsimp simp: gen_objBits_simps AARCH64.pageBits_def)
    apply simp
   apply (rule is_aligned_add_helper[THEN conjunct2])
-   apply (simp add: AARCH64.pageBits_def objBits_simps)
+   apply (simp add: AARCH64.pageBits_def gen_objBits_simps)
    apply (rule word_less_power_trans2[where k = 3,simplified])
     apply (rule less_le_trans[OF ucast_less])
      apply simp+
diff --git a/proof/crefine/Move_C.thy b/proof/crefine/Move_C.thy
index 7377cd1bfc..5aa161c1a1 100644
--- a/proof/crefine/Move_C.thy
+++ b/proof/crefine/Move_C.thy
@@ -131,13 +131,11 @@ lemma length_Suc_0_conv:
 lemma imp_ignore: "B \<Longrightarrow> A \<longrightarrow> B"
   by blast
 
-lemma cteSizeBits_eq:
-  "cteSizeBits = cte_level_bits"
-  by (simp add: cte_level_bits_def cteSizeBits_def)
+lemmas cteSizeBits_eq = cteSizeBits_cte_level_bits
 
 lemma cteSizeBits_le_cte_level_bits[simp]:
   "cteSizeBits \<le> cte_level_bits"
-  by (simp add: cte_level_bits_def cteSizeBits_def)
+  by (simp add: cteSizeBits_eq)
 
 lemma unat_ucast_prio_mask_simp[simp]:
   "unat (ucast (p::priority) && mask m :: machine_word) = unat (p && mask m)"
@@ -344,6 +342,20 @@ lemma word_eq_cast_unsigned:
   "(x = y) = (UCAST ('a signed \<rightarrow> ('a :: len)) x = ucast y)"
   by (simp add: word_eq_iff nth_ucast)
 
+(* tcbIPCBufferSlot is last slot in TCB *)
+(* FIXME arch-split: Arch is needed for wordSizeCase, proof is same on all arches *)
+lemma (in Arch) cteSizeBits_2ptcbBlockSizeBits[simplified tcbIPCBufferSlot_def]:
+  "n \<le> tcbIPCBufferSlot \<Longrightarrow> n << cteSizeBits < 2 ^ tcbBlockSizeBits"
+  unfolding tcbIPCBufferSlot_def tcbBlockSizeBits_def cteSizeBits_def
+  apply (simp only: wordSizeCase_simp)
+  apply (rule shiftl_less_t2n; simp)
+  apply unat_arith
+  done
+
+requalify_facts Arch.cteSizeBits_2ptcbBlockSizeBits
+
+lemmas zero_less_2p_tcbBlockSizeBits = cteSizeBits_2ptcbBlockSizeBits[where n=0, simplified]
+
 lemma ran_tcb_cte_cases:
   "ran tcb_cte_cases =
      {(Structures_H.tcbCTable, tcbCTable_update),
@@ -351,7 +363,8 @@ lemma ran_tcb_cte_cases:
      (Structures_H.tcbVTable, tcbVTable_update),
      (Structures_H.tcbReply, tcbReply_update),
     (Structures_H.tcbCaller, tcbCaller_update),
      (tcbIPCBufferFrame, tcbIPCBufferFrame_update)}"
-  by (auto simp add: tcb_cte_cases_def cteSizeBits_def split: if_split_asm)
+  unfolding tcb_cte_cases_def
+  by (auto split: if_split_asm simp: tcb_cte_cases_neqs)
 
 lemma user_data_at_ko:
   "typ_at' UserDataT p s \<Longrightarrow> ko_at' UserData p s"
@@ -568,7 +581,7 @@ lemma threadGet_again:
        (threadGet ext t >>= n) s' = n rv s'"
   apply (clarsimp simp add: threadGet_def liftM_def in_monad)
   apply (frule use_valid [OF _ getObject_obj_at'])
-   apply (simp add: objBits_simps')+
+   apply (simp add: gen_objBits_simps tcbBlockSizeBits_def)+ (* FIXME arch-split: tcbBlockSizeBits *)
   apply (frule getObject_tcb_det)
   apply (clarsimp simp: bind_def split_def)
   apply (insert no_fail_getObject_tcb)
@@ -623,16 +636,17 @@ lemma mapM_only_length:
 
 lemma tcb_aligned':
   "tcb_at' t s \<Longrightarrow> is_aligned t tcbBlockSizeBits"
-  apply (drule obj_at_aligned')
-   apply (simp add: objBits_simps)
-  apply (simp add: objBits_simps)
-  done
+  by (drule obj_at_aligned'; simp add: gen_objBits_simps)
 
-lemma cte_at_0' [dest!]:
+(* identical proof on all arches *)
+lemma (in Arch) cte_at_0'[dest!]:
   "\<lbrakk> cte_at' 0 s; no_0_obj' s \<rbrakk> \<Longrightarrow> False"
   apply (clarsimp simp: cte_wp_at_obj_cases')
   by (auto simp: tcb_cte_cases_def is_aligned_def objBits_simps' dest!:tcb_aligned' split: if_split_asm)
 
+requalify_facts Arch.cte_at_0'
+lemmas [dest!] = cte_at_0'
+
 lemma getMessageInfo_le3:
   "\<lbrace>\<top>\<rbrace> getMessageInfo sender \<lbrace>\<lambda>rv s. unat (msgExtraCaps rv) \<le> 3\<rbrace>"
   including no_pre
@@ -1119,45 +1133,54 @@ lemma updateObject_cte_tcb:
                       return (KOTCB (updF (\<lambda>_. ctea) tcb))
                    od)"
   using tc unfolding tcb_cte_cases_def
-  apply -
-  apply (clarsimp simp add: updateObject_cte Let_def
-                            tcb_cte_cases_def objBits_simps' tcbSlots shiftl_t2n
-                  split: if_split_asm cong: if_cong)
-  done
+  by (clarsimp simp: updateObject_cte Let_def tcb_cte_cases_neqs gen_objBits_simps tcbSlots
+               split: if_split_asm
+               cong: if_cong)
+
+lemma self_elim:
+  "\<And>P Q. \<lbrakk> P \<Longrightarrow> Q; P \<rbrakk> \<Longrightarrow> Q"
+  by blast
 
 lemma tcb_cte_cases_in_range1:
   assumes tc:"tcb_cte_cases (y - x) = Some v"
   and     al: "is_aligned x tcbBlockSizeBits"
   shows   "x \<le> y"
 proof -
-  note objBits_defs [simp]
-
   from tc obtain q where yq: "y = x + q" and qv: "q < 2 ^ tcbBlockSizeBits"
     unfolding tcb_cte_cases_def
-    by (simp add: diff_eq_eq split: if_split_asm)
+    by (clarsimp simp: diff_eq_eq tcbSlot_defs
+                 simp del: shiftl_1
+                 elim!: self_elim
+                 intro!: cteSizeBits_2ptcbBlockSizeBits zero_less_2p_tcbBlockSizeBits
+                 split: if_split_asm)
 
   have "x \<le> x + 2 ^ tcbBlockSizeBits - 1" using al
     by (rule is_aligned_no_overflow)
 
   hence "x \<le> x + q" using qv
-    apply simp
-    apply unat_arith
-    apply simp
-    done
+    by unat_arith
 
   thus ?thesis using yq by simp
 qed
 
+lemma tcbBlockSizeBits_word_less_sub_le:
+  fixes x :: machine_word
+  shows "(x \<le> 2 ^ tcbBlockSizeBits - 1) = (x < 2 ^ tcbBlockSizeBits)"
+  unfolding tcbBlockSizeBits_def
+  by (subst word_less_sub_le; simp)
+
 lemma tcb_cte_cases_in_range2:
   assumes tc: "tcb_cte_cases (y - x) = Some v"
   and     al: "is_aligned x tcbBlockSizeBits"
   shows   "y \<le> x + 2 ^ tcbBlockSizeBits - 1"
 proof -
-  note objBits_defs [simp]
-
   from tc obtain q where yq: "y = x + q" and qv: "q \<le> 2 ^ tcbBlockSizeBits - 1"
     unfolding tcb_cte_cases_def
-    by (simp add: diff_eq_eq split: if_split_asm)
+    by (clarsimp simp: diff_eq_eq tcbSlot_defs tcbBlockSizeBits_word_less_sub_le
+                 simp del: shiftl_1
+                 elim!: self_elim
+                 intro!: cteSizeBits_2ptcbBlockSizeBits zero_less_2p_tcbBlockSizeBits
+                 split: if_split_asm)
 
   have "x + q \<le> x + (2 ^ tcbBlockSizeBits - 1)" using qv
     apply (rule word_plus_mono_right)