diff --git a/proof/crefine/AARCH64/ADT_C.thy b/proof/crefine/AARCH64/ADT_C.thy index 96ce1bd238..168b7b386e 100644 --- a/proof/crefine/AARCH64/ADT_C.thy +++ b/proof/crefine/AARCH64/ADT_C.thy @@ -624,7 +624,7 @@ lemma carch_state_to_H_correct: apply (simp add: ccur_vcpu_to_H_correct) apply (rule conjI) using valid[simplified valid_arch_state'_def] - apply (clarsimp simp: max_armKSGICVCPUNumListRegs_def unat_of_nat_eq) + apply (clarsimp simp: max_armKSGICVCPUNumListRegs_val unat_of_nat_eq) apply (simp add: pt_t) done @@ -1017,7 +1017,7 @@ lemma cpspace_vcpu_relation_unique: apply clarsimp apply (rule ext) apply (rename_tac r) - apply (case_tac "64 \ r"; simp) + apply (case_tac "max_armKSGICVCPUNumListRegs \ r"; simp) apply (rule conjI) apply (rule ext, blast) apply (rule conjI, blast) diff --git a/proof/crefine/AARCH64/Arch_C.thy b/proof/crefine/AARCH64/Arch_C.thy index 8b7b1c02da..8e42717f08 100644 --- a/proof/crefine/AARCH64/Arch_C.thy +++ b/proof/crefine/AARCH64/Arch_C.thy @@ -3686,7 +3686,7 @@ lemma invokeVCPUInjectIRQ_ccorres: notes Collect_const[simp del] shows "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and vcpu_at' vcpuptr and K (idx < 64)) + (invs' and vcpu_at' vcpuptr and K (idx < max_armKSGICVCPUNumListRegs)) (UNIV \ \\vcpu = Ptr vcpuptr \ \ \\index = of_nat idx \ \ \ virq_to_H \virq = virq \) @@ -3721,7 +3721,7 @@ lemma invokeVCPUInjectIRQ_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg, clarsimp simp: return_def) apply wpsimp+ - apply (clarsimp simp: unat_of_nat_eq word_of_nat_less) + apply (clarsimp simp: unat_of_nat_eq word_of_nat_less max_armKSGICVCPUNumListRegs_val) apply (rule conjI) apply (clarsimp elim: typ_at'_no_0_objD invs_no_0_obj') apply (subst scast_eq_ucast; simp?) @@ -3737,9 +3737,25 @@ lemma virq_virq_pending_EN_new_spec: \ virqEOIIRQEN_' s = 1 \ virq_to_H \ret__struct_virq_C = makeVIRQ (virqGroup_' s) (virqPriority_' s) (virqIRQ_' s) \" apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) apply vcg - apply (clarsimp simp: virq_to_H_def makeVIRQ_def virq_virq_pending_def) + (* unfold config to match up with bitfield gen shifts *) + apply (clarsimp simp: virq_to_H_def make_virq_def virq_virq_pending_def + Kernel_Config.config_ARM_GIC_V3_def virq_type_shift_def + eoiirqen_shift_def mask_def) by (simp add: bit.disj_commute bit.disj_assoc bit.disj_ac) +lemma virq_virq_active_eq_is_virq_active: + "(of_nat (virq_type virq) = (scast virq_virq_active :: machine_word)) = is_virq_active virq" + apply (simp add: is_virq_active_def virq_virq_active_def) + apply (rule iffI; clarsimp?) 
+ apply (clarsimp simp: virq_type_def) + done + +lemma virq_get_tag_eq_of_nat_virq_type: + "virq_get_tag virq = of_nat (virq_type (virq_to_H virq))" + (* config unfolding to match bitfield shift in virq_get_tag with virq_type_shift in virq_type *) + by (simp add: virq_to_H_def virq_type_def virq_get_tag_def virq_type_shift_def mask_def + Kernel_Config.config_ARM_GIC_V3_def) + lemma decodeVCPUInjectIRQ_ccorres: notes if_cong[cong] Collect_const[simp del] (* csymbr will use this instead now *) @@ -3807,98 +3823,85 @@ lemma decodeVCPUInjectIRQ_ccorres: apply (clarsimp simp: rangeCheck_def not_le[symmetric] liftE_liftM[symmetric] liftE_bindE_assoc) - (* symbolically execute the gets on LHS *) - apply (rule_tac ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState, - rename_tac nregs) - - apply (rule_tac P="nregs \ max_armKSGICVCPUNumListRegs" in ccorres_gen_asm) - apply (rule_tac P="nregs \ max_armKSGICVCPUNumListRegs" - in ccorres_cross_over_guard_no_st) - - (* unfortunately directly looking at \gic_vcpu_num_list_regs means we need to abstract the - IF condition, and because of 32/64-bit casting we need to know \ max_armKSGICVCPUNumListRegs *) - apply (rule_tac Q="\s. valid_arch_state' s \ nregs = armKSGICVCPUNumListRegs (ksArchState s)" - and Q'=UNIV - and C'="{s. of_nat nregs \ (args ! 0 >> 32) && 0xFF}" - in ccorres_rewrite_cond_sr_Seq) - apply (clarsimp simp: not_le[symmetric] word_le_nat_alt unat_of_nat_eq) - apply (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - valid_arch_state'_def max_armKSGICVCPUNumListRegs_def - unat_of_nat64' unat_of_nat32') + (* symbolically execute the gets on LHS *) + apply (rule_tac ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState, + rename_tac nregs) - apply (rule ccorres_Cond_rhs_Seq) - apply (subgoal_tac "(of_nat nregs \ (args ! 0 >> 32) && 0xFF)") - prefer 2 - subgoal by (simp add: word_le_nat_alt not_le) + apply (rule_tac P="nregs < max_armKSGICVCPUNumListRegs" in ccorres_gen_asm) + apply (rule_tac P="nregs < max_armKSGICVCPUNumListRegs" in ccorres_cross_over_guard_no_st) - apply (simp add: rangeCheck_def not_le[symmetric]) - apply (simp add: throwError_bind invocationCatch_def - cong: StateSpace.state.fold_congs globals.fold_congs) + (* unfortunately directly looking at \gic_vcpu_num_list_regs means we need to abstract the + IF condition, and because of 32/64-bit casting we need to know < max_armKSGICVCPUNumListRegs *) + apply (rule_tac Q="\s. valid_arch_state' s \ nregs = armKSGICVCPUNumListRegs (ksArchState s)" + and Q'=UNIV + and C'="{s. of_nat nregs \ (args ! 0 >> 32) && 0xFF}" + in ccorres_rewrite_cond_sr_Seq) + apply (clarsimp simp: not_le[symmetric] word_le_nat_alt unat_of_nat_eq) + apply (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + valid_arch_state'_def + unat_of_nat64' unat_of_nat32') + + apply (rule ccorres_Cond_rhs_Seq) + apply (prop_tac "(of_nat nregs \ (args ! 0 >> 32) && 0xFF)") + apply (simp add: word_le_nat_alt not_le) + + apply (simp add: rangeCheck_def not_le[symmetric]) + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) (* can't use syscall_error_throwError_ccorres_n, since one of the globals updates reads a global var itself: gic_vcpu_num_list_regs_', need to split off up to the first return_C else vcg barfs *) - apply (rule ccorres_split_throws) - apply (rule_tac P="\s. 
valid_arch_state' s \ nregs = armKSGICVCPUNumListRegs (ksArchState s)" - and P'="UNIV" in ccorres_from_vcg_throws) - apply (rule allI, rule conseqPre) - apply (vcg exspec=invokeVCPUInjectIRQ_modifies) - apply (clarsimp split: prod.splits - simp: throwError_def return_def EXCEPTION_SYSCALL_ERROR_def - EXCEPTION_NONE_def syscall_error_rel_def syscall_error_to_H_def - syscall_error_type_defs) - apply (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def) - apply (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - valid_arch_state'_def max_armKSGICVCPUNumListRegs_def - unat_of_nat64' unat_of_nat32') - apply vcg - - apply (subgoal_tac "\ (of_nat nregs \ (args ! 0 >> 32) && 0xFF)") - prefer 2 - subgoal by (simp add: word_le_nat_alt not_le) - apply clarsimp - apply (rule ccorres_move_const_guard) - apply (rule ccorres_move_c_guard_vcpu) - apply (simp add: liftM_def) - apply (clarsimp simp: rangeCheck_def not_le[symmetric] - liftE_liftM[symmetric] liftE_bindE_assoc) - - apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) - apply csymbr - apply (rule ccorres_abstract_cleanup) - apply (simp add: virq_virq_active_def) - -(* FIXME AARCH64 cleanup and re-indent needed in this lemma *) + apply (rule ccorres_split_throws) + apply (rule_tac P="\s. valid_arch_state' s \ nregs = armKSGICVCPUNumListRegs (ksArchState s)" + and P'="UNIV" in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre) + apply (vcg exspec=invokeVCPUInjectIRQ_modifies) + apply (clarsimp split: prod.splits + simp: throwError_def return_def EXCEPTION_SYSCALL_ERROR_def + EXCEPTION_NONE_def syscall_error_rel_def syscall_error_to_H_def + syscall_error_type_defs) + apply (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def) + apply (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + valid_arch_state'_def not_le + unat_of_nat64' unat_of_nat32') + apply vcg -(* FIXME AARCH64 magic numbers: 3 here is the mask in virq_get_virqType, 28 is the shift *) - apply (rule_tac - P="ret__unsigned_longlong = - (vgicLR (vcpuVGIC vcpu) (unat ((args ! 0 >> 32) && 0xFF)) >> 28) && 3" - in ccorres_gen_asm2) - apply clarsimp - apply (rule ccorres_Cond_rhs_Seq) - apply (subgoal_tac "isVIRQActive (vgicLR (vcpuVGIC vcpu) (unat ((args ! 0 >> 32) && 0xFF)))") - prefer 2 - subgoal - apply (clarsimp simp: isVIRQActive_def virq_type_def word_unat_eq_iff) - done + apply (prop_tac "\ (of_nat nregs \ (args ! 0 >> 32) && 0xFF)") + apply (simp add: word_le_nat_alt not_le) + apply clarsimp + apply (rule ccorres_move_const_guard) + apply (rule ccorres_move_c_guard_vcpu) + apply (simp add: liftM_def) + apply (clarsimp simp: rangeCheck_def not_le[symmetric] + liftE_liftM[symmetric] liftE_bindE_assoc) - apply (simp add: rangeCheck_def not_le[symmetric]) - apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types - cong: StateSpace.state.fold_congs globals.fold_congs) - apply (rule syscall_error_throwError_ccorres_n) - apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply csymbr + apply (rule ccorres_abstract_cleanup) - apply (subgoal_tac "\ isVIRQActive (vgicLR (vcpuVGIC vcpu) (unat ((args ! 0 >> 32) && 0xFF)))") - prefer 2 - subgoal by (clarsimp simp: isVIRQActive_def virq_type_def word_unat_eq_iff) + apply (rule_tac P="ret__unsigned_longlong = + of_nat (virq_type (vgicLR (vcpuVGIC vcpu) + (unat ((args ! 
0 >> 32) && 0xFF))))" + in ccorres_gen_asm2) + apply clarsimp + apply (rule ccorres_Cond_rhs_Seq) + apply (prop_tac "isVIRQActive (vgicLR (vcpuVGIC vcpu) (unat ((args ! 0 >> 32) && 0xFF)))") + apply (clarsimp simp add: virq_virq_active_eq_is_virq_active) + apply (simp add: rangeCheck_def not_le[symmetric]) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) - apply clarsimp - apply (simp add: returnOk_bind bindE_assoc - performARMMMUInvocations performARMVCPUInvocation_def) - apply csymbr - apply (subst liftE_invokeVCPUInjectIRQ_empty_return) - apply clarsimp + apply (prop_tac "\is_virq_active (vgicLR (vcpuVGIC vcpu) (unat ((args ! 0 >> 32) && 0xFF)))") + apply (clarsimp simp add: virq_virq_active_eq_is_virq_active) + apply clarsimp + apply (simp add: returnOk_bind bindE_assoc not_le + performARMMMUInvocations performARMVCPUInvocation_def) + apply csymbr + apply (subst liftE_invokeVCPUInjectIRQ_empty_return) + apply clarsimp \ \we want the second alternative - nothing to return to user\ apply (ctac add: setThreadState_ccorres) @@ -3923,40 +3926,26 @@ lemma decodeVCPUInjectIRQ_ccorres: valid_tcb_state'_def ThreadState_defs mask_def) apply (frule invs_arch_state') - apply (clarsimp simp: valid_arch_state'_def max_armKSGICVCPUNumListRegs_def rf_sr_armKSGICVCPUNumListRegs) - apply (clarsimp simp: isCap_simps cap_get_tag_isCap capVCPUPtr_eq) - apply (clarsimp simp: sysargs_rel_to_n word_le_nat_alt linorder_not_less) - apply (clarsimp simp: valid_cap'_def) - apply (clarsimp simp: not_le word_le_nat_alt) + apply (clarsimp simp: valid_arch_state'_def max_armKSGICVCPUNumListRegs_val + rf_sr_armKSGICVCPUNumListRegs isCap_simps cap_get_tag_isCap capVCPUPtr_eq + sysargs_rel_to_n word_le_nat_alt linorder_not_less valid_cap'_def not_le) - apply (subgoal_tac "armKSGICVCPUNumListRegs (ksArchState s) < 2 ^ LENGTH(machine_word_len)") - prefer 2 subgoal by (erule order_le_less_trans; simp) + apply (prop_tac "armKSGICVCPUNumListRegs (ksArchState s) < 2 ^ LENGTH(machine_word_len)") + apply (erule order_less_trans; simp) apply (safe; clarsimp?) 
- apply (simp add: unat_eq_zero) - apply (subgoal_tac "armKSGICVCPUNumListRegs (ksArchState s) < 2 ^ LENGTH(machine_word_len)") - prefer 2 subgoal by (erule order_le_less_trans; simp) - apply (erule order_less_le_trans) - apply (simp add: unat_of_nat_eq) - apply (fastforce simp: sysargs_rel_to_n ct_in_state'_def st_tcb_at'_def comp_def - elim: obj_at'_weakenE) - apply (fastforce simp: sysargs_rel_to_n ct_in_state'_def st_tcb_at'_def comp_def - elim: obj_at'_weakenE) - - apply (subgoal_tac "armKSGICVCPUNumListRegs (ksArchState s) < 2 ^ LENGTH(machine_word_len)") - prefer 2 subgoal by (erule order_le_less_trans; simp) - apply (erule order_less_le_trans) + apply (simp add: unat_eq_zero) + apply (erule order_less_le_trans) + apply (simp add: unat_of_nat_eq) + apply (fastforce simp: sysargs_rel_to_n ct_in_state'_def st_tcb_at'_def comp_def + elim: obj_at'_weakenE) + apply (fastforce simp: sysargs_rel_to_n ct_in_state'_def st_tcb_at'_def comp_def + elim: obj_at'_weakenE) apply (simp add: unat_of_nat_eq) - apply (clarsimp simp: typ_heap_simps') - apply (simp add: virq_get_tag_def mask_def shiftr_over_and_dist) - apply (simp add: cvcpu_relation_def cvgic_relation_def virq_to_H_def) - apply (clarsimp simp: cvcpu_relation_def cvgic_relation_def virq_get_tag_def - shiftr_over_and_dist mask_def cvcpu_vppi_masked_relation_def) - - apply (subgoal_tac "unat ((args ! 0 >> 32) && 0xFF) \ 63") - apply (rule sym) - apply simp - apply (fastforce simp: unat_of_nat_eq) + apply (clarsimp simp: typ_heap_simps' cvcpu_relation_def cvgic_relation_def) + apply (subgoal_tac "unat ((args ! 0 >> 32) && 0xFF) < max_armKSGICVCPUNumListRegs") + apply (simp add: virq_get_tag_eq_of_nat_virq_type) + apply (fastforce simp: unat_of_nat_eq max_armKSGICVCPUNumListRegs_val) done lemma decodeVCPUReadReg_ccorres: diff --git a/proof/crefine/AARCH64/Invoke_C.thy b/proof/crefine/AARCH64/Invoke_C.thy index 08815fe4e4..7c2cd9fe5a 100644 --- a/proof/crefine/AARCH64/Invoke_C.thy +++ b/proof/crefine/AARCH64/Invoke_C.thy @@ -1804,7 +1804,7 @@ inline assembly, so we must appeal to the axiomatisation of inline assembly to s \ lemma preemptionPoint_hrs_htd: "\\. 
\\\<^bsub>/UNIV\<^esub> {\} Call preemptionPoint_'proc \hrs_htd \t_hrs = hrs_htd \<^bsup>\\<^esup>t_hrs\" - by (rule allI, rule conseqPre, vcg, clarsimp simp: asm_spec_enabled elim!: asm_specE) + by (rule allI, rule conseqPre, vcg exspec=isIRQPending_modifies, clarsimp simp: asm_spec_enabled elim!: asm_specE) lemma resetUntypedCap_ccorres: notes upt.simps[simp del] Collect_const[simp del] replicate_numeral[simp del] diff --git a/proof/crefine/AARCH64/Retype_C.thy b/proof/crefine/AARCH64/Retype_C.thy index 1dc37d6ca0..2717ef8ac7 100644 --- a/proof/crefine/AARCH64/Retype_C.thy +++ b/proof/crefine/AARCH64/Retype_C.thy @@ -5447,7 +5447,7 @@ lemma ptr_retyp_fromzeroVCPU: assumes cor: "caps_overlap_reserved' {p ..+ 2 ^ vcpuBits} \" assumes ptr0: "p \ 0" assumes kdr: "{p ..+ 2 ^ vcpuBits} \ kernel_data_refs = {}" - assumes subr: "{p ..+ 752} \ {p ..+ 2 ^ vcpuBits}" (is "{_ ..+ ?vcpusz} \ _") + assumes subr: "{p ..+ size_of TYPE(vcpu_C)} \ {p ..+ 2 ^ vcpuBits}" (is "{_ ..+ ?vcpusz} \ _") assumes act_bytes: "region_actually_is_bytes p (2 ^ vcpuBits) \'" assumes rep0: "heap_list (hrs_mem (t_hrs_' (globals \'))) (2 ^ vcpuBits) p = replicate (2 ^ vcpuBits) 0" assumes "\ snd (placeNewObject p vcpu0 0 \)" @@ -5464,10 +5464,6 @@ proof - let ?htdret = "(hrs_htd_update (ptr_retyp (vcpu_Ptr p)) (t_hrs_' (globals \')))" let ?zeros = "from_bytes (replicate (size_of TYPE(vcpu_C)) 0) :: vcpu_C" - (* sanity check for the value of ?vcpusz *) - have "size_of TYPE(vcpu_C) = ?vcpusz" - by simp - have ptr_al: "ptr_aligned (vcpu_Ptr p)" using al by (auto simp: align_of_def vcpuBits_def pageBits_def @@ -5496,7 +5492,7 @@ proof - (is "?hl = ?rep0") using retyp_p' rep0 apply (simp add: lift_t_if h_val_def) - apply (subst take_heap_list_le[where k="?vcpusz" and n="2^vcpuBits", symmetric]) + apply (subst take_heap_list_le[where k="?vcpusz" and n="2^vcpuBits", simplified, symmetric]) apply (simp add: vcpuBits_def)+ done @@ -5550,7 +5546,7 @@ proof - apply (case_tac r; clarsimp simp: index_foldr_update) apply (rule conjI, clarsimp) (* vgic_C array initialisation *) - apply (subst index_fold_update; clarsimp) + apply (subst index_fold_update; clarsimp simp: max_armKSGICVCPUNumListRegs_val) (* vppi array initialisation *) apply clarsimp apply (case_tac vppi; clarsimp) @@ -5692,7 +5688,7 @@ proof - apply (subgoal_tac "region_is_bytes p ?vcpusz \'") prefer 2 apply (fastforce simp: region_actually_is_bytes[OF act_bytes] - region_is_bytes_subset[OF _ subr]) + region_is_bytes_subset[OF _ subr, simplified]) apply (simp add: projectKO_opt_retyp_other' objBitsKO_vcpu cl_vcpu htd_safe[simplified] kernel_data_refs_domain_eq_rotate) apply (subst ptr_retyp_gen_one[symmetric])+ @@ -5719,7 +5715,7 @@ lemma placeNewObject_vcpu_fromzero_ccorres: apply (rule ccorres_from_vcg_nofail, clarsimp) apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_htd_safe) - apply (subgoal_tac "{regionBase..+752} \ {regionBase..+2^vcpuBits}") + apply (subgoal_tac "{regionBase..+size_of TYPE(vcpu_C)} \ {regionBase..+2^vcpuBits}") prefer 2 apply clarsimp apply (drule intvlD, clarsimp) diff --git a/proof/crefine/AARCH64/StateRelation_C.thy b/proof/crefine/AARCH64/StateRelation_C.thy index 1e25d3ef45..5125f5a253 100644 --- a/proof/crefine/AARCH64/StateRelation_C.thy +++ b/proof/crefine/AARCH64/StateRelation_C.thy @@ -528,8 +528,8 @@ definition cvgic_relation :: "gicvcpuinterface \ gicVCpuIface_C \ gicVCpuIface_C.vmcr_C cvgic = vgicVMCR vgic \ gicVCpuIface_C.apr_C cvgic = vgicAPR vgic - \ (\i\63. 
vgicLR vgic i = virq_to_H ((gicVCpuIface_C.lr_C cvgic).[i])) - \ (\i\64. vgicLR vgic i = 0)" + \ (\i (\i\max_armKSGICVCPUNumListRegs. vgicLR vgic i = 0)" definition cvcpu_relation :: "vcpu \ vcpu_C \ bool" where "cvcpu_relation vcpu cvcpu \ diff --git a/proof/crefine/AARCH64/Syscall_C.thy b/proof/crefine/AARCH64/Syscall_C.thy index 199592009c..0f63a3d138 100644 --- a/proof/crefine/AARCH64/Syscall_C.thy +++ b/proof/crefine/AARCH64/Syscall_C.thy @@ -1567,7 +1567,10 @@ lemma virq_virq_active_set_virqEOIIRQEN_spec': apply (case_tac virq) apply clarsimp apply (rule array_ext) - apply (clarsimp simp: virq_get_tag_def virq_tag_defs mask_def virq_type_def split: if_split) + (* unfold config to match up with bitfield gen *) + apply (clarsimp simp: virq_get_tag_def virq_tag_defs mask_def virq_type_def + virq_type_shift_def eoiirqen_shift_def Kernel_Config.config_ARM_GIC_V3_def + split: if_split) done lemma virq_virq_invalid_set_virqEOIIRQEN_spec': @@ -1580,7 +1583,10 @@ lemma virq_virq_invalid_set_virqEOIIRQEN_spec': apply (case_tac virq) apply clarsimp apply (rule array_ext) - apply (clarsimp simp: virq_get_tag_def virq_tag_defs mask_def virq_type_def split: if_split) + (* unfold config to match up with bitfield gen *) + apply (clarsimp simp: virq_get_tag_def virq_tag_defs mask_def virq_type_def + virq_type_shift_def eoiirqen_shift_def Kernel_Config.config_ARM_GIC_V3_def + split: if_split) done lemma virq_virq_pending_set_virqEOIIRQEN_spec': @@ -1593,16 +1599,10 @@ lemma virq_virq_pending_set_virqEOIIRQEN_spec': apply (case_tac virq) apply clarsimp apply (rule array_ext) - apply (clarsimp simp: virq_get_tag_def virq_tag_defs mask_def virq_type_def split: if_split) - done - -lemma gic_vcpu_num_list_regs_cross_over: - "\ of_nat (armKSGICVCPUNumListRegs (ksArchState s)) = gic_vcpu_num_list_regs_' t; - valid_arch_state' s \ - \ gic_vcpu_num_list_regs_' t \ 0x3F" - apply (drule sym, simp) - apply (clarsimp simp: valid_arch_state'_def max_armKSGICVCPUNumListRegs_def) - apply (clarsimp simp: word_le_nat_alt unat_of_nat) + (* unfold config to match up with bitfield gen *) + apply (clarsimp simp: virq_get_tag_def virq_tag_defs mask_def virq_type_def + virq_type_shift_def eoiirqen_shift_def Kernel_Config.config_ARM_GIC_V3_def + split: if_split) done lemma virqSetEOIIRQEN_id: @@ -1611,13 +1611,15 @@ lemma virqSetEOIIRQEN_id: virq_get_tag (virq_C (ARRAY _. idx)) \ scast virq_virq_invalid \ \ virqSetEOIIRQEN idx 0 = idx" apply (clarsimp simp: AARCH64_A.virqSetEOIIRQEN_def virq_get_tag_def virq_tag_defs mask_def - virq_type_def + virq_type_def eoiirqen_shift_def split: if_split) - apply (rule_tac x="idx >> 28" in two_bits_cases; simp) + (* unfold config to match up with bitfield gen *) + apply (rule_tac x="idx >> virq_type_shift" in two_bits_cases; + simp add: virq_type_shift_def Kernel_Config.config_ARM_GIC_V3_def) done lemma vgicUpdateLR_ccorres_armHSCurVCPU: - "\ v' = v ; n' = n ; n \ 63 \ \ + "\ v' = v ; n' = n ; n < max_armKSGICVCPUNumListRegs \ \ ccorres dc xfdc (\s. (\active. armHSCurVCPU (ksArchState s) = Some (vcpuptr,active) \ vcpu_at' vcpuptr s)) @@ -1656,8 +1658,12 @@ lemma vgicUpdateLR_ccorres_armHSCurVCPU: typ_heap_simps' cpspace_relation_def update_vcpu_map_tos) apply (erule (1) cmap_relation_updI ; clarsimp simp: cvcpu_relation_regs_def cvgic_relation_def cvcpu_vppi_masked_relation_def + simp del: fun_upd_apply ; (rule refl)?) 
- apply (fastforce simp: virq_to_H_def split: if_split) + (* needs max_armKSGICVCPUNumListRegs_val so it can prove CARD side condition in + simp rule Arrays.index_update2 *) + apply (rule conjI, clarsimp simp: max_armKSGICVCPUNumListRegs_val split: if_split) + apply (fastforce split: if_split) apply (clarsimp simp add: objBits_simps bit_simps)+ apply (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def split: option.splits) done @@ -1668,6 +1674,10 @@ definition where "eisr_calc eisr0 eisr1 \ if eisr0 \ 0 then word_ctz eisr0 else word_ctz eisr1 + 32" +schematic_goal max_armKSGICVCPUNumListRegs_int_val: + "int max_armKSGICVCPUNumListRegs = numeral ?n" + by (simp add: max_armKSGICVCPUNumListRegs_val) + lemma ccorres_vgicMaintenance: notes Collect_const[simp del] notes virq_virq_active_set_virqEOIIRQEN_spec = virq_virq_active_set_virqEOIIRQEN_spec' @@ -1766,15 +1776,6 @@ proof - using word_ctz_le[of x] by (simp add: signed_of_nat signed_take_bit_int_eq_self) - have sint_int_ctz_ge_0: - "0 \ sint (word_of_nat (word_ctz x) :: int_word)" for x :: "32 word" - using word_ctz_le[of x] - by (simp add: signed_of_nat bit_iff_odd) - - have sint_int_ctz_less_32: - "x \ 0 \ sint (word_of_nat (word_ctz x) :: int_word) < 32" for x :: "32 word" - by (drule word_ctz_less, simp add: signed_of_nat signed_take_bit_int_eq_self) - have unat_of_nat_ctz_plus_32s: "unat (of_nat (word_ctz w) + (0x20 :: int_word)) = word_ctz w + 32" for w :: "32 word" apply (subst unat_add_lem' ; clarsimp simp: unat_of_nat_ctz_smw) @@ -1785,13 +1786,6 @@ proof - apply (subst unat_add_lem' ; clarsimp simp: unat_of_nat_ctz_mw) using word_ctz_le[where w=w, simplified] by (auto simp: unat_of_nat_eq) - have eisr_calc_le: - "eisr0 = 0 \ eisr1 \ 0 - \ eisr_calc eisr0 eisr1 \ 63" - for eisr0 and eisr1 - using word_ctz_le[where w=eisr0] word_ctz_less[where w=eisr1] - by (clarsimp simp: eisr_calc_def split: if_splits) - have of_nat_word_ctz_0x21helper: "0x21 + word_of_nat (word_ctz w) \ (0 :: int_word)" for w :: "32 word" apply (subst unat_arith_simps, simp) @@ -1799,6 +1793,25 @@ proof - using word_ctz_le[where w=w, simplified] by simp + have maxListRegs_unat_id: + "\n. n < max_armKSGICVCPUNumListRegs \ unat (of_nat n :: machine_word) = n" + by (simp add: max_armKSGICVCPUNumListRegs_val unat_of_nat) + + have maxListRegs_sint_ge_0: + "\n. n < max_armKSGICVCPUNumListRegs \ 0 \ sint (word_of_nat n :: int_word)" + by (simp add: max_armKSGICVCPUNumListRegs_val int_eq_sint) + + have maxListRegs_sint_less: + "\n. n < max_armKSGICVCPUNumListRegs \ + sint (word_of_nat n :: int_word) < int max_armKSGICVCPUNumListRegs" + by (simp add: max_armKSGICVCPUNumListRegs_val int_eq_sint) + + (* needs "simp del: of_nat_add", because of rhs *) + have of_nat_ctz_plus_32s: + "(of_nat (word_ctz w) + 0x20 :: int_word) = (of_nat (word_ctz w + 32) :: int_word)" + for w :: "32 word" + by simp + show ?thesis supply if_cong[cong] apply (cinit) @@ -1829,6 +1842,7 @@ proof - apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_misr_ccorres) apply (rule ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState[simplified comp_def], rename_tac num_list_regs) + apply (rule_tac P="num_list_regs < max_armKSGICVCPUNumListRegs" in ccorres_gen_asm) apply clarsimp apply (rule ccorres_Cond_rhs_Seq ; (clarsimp simp: vgicHCREN_def)) apply (rule ccorres_rhs_assoc)+ @@ -1853,7 +1867,7 @@ proof - apply (simp add: if_to_top_of_bind) apply (rule_tac C'="{s. eisr0 = 0 \ eisr1 = 0 \ num_list_regs \ eisr_calc eisr0 eisr1}" - and Q="\s. num_list_regs \ 63" + and Q="\s. 
num_list_regs < max_armKSGICVCPUNumListRegs" and Q'="{s. gic_vcpu_num_list_regs_' (globals s) = of_nat num_list_regs}" in ccorres_rewrite_cond_sr_Seq) apply clarsimp @@ -1862,7 +1876,8 @@ proof - by (clarsimp split: if_splits simp: eisr_calc_def word_le_nat_alt unat_of_nat_eq of_nat_eq_signed_scast of_nat_word_ctz_0x21helper ctz_add_0x20_int_mw_eq scast_int_mw_ctz_eq - ctz_add_0x20_unat_mw_eq less_trans[OF word_ctz_less]) + ctz_add_0x20_unat_mw_eq less_trans[OF word_ctz_less] + maxListRegs_unat_id) apply (rule ccorres_Cond_rhs_Seq) (* check if current thread is runnable, if so handle fault *) @@ -1932,9 +1947,8 @@ proof - (* active *) apply (rule ccorres_move_const_guards)+ apply (rule vgicUpdateLR_ccorres_armHSCurVCPU ; clarsimp simp: word_ctz_le) - apply (fastforce dest: word_ctz_less - simp: eisr_calc_def unat_of_nat_ctz_smw unat_of_nat_ctz_plus_32s) - apply (erule eisr_calc_le) + apply (fastforce dest: word_ctz_less + simp: eisr_calc_def unat_of_nat_ctz_smw unat_of_nat_ctz_plus_32s) apply ceqv (* check if current thread is runnable, if so handle fault *) @@ -2017,13 +2031,14 @@ proof - apply (rule_tac Q'="\_ s. ?PRE s \ armHSCurVCPU (ksArchState s) = Some (vcpuPtr, active)" in hoare_post_imp) apply clarsimp subgoal for _ _ eisr0 eisr1 - apply (clarsimp simp: invs'_HScurVCPU_vcpu_at' valid_arch_state'_def max_armKSGICVCPUNumListRegs_def dest!: invs_arch_state') - using sint_ctz[where x=eisr0, simplified] - apply (clarsimp simp: word_sless_alt word_sle_eq sint_int_ctz_ge_0 split: if_split) - using sint_ctz[where x=eisr1, simplified] - apply (subst signed_arith_sint; clarsimp simp: word_size; simp add: sint_int_ctz_ge_0) - using sint_ctz[where x=eisr1, simplified] - apply (subst signed_arith_sint; clarsimp simp: word_size; simp add: sint_int_ctz_less_32) + apply (clarsimp simp: invs'_HScurVCPU_vcpu_at' valid_arch_state'_def dest!: invs_arch_state') + apply (clarsimp simp: eisr_calc_def not_le word_sless_alt word_sle_eq) + (* fold max_armKSGICVCPUNumListRegs_int_val to prevent arithmetic with the "+ 32" term *) + apply (fold max_armKSGICVCPUNumListRegs_int_val) + apply (drule (1) less_trans) + apply (auto simp: maxListRegs_sint_ge_0 maxListRegs_sint_less of_nat_ctz_plus_32s + simp del: of_nat_add + split: if_split) done apply clarsimp apply wpsimp+ diff --git a/proof/crefine/AARCH64/VSpace_C.thy b/proof/crefine/AARCH64/VSpace_C.thy index 8381d5c75c..061d882813 100644 --- a/proof/crefine/AARCH64/VSpace_C.thy +++ b/proof/crefine/AARCH64/VSpace_C.thy @@ -2779,32 +2779,32 @@ lemma vcpu_restore_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) (* the loop *) - apply (rule_tac P="lr_num \ 63" in ccorres_gen_asm) - apply (rule_tac F="\_ s. lr_num \ 63 \ ko_at' vcpu vcpuPtr s" in ccorres_mapM_x_while) + apply (rule_tac P="lr_num < max_armKSGICVCPUNumListRegs" in ccorres_gen_asm) + apply (rule_tac F="\_ s. lr_num < max_armKSGICVCPUNumListRegs \ ko_at' vcpu vcpuPtr s" in ccorres_mapM_x_while) apply (intro allI impI) apply clarsimp apply (rule ccorres_guard_imp2) - apply (rule_tac P="\s. lr_num \ 63" in ccorres_cross_over_guard) + apply (rule_tac P="\s. 
lr_num < max_armKSGICVCPUNumListRegs" in ccorres_cross_over_guard) apply (rule ccorres_Guard) apply (rule_tac val="of_nat n" in ccorres_abstract_known[where xf'=i_'], ceqv) - apply (rule_tac P="n \ 63" in ccorres_gen_asm) + apply (rule_tac P="n < max_armKSGICVCPUNumListRegs" in ccorres_gen_asm) apply (rule ccorres_move_c_guard_vcpu) apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_lr_ccorres) apply (clarsimp simp: virq_to_H_def ko_at_vcpu_at'D upt_Suc) apply (rule conjI) apply (subst scast_eq_ucast; (rule refl)?) - apply (fastforce intro!: not_msb_from_less simp: word_less_nat_alt unat_of_nat) + apply (solves \simp add: max_armKSGICVCPUNumListRegs_msb\) apply (frule (1) vcpu_at_rf_sr) apply (clarsimp simp: typ_heap_simps cvcpu_relation_regs_def cvgic_relation_def virq_to_H_def unat_of_nat) - apply (simp add: word_less_nat_alt upt_Suc) - subgoal (* FIXME extract into separate lemma *) - by (fastforce simp: word_less_nat_alt unat_of_nat_eq elim: order_less_le_trans) + (* _val for mod 2^word_bits and for value equality with C *) + apply (simp add: word_less_nat_alt upt_Suc unat_of_nat_eq max_armKSGICVCPUNumListRegs_val) apply clarsimp apply (simp add: upt_Suc) - apply vcg + apply (vcg exspec=set_gic_vcpu_ctrl_lr_modifies) apply (fastforce simp: word_less_nat_alt unat_of_nat_eq word_bits_def elim: order_less_le_trans) apply wpsimp - apply (simp add: upt_Suc word_bits_def) + using max_armKSGICVCPUNumListRegs_word_bits + apply (solves \simp add: upt_Suc word_bits_def\) apply ceqv apply (ctac add: vcpu_restore_reg_range_ccorres) apply (ctac add: vcpu_enable_ccorres) @@ -2818,36 +2818,13 @@ lemma vcpu_restore_ccorres: apply (clarsimp simp: guard_is_UNIV_def) apply (wpsimp simp: vcpu_at_ko'_eq wp: hoare_vcg_imp_lift')+ apply (rule conjI) - apply (fastforce simp: invs_no_cicd'_def valid_arch_state'_def max_armKSGICVCPUNumListRegs_def) + apply (fastforce simp: invs_no_cicd'_def valid_arch_state'_def) apply (rule conjI) apply (fastforce simp: fromEnum_def enum_vcpureg) apply (fastforce dest!: vcpu_at_rf_sr simp: typ_heap_simps' cvcpu_relation_def cvgic_relation_def) done -(* FIXME AARCH64 unused -lemma ccorres_pre_getsNumListRegs: - assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" - shows "ccorres r xf - (\s. (\rv. (armKSGICVCPUNumListRegs \ ksArchState) s = rv \ P rv s)) - {s. \rv num'. gic_vcpu_num_list_regs_' (globals s) = num' - \ s \ P' rv } - hs (gets (armKSGICVCPUNumListRegs \ ksArchState) >>= (\rv. f rv)) c" - apply (rule ccorres_guard_imp) - apply (rule ccorres_symb_exec_l) - defer - apply wp - apply (rule hoare_gets_sp) - apply (clarsimp simp: empty_fail_def getCurThread_def simpler_gets_def) - apply assumption - apply clarsimp - defer - apply (rule ccorres_guard_imp) - apply (rule cc) - apply clarsimp - apply assumption - apply (clarsimp simp: rf_sr_ksArchState_armHSCurVCPU) - done *) lemma ccorres_gets_armKSGICVCPUNumListRegs: "ccorres ((=) \ of_nat) lr_num_' \ UNIV hs @@ -2860,7 +2837,7 @@ lemma ccorres_gets_armKSGICVCPUNumListRegs: done lemma vgicUpdateLR_ccorres: - "ccorres dc xfdc (\ and K (n \ 63 \ n' = n \ virq_to_H v' = v)) UNIV hs + "ccorres dc xfdc (\ and K (n < max_armKSGICVCPUNumListRegs \ n' = n \ virq_to_H v' = v)) UNIV hs (vgicUpdateLR vcpuptr n v) (Basic_heap_update (\_. 
vgic_lr_C_Ptr &(vgic_C_Ptr &(vcpu_Ptr vcpuptr\[''vgic_C''])\[''lr_C''])) @@ -2871,8 +2848,7 @@ lemma vgicUpdateLR_ccorres: apply (rule ccorres_grab_asm) apply (simp add: vgicUpdateLR_def vgicUpdate_def) apply vcpuUpdate_ccorres - supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] - apply (fastforce simp: virq_to_H_def cvcpu_vppi_masked_relation_def split: if_split) + apply (auto simp: max_armKSGICVCPUNumListRegs_val split: if_split) done lemma vcpu_save_ccorres: @@ -2918,28 +2894,31 @@ lemma vcpu_save_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) (* the loop *) - apply (rule_tac P="lr_num \ 63" in ccorres_gen_asm) - apply (rule_tac F="\_ s. lr_num \ 63 \ vcpu_at' vcpuPtr s" in ccorres_mapM_x_while) + apply (rule_tac P="lr_num < max_armKSGICVCPUNumListRegs" in ccorres_gen_asm) + apply (rule_tac F="\_ s. lr_num < max_armKSGICVCPUNumListRegs \ vcpu_at' vcpuPtr s" in ccorres_mapM_x_while) apply (intro allI impI) apply clarsimp apply (rule ccorres_guard_imp2) - apply (rule_tac P="\s. lr_num \ 63" in ccorres_cross_over_guard) + apply (rule_tac P="\s. lr_num < max_armKSGICVCPUNumListRegs" in ccorres_cross_over_guard) apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_lr_ccorres) apply (rule ccorres_Guard) apply (rule_tac val="of_nat n" in ccorres_abstract_known[where xf'=i_'], ceqv) - apply (rule_tac P="n \ 63" in ccorres_gen_asm) + apply (rule_tac P="n < max_armKSGICVCPUNumListRegs" in ccorres_gen_asm) apply (rule ccorres_move_c_guard_vcpu) apply (clarsimp simp: unat_of_nat_eq) apply (ctac (no_vcg) add: vgicUpdateLR_ccorres) apply (wpsimp simp: virq_to_H_def)+ apply (subst scast_eq_ucast; (rule refl)?) - apply (fastforce intro!: not_msb_from_less simp: word_less_nat_alt unat_of_nat) - apply (fastforce intro: word_of_nat_less) + apply (simp add: max_armKSGICVCPUNumListRegs_msb) + (* _val for value comparison with C *) + apply (fastforce simp: max_armKSGICVCPUNumListRegs_val unat_of_nat + intro: word_of_nat_less) apply (fastforce simp: word_less_nat_alt unat_of_nat) apply clarsimp apply (rule conseqPre, vcg exspec=get_gic_vcpu_ctrl_lr_modifies) apply fastforce apply wpsimp + using max_armKSGICVCPUNumListRegs_word_bits apply (fastforce simp: word_bits_def) apply ceqv apply (ctac (no_vcg) add: armv_vcpu_save_ccorres) diff --git a/proof/crefine/AARCH64/Wellformed_C.thy b/proof/crefine/AARCH64/Wellformed_C.thy index 9517cab4bf..27c5ae2b57 100644 --- a/proof/crefine/AARCH64/Wellformed_C.thy +++ b/proof/crefine/AARCH64/Wellformed_C.thy @@ -86,8 +86,10 @@ abbreviation abbreviation vs_Ptr :: "machine_word \ vs_ptr" where "vs_Ptr == Ptr" +value_type vgic_lr_len = "max_armKSGICVCPUNumListRegs" + abbreviation - vgic_lr_C_Ptr :: "addr \ (virq_C[64]) ptr" where "vgic_lr_C_Ptr \ Ptr" + vgic_lr_C_Ptr :: "addr \ (virq_C[vgic_lr_len]) ptr" where "vgic_lr_C_Ptr \ Ptr" abbreviation vgic_C_Ptr :: "addr \ gicVCpuIface_C ptr" where "vgic_C_Ptr \ Ptr" abbreviation @@ -616,6 +618,25 @@ schematic_goal hcrVCPU_val: by (simp add: hcrVCPU_def hcrCommon_def hcrTWE_def hcrTWI_def Kernel_Config.config_DISABLE_WFI_WFE_TRAPS_def) + +text \@{text max_armKSGICVCPUNumListRegs} interface\ + +(* The definition is in Invariants_H, but the properties are only needed in + CRefine. 
*) + +schematic_goal max_armKSGICVCPUNumListRegs_val: + "max_armKSGICVCPUNumListRegs = numeral ?n" + by (simp add: max_armKSGICVCPUNumListRegs_def Kernel_Config.config_ARM_GIC_V3_def) + +lemma max_armKSGICVCPUNumListRegs_msb: + "n < max_armKSGICVCPUNumListRegs \ \ msb (word_of_nat n :: machine_word)" + by (rule not_msb_from_less) + (simp add: max_armKSGICVCPUNumListRegs_val word_less_nat_alt unat_of_nat) + +lemma max_armKSGICVCPUNumListRegs_word_bits: + "max_armKSGICVCPUNumListRegs \ word_bits" + by (simp add: max_armKSGICVCPUNumListRegs_val word_bits_def) + (* end of Kernel_Config interface section *) diff --git a/proof/refine/AARCH64/Arch_R.thy b/proof/refine/AARCH64/Arch_R.thy index ff4072cd66..230d6bb39d 100644 --- a/proof/refine/AARCH64/Arch_R.thy +++ b/proof/refine/AARCH64/Arch_R.thy @@ -839,9 +839,7 @@ lemma decodeARMVCPUInvocation_corres: apply (clarsimp simp: shiftL_nat whenE_bindE_throwError_to_if) apply (corresKsimp wp: get_vcpu_wp) apply (clarsimp simp: archinv_relation_def vcpu_invocation_map_def - valid_cap'_def valid_cap_def isVIRQActive_def is_virq_active_def - virqType_def virq_type_def - make_virq_def makeVIRQ_def) + valid_cap'_def valid_cap_def) (* read register *) apply (clarsimp simp: decode_vcpu_read_register_def decodeVCPUReadReg_def) apply (cases args; clarsimp simp: isCap_simps whenE_def split: if_split) diff --git a/proof/refine/AARCH64/InterruptAcc_R.thy b/proof/refine/AARCH64/InterruptAcc_R.thy index 0892d66ab5..d2c0c75b38 100644 --- a/proof/refine/AARCH64/InterruptAcc_R.thy +++ b/proof/refine/AARCH64/InterruptAcc_R.thy @@ -166,5 +166,25 @@ lemma preemptionPoint_invs [wp]: "\invs'\ preemptionPoint \\_. invs'\" by (wp preemptionPoint_inv | clarsimp)+ +lemma virqType_eq[simp]: + "virqType = virq_type" + unfolding virqType_def virq_type_def virq_type_shift_def virqTypeShift_def + by simp + +lemma virqSetEOIIRQEN_eq[simp]: + "AARCH64_H.virqSetEOIIRQEN = AARCH64_A.virqSetEOIIRQEN" + unfolding virqSetEOIIRQEN_def AARCH64_A.virqSetEOIIRQEN_def eoiirqenShift_def eoiirqen_shift_def + by (simp cong: if_cong) + +lemma isVIRQActive_eq[simp]: + "isVIRQActive = is_virq_active" + unfolding isVIRQActive_def is_virq_active_def + by simp + +lemma makeVIRQ_eq[simp]: + "makeVIRQ = make_virq" + unfolding make_virq_def makeVIRQ_def + by (clarsimp simp: virq_type_shift_def eoiirqen_shift_def virqTypeShift_def eoiirqenShift_def) + end end diff --git a/proof/refine/AARCH64/Interrupt_R.thy b/proof/refine/AARCH64/Interrupt_R.thy index 407e022f64..fcacd379a1 100644 --- a/proof/refine/AARCH64/Interrupt_R.thy +++ b/proof/refine/AARCH64/Interrupt_R.thy @@ -733,16 +733,6 @@ lemma dmo_gets_wp: crunch vgicUpdateLR for ksCurThread[wp]: "\s. P (ksCurThread s)" -lemma virqType_eq[simp]: - "virqType = virq_type" - unfolding virqType_def virq_type_def - by simp - -lemma virqSetEOIIRQEN_eq[simp]: - "AARCH64_H.virqSetEOIIRQEN = AARCH64_A.virqSetEOIIRQEN" - unfolding virqSetEOIIRQEN_def AARCH64_A.virqSetEOIIRQEN_def - by auto - lemma not_pred_tcb': "(\pred_tcb_at' proj P t s) = (\tcb_at' t s \ pred_tcb_at' proj (\a. \P a) t s)" by (auto simp: pred_tcb_at'_def obj_at'_def) diff --git a/proof/refine/AARCH64/Invariants_H.thy b/proof/refine/AARCH64/Invariants_H.thy index c952717536..485a18e41c 100644 --- a/proof/refine/AARCH64/Invariants_H.thy +++ b/proof/refine/AARCH64/Invariants_H.thy @@ -1050,7 +1050,7 @@ definition valid_asid_table' :: "(asid \ machine_word) \ \ko. \vcpu. 
ko = (KOArch (KOVCPU vcpu))" definition max_armKSGICVCPUNumListRegs :: nat where - "max_armKSGICVCPUNumListRegs \ 63" + "max_armKSGICVCPUNumListRegs \ if config_ARM_GIC_V3 then 16 else 64" definition valid_arch_state' :: "kernel_state \ bool" where "valid_arch_state' \ \s. @@ -1059,7 +1059,7 @@ definition valid_arch_state' :: "kernel_state \ bool" where (case armHSCurVCPU (ksArchState s) of Some (v, b) \ ko_wp_at' (is_vcpu' and hyp_live') v s | _ \ True) \ - armKSGICVCPUNumListRegs (ksArchState s) \ max_armKSGICVCPUNumListRegs \ + armKSGICVCPUNumListRegs (ksArchState s) < max_armKSGICVCPUNumListRegs \ canonical_address (addrFromKPPtr (armKSGlobalUserVSpace (ksArchState s)))" definition irq_issued' :: "irq \ kernel_state \ bool" where diff --git a/spec/abstract/AARCH64/ArchInterrupt_A.thy b/spec/abstract/AARCH64/ArchInterrupt_A.thy index 79945af74e..515080842b 100644 --- a/spec/abstract/AARCH64/ArchInterrupt_A.thy +++ b/spec/abstract/AARCH64/ArchInterrupt_A.thy @@ -17,7 +17,7 @@ definition virqSetEOIIRQEN :: "virq \ machine_word \ vir "virqSetEOIIRQEN virq v \ if virq_type virq = 3 then virq - else (virq && ~~0x80000) || ((v << 19) && 0x80000)" + else (virq && ~~(1 << eoiirqen_shift)) || ((v << eoiirqen_shift) && (1 << eoiirqen_shift))" definition vgic_maintenance :: "(unit,'z::state_ext) s_monad" where "vgic_maintenance = do diff --git a/spec/abstract/AARCH64/VCPU_A.thy b/spec/abstract/AARCH64/VCPU_A.thy index 497cce331f..a95cd5c56e 100644 --- a/spec/abstract/AARCH64/VCPU_A.thy +++ b/spec/abstract/AARCH64/VCPU_A.thy @@ -106,20 +106,30 @@ definition text \VCPU : inject IRQ\ +(* bit position of type field in virq bitfield struct *) +definition virq_type_shift :: nat where + "virq_type_shift \ if config_ARM_GIC_V3 then 62 else 28" + +(* bit position of eoiirqen field in virq bitfield struct *) +definition eoiirqen_shift :: nat where + "eoiirqen_shift \ if config_ARM_GIC_V3 then 32+9 else 19" + (* This following function does not correspond to exactly what the C does, but it is the value that is stored inside of lr in the vgic *) definition make_virq :: "obj_ref \ obj_ref \ obj_ref \ virq" where "make_virq grp prio irq \ let - groupShift = 30; - prioShift = 23; - irqPending = 1 << 28; - eoiirqen = 1 << 19 - in ((grp && 1) << groupShift) || ((prio && 0x1F) << prioShift) || (irq && 0x3FF) + groupShift = if config_ARM_GIC_V3 then 60 else 30; + prioShift = if config_ARM_GIC_V3 then 48 else 23; + irqPending = 1 << virq_type_shift; + eoiirqen = 1 << eoiirqen_shift; + irqMask = mask (if config_ARM_GIC_V3 then 32 else 10); + prioMask = mask (if config_ARM_GIC_V3 then 8 else 5) + in ((grp && 1) << groupShift) || ((prio && prioMask) << prioShift) || (irq && irqMask) || irqPending || eoiirqen" definition virq_type :: "virq \ nat" where - "virq_type virq \ unat ((virq >> 28) && 3)" + "virq_type virq \ unat ((virq >> virq_type_shift) && 3)" definition is_virq_active :: "virq \ bool" where "is_virq_active virq \ virq_type virq = 2" diff --git a/spec/cspec/c/gen-config-thy.py b/spec/cspec/c/gen-config-thy.py index 1b1972fb25..3901966024 100755 --- a/spec/cspec/c/gen-config-thy.py +++ b/spec/cspec/c/gen-config-thy.py @@ -38,11 +38,16 @@ nat = 'nat' word = 'machine_word' -# All known config keys that could be set in a gen_config.h file. -# The dict maps config key to (type, definition name) +# All known config keys that could be set in a gen_config.h file. 
The dict maps +# config key to (type, definition name) # -# This table duplicates some information from the kernel build system, but -# only which keys exist, not their values. +# This table duplicates some information from the kernel build system, but only +# which keys exist, not their values. +# +# If they are used in Haskell, the translated config key names have to start +# with a lower case letter, because Haskell enforces that only datatype +# constructores start with upper case. For new names, we tend to use `config_` +# as prefix and potentially shorten the C name a bit. known_config_keys = { 'CONFIG_ARM_HIKEY_OUTSTANDING_PREFETCHERS': (bool, None), 'CONFIG_ARM_HIKEY_PREFETCHER_STRIDE': (bool, None), @@ -108,7 +113,7 @@ 'CONFIG_DEBUG_DISABLE_L1_DCACHE': (bool, None), 'CONFIG_DEBUG_DISABLE_BRANCH_PREDICTION': (bool, None), 'CONFIG_ARM_HYPERVISOR_SUPPORT': (bool, None), - 'CONFIG_ARM_GIC_V3_SUPPORT': (bool, None), + 'CONFIG_ARM_GIC_V3_SUPPORT': (bool, 'config_ARM_GIC_V3'), 'CONFIG_AARCH64_VSPACE_S2_START_L1': (bool, None), 'CONFIG_ARM_HYP_ENABLE_VCPU_CP14_SAVE_AND_RESTORE': (bool, None), 'CONFIG_ARM_ERRATA_430973': (bool, None), diff --git a/spec/design/skel/AARCH64/ArchHypervisor_H.thy b/spec/design/skel/AARCH64/ArchHypervisor_H.thy index 740f6677d8..a4e9841ae8 100644 --- a/spec/design/skel/AARCH64/ArchHypervisor_H.thy +++ b/spec/design/skel/AARCH64/ArchHypervisor_H.thy @@ -18,9 +18,9 @@ begin context Arch begin arch_global_naming (H) #INCLUDE_HASKELL SEL4/Object/VCPU/AARCH64.hs CONTEXT AARCH64_H decls_only \ - ONLY countTrailingZeros irqVPPIEventIndex + ONLY countTrailingZeros irqVPPIEventIndex virqTypeShift eoiirqenShift #INCLUDE_HASKELL SEL4/Object/VCPU/AARCH64.hs CONTEXT AARCH64_H bodies_only \ - ONLY countTrailingZeros irqVPPIEventIndex + ONLY countTrailingZeros irqVPPIEventIndex virqTypeShift eoiirqenShift #INCLUDE_HASKELL SEL4/Object/VCPU/AARCH64.hs CONTEXT AARCH64_H ArchInv=Arch \ ONLY vcpuUpdate vgicUpdate vgicUpdateLR vcpuSaveReg vcpuRestoreReg \ vcpuSaveRegRange vcpuRestoreRegRange vcpuWriteReg vcpuReadReg saveVirtTimer \ diff --git a/spec/design/skel/AARCH64/Hardware_H.thy b/spec/design/skel/AARCH64/Hardware_H.thy index 9c1e2074f1..a6d0e7adff 100644 --- a/spec/design/skel/AARCH64/Hardware_H.thy +++ b/spec/design/skel/AARCH64/Hardware_H.thy @@ -37,7 +37,9 @@ context Arch begin arch_global_naming (H) enableFpuEL01 \ getFAR getDFSR getIFSR getHSR setHCR getESR getSCTLR setSCTLR \ addressTranslateS1 \ - readVCPUHardwareReg writeVCPUHardwareReg vcpuBits + readVCPUHardwareReg writeVCPUHardwareReg vcpuBits \ + config_DISABLE_WFI_WFE_TRAPS \ + config_ARM_GIC_V3 end diff --git a/spec/design/skel/AARCH64/VCPU_H.thy b/spec/design/skel/AARCH64/VCPU_H.thy index 97dfc4c813..ba34cf4bd3 100644 --- a/spec/design/skel/AARCH64/VCPU_H.thy +++ b/spec/design/skel/AARCH64/VCPU_H.thy @@ -22,8 +22,8 @@ context Arch begin arch_global_naming (H) vcpuSaveRegRange vcpuRestoreRegRange vcpuWriteReg vcpuReadReg saveVirtTimer \ restoreVirtTimer vcpuDisable vcpuEnable vcpuRestore vcpuSave vcpuSwitch \ vcpuInvalidateActive vcpuCleanInvalidateActive countTrailingZeros virqType \ - virqSetEOIIRQEN vgicMaintenance vppiEvent irqVPPIEventIndex armvVCPUSave \ - curVCPUActive + virqTypeShift eoiirqenShift virqSetEOIIRQEN vgicMaintenance vppiEvent \ + irqVPPIEventIndex armvVCPUSave curVCPUActive end end diff --git a/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs b/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs index 887cd90aa7..5feee9ba80 100644 --- a/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs +++ 
b/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs @@ -464,12 +464,16 @@ vgicHCREN = (0x1 :: Word32) -- VGIC_HCR_EN gicVCPUMaxNumLR = (64 :: Int) -{- Config parameter -} +{- Config parameters -} -- The size of the physical address space in hyp mode can be configured on some platforms. config_ARM_PA_SIZE_BITS_40 :: Bool config_ARM_PA_SIZE_BITS_40 = error "generated from CMake config" --- Wether to trap WFI/WFE instructions or not in hyp mode +-- Whether to trap WFI/WFE instructions or not in hyp mode. config_DISABLE_WFI_WFE_TRAPS :: Bool config_DISABLE_WFI_WFE_TRAPS = error "generated from CMake config" + +-- Whether to use the GICv3. Defaults to GICv2 when set to False. +config_ARM_GIC_V3 :: Bool +config_ARM_GIC_V3 = error "generated from CMake config" diff --git a/spec/haskell/src/SEL4/Object/VCPU/AARCH64.hs b/spec/haskell/src/SEL4/Object/VCPU/AARCH64.hs index 94a6fea4c5..bed2667ddf 100644 --- a/spec/haskell/src/SEL4/Object/VCPU/AARCH64.hs +++ b/spec/haskell/src/SEL4/Object/VCPU/AARCH64.hs @@ -206,18 +206,26 @@ invokeVCPUWriteReg vcpuPtr reg val = do {- VCPU: inject IRQ -} +-- bit position of type field in virq bitfield struct +virqTypeShift = if config_ARM_GIC_V3 then 62 else 28 + +-- bit position of eoiirqen field in virq bitfield struct +eoiirqenShift = if config_ARM_GIC_V3 then 32+9 else 19 + -- analogous to virq_virq_pending_new in C, with virqEOIIRQEN set makeVIRQ :: Word -> Word -> Word -> VIRQ makeVIRQ grp prio irq = - ((grp .&. 1) `shiftL` groupShift) .|. ((prio .&. 0x1F) `shiftL` prioShift) .|. - (irq .&. 0x3FF) .|. irqPending .|. eoiirqen - where groupShift = 30 - prioShift = 23 - irqPending = bit 28 - eoiirqen = bit 19 + ((grp .&. 1) `shiftL` groupShift) .|.((prio .&. prioMask) `shiftL` prioShift) .|. + (irq .&. irqMask) .|. irqPending .|. eoiirqen + where groupShift = if config_ARM_GIC_V3 then 60 else 30 + prioShift = if config_ARM_GIC_V3 then 48 else 23 + irqPending = bit virqTypeShift + eoiirqen = bit eoiirqenShift + irqMask = mask (if config_ARM_GIC_V3 then 32 else 10) + prioMask = mask (if config_ARM_GIC_V3 then 8 else 5) virqType :: Word -> Int -virqType virq = fromIntegral $ (virq `shiftR` 28) .&. 3 +virqType virq = fromIntegral $ (virq `shiftR` virqTypeShift) .&. 3 -- combination of virq_get_virqType and virq_virq_active isVIRQActive :: VIRQ -> Bool @@ -229,7 +237,8 @@ virqSetEOIIRQEN :: VIRQ -> Word -> VIRQ virqSetEOIIRQEN virq v = if virqType virq == 3 then virq - else (virq .&. complement 0x80000) .|. ((v `shiftL` 19) .&. 0x80000) + else (virq .&. complement (bit eoiirqenShift)) .|. + ((v `shiftL` eoiirqenShift) .&. bit eoiirqenShift) decodeVCPUInjectIRQ :: [Word] -> ArchCapability -> KernelF SyscallError ArchInv.Invocation
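Note (not part of the patch): the changes above parametrise the virq bitfield layout by Kernel_Config.config_ARM_GIC_V3 — the type field moves from bit 28 to bit 62 and the eoiirqen bit from 19 to 41 (32+9) when GICv3 is enabled, and max_armKSGICVCPUNumListRegs becomes 16 instead of 64 and is now used as a strict bound. The short standalone Haskell sketch below only illustrates that layout; the hard-coded configARMGICV3 flag and the main driver are assumed stand-ins for the generated Kernel_Config value and are not kernel code.

-- Illustrative sketch only (not part of the patch): the virq field layout
-- selected by config_ARM_GIC_V3, with an assumed hard-coded flag standing in
-- for the generated Kernel_Config value.
import Data.Bits (shiftL, shiftR, (.&.), (.|.), bit)
import Data.Word (Word64)

configARMGICV3 :: Bool
configARMGICV3 = False        -- assumed: False = GICv2 layout, True = GICv3 layout

type VIRQ = Word64

virqTypeShift, eoiirqenShift :: Int
virqTypeShift = if configARMGICV3 then 62 else 28       -- position of the type field
eoiirqenShift = if configARMGICV3 then 32 + 9 else 19   -- position of the eoiirqen bit

-- Pending virq with eoiirqen set, mirroring make_virq / makeVIRQ above.
makeVIRQ :: Word64 -> Word64 -> Word64 -> VIRQ
makeVIRQ grp prio irq =
    ((grp .&. 1) `shiftL` groupShift)
      .|. ((prio .&. prioMask) `shiftL` prioShift)
      .|. (irq .&. irqMask)
      .|. bit virqTypeShift          -- type = pending
      .|. bit eoiirqenShift
  where
    groupShift = if configARMGICV3 then 60 else 30
    prioShift  = if configARMGICV3 then 48 else 23
    irqMask    = (1 `shiftL` (if configARMGICV3 then 32 else 10)) - 1
    prioMask   = (1 `shiftL` (if configARMGICV3 then 8 else 5)) - 1

virqType :: VIRQ -> Int
virqType virq = fromIntegral ((virq `shiftR` virqTypeShift) .&. 3)

main :: IO ()
main = print (virqType (makeVIRQ 0 5 27))   -- prints 1 (pending) under either layout

Either layout reports type 1 (pending) for a freshly built virq, matching the intent of virq_virq_pending_EN_new_spec above.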