From fd68e9d37164871cdcb4ee83ab649c5054b0f1cc Mon Sep 17 00:00:00 2001
From: Xavier Leroy
Date: Sun, 6 Jun 2021 19:24:10 +0200
Subject: More lemmas about list append

---
 lib/Coqlib.v | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

(limited to 'lib')

diff --git a/lib/Coqlib.v b/lib/Coqlib.v
index 1e93b91d..361ae924 100644
--- a/lib/Coqlib.v
+++ b/lib/Coqlib.v
@@ -773,6 +773,32 @@ Proof.
   exists (a0 :: l1); exists l2; intuition. simpl; congruence.
 Qed.
 
+(** Properties of [List.app] (concatenation) *)
+
+Lemma list_append_injective_l:
+  forall (A: Type) (l1 l2 l1' l2': list A),
+  l1 ++ l2 = l1' ++ l2' -> List.length l1 = List.length l1' -> l1 = l1' /\ l2 = l2'.
+Proof.
+  intros until l2'. revert l1 l1'. induction l1 as [ | a l1]; destruct l1' as [ | a' l1']; simpl; intros.
+- auto.
+- discriminate.
+- discriminate.
+- destruct (IHl1 l1'). congruence. congruence. split; congruence.
+Qed.
+
+Lemma list_append_injective_r:
+  forall (A: Type) (l1 l2 l1' l2': list A),
+  l1 ++ l2 = l1' ++ l2' -> List.length l2 = List.length l2' -> l1 = l1' /\ l2 = l2'.
+Proof.
+  intros.
+  assert (X: rev l2 = rev l2' /\ rev l1 = rev l1').
+  { apply list_append_injective_l.
+    rewrite <- ! rev_app_distr. congruence.
+    rewrite ! rev_length; auto. }
+  rewrite <- (rev_involutive l1), <- (rev_involutive l1'), <- (rev_involutive l2), <- (rev_involutive l2').
+  intuition congruence.
+Qed.
+
 (** Folding a function over a list *)
 
 Section LIST_FOLD.
--
cgit

From 88589fa168a0b73f7d82c4ad2dcf9fde9d17f439 Mon Sep 17 00:00:00 2001
From: Xavier Leroy
Date: Mon, 7 Jun 2021 11:27:44 +0200
Subject: More lemmas about `align`

---
 lib/Coqlib.v | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

(limited to 'lib')

diff --git a/lib/Coqlib.v b/lib/Coqlib.v
index 361ae924..706a5d49 100644
--- a/lib/Coqlib.v
+++ b/lib/Coqlib.v
@@ -525,6 +525,23 @@ Proof.
   intros. unfold align. apply Z.divide_factor_r.
 Qed.
 
+Lemma align_lt: forall x y, y > 0 -> align x y < x + y.
+Proof.
+  intros. unfold align.
+  generalize (Z_div_mod_eq (x + y - 1) y H); intro.
+  generalize (Z_mod_lt (x + y - 1) y H); intro.
+  lia.
+Qed.
+
+Lemma align_same:
+  forall x y, y > 0 -> (y | x) -> align x y = x.
+Proof.
+  unfold align; intros. destruct H0 as [k E].
+  replace (x + y - 1) with (x + (y - 1)) by lia.
+  rewrite E, Z.div_add_l, Z.div_small by lia.
+  lia.
+Qed.
+
 (** * Definitions and theorems on the data types [option], [sum] and [list] *)
 
 Set Implicit Arguments.
--
cgit

From 4fe221a26400fcf1aaca74889afffa5d01897b13 Mon Sep 17 00:00:00 2001
From: Xavier Leroy
Date: Mon, 26 Jul 2021 11:39:36 +0200
Subject: Add `floor` and some properties

---
 lib/Coqlib.v | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

(limited to 'lib')

diff --git a/lib/Coqlib.v b/lib/Coqlib.v
index 706a5d49..c023bdd5 100644
--- a/lib/Coqlib.v
+++ b/lib/Coqlib.v
@@ -542,6 +542,43 @@ Proof.
   lia.
 Qed.
 
+(** Floor: [floor n amount] returns the greatest multiple of [amount]
+    less than or equal to [n]. *)
+
+Definition floor (n: Z) (amount: Z) := (n / amount) * amount.
+
+Lemma floor_interval:
+  forall x y, y > 0 -> floor x y <= x < floor x y + y.
+Proof.
+  unfold floor; intros.
+  generalize (Z_div_mod_eq x y H) (Z_mod_lt x y H).
+  set (q := x / y). set (r := x mod y). intros. lia.
+Qed.
+
+Lemma floor_divides:
+  forall x y, y > 0 -> (y | floor x y).
+Proof.
+  unfold floor; intros. exists (x / y); auto.
+Qed.
+
+Lemma floor_same:
+  forall x y, y > 0 -> (y | x) -> floor x y = x.
+Proof.
+  unfold floor; intros. rewrite (Zdivide_Zdiv_eq y x) at 2; auto; lia.
+Qed.
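+
+(* Concrete check (an illustrative example, not part of the original
+   commit): [floor] rounds down to a multiple of the alignment and
+   [align] rounds up, so for 13 and 4 we get 12 and 16 respectively,
+   consistent with [floor_align_interval] below. *)
+Example floor_align_example: floor 13 4 = 12 /\ align 13 4 = 16.
+Proof. split; reflexivity. Qed.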
+
+Lemma floor_align_interval:
+  forall x y, y > 0 ->
+  floor x y <= align x y <= floor x y + y.
+Proof.
+  unfold floor, align; intros.
+  replace (x / y * y + y) with ((x + 1 * y) / y * y).
+  assert (A: forall a b, a <= b -> a / y * y <= b / y * y).
+  { intros. apply Z.mul_le_mono_nonneg_r. lia. apply Z.div_le_mono; lia. }
+  split; apply A; lia.
+  rewrite Z.div_add by lia. lia.
+Qed.
+
 (** * Definitions and theorems on the data types [option], [sum] and [list] *)
 
 Set Implicit Arguments.
--
cgit

From e9f40aaca38ba81f3e9e5c0a5e03de9fa074d838 Mon Sep 17 00:00:00 2001
From: Xavier Leroy
Date: Thu, 10 Jun 2021 09:52:47 +0200
Subject: Int.sign_ext_shr_shl: weaker hypothesis

Also works for sign_ext 32.
ARM, RISC-V: adapt Asmgenproof1 accordingly
---
 lib/Integers.v | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/Integers.v b/lib/Integers.v
index b38f8564..63dc2251 100644
--- a/lib/Integers.v
+++ b/lib/Integers.v
@@ -2766,7 +2766,7 @@ Qed.
 
 Corollary sign_ext_shr_shl:
   forall n x,
-  0 < n < zwordsize ->
+  0 < n <= zwordsize ->
   let y := repr (zwordsize - n) in
   sign_ext n x = shr (shl x y) y.
 Proof.
@@ -2801,7 +2801,7 @@ Qed.
 
 Lemma sign_ext_range:
   forall n x, 0 < n < zwordsize -> -two_p (n-1) <= signed (sign_ext n x) < two_p (n-1).
 Proof.
-  intros. rewrite sign_ext_shr_shl; auto.
+  intros. rewrite sign_ext_shr_shl by lia.
   set (X := shl x (repr (zwordsize - n))).
   assert (two_p (n - 1) > 0) by (apply two_p_gt_ZERO; lia).
   assert (unsigned (repr (zwordsize - n)) = zwordsize - n).
--
cgit

From 47fae389c800034e002c9f8a398e9adc79a14b81 Mon Sep 17 00:00:00 2001
From: Xavier Leroy
Date: Mon, 17 May 2021 18:07:02 +0200
Subject: Native support for bit fields (#400)

This big PR adds support for bit fields in structs and unions to the
verified part of CompCert, namely the CompCert C and Clight languages.

The compilation of bit field accesses to normal integer accesses +
shifts and masks is done and proved correct as part of the Cshmgen
pass.

The layout of bit fields in memory is done by the functions in module
Ctypes. It follows the ELF ABI layout algorithm. As a bonus, basic
soundness properties of the layout are shown, such as "two different
bit fields do not overlap" or "a bit field and a regular field do not
overlap".

All this replaces the previous emulation of bit fields by
source-to-source rewriting in the unverified front-end of CompCert
(module cparse/Bitfield.ml). This emulation was prone to errors and
implemented a nonstandard layout instead of the ELF layout.

The core idea for the PR is that expressions in l-value position
denote not just a block, a byte offset and a type, but also a bitfield
designator saying whether all the bits of the type are accessed
(designator Full) or only some of its bits (designator Bits).
Designators of the Bits kind appear when the l-value is a bit field
access; the bit width and bit offset in Bits are computed by the
functions in Ctypes that implement the layout algorithm.

Consequently, both in the semantics of CompCert C and Clight and in
the SimplExpr, SimplLocals and Cshmgen compilation passes, pairs of a
type and a bitfield designator are used in a number of places where a
single type was used before.

The introduction of bit fields has a big impact on static
initialization (module cfrontend/Initializers.v), which had to be
rewritten in large part, along with its soundness proof
(cfrontend/Initializersproof.v).
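For illustration (a sketch, not part of the original commit message;
[extract_demo] is a made-up name), the helper operations this commit
adds to lib/Integers.v, shown in the diff below and used here through
the 32-bit instance [Int], make the shifts-and-masks reading concrete:
extracting the unsigned bit field of width 4 at bit offset 3 from the
carrier value 120 (binary 1111000) yields 15 (binary 1111):

    Example extract_demo:
      Int.unsigned_bitfield_extract 3 4 (Int.repr 120) = Int.repr 15.
    Proof. reflexivity. Qed.

The [*_by_shifts] lemmas in the diff relate these helpers to the
equivalent formulations that use only shifts.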
Both static initialization and run-time manipulation of bit fields
are tested in test/abi using differential testing against GCC and
randomly generated structs.

This work exposed subtle interactions between bit fields and the
volatile modifier. Currently, the volatile modifier is ignored when
accessing a bit field (and a warning is printed at compile-time),
just like it is ignored when accessing a struct or union as an
r-value.

Currently, the natural alignment of bit fields and their storage
units cannot be modified with the aligned attribute. _Alignas on bit
fields is rejected as per C11, and the packed modifier cannot be
applied to a struct containing bit fields.
---
 lib/Integers.v | 112 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 112 insertions(+)

(limited to 'lib')

diff --git a/lib/Integers.v b/lib/Integers.v
index 63dc2251..68bff3a0 100644
--- a/lib/Integers.v
+++ b/lib/Integers.v
@@ -3389,6 +3389,118 @@ Proof.
   lia.
 Qed.
 
+(** ** Accessing bit fields *)
+
+Definition unsigned_bitfield_extract (pos width: Z) (n: int) : int :=
+  zero_ext width (shru n (repr pos)).
+
+Definition signed_bitfield_extract (pos width: Z) (n: int) : int :=
+  sign_ext width (shru n (repr pos)).
+
+Definition bitfield_insert (pos width: Z) (n p: int) : int :=
+  let mask := shl (repr (two_p width - 1)) (repr pos) in
+  or (shl (zero_ext width p) (repr pos))
+     (and n (not mask)).
+
+Lemma bits_unsigned_bitfield_extract:
+  forall pos width n i,
+  0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+  0 <= i < zwordsize ->
+  testbit (unsigned_bitfield_extract pos width n) i =
+  if zlt i width then testbit n (i + pos) else false.
+Proof.
+  intros. unfold unsigned_bitfield_extract. rewrite bits_zero_ext by lia.
+  destruct (zlt i width); auto.
+  rewrite bits_shru by auto. rewrite unsigned_repr, zlt_true. auto.
+  lia.
+  generalize wordsize_max_unsigned; lia.
+Qed.
+
+Lemma bits_signed_bitfield_extract:
+  forall pos width n i,
+  0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+  0 <= i < zwordsize ->
+  testbit (signed_bitfield_extract pos width n) i =
+  testbit n (if zlt i width then i + pos else width - 1 + pos).
+Proof.
+  intros. unfold signed_bitfield_extract. rewrite bits_sign_ext by lia.
+  rewrite bits_shru, unsigned_repr, zlt_true.
+  destruct (zlt i width); auto.
+  destruct (zlt i width); lia.
+  generalize wordsize_max_unsigned; lia.
+  destruct (zlt i width); lia.
+Qed.
+
+Lemma bits_bitfield_insert:
+  forall pos width n p i,
+  0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+  0 <= i < zwordsize ->
+  testbit (bitfield_insert pos width n p) i =
+  if zle pos i && zlt i (pos + width) then testbit p (i - pos) else testbit n i.
+Proof.
+  intros. unfold bitfield_insert.
+  assert (P: unsigned (repr pos) = pos).
+  { apply unsigned_repr. generalize wordsize_max_unsigned; lia. }
+  rewrite bits_or, bits_and, bits_not, ! bits_shl, ! P by auto.
+  destruct (zlt i pos).
+- unfold proj_sumbool; rewrite zle_false by lia. cbn. apply andb_true_r.
+- unfold proj_sumbool; rewrite zle_true by lia; cbn.
+  rewrite bits_zero_ext, testbit_repr, Ztestbit_two_p_m1 by lia.
+  destruct (zlt (i - pos) width); cbn.
++ rewrite zlt_true by lia. rewrite andb_false_r, orb_false_r. auto.
++ rewrite zlt_false by lia. apply andb_true_r.
+Qed.
+
+Lemma unsigned_bitfield_extract_by_shifts:
+  forall pos width n,
+  0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+  unsigned_bitfield_extract pos width n =
+  shru (shl n (repr (zwordsize - pos - width))) (repr (zwordsize - width)).
+Proof.
+  intros.
+  apply same_bits_eq; intros.
+  rewrite bits_unsigned_bitfield_extract by lia.
+  rewrite bits_shru by auto.
+  rewrite unsigned_repr by (generalize wordsize_max_unsigned; lia).
+  destruct (zlt i width).
+- rewrite bits_shl by lia.
+  rewrite unsigned_repr by (generalize wordsize_max_unsigned; lia).
+  rewrite zlt_true by lia. rewrite zlt_false by lia. f_equal; lia.
+- rewrite zlt_false by lia. auto.
+Qed.
+
+Lemma signed_bitfield_extract_by_shifts:
+  forall pos width n,
+  0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+  signed_bitfield_extract pos width n =
+  shr (shl n (repr (zwordsize - pos - width))) (repr (zwordsize - width)).
+Proof.
+  intros. apply same_bits_eq; intros.
+  rewrite bits_signed_bitfield_extract by lia.
+  rewrite bits_shr by auto.
+  rewrite unsigned_repr by (generalize wordsize_max_unsigned; lia).
+  rewrite bits_shl.
+  rewrite unsigned_repr by (generalize wordsize_max_unsigned; lia).
+  symmetry. rewrite zlt_false. f_equal.
+  destruct (zlt i width); [rewrite zlt_true | rewrite zlt_false]; lia.
+  destruct zlt; lia.
+  destruct zlt; lia.
+Qed.
+
+Lemma bitfield_insert_alternative:
+  forall pos width n p,
+  0 <= width ->
+  bitfield_insert pos width n p =
+  let mask := shl (repr (two_p width - 1)) (repr pos) in
+  or (and (shl p (repr pos)) mask)
+     (and n (not mask)).
+Proof.
+  intros. unfold bitfield_insert.
+  set (m1 := repr (two_p width - 1)).
+  set (m2 := shl m1 (repr pos)).
+  f_equal.
+  rewrite zero_ext_and by lia. fold m1. unfold m2. rewrite <- and_shl. auto.
+Qed.
+
 End Make.
 
 (** * Specialization to integers of size 8, 32, and 64 bits *)
--
cgit

From c9fad7cd7bdc4e79fb06a1d39abfa0d5471623e5 Mon Sep 17 00:00:00 2001
From: Xavier Leroy
Date: Wed, 15 Sep 2021 14:00:41 +0200
Subject: Avoid `Global Set Asymmetric Patterns` (#408)

Instead, add `Set Asymmetric Patterns` to the files that need it, or
use `Arguments` to make inductive types work better with symmetric
patterns.

Closes: #403
---
 lib/Coqlib.v    | 2 --
 lib/Floats.v    | 1 +
 lib/Iteration.v | 2 ++
 lib/UnionFind.v | 1 +
 4 files changed, 4 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/Coqlib.v b/lib/Coqlib.v
index c023bdd5..045fb03a 100644
--- a/lib/Coqlib.v
+++ b/lib/Coqlib.v
@@ -25,8 +25,6 @@ Require Export List.
 Require Export Bool.
 Require Export Lia.
 
-Global Set Asymmetric Patterns.
-
 (** * Useful tactics *)
 
 Ltac inv H := inversion H; clear H; subst.
diff --git a/lib/Floats.v b/lib/Floats.v
index f56078aa..43caebb0 100644
--- a/lib/Floats.v
+++ b/lib/Floats.v
@@ -26,6 +26,7 @@ Import ListNotations.
 Close Scope R_scope.
 
 Open Scope Z_scope.
+Set Asymmetric Patterns.
 
 Definition float := binary64.   (**r the type of IEEE 754 double-precision FP numbers *)
 Definition float32 := binary32. (**r the type of IEEE 754 single-precision FP numbers *)
diff --git a/lib/Iteration.v b/lib/Iteration.v
index 82110bff..66bb3970 100644
--- a/lib/Iteration.v
+++ b/lib/Iteration.v
@@ -20,6 +20,8 @@ Require Import Axioms.
 Require Import Coqlib.
 Require Import Wfsimpl.
 
+Set Asymmetric Patterns.
+
 (** This module defines several Coq encodings of a general "while" loop.
   The loop is presented in functional style as the iteration of a [step] function
   of type [A -> B + A]:
diff --git a/lib/UnionFind.v b/lib/UnionFind.v
index 67e4271d..1bc2f657 100644
--- a/lib/UnionFind.v
+++ b/lib/UnionFind.v
@@ -21,6 +21,7 @@ Require Import Coqlib.
 
 Open Scope nat_scope.
 Set Implicit Arguments.
+Set Asymmetric Patterns.
 
 (* To avoid useless definitions of inductors in extracted code. *)
 Local Unset Elimination Schemes.
--
cgit
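As a closing illustration of this last commit (a sketch, not code from
the CompCert sources; [res] and [default] are made-up names): with
Coq's default symmetric patterns, constructor parameters are written
in match patterns the same way as in terms, so the `Arguments`
alternative mentioned in the commit message consists in declaring the
parameters implicit, which lets both terms and patterns omit them:

    Inductive res (A: Type) : Type :=
      | OK (v: A)
      | Error.
    Arguments OK {A} v.
    Arguments Error {A}.

    (* With [A] implicit, no extra [_] is needed in the patterns below.
       Under [Set Asymmetric Patterns], the parameter would be omitted
       from patterns regardless of the [Arguments] declarations. *)
    Definition default {A} (d: A) (r: res A) : A :=
      match r with
      | OK v => v
      | Error => d
      end.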