author    Xavier Leroy <xavier.leroy@college-de-france.fr>    2021-05-17 18:07:02 +0200
committer Xavier Leroy <xavier.leroy@college-de-france.fr>    2021-08-22 13:29:00 +0200
commit    47fae389c800034e002c9f8a398e9adc79a14b81 (patch)
tree      210933a5a526afe0469a66f59861c13d687c733e /lib
parent    a94edc576ca2c288c66f710798ab2ada3c485a40 (diff)
Native support for bit fields (#400)
This big PR adds support for bit fields in structs and unions to the verified part of CompCert, namely the CompCert C and Clight languages.

The compilation of bit field accesses to normal integer accesses + shifts and masks is done and proved correct as part of the Cshmgen pass.

The layout of bit fields in memory is done by the functions in module Ctypes. It follows the ELF ABI layout algorithm. As a bonus, basic soundness properties of the layout are shown, such as "two different bit fields do not overlap" or "a bit field and a regular field do not overlap".

All this replaces the previous emulation of bit fields by source-to-source rewriting in the unverified front-end of CompCert (module cparser/Bitfields.ml). This emulation was prone to errors, and used a nonstandard layout instead of the ELF layout.

The core idea of the PR is that expressions in l-value position denote not just a block, a byte offset and a type, but also a bitfield designator saying whether all the bits of the type are accessed (designator Full) or only some of its bits (designator Bits). Designators of the Bits kind appear when the l-value is a bit field access; the bit width and bit offset in Bits are computed by the functions in Ctypes that implement the layout algorithm.

Consequently, both in the semantics of CompCert C and Clight and in the SimplExpr, SimplLocals and Cshmgen compilation passes, pairs of a type and a bitfield designator are used in a number of places where a single type was used before.

The introduction of bit fields has a big impact on static initialization (module cfrontend/Initializers.v), which had to be rewritten in large part, along with its soundness proof (cfrontend/Initializersproof.v).

Both static initialization and run-time manipulation of bit fields are tested in test/abi using differential testing against GCC and randomly-generated structs.

This work exposed subtle interactions between bit fields and the volatile modifier. Currently, the volatile modifier is ignored when accessing a bit field (and a warning is printed at compile-time), just like it is ignored when accessing a struct or union as an r-value.

Currently, the natural alignment of bit fields and their storage units cannot be modified with the aligned attribute. _Alignas on bit fields is rejected as per C11, and the packed modifier cannot be applied to a struct containing bit fields.
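[Editor's sketch, not part of the commit message.] The bitfield designator described above can be pictured in Coq as follows. This paraphrases the new definition in cfrontend/Ctypes.v: the constructor names Full and Bits come from the description above, but the exact fields and their order here are an assumption; intsize and signedness are reproduced from CompCert's Ctypes so the sketch compiles on its own.

Require Import ZArith.

(* Existing Ctypes descriptions of the carrier type (as in cfrontend/Ctypes.v). *)
Inductive intsize : Type := I8 | I16 | I32 | IBool.
Inductive signedness : Type := Signed | Unsigned.

(* Sketch: what an l-value denotes now includes a designator.
   Full: all bits of the type are accessed.
   Bits: only [width] bits at bit offset [pos] within a carrier of size
   [sz] and signedness [sg]; [pos] and [width] are produced by the
   layout functions in Ctypes. *)
Inductive bitfield : Type :=
  | Full
  | Bits (sz : intsize) (sg : signedness) (pos : Z) (width : Z).

An access through a Bits designator is then compiled by Cshmgen into an ordinary integer access combined with the shift-and-mask operations that this patch adds to lib/Integers.v, shown in the diff below.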
Diffstat (limited to 'lib')
-rw-r--r--    lib/Integers.v    112
1 file changed, 112 insertions, 0 deletions
diff --git a/lib/Integers.v b/lib/Integers.v
index 63dc2251..68bff3a0 100644
--- a/lib/Integers.v
+++ b/lib/Integers.v
@@ -3389,6 +3389,118 @@ Proof.
lia.
Qed.
+(** ** Accessing bit fields *)
+
+Definition unsigned_bitfield_extract (pos width: Z) (n: int) : int :=
+ zero_ext width (shru n (repr pos)).
+
+Definition signed_bitfield_extract (pos width: Z) (n: int) : int :=
+ sign_ext width (shru n (repr pos)).
+
+Definition bitfield_insert (pos width: Z) (n p: int) : int :=
+ let mask := shl (repr (two_p width - 1)) (repr pos) in
+ or (shl (zero_ext width p) (repr pos))
+ (and n (not mask)).
+
+Lemma bits_unsigned_bitfield_extract:
+ forall pos width n i,
+ 0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+ 0 <= i < zwordsize ->
+ testbit (unsigned_bitfield_extract pos width n) i =
+ if zlt i width then testbit n (i + pos) else false.
+Proof.
+ intros. unfold unsigned_bitfield_extract. rewrite bits_zero_ext by lia.
+ destruct (zlt i width); auto.
+ rewrite bits_shru by auto. rewrite unsigned_repr, zlt_true. auto.
+ lia.
+ generalize wordsize_max_unsigned; lia.
+Qed.
+
+Lemma bits_signed_bitfield_extract:
+ forall pos width n i,
+ 0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+ 0 <= i < zwordsize ->
+ testbit (signed_bitfield_extract pos width n) i =
+ testbit n (if zlt i width then i + pos else width - 1 + pos).
+Proof.
+ intros. unfold signed_bitfield_extract. rewrite bits_sign_ext by lia.
+ rewrite bits_shru, unsigned_repr, zlt_true.
+ destruct (zlt i width); auto.
+ destruct (zlt i width); lia.
+ generalize wordsize_max_unsigned; lia.
+ destruct (zlt i width); lia.
+Qed.
+
+Lemma bits_bitfield_insert:
+ forall pos width n p i,
+ 0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+ 0 <= i < zwordsize ->
+ testbit (bitfield_insert pos width n p) i =
+ if zle pos i && zlt i (pos + width) then testbit p (i - pos) else testbit n i.
+Proof.
+ intros. unfold bitfield_insert.
+ assert (P: unsigned (repr pos) = pos).
+ { apply unsigned_repr. generalize wordsize_max_unsigned; lia. }
+ rewrite bits_or, bits_and, bits_not, ! bits_shl, ! P by auto.
+ destruct (zlt i pos).
+- unfold proj_sumbool; rewrite zle_false by lia. cbn. apply andb_true_r.
+- unfold proj_sumbool; rewrite zle_true by lia; cbn.
+ rewrite bits_zero_ext, testbit_repr, Ztestbit_two_p_m1 by lia.
+ destruct (zlt (i - pos) width); cbn.
++ rewrite zlt_true by lia. rewrite andb_false_r, orb_false_r. auto.
++ rewrite zlt_false by lia. apply andb_true_r.
+Qed.
+
+Lemma unsigned_bitfield_extract_by_shifts:
+ forall pos width n,
+ 0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+ unsigned_bitfield_extract pos width n =
+ shru (shl n (repr (zwordsize - pos - width))) (repr (zwordsize - width)).
+Proof.
+ intros. apply same_bits_eq; intros.
+ rewrite bits_unsigned_bitfield_extract by lia.
+ rewrite bits_shru by auto.
+ rewrite unsigned_repr by (generalize wordsize_max_unsigned; lia).
+ destruct (zlt i width).
+- rewrite bits_shl by lia.
+ rewrite unsigned_repr by (generalize wordsize_max_unsigned; lia).
+ rewrite zlt_true by lia. rewrite zlt_false by lia. f_equal; lia.
+- rewrite zlt_false by lia. auto.
+Qed.
+
+Lemma signed_bitfield_extract_by_shifts:
+ forall pos width n,
+ 0 <= pos -> 0 < width -> pos + width <= zwordsize ->
+ signed_bitfield_extract pos width n =
+ shr (shl n (repr (zwordsize - pos - width))) (repr (zwordsize - width)).
+Proof.
+ intros. apply same_bits_eq; intros.
+ rewrite bits_signed_bitfield_extract by lia.
+ rewrite bits_shr by auto.
+ rewrite unsigned_repr by (generalize wordsize_max_unsigned; lia).
+ rewrite bits_shl.
+ rewrite unsigned_repr by (generalize wordsize_max_unsigned; lia).
+ symmetry. rewrite zlt_false. f_equal.
+ destruct (zlt i width); [rewrite zlt_true | rewrite zlt_false]; lia.
+ destruct zlt; lia.
+ destruct zlt; lia.
+Qed.
+
+Lemma bitfield_insert_alternative:
+ forall pos width n p,
+ 0 <= width ->
+ bitfield_insert pos width n p =
+ let mask := shl (repr (two_p width - 1)) (repr pos) in
+ or (and (shl p (repr pos)) mask)
+ (and n (not mask)).
+Proof.
+ intros. unfold bitfield_insert.
+ set (m1 := repr (two_p width - 1)).
+ set (m2 := shl m1 (repr pos)).
+ f_equal.
+ rewrite zero_ext_and by lia. fold m1. unfold m2. rewrite <- and_shl. auto.
+Qed.
+
End Make.
(** * Specialization to integers of size 8, 32, and 64 bits *)
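[Editor's note, not part of the commit.] As a quick sanity check of the new operations, the following can be evaluated in a Coq session where CompCert's lib/Integers.v is compiled and on the loadpath, so that the 32-bit instance Int of the Make functor (from the specialization section above) is available. The expected values follow directly from the definitions: 1008 = 0x3F0, so the 6-bit field at bit offset 4 holds the bits 111111.

Require Import Integers.

(* Extract the 6-bit field at bit offset 4 of 1008 = 0x3F0:
   the field bits are 111111, i.e. 63 when read unsigned. *)
Compute Int.unsigned (Int.unsigned_bitfield_extract 4 6 (Int.repr 1008)).
(* expected: 63 *)

(* The same field read as a signed bit field sign-extends to -1. *)
Compute Int.signed (Int.signed_bitfield_extract 4 6 (Int.repr 1008)).
(* expected: -1 *)

(* Overwrite the field with 5; the bits outside the field (all zero
   here) are preserved, giving 5 shifted left by 4, i.e. 80 = 0x050. *)
Compute Int.unsigned (Int.bitfield_insert 4 6 (Int.repr 1008) (Int.repr 5)).
(* expected: 80 *)

These values agree with the characterizations proved in the diff: for instance, bits_unsigned_bitfield_extract states that bit i of the extracted value is bit i + pos of the argument when i < width, and 0 otherwise.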