summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYann Herklotz <git@yannherklotz.com>2023-05-11 19:38:03 +0100
committerYann Herklotz <git@yannherklotz.com>2023-05-11 19:38:03 +0100
commit47c1289ff658a5aec71635d79ffe30bb29a07876 (patch)
tree56cf6b959e37fed88c492d34defd3d7ec40e7148
parentfbe0fc62120348f582dc4db2b614078943d0764b (diff)
downloadzk-web-47c1289ff658a5aec71635d79ffe30bb29a07876.tar.gz
zk-web-47c1289ff658a5aec71635d79ffe30bb29a07876.zip
Add content
-rw-r--r--.gitmodules6
-rw-r--r--config.toml43
-rw-r--r--content/zettel/1a.md9
-rw-r--r--content/zettel/1a1.md33
-rw-r--r--content/zettel/1a2.md25
-rw-r--r--content/zettel/1a3.md52
-rw-r--r--content/zettel/1a3a.md14
-rw-r--r--content/zettel/1a3b.md13
-rw-r--r--content/zettel/1a3c.md13
-rw-r--r--content/zettel/1a3d.md13
-rw-r--r--content/zettel/1a4.md13
-rw-r--r--content/zettel/1b.md9
-rw-r--r--content/zettel/1b1.md50
-rw-r--r--content/zettel/1b2.md95
-rw-r--r--content/zettel/1b3.md34
-rw-r--r--content/zettel/1b4.md16
-rw-r--r--content/zettel/1b5.md17
-rw-r--r--content/zettel/1b6.md18
-rw-r--r--content/zettel/1b7.md22
-rw-r--r--content/zettel/1b8.md47
-rw-r--r--content/zettel/1b9.md9
-rw-r--r--content/zettel/1b9a.md20
-rw-r--r--content/zettel/1b9b.md11
-rw-r--r--content/zettel/1b9c.md24
-rw-r--r--content/zettel/1c.md51
-rw-r--r--content/zettel/1c1.md17
-rw-r--r--content/zettel/1c10.md14
-rw-r--r--content/zettel/1c2.md18
-rw-r--r--content/zettel/1c2a.md17
-rw-r--r--content/zettel/1c2a1.md29
-rw-r--r--content/zettel/1c2a2.md12
-rw-r--r--content/zettel/1c2a2a.md22
-rw-r--r--content/zettel/1c2a2b.md28
-rw-r--r--content/zettel/1c2a2c.md23
-rw-r--r--content/zettel/1c2a3.md12
-rw-r--r--content/zettel/1c2a4.md14
-rw-r--r--content/zettel/1c2b.md38
-rw-r--r--content/zettel/1c2b1.md28
-rw-r--r--content/zettel/1c2c.md16
-rw-r--r--content/zettel/1c2d.md35
-rw-r--r--content/zettel/1c2e.md28
-rw-r--r--content/zettel/1c2f.md55
-rw-r--r--content/zettel/1c2g.md56
-rw-r--r--content/zettel/1c2h.md37
-rw-r--r--content/zettel/1c2h1.md18
-rw-r--r--content/zettel/1c2h2.md18
-rw-r--r--content/zettel/1c2h3.md17
-rw-r--r--content/zettel/1c2h4.md18
-rw-r--r--content/zettel/1c2h5.md18
-rw-r--r--content/zettel/1c2h6.md18
-rw-r--r--content/zettel/1c3.md56
-rw-r--r--content/zettel/1c3a.md34
-rw-r--r--content/zettel/1c3b.md14
-rw-r--r--content/zettel/1c4.md22
-rw-r--r--content/zettel/1c4a.md19
-rw-r--r--content/zettel/1c4b.md15
-rw-r--r--content/zettel/1c4b1.md20
-rw-r--r--content/zettel/1c5.md17
-rw-r--r--content/zettel/1c5a.md30
-rw-r--r--content/zettel/1c5b.md25
-rw-r--r--content/zettel/1c5c.md25
-rw-r--r--content/zettel/1c5d.md19
-rw-r--r--content/zettel/1c5e.md21
-rw-r--r--content/zettel/1c6.md18
-rw-r--r--content/zettel/1c6a.md54
-rw-r--r--content/zettel/1c6a1.md21
-rw-r--r--content/zettel/1c6a2.md25
-rw-r--r--content/zettel/1c6b.md23
-rw-r--r--content/zettel/1c6c.md18
-rw-r--r--content/zettel/1c6d.md49
-rw-r--r--content/zettel/1c6e.md24
-rw-r--r--content/zettel/1c7.md12
-rw-r--r--content/zettel/1c7a.md9
-rw-r--r--content/zettel/1c8.md29
-rw-r--r--content/zettel/1c9.md18
-rw-r--r--content/zettel/1d.md9
-rw-r--r--content/zettel/1d1.md102
-rw-r--r--content/zettel/1e.md9
-rw-r--r--content/zettel/1e1.md21
-rw-r--r--content/zettel/1f.md9
-rw-r--r--content/zettel/1f1.md11
-rw-r--r--content/zettel/1f2.md29
-rw-r--r--content/zettel/1f2a.md33
-rw-r--r--content/zettel/1f3.md45
-rw-r--r--content/zettel/1f3a.md17
-rw-r--r--content/zettel/1f4.md28
-rw-r--r--content/zettel/1f4a.md57
-rw-r--r--content/zettel/1f4b.md20
-rw-r--r--content/zettel/2a.md9
-rw-r--r--content/zettel/2a1.md35
-rw-r--r--content/zettel/2ab.md13
-rw-r--r--content/zettel/2b.md9
-rw-r--r--content/zettel/2b1.md20
-rw-r--r--content/zettel/2b1a.md37
-rw-r--r--content/zettel/2b1b.md31
-rw-r--r--content/zettel/2b1c.md17
-rw-r--r--content/zettel/2b1c1.md36
-rw-r--r--content/zettel/2b1d.md17
-rw-r--r--content/zettel/2b1d1.md20
-rw-r--r--content/zettel/2b1d2.md17
-rw-r--r--content/zettel/2b1e.md21
-rw-r--r--content/zettel/2b2.md18
-rw-r--r--content/zettel/2c.md15
-rw-r--r--content/zettel/2c1.md21
-rw-r--r--content/zettel/2d.md9
-rw-r--r--content/zettel/2d1.md19
-rw-r--r--content/zettel/2d2.md25
-rw-r--r--content/zettel/2d3.md25
-rw-r--r--content/zettel/2d4.md39
-rw-r--r--content/zettel/2d5.md21
-rw-r--r--content/zettel/2e.md9
-rw-r--r--content/zettel/2e1.md9
-rw-r--r--content/zettel/2e1a.md42
-rw-r--r--content/zettel/2e1a1.md17
-rw-r--r--content/zettel/2e1b.md36
-rw-r--r--content/zettel/2e1b1.md13
-rw-r--r--content/zettel/2e1b1a.md15
-rw-r--r--content/zettel/2e1b1b.md25
-rw-r--r--content/zettel/2e1b2.md20
-rw-r--r--content/zettel/2e1b2a.md12
-rw-r--r--content/zettel/2e1b3.md17
-rw-r--r--content/zettel/2e1b3a.md14
-rw-r--r--content/zettel/2e1b3b.md14
-rw-r--r--content/zettel/2e1b3c.md13
-rw-r--r--content/zettel/2e1b3c1.md29
-rw-r--r--content/zettel/2e1b3c2.md25
-rw-r--r--content/zettel/2e1b4.md30
-rw-r--r--content/zettel/2e1b5.md13
-rw-r--r--content/zettel/2e1b5a.md27
-rw-r--r--content/zettel/2e1b5b.md26
-rw-r--r--content/zettel/2e1c.md9
-rw-r--r--content/zettel/2e1c1.md36
-rw-r--r--content/zettel/2e1c1a.md18
-rw-r--r--content/zettel/2e1c2.md26
-rw-r--r--content/zettel/2e1c3.md20
-rw-r--r--content/zettel/2e1c3a.md21
-rw-r--r--content/zettel/2e1c3b.md19
-rw-r--r--content/zettel/2e1c4.md40
-rw-r--r--content/zettel/2e1c4a.md22
-rw-r--r--content/zettel/2e1c4b.md11
-rw-r--r--content/zettel/2e1c5.md17
-rw-r--r--content/zettel/2e1c6.md24
-rw-r--r--content/zettel/2e1c6a.md25
-rw-r--r--content/zettel/2e1c6b.md34
-rw-r--r--content/zettel/2e1c7.md40
-rw-r--r--content/zettel/2e1d.md35
-rw-r--r--content/zettel/2e1d1.md14
-rw-r--r--content/zettel/2e1d2.md16
-rw-r--r--content/zettel/2e1e.md20
-rw-r--r--content/zettel/2e1f.md24
-rw-r--r--content/zettel/2e1f1.md16
-rw-r--r--content/zettel/2e2.md12
-rw-r--r--content/zettel/2e2a.md14
-rw-r--r--content/zettel/2e2b.md15
-rw-r--r--content/zettel/2e3.md9
-rw-r--r--content/zettel/2e3b.md9
-rw-r--r--content/zettel/2e4a.md9
-rw-r--r--content/zettel/2e4a1.md13
-rw-r--r--content/zettel/2e4a2.md13
-rw-r--r--content/zettel/2e4a3.md14
-rw-r--r--content/zettel/2e4a4.md15
-rw-r--r--content/zettel/2f.md9
-rw-r--r--content/zettel/2f1.md38
-rw-r--r--content/zettel/3a.md36
-rw-r--r--content/zettel/3a1.md15
-rw-r--r--content/zettel/3a10.md13
-rw-r--r--content/zettel/3a10a.md30
-rw-r--r--content/zettel/3a10b.md28
-rw-r--r--content/zettel/3a10c.md28
-rw-r--r--content/zettel/3a10d.md28
-rw-r--r--content/zettel/3a1a.md33
-rw-r--r--content/zettel/3a2.md16
-rw-r--r--content/zettel/3a3.md47
-rw-r--r--content/zettel/3a4.md55
-rw-r--r--content/zettel/3a4a.md27
-rw-r--r--content/zettel/3a4b.md13
-rw-r--r--content/zettel/3a5.md25
-rw-r--r--content/zettel/3a5a.md18
-rw-r--r--content/zettel/3a5b.md25
-rw-r--r--content/zettel/3a5c.md36
-rw-r--r--content/zettel/3a5c1.md64
-rw-r--r--content/zettel/3a5c1a.md24
-rw-r--r--content/zettel/3a5d.md18
-rw-r--r--content/zettel/3a5e.md15
-rw-r--r--content/zettel/3a5f.md29
-rw-r--r--content/zettel/3a6.md16
-rw-r--r--content/zettel/3a7.md27
-rw-r--r--content/zettel/3a7a.md37
-rw-r--r--content/zettel/3a7b.md26
-rw-r--r--content/zettel/3a7c.md27
-rw-r--r--content/zettel/3a7d.md50
-rw-r--r--content/zettel/3a7e.md17
-rw-r--r--content/zettel/3a8.md37
-rw-r--r--content/zettel/3a8a.md25
-rw-r--r--content/zettel/3a8a1.md27
-rw-r--r--content/zettel/3a8a2.md20
-rw-r--r--content/zettel/3a8a3.md15
-rw-r--r--content/zettel/3a8b.md19
-rw-r--r--content/zettel/3a8c.md17
-rw-r--r--content/zettel/3a8d.md13
-rw-r--r--content/zettel/3a8e.md36
-rw-r--r--content/zettel/3a8e1.md14
-rw-r--r--content/zettel/3a8e2.md16
-rw-r--r--content/zettel/3a8e3.md28
-rw-r--r--content/zettel/3a8f.md21
-rw-r--r--content/zettel/3a8g.md17
-rw-r--r--content/zettel/3a8g1.md38
-rw-r--r--content/zettel/3a8g1a.md15
-rw-r--r--content/zettel/3a8g1b.md19
-rw-r--r--content/zettel/3a8g2.md16
-rw-r--r--content/zettel/3a8g2a.md27
-rw-r--r--content/zettel/3a8g2b.md25
-rw-r--r--content/zettel/3a8g2c.md16
-rw-r--r--content/zettel/3a8g2c1.md16
-rw-r--r--content/zettel/3a8g2d.md44
-rw-r--r--content/zettel/3a8g2e.md20
-rw-r--r--content/zettel/3a8g3.md27
-rw-r--r--content/zettel/3a8g4.md27
-rw-r--r--content/zettel/3a8g4a.md35
-rw-r--r--content/zettel/3a8g4b.md34
-rw-r--r--content/zettel/3a8g5.md15
-rw-r--r--content/zettel/3a8g5a.md50
-rw-r--r--content/zettel/3a8g5b.md31
-rw-r--r--content/zettel/3a8g5c.md39
-rw-r--r--content/zettel/3a8g5c1.md22
-rw-r--r--content/zettel/3a8g5c2.md27
-rw-r--r--content/zettel/3a8g5d.md44
-rw-r--r--content/zettel/3a8g5e.md29
-rw-r--r--content/zettel/3a8g5e1.md21
-rw-r--r--content/zettel/3a8g5e1a.md55
-rw-r--r--content/zettel/3a8g5e2.md25
-rw-r--r--content/zettel/3a8g5e3.md24
-rw-r--r--content/zettel/3a8g5e3a.md15
-rw-r--r--content/zettel/3a8g5e4.md15
-rw-r--r--content/zettel/3a8g5f.md30
-rw-r--r--content/zettel/3a8g5g.md19
-rw-r--r--content/zettel/3a8g5g1.md23
-rw-r--r--content/zettel/3a8g5h.md34
-rw-r--r--content/zettel/3a8g5h1.md20
-rw-r--r--content/zettel/3a8g5h2.md18
-rw-r--r--content/zettel/3a8g5h3.md17
-rw-r--r--content/zettel/3a8g5h4.md15
-rw-r--r--content/zettel/3a8g5h5.md18
-rw-r--r--content/zettel/3a8g5h6.md15
-rw-r--r--content/zettel/3a8g5i.md32
-rw-r--r--content/zettel/3a9.md24
-rw-r--r--content/zettel/3b.md14
-rw-r--r--content/zettel/3b1.md25
-rw-r--r--content/zettel/3b2.md17
-rw-r--r--content/zettel/3b3.md9
-rw-r--r--content/zettel/3b3a.md20
-rw-r--r--content/zettel/3b3b.md19
-rw-r--r--content/zettel/3b3c.md26
-rw-r--r--content/zettel/3b3d.md32
-rw-r--r--content/zettel/3b4.md19
-rw-r--r--content/zettel/3b5.md45
-rw-r--r--content/zettel/3b5a.md18
-rw-r--r--content/zettel/3b6.md50
-rw-r--r--content/zettel/3b6a.md24
-rw-r--r--content/zettel/3b7.md38
-rw-r--r--content/zettel/3b7a.md21
-rw-r--r--content/zettel/3b8.md12
-rw-r--r--content/zettel/3c.md9
-rw-r--r--content/zettel/3c1.md43
-rw-r--r--content/zettel/3c2.md23
-rw-r--r--content/zettel/3c3.md16
-rw-r--r--content/zettel/3c3a.md14
-rw-r--r--content/zettel/3c3a1.md13
-rw-r--r--content/zettel/3c3b.md30
-rw-r--r--content/zettel/3c3b1.md25
-rw-r--r--content/zettel/3c3b2.md17
-rw-r--r--content/zettel/3c3b3.md26
-rw-r--r--content/zettel/3c3c.md40
-rw-r--r--content/zettel/3c3c1.md20
-rw-r--r--content/zettel/3c3c2.md15
-rw-r--r--content/zettel/3c3c3.md64
-rw-r--r--content/zettel/3c3d.md15
-rw-r--r--content/zettel/3c3e.md28
-rw-r--r--content/zettel/3c3e1.md16
-rw-r--r--content/zettel/3c3e2.md19
-rw-r--r--content/zettel/3c3f.md24
-rw-r--r--content/zettel/3c3f1.md21
-rw-r--r--content/zettel/3c3f2.md18
-rw-r--r--content/zettel/3c3f3.md27
-rw-r--r--content/zettel/3c3f4.md28
-rw-r--r--content/zettel/3c3f5.md25
-rw-r--r--content/zettel/3c3f6.md33
-rw-r--r--content/zettel/3c3f6a.md23
-rw-r--r--content/zettel/3c3f7.md23
-rw-r--r--content/zettel/3c3f7a.md24
-rw-r--r--content/zettel/3c3g.md25
-rw-r--r--content/zettel/3c3g1.md25
-rw-r--r--content/zettel/3c3g2.md23
-rw-r--r--content/zettel/3c3g3.md20
-rw-r--r--content/zettel/3c3g4.md23
-rw-r--r--content/zettel/3c3g5.md21
-rw-r--r--content/zettel/3c3g6.md26
-rw-r--r--content/zettel/3c3g6a.md27
-rw-r--r--content/zettel/3c3g7.md17
-rw-r--r--content/zettel/3c3h.md9
-rw-r--r--content/zettel/3c3h1.md16
-rw-r--r--content/zettel/3c3h2.md32
-rw-r--r--content/zettel/3c3h3.md20
-rw-r--r--content/zettel/3c3h4.md25
-rw-r--r--content/zettel/3c3i.md18
-rw-r--r--content/zettel/3c3i1.md29
-rw-r--r--content/zettel/3c3j.md24
-rw-r--r--content/zettel/3c3k.md41
-rw-r--r--content/zettel/3c3k1.md22
-rw-r--r--content/zettel/3c3l.md17
-rw-r--r--content/zettel/3c3l1.md25
-rw-r--r--content/zettel/3c3m.md31
-rw-r--r--content/zettel/3c3m1.md24
-rw-r--r--content/zettel/3c3m2.md26
-rw-r--r--content/zettel/3c3m3.md20
-rw-r--r--content/zettel/3c3n.md14
-rw-r--r--content/zettel/3c4.md21
-rw-r--r--content/zettel/3c5.md14
-rw-r--r--content/zettel/3c6.md18
-rw-r--r--content/zettel/3c6a.md18
-rw-r--r--content/zettel/3c6b.md36
-rw-r--r--content/zettel/3c7.md10
-rw-r--r--content/zettel/3c7a.md71
-rw-r--r--content/zettel/3c8.md26
-rw-r--r--content/zettel/3c8a.md23
-rw-r--r--content/zettel/3d.md9
-rw-r--r--content/zettel/3d1.md19
-rw-r--r--content/zettel/3d2.md9
-rw-r--r--content/zettel/3d2a.md20
-rw-r--r--content/zettel/3d2b.md38
-rw-r--r--content/zettel/3d2b1.md26
-rw-r--r--content/zettel/3d2b1a.md15
-rw-r--r--content/zettel/3d2b1b.md23
-rw-r--r--content/zettel/3d2b1c.md13
-rw-r--r--content/zettel/3d2b1d.md12
-rw-r--r--content/zettel/3d2b1e.md16
-rw-r--r--content/zettel/4a.md13
-rw-r--r--content/zettel/4b.md12
-rw-r--r--content/zettel/4b1.md27
-rw-r--r--content/zettel/4b2.md21
-rw-r--r--content/zettel/4c.md29
-rw-r--r--content/zettel/4c1.md19
-rw-r--r--content/zettel/4c2.md9
-rw-r--r--content/zettel/4c2a.md14
-rw-r--r--content/zettel/4c2b.md13
-rw-r--r--content/zettel/4c3.md44
-rw-r--r--content/zettel/4d.md9
-rw-r--r--content/zettel/4d1.md22
-rw-r--r--content/zettel/4d2.md21
-rw-r--r--content/zettel/4d2a.md18
-rw-r--r--content/zettel/4d2b.md14
-rw-r--r--content/zettel/4d2c.md16
-rw-r--r--content/zettel/4d2d.md14
-rw-r--r--content/zettel/4d3.md9
-rw-r--r--content/zettel/4d3a.md15
-rw-r--r--content/zettel/4d3b.md14
-rw-r--r--content/zettel/4e.md9
-rw-r--r--content/zettel/4e1.md19
-rw-r--r--content/zettel/4e2.md22
-rw-r--r--content/zettel/4e2a.md35
-rw-r--r--content/zettel/4e2a1.md14
-rw-r--r--content/zettel/4e2b.md47
-rw-r--r--content/zettel/4e3.md36
-rw-r--r--content/zettel/4e4.md21
-rw-r--r--content/zettel/4e5.md16
-rw-r--r--content/zettel/4e6.md9
-rw-r--r--content/zettel/4f.md23
-rw-r--r--content/zettel/5a.md9
-rw-r--r--content/zettel/5a1.md14
-rw-r--r--content/zettel/5a1a.md35
-rw-r--r--content/zettel/5a1b.md18
-rw-r--r--content/zettel/5a2.md19
-rw-r--r--content/zettel/5a2a.md23
-rw-r--r--content/zettel/5a2b.md25
-rw-r--r--content/zettel/5a2c.md16
-rw-r--r--content/zettel/5a2d.md13
-rw-r--r--content/zettel/5b.md10
-rw-r--r--content/zettel/5b1.md10
-rw-r--r--content/zettel/5b1a.md18
-rw-r--r--content/zettel/5b1b.md18
-rw-r--r--content/zettel/5b2.md22
-rw-r--r--content/zettel/5b3.md21
-rw-r--r--layouts/_default/list.html17
-rw-r--r--layouts/_default/single.html30
-rw-r--r--layouts/partials/head_custom.html19
-rw-r--r--layouts/partials/post-element.html4
-rw-r--r--layouts/shortcodes/transclude-1.html4
-rw-r--r--layouts/shortcodes/transclude-2.html4
-rw-r--r--layouts/shortcodes/transclude-3.html4
m---------themes/hugo-xmin0
390 files changed, 9030 insertions, 3 deletions
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..e5e02d5
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "themes/hugo-blog-awesome"]
+ path = themes/hugo-blog-awesome
+ url = https://github.com/hugo-sid/hugo-blog-awesome.git
+[submodule "themes/hugo-xmin"]
+ path = themes/hugo-xmin
+ url = https://github.com/yihui/hugo-xmin
diff --git a/config.toml b/config.toml
index 1d7c819..7d87c45 100644
--- a/config.toml
+++ b/config.toml
@@ -1,3 +1,40 @@
-baseURL = 'http://example.org/'
-languageCode = 'en-us'
-title = 'My New Hugo Site'
+baseurl = "/"
+languageCode = "en-us"
+title = "Yann's Zettelkasten"
+theme = "hugo-xmin"
+ignoreFiles = [ "\\.Rmd$", "\\.Rmarkdown$", "_cache$" ]
+footnotereturnlinkcontents = "↩"
+
+[[menu.main]]
+name = "Home"
+url = ""
+weight = 10
+
+[[menu.main]]
+name = "About"
+url = "about/"
+weight = 20
+
+[[menu.main]]
+name = "Categories"
+url = "categories/"
+weight = 30
+
+[[menu.main]]
+name = "Tags"
+url = "tags/"
+weight = 40
+
+[[menu.main]]
+name = "Subscribe"
+url = "index.xml"
+
+[params]
+description = "A website built through Hugo and blogdown."
+footer = "&copy; [Yann Herklotz](https://yannherklotz.com) {Year} | [sourcehut](https://sr.ht/~ymherklotz) | [Twitter](https://twitter.com/ymherklotz)"
+
+[markup.highlight]
+codeFences = false
+
+[markup.goldmark.renderer]
+unsafe = true
diff --git a/content/zettel/1a.md b/content/zettel/1a.md
new file mode 100644
index 0000000..c5c119d
--- /dev/null
+++ b/content/zettel/1a.md
@@ -0,0 +1,9 @@
++++
+title = "Data Structures"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = []
+forwardlinks = ["1b", "1a1"]
+zettelid = "1a"
++++
diff --git a/content/zettel/1a1.md b/content/zettel/1a1.md
new file mode 100644
index 0000000..7f4bc22
--- /dev/null
+++ b/content/zettel/1a1.md
@@ -0,0 +1,33 @@
++++
+title = "Data-flow Graph"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1", "1c2b", "1b6", "1a"]
+forwardlinks = ["1a2"]
+zettelid = "1a1"
++++
+
+The data-flow graph (DFG) is a great representation for code that should
+eventually become a circuit \[1\], because it builds data relationships
+between the assignments instead of control flow dependencies between
+them. This removes the ordering that was imposed by the initial writing
+of the code, as the inherent parallelism of hardware means that
+independent instructions can execute in parallel.
+
+Control flow is important for the CPU, as it is inherently single
+threaded. Using multiple threads for only a couple of instructions is
+not feasible, in addition to that.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-hauck10_recon" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">S. Hauck and A. DeHon,
+*Reconfigurable computing: The theory and practice of FPGA-based
+computation*. Elsevier, 2010.</span>
+
+</div>
+
+</div>
diff --git a/content/zettel/1a2.md b/content/zettel/1a2.md
new file mode 100644
index 0000000..2bc0fdd
--- /dev/null
+++ b/content/zettel/1a2.md
@@ -0,0 +1,25 @@
++++
+title = "Petri nets"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c3", "1a1"]
+forwardlinks = ["1a3"]
+zettelid = "1a2"
++++
+
+A petri net[^1] is a mathematical model for the description of
+distributed systems. There are three basic components that can be used
+to describe many different components that are useful in modelling
+various concurrent components such as forks or joins. The main
+components in petri nets are **places**, **transitions** and **arcs**.
+There can only be arcs from places to transitions or vice-versa, not
+between two places or between two transitions.
+
+Petri nets also have formal execution models, from which large
+constructions can be made such as forks, transparent buffers and merges.
+These can then be used to nicely describe digital circuits as well, and
+can be used to show how a circuit was scheduled using dynamic
+scheduling.
+
+[^1]: <https://en.wikipedia.org/wiki/Petri_net>
diff --git a/content/zettel/1a3.md b/content/zettel/1a3.md
new file mode 100644
index 0000000..11614ce
--- /dev/null
+++ b/content/zettel/1a3.md
@@ -0,0 +1,52 @@
++++
+title = "Dominance relations"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1a2"]
+forwardlinks = ["1a4", "1a3a"]
+zettelid = "1a3"
++++
+
+Dominance analysis is important to figure out the control flow in a
+program and analyse it, so that it can be optimised. For example, it can
+be used to generate phi functions correctly for a minimal SSA form \[1\]
+or it can also be used to generate the data dependency graph to then
+perform modulo scheduling on \[2\].
+
+Dominance relations are present in a control-flow graph (CFG) ([\#1a4]).
+A node $i$ dominates another node $j$ if every path from the entry point
+of the CFG to $j$ contains $i$. In addition to that, the relation is
+strict if $i \ne j$.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-barthe14_formal_verif_ssa_based_middl_end_compc"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">G. Barthe, D. Demange, and D.
+Pichardie, “Formal verification of an SSA-based middle-end for
+CompCert,” *ACM Trans. Program. Lang. Syst.*, vol. 36, no. 1, Mar. 2014,
+doi: [10.1145/2579080].</span>
+
+</div>
+
+<div id="ref-tristan10_simpl_verif_valid_softw_pipel" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">J.-B. Tristan and X. Leroy, “A
+simple, verified validator for software pipelining,” in *Proceedings of
+the 37th annual ACM SIGPLAN-SIGACT symposium on principles of
+programming languages*, in POPL ’10. Madrid, Spain: Association for
+Computing Machinery, 2010, pp. 83–92. doi:
+[10.1145/1706299.1706311].</span>
+
+</div>
+
+</div>
+
+ [\#1a4]: /zettel/1a4
+ [10.1145/2579080]: https://doi.org/10.1145/2579080
+ [10.1145/1706299.1706311]: https://doi.org/10.1145/1706299.1706311
diff --git a/content/zettel/1a3a.md b/content/zettel/1a3a.md
new file mode 100644
index 0000000..3f1c756
--- /dev/null
+++ b/content/zettel/1a3a.md
@@ -0,0 +1,14 @@
++++
+title = "Dominance frontier"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1a3"]
+forwardlinks = ["1a3b"]
+zettelid = "1a3a"
++++
+
+For a node $i$ of a CFG, the dominance frontier $DF(i)$ is defined as
+the set of nodes $j$ such that $i$ dominates at least one predecessor of
+$j$ in the CFG, but does not strictly dominate $j$ itself. This can be
+extended to a set of nodes $S$ with $DF(S) = \bigcup_{i \in S} DF(i)$.
diff --git a/content/zettel/1a3b.md b/content/zettel/1a3b.md
new file mode 100644
index 0000000..3430a52
--- /dev/null
+++ b/content/zettel/1a3b.md
@@ -0,0 +1,13 @@
++++
+title = "Iterated Dominance Frontier"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1a3a"]
+forwardlinks = ["1a3c"]
+zettelid = "1a3b"
++++
+
+$DF^{+}(S)$ of a set of nodes S is
+$\text{lim}_{i \to \infty} DF^{i}(S)$, where $DF^1(S) = DF(S)$ and
+$DF^{i+1}(S) = DF(S \cup DF^{i}(S))$.
diff --git a/content/zettel/1a3c.md b/content/zettel/1a3c.md
new file mode 100644
index 0000000..33e15c3
--- /dev/null
+++ b/content/zettel/1a3c.md
@@ -0,0 +1,13 @@
++++
+title = "Immediate Dominator"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1a3b"]
+forwardlinks = ["1a3d"]
+zettelid = "1a3c"
++++
+
+The immediate dominator of node $j$ is written as $idom(j)$ and is the
+closest strict dominator of $j$ on every path from the entry node to
+$j$. It is uniquely determined.
diff --git a/content/zettel/1a3d.md b/content/zettel/1a3d.md
new file mode 100644
index 0000000..0bf0451
--- /dev/null
+++ b/content/zettel/1a3d.md
@@ -0,0 +1,13 @@
++++
+title = "Dominator Tree"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1a3c"]
+forwardlinks = []
+zettelid = "1a3d"
++++
+
+Finally, the dominator tree is defined as the start node being the root,
+and each of the node's children are the nodes that it immediately
+dominates.
diff --git a/content/zettel/1a4.md b/content/zettel/1a4.md
new file mode 100644
index 0000000..a481db7
--- /dev/null
+++ b/content/zettel/1a4.md
@@ -0,0 +1,13 @@
++++
+title = "Control flow graph"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1a3"]
+forwardlinks = []
+zettelid = "1a4"
++++
+
+A control flow graph is a representation of the control flow in a
+programming language, where each node corresponds to an instruction and
+each edge to a possible next instruction that the current one could lead to.
diff --git a/content/zettel/1b.md b/content/zettel/1b.md
new file mode 100644
index 0000000..2d22790
--- /dev/null
+++ b/content/zettel/1b.md
@@ -0,0 +1,9 @@
++++
+title = "Language Constructs"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1a"]
+forwardlinks = ["1c", "1b1"]
+zettelid = "1b"
++++
diff --git a/content/zettel/1b1.md b/content/zettel/1b1.md
new file mode 100644
index 0000000..14b0c84
--- /dev/null
+++ b/content/zettel/1b1.md
@@ -0,0 +1,50 @@
++++
+title = "Guarded commands"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1d1", "1b2", "1b"]
+forwardlinks = ["1b2"]
+zettelid = "1b1"
++++
+
+Guarded commands \[1\] are an interesting construct which can be added
+to languages. They look similar to `case` statements, but behave in a
+parallel and nondeterministic way. Each guard has a boolean value
+followed by a program which may be executed if the guard evaluates to
+true. The following shows the main syntax that guards may have.
+
+``` grammar
+e ::= if gc fi | do gc od ...
+
+gc ::= (b e) || gc
+```
+
+One reason these are interesting language constructs, is because they
+allow for the encoding of commands that may be executed when a condition
+is true, but that need not be. Often, when giving instructions,
+one does not really specify the order, just the commands that should
+eventually be executed.
+
+The guarded commands `gc` will either return a match if a boolean
+evaluates to true, or `abort`. There are two constructs that are built
+around guarded commands which add more functionality to them.
+`if gc fi` matches one rule in the guarded statement and executes it. If
+it does not match a rule, it then acts like `abort`. `do gc od` loops
+over the guarded commands while any rule matches. If no match is
+found, it acts like `skip`.
+
+These allow for nice encoding of common algorithms, using two other
+constructs that use the guarded commands.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-winskel93" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">G. Winskel, *The formal semantics
+of programming languages: An introduction*. MIT press, 1993.</span>
+
+</div>
+
+</div>
diff --git a/content/zettel/1b2.md b/content/zettel/1b2.md
new file mode 100644
index 0000000..150523f
--- /dev/null
+++ b/content/zettel/1b2.md
@@ -0,0 +1,95 @@
++++
+title = "Tail calls"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1d1", "1b1"]
+forwardlinks = ["3a", "1b1", "1b3"]
+zettelid = "1b2"
++++
+
+Tail calls are an optimisation that can be applied in software, which
+detects that a recursive call only happens at the tail of the function.
+If that is the case, then the recursive call can happen without having
+to preserve the stack at every call, as it can effectively be translated
+to an equivalent version that only uses loops. This optimisation is
+important to improve the efficiency of recursive functions if the stack
+is not needed, and is especially important in functional languages where
+recursion might be more widely used.
+
+As tail calls can be efficiently translated to loops, this also means
+that they can be efficiently encoded in hardware when the code is passed
+through high-level synthesis. This is straightforward if the tail calls
+are detected and automatically translated to loops, however, what
+happens if that is not the case and we only have a tail call construct
+in the intermediate language.
+
+The CompCert ([\#3a]) \[1\] register transfer language (RTL)
+intermediate language, for example, has an explicit tail call construct
+(`Itailcall`), which is used if it can detect that a function contains a
+tail call. Apart from that, it only has a jump command, which can be
+used for loops. Therefore, supporting the tail call construct allows us
+to support a subset of possible recursive functions. Even though these
+are as powerful as loops, it may be more natural to write their
+definitions as a recursive function instead of a loop.
+
+{{< transclude-1 zettel="1b1" >}}
+
+Guarded commands \[2\] are an interesting construct which can be added
+to languages. They look similar to `case` statements, but behave in a
+parallel and nondeterministic way. Each guard has a boolean value
+followed by a program which may be executed if the guard evaluates to
+true. The following shows the main syntax that guards may have.
+
+``` grammar
+e ::= if gc fi | do gc od ...
+
+gc ::= (b e) || gc
+```
+
+One reason these are interesting language constructs, is because they
+allow for the encoding of commands that may be executed when a condition
+is true, but that need not be. Often, when giving instructions,
+one does not really specify the order, just the commands that should
+eventually be executed.
+
+The guarded commands `gc` will either return a match if a boolean
+evaluates to true, or `abort`. There are two constructs that are built
+around guarded commands which add more functionality to them.
+`if gc fi` matches one rule in the guarded statement and executes it. If
+it does not match a rule, it then acts like `abort`. `do gc od` loops
+over the guarded commands while any rule matches. If no match is
+found, it acts like `skip`.
+
+These allow for nice encoding of common algorithms, using two other
+constructs that use the guarded commands.
+
+{{< /transclude-1 >}}
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-leroy06_formal_certif_compil_back_end" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">X. Leroy, “Formal certification of
+a compiler back-end or: Programming a compiler with a proof assistant,”
+in *Conference record of the 33rd ACM SIGPLAN-SIGACT symposium on
+principles of programming languages*, in POPL ’06. Charleston, South
+Carolina, USA: Association for Computing Machinery, 2006, pp. 42–54.
+doi: [10.1145/1111037.1111042].</span>
+
+</div>
+
+<div id="ref-winskel93" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">G. Winskel, *The formal semantics
+of programming languages: An introduction*. MIT press, 1993.</span>
+
+</div>
+
+</div>
+
+ [\#3a]: /zettel/3a
+ [10.1145/1111037.1111042]: https://doi.org/10.1145/1111037.1111042
diff --git a/content/zettel/1b3.md b/content/zettel/1b3.md
new file mode 100644
index 0000000..aeb07c8
--- /dev/null
+++ b/content/zettel/1b3.md
@@ -0,0 +1,34 @@
++++
+title = "Tail call conversion"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1b2"]
+forwardlinks = ["1b4"]
+zettelid = "1b3"
++++
+
+Each function is effectively converted to a module in Verilog, meaning
+the tail call will have to refer to the current module it is called in.
+
+In software, this is done by jumping to the start of the function with
+the arguments set to the new values for the new iteration. There is then
+normally a condition that is checked before the tail call is executed,
+which will return the final value back to the caller.
+
+The first intuition about how to convert these tail calls to hardware
+may be to somehow detect that a function contains a tail call and wire
+up the outside of the module correctly, so that it takes the outputs and
+passes them back to the module until the condition is met. However,
+this changes the structure of the module quite a lot, meaning it is
+difficult to prove anything about it. It is simpler to prove equivalence
+between the hardware and the software if the translation between the two
+is more direct.
+
+In hardware, the function becomes a module. In our case, each module
+contains a state machine with a state variable that controls what
+instruction will be executed next. This means a similar approach to the
+software translation, where the inputs to the module can just be set to
+the updated variables, and the state can be set back to the start of the
+module. The final end flag of the module will therefore only go high
+when the default condition of the function is met.
diff --git a/content/zettel/1b4.md b/content/zettel/1b4.md
new file mode 100644
index 0000000..fed01ce
--- /dev/null
+++ b/content/zettel/1b4.md
@@ -0,0 +1,16 @@
++++
+title = "Converting any recursive function"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1b3"]
+forwardlinks = ["1b5"]
+zettelid = "1b4"
++++
+
+The problem with synthesising arbitrary recursive functions is that they
+require a stack. The stack saves the state of all the functions that are
+live before the recursive function call happens, so that the function
+can continue normally afterwards. Saving the state on the stack is
+inexpensive in software normally, as memory is abundant, however, in
+hardware it is quite expensive and slow to save variables on the stack.
diff --git a/content/zettel/1b5.md b/content/zettel/1b5.md
new file mode 100644
index 0000000..c060e4a
--- /dev/null
+++ b/content/zettel/1b5.md
@@ -0,0 +1,17 @@
++++
+title = "Global State"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1b4"]
+forwardlinks = ["1b6"]
+zettelid = "1b5"
++++
+
+It is quite tricky to synthesise global state, as global variables are
+not a thing in Verilog. There are therefore two alternatives, either all
+the global variables have to be passed to all the modules, or memory has
+to be used to store the global state and it then has to be retrieved.
+However, to start out, if only one module is generated, and if module
+instantiations are never used, then global variables can just be
+supported by defining them in the main module.
diff --git a/content/zettel/1b6.md b/content/zettel/1b6.md
new file mode 100644
index 0000000..c3a73b8
--- /dev/null
+++ b/content/zettel/1b6.md
@@ -0,0 +1,18 @@
++++
+title = "Basic Blocks"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5c1", "1c8", "1b7", "1b5"]
+forwardlinks = ["1a1", "1c1", "1b7"]
+zettelid = "1b6"
++++
+
+Basic blocks are continuous sections of code, which do not contain any
+control flow. These can therefore be uniquely identified by a data-flow
+graph ([\#1a1]), as apart from their data dependencies, the instructions
+do not have to have a specific order, which allows them to be scheduled
+([\#1c1]).
+
+ [\#1a1]: /zettel/1a1
+ [\#1c1]: /zettel/1c1
diff --git a/content/zettel/1b7.md b/content/zettel/1b7.md
new file mode 100644
index 0000000..3ec13e7
--- /dev/null
+++ b/content/zettel/1b7.md
@@ -0,0 +1,22 @@
++++
+title = "Superblocks"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3e2", "1b6"]
+forwardlinks = ["1b6", "2b1", "1b8"]
+zettelid = "1b7"
++++
+
+Superblocks are a generalisation of basic blocks ([\#1b6]), where there
+can be multiple exits out of the basic block, but only one entry at the
+very top of the basic block. This allows for more analysis to increase
+instruction-level parallelism (ILP) as the analysis can be done over a
+superblock with more instructions.
+
+However, as there are no predicated instructions ([\#2b1]), a superblock
+is also a block of one control path through the program, as there can be
+no control flow in the superblock.
+
+ [\#1b6]: /zettel/1b6
+ [\#2b1]: /zettel/2b1
diff --git a/content/zettel/1b8.md b/content/zettel/1b8.md
new file mode 100644
index 0000000..ea81d1a
--- /dev/null
+++ b/content/zettel/1b8.md
@@ -0,0 +1,47 @@
++++
+title = "Hyperblocks"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g", "3c3b", "3a7d", "2b1e", "1c8", "1c6a2", "1b9c", "1b7"]
+forwardlinks = ["2b1", "1c8", "1b9"]
+zettelid = "1b8"
++++
+
+A hyperblock \[1\] is a generalisation on superblocks which can be used
+if there is predicated execution supported in the target processor (or
+in HLS where there is no restriction really). It can therefore represent
+any control-flow that does not contain back-edges, and can therefore
+represent strictly more blocks than a superblock.
+
+The benefit of this is that if predicated execution ([\#2b1]) is
+supported, one can get large blocks that can be optimised and scheduled,
+leading to a more optimised schedule. However, the problem with multiple
+control paths is that these have to be analysed to ensure that two
+instructions are independent, meaning there is no control dependency
+between them. This can be done by taking the predicates of both
+instructions, anding them together, and seeing if the formula can be
+reduced to `false`. If that is the case, then the instructions are
+independent and can be executed in parallel as there is no control
+dependency between them.
+
+These hyperblocks can be created using if-conversion ([\#1c8]).
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-mahlke92_effec_compil_suppor_predic_execut_using_hyper"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">S. A. Mahlke, D. C. Lin, W. Y.
+Chen, R. E. Hank, and R. A. Bringmann, “Effective compiler support for
+predicated execution using the hyperblock,” *SIGMICRO Newsl.*, vol. 23,
+no. 1–2, pp. 45–54, Dec. 1992, doi: [10.1145/144965.144998].</span>
+
+</div>
+
+</div>
+
+ [\#2b1]: /zettel/2b1
+ [\#1c8]: /zettel/1c8
+ [10.1145/144965.144998]: https://doi.org/10.1145/144965.144998
diff --git a/content/zettel/1b9.md b/content/zettel/1b9.md
new file mode 100644
index 0000000..75b76d9
--- /dev/null
+++ b/content/zettel/1b9.md
@@ -0,0 +1,9 @@
++++
+title = "Memory"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1b8"]
+forwardlinks = ["1b9a"]
+zettelid = "1b9"
++++
diff --git a/content/zettel/1b9a.md b/content/zettel/1b9a.md
new file mode 100644
index 0000000..7d2e8f5
--- /dev/null
+++ b/content/zettel/1b9a.md
@@ -0,0 +1,20 @@
++++
+title = "LegUp local memory support"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1b9"]
+forwardlinks = ["1b9b"]
+zettelid = "1b9a"
++++
+
+LegUp supports local memories in functions by declaring the RAMs once
+with their initial values. The main problem with this is that if the
+function is called multiple times, but is using the same RAM underneath
+to model it, it needs to be reset to the initial state whenever the
+function is called.
+
+Therefore only using the RAM initialisation to set the initial value is
+not enough, and it therefore has to be initialised every time the
+function is called. This is done in LegUp by copying the memory from
+another RAM over every time.
diff --git a/content/zettel/1b9b.md b/content/zettel/1b9b.md
new file mode 100644
index 0000000..f21916b
--- /dev/null
+++ b/content/zettel/1b9b.md
@@ -0,0 +1,11 @@
++++
+title = "Casting pointers to arrays"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1b9a"]
+forwardlinks = ["1b9c"]
+zettelid = "1b9b"
++++
+
+- HLS you want to keep the size of the array.
diff --git a/content/zettel/1b9c.md b/content/zettel/1b9c.md
new file mode 100644
index 0000000..869414c
--- /dev/null
+++ b/content/zettel/1b9c.md
@@ -0,0 +1,24 @@
++++
+title = "Negative Edge Triggered RAM"
+date = "2022-06-28"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1b9b"]
+forwardlinks = ["1c2", "1b8"]
+zettelid = "1b9c"
++++
+
+Currently Vericert triggers at the negative edge of an always block.
+This means that loads and stores take 2 and 1 clock cycle respectively,
+and simplifies the proof. It does mean though that only half the time is
+available for logic. Instead, it would be better to actually have 2 and
+3 clock cycles for stores and loads, especially when hyperblock
+scheduling ([\#1c2], [\#1b8]) is supported.
+
+I guess that negative edge triggered RAMs are supported in most
+synthesis tools, however, only insofar as them turning it into a
+positive edge triggered RAM and then halving the period.
+
+ [\#1c2]: /zettel/1c2
+ [\#1b8]: /zettel/1b8
diff --git a/content/zettel/1c.md b/content/zettel/1c.md
new file mode 100644
index 0000000..ed00a91
--- /dev/null
+++ b/content/zettel/1c.md
@@ -0,0 +1,51 @@
++++
+title = "Optimisations"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1", "1b"]
+forwardlinks = ["1c1", "1c2", "1c3", "1c4", "1c5", "1c7", "1c6", "1c8", "1d"]
+zettelid = "1c"
++++
+
+The survey by Nane et al. \[1\] goes over some of the optimisations that
+are present in high-level synthesis tools, and that are important to
+make them efficient and usable.
+
+Some of the optimisations are
+
+- scheduling ([\#1c1], [\#1c2], [\#1c3]),
+- operation chaining ([\#1c4]),
+- register allocation ([\#1c5]),
+- bitwidth analysis and optimisation,
+- memory space allocation,
+- loop optimisations: polyhedral analysis ([\#1c7]),
+- hardware resource library,
+- speculation and code motion: loop pipelining ([\#1c6]),
+- exploiting spatial parallelism, and
+- if-conversion ([\#1c8]).
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-nane16_survey_evaluat_fpga_high_level_synth_tools"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">R. Nane *et al.*, “A survey and
+evaluation of fpga high-level synthesis tools,” *IEEE Transactions on
+Computer-Aided Design of Integrated Circuits and Systems*, vol. 35, no.
+10, pp. 1591–1604, Oct. 2016, doi: [10.1109/TCAD.2015.2513673].</span>
+
+</div>
+
+</div>
+
+ [\#1c1]: /zettel/1c1
+ [\#1c2]: /zettel/1c2
+ [\#1c3]: /zettel/1c3
+ [\#1c4]: /zettel/1c4
+ [\#1c5]: /zettel/1c5
+ [\#1c7]: /zettel/1c7
+ [\#1c6]: /zettel/1c6
+ [\#1c8]: /zettel/1c8
+ [10.1109/TCAD.2015.2513673]: https://doi.org/10.1109/TCAD.2015.2513673
diff --git a/content/zettel/1c1.md b/content/zettel/1c1.md
new file mode 100644
index 0000000..8fbd05c
--- /dev/null
+++ b/content/zettel/1c1.md
@@ -0,0 +1,17 @@
++++
+title = "Scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3", "1c", "1b6"]
+forwardlinks = ["1c2", "1c3", "1c2d"]
+zettelid = "1c1"
++++
+
+- General static scheduling: ([\#1c2]),
+- General dynamic scheduling: ([\#1c3]),
+- Discussion about software vs hardware scheduling ([\#1c2d]).
+
+ [\#1c2]: /zettel/1c2
+ [\#1c3]: /zettel/1c3
+ [\#1c2d]: /zettel/1c2d
diff --git a/content/zettel/1c10.md b/content/zettel/1c10.md
new file mode 100644
index 0000000..1f67276
--- /dev/null
+++ b/content/zettel/1c10.md
@@ -0,0 +1,14 @@
++++
+title = "Abstract Interpretation of Hardware"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c9"]
+forwardlinks = []
+zettelid = "1c10"
++++
+
+It would be nice to be able to interpret hardware abstractly to compare
+it to code that generated it. However, this seems infeasible because at
+each iteration the whole hardware gets evaluated, and all the registers
+might change again.
diff --git a/content/zettel/1c2.md b/content/zettel/1c2.md
new file mode 100644
index 0000000..7218e41
--- /dev/null
+++ b/content/zettel/1c2.md
@@ -0,0 +1,18 @@
++++
+title = "Static Scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2g", "1c1", "1c", "1b9c"]
+forwardlinks = ["1c3", "1c2a"]
+zettelid = "1c2"
++++
+
+Scheduling is an important stage in high-level synthesis, as it is the
+main operation that parallelises the input behavioural description so
+that it can take advantage of spatial hardware. Static scheduling in
+particular performs analysis on the behavioural input to extract
+dependencies between constructs or instructions so that it can
+parallelise them as much as possible without changing the behaviour.
+This is contrary to dynamic scheduling, where no analysis is done, and
+tokens are used to automatically schedule all the instructions.
diff --git a/content/zettel/1c2a.md b/content/zettel/1c2a.md
new file mode 100644
index 0000000..7b17b7f
--- /dev/null
+++ b/content/zettel/1c2a.md
@@ -0,0 +1,17 @@
++++
+title = "Scheduling as an ILP"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2"]
+forwardlinks = ["1c2b", "1c2a1"]
+zettelid = "1c2a"
++++
+
+Scheduling can be expressed as an integer linear program (ILP) by
+setting constraints that have to be held and solving for the optimal
+solution. Constraints could be dependencies between different
+instructions and would therefore say that these instructions cannot run
+in parallel. The result of solving the ILP is therefore a mathematically
+correct schedule that adheres to the constraints that were used in the
+ILP.
diff --git a/content/zettel/1c2a1.md b/content/zettel/1c2a1.md
new file mode 100644
index 0000000..0f662c6
--- /dev/null
+++ b/content/zettel/1c2a1.md
@@ -0,0 +1,29 @@
++++
+title = "SDC Scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2a"]
+forwardlinks = ["1c2a2"]
+zettelid = "1c2a1"
++++
+
+System of difference constraints (SDC) is a way to express scheduling as
+an ILP \[1\]. There are various ways in which different constraints can
+be expressed in this system to perform scheduling.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-cong06_sdc" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">J. Cong and Z. Zhang, “An
+efficient and versatile scheduling algorithm based on SDC formulation,”
+in *2006 43rd ACM/IEEE design automation conference*, Jul. 2006, pp.
+433–438. doi: [10.1145/1146909.1147025].</span>
+
+</div>
+
+</div>
+
+ [10.1145/1146909.1147025]: https://doi.org/10.1145/1146909.1147025
diff --git a/content/zettel/1c2a2.md b/content/zettel/1c2a2.md
new file mode 100644
index 0000000..93e72a3
--- /dev/null
+++ b/content/zettel/1c2a2.md
@@ -0,0 +1,12 @@
++++
+title = "Scheduling constraints"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2a1"]
+forwardlinks = ["1c2a3", "1c2a2a"]
+zettelid = "1c2a2"
++++
+
+There are various scheduling constraints that have to be added to
+correctly schedule the code.
diff --git a/content/zettel/1c2a2a.md b/content/zettel/1c2a2a.md
new file mode 100644
index 0000000..1501c1e
--- /dev/null
+++ b/content/zettel/1c2a2a.md
@@ -0,0 +1,22 @@
++++
+title = "Dependency constraints"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2a2"]
+forwardlinks = ["1c2a2b"]
+zettelid = "1c2a2a"
++++
+
+Data dependency constraints
+: If there is a data dependency between two variables, then one cannot
+ be scheduled until the other has completed its execution.
+
+$$\forall (v_i, v_j) \in E_d, \mathit{sv}_{\mathit{end}} (v_i) -\mathit{sv}_{\mathit{beg}} (v_j) \le 0$$
+
+Control dependency constraint
+: Control dependencies are also set up, so that the instructions in
+ basic block $\mathit{bb}_j$ cannot be scheduled before instructions
+ in $\mathit{bb}_i$.
+
+$$\forall (\mathit{bb}_i, \mathit{bb}_j) \in E_c, \mathit{sv}_{\mathit{end}}(\mathit{ssnk} (\mathit{bb}_i)) - \mathit{sv}_{\mathit{beg}} (\mathit{ssrc}(\mathit{bb}_j)) \le 0$$
diff --git a/content/zettel/1c2a2b.md b/content/zettel/1c2a2b.md
new file mode 100644
index 0000000..b686c93
--- /dev/null
+++ b/content/zettel/1c2a2b.md
@@ -0,0 +1,28 @@
++++
+title = "Timing constraints"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2a2a"]
+forwardlinks = ["3c3", "1c2a2c"]
+zettelid = "1c2a2b"
++++
+
+Relative timing constraints
+
+: These are relative timing constraints for IO interaction, which is
+ probably not that needed for Vericert ([\#3c3]). For example, these
+ can be used to have a specific amount of cycles in between
+ operations.
+
+Latency constraint
+
+: This is used to specify the maximum latency for a subgraph in the
+ CDFG. Not sure when this would be useful in practice.
+
+Cycle time constraint
+: This is to limit the maximum combinational delay within a clock
+ cycle. This will basically implement operation chaining as long as
+ the operations take less time than the maximum combinational delay.
+
+ [\#3c3]: /zettel/3c3
diff --git a/content/zettel/1c2a2c.md b/content/zettel/1c2a2c.md
new file mode 100644
index 0000000..845004b
--- /dev/null
+++ b/content/zettel/1c2a2c.md
@@ -0,0 +1,23 @@
++++
+title = "Resource constraints"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2a2b"]
+forwardlinks = []
+zettelid = "1c2a2c"
++++
+
+Resource constraints
+: These are the constraints that lead to resource constraint
+ managements and the implementation of pipelined operations.
+
+The first important step is to derive a linear order for the operations,
+so that one gets a good estimation of the operations in between two
+operations. This can be defined by getting the ALAP scheduling and then
+the ASAP scheduling for tie breakers.
+
+We then go through the operations of type *res*, and count how many
+different operations are in between. If there are $c-1$ operations in
+between, where *c* is the maximum number of functional units for *res*,
+we can then schedule the operation to start *II* cycles later.
diff --git a/content/zettel/1c2a3.md b/content/zettel/1c2a3.md
new file mode 100644
index 0000000..0af2ab6
--- /dev/null
+++ b/content/zettel/1c2a3.md
@@ -0,0 +1,12 @@
++++
+title = "Solving system of difference constraints"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2a2"]
+forwardlinks = ["1c2a4"]
+zettelid = "1c2a3"
++++
+
+The main idea is that if there are no negative cycles, then the system
+of difference constraints is solvable.
diff --git a/content/zettel/1c2a4.md b/content/zettel/1c2a4.md
new file mode 100644
index 0000000..53a9615
--- /dev/null
+++ b/content/zettel/1c2a4.md
@@ -0,0 +1,14 @@
++++
+title = "Objective functions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2a3"]
+forwardlinks = []
+zettelid = "1c2a4"
++++
+
+Different objective functions can also be used to get different
+schedules. For example, if all of the variables are added up and
+minimised, this would result in an ASAP, whereas if they are maximised,
+this would result in an ALAP schedule.
diff --git a/content/zettel/1c2b.md b/content/zettel/1c2b.md
new file mode 100644
index 0000000..649f6fe
--- /dev/null
+++ b/content/zettel/1c2b.md
@@ -0,0 +1,38 @@
++++
+title = "List scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2f", "1c2a"]
+forwardlinks = ["1a1", "1c2c", "1c2b1"]
+zettelid = "1c2b"
++++
+
+List scheduling \[1\] is another example of scheduling which is more
+algorithmic than the ILP expression shown in ILP scheduling. However,
+this is much easier to implement, especially if the initial
+representation is a data-flow graph (DFG) ([\#1a1]).
+
+The DFG already encodes all the necessary information to schedule each
+assignment at the earliest possible time when its dependencies are met.
+This means that one can just iterate through the DFG and pick all the
+instructions that have their dependencies met, add them to the list of
+instructions that should be scheduled for this clock cycle and let the
+other instructions that depend on it know that it is met. Once all the
+nodes in the DFG have been processed, everything should have been
+scheduled properly.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-hauck10_recon" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">S. Hauck and A. DeHon,
+*Reconfigurable computing: The theory and practice of FPGA-based
+computation*. Elsevier, 2010.</span>
+
+</div>
+
+</div>
+
+ [\#1a1]: /zettel/1a1
diff --git a/content/zettel/1c2b1.md b/content/zettel/1c2b1.md
new file mode 100644
index 0000000..cb42985
--- /dev/null
+++ b/content/zettel/1c2b1.md
@@ -0,0 +1,28 @@
++++
+title = "Memory Aliasing"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2b"]
+forwardlinks = ["1c7", "1c2c"]
+zettelid = "1c2b1"
++++
+
+One problem with scheduling, and list scheduling in particular as it is
+a relatively simple scheduling model, is that loads and stores cannot be
+scheduled correctly. This is because dependencies cannot be calculated
+properly between loads and stores, so one cannot know if these were done
+in parallel or not.
+
+One solution is to use polyhedral analysis ([\#1c7]) with loads and
+stores that have some kind of induction variable, as is present in loop
+pipelining ([\#1c2c]).
+
+The problem in C is that there is no aliasing information about
+pointers, and they can therefore point to the same memory location. However,
+strict aliasing in GCC, for example, can allow for aliasing information
+about pointers of different types, as GCC assumes that these cannot
+point to the same data structure.
+
+ [\#1c7]: /zettel/1c7
+ [\#1c2c]: /zettel/1c2c
diff --git a/content/zettel/1c2c.md b/content/zettel/1c2c.md
new file mode 100644
index 0000000..408fdf5
--- /dev/null
+++ b/content/zettel/1c2c.md
@@ -0,0 +1,16 @@
++++
+title = "Pipelined scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c6", "1c2b1", "1c2b"]
+forwardlinks = ["1c2d"]
+zettelid = "1c2c"
++++
+
+Pipelined scheduling is similar to the more algorithmic list scheduling,
+but can also pipeline instructions that support it. One example is
+multiplication, which can be performed in multiple stages and can
+therefore accept another input while the other has moved on to the next
+stage. This further optimises the high-level synthesis code as more
+hardware will be in use at the same time.
diff --git a/content/zettel/1c2d.md b/content/zettel/1c2d.md
new file mode 100644
index 0000000..6ed9cb6
--- /dev/null
+++ b/content/zettel/1c2d.md
@@ -0,0 +1,35 @@
++++
+title = "Hardware Scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2c", "1c1"]
+forwardlinks = ["1c2e"]
+zettelid = "1c2d"
++++
+
+Hardware scheduling is a bit different to software scheduling, however,
+it is quite similar to the instruction scheduling in the KVX processor
+\[1\]. However, there are a few differences, for example, I think for the
+transformation from software to hardware, I really have to get rid of
+the notion that there is a program counter in hardware, because that is
+really not the case. Instead, we have a variable that keeps track of the
+current state, but technically we could have many of those and they
+could all be executing at the same time.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-six20_certif_effic_instr_sched" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">C. Six, S. Boulmé, and D.
+Monniaux, “Certified and efficient instruction scheduling: Application
+to interlocked VLIW processors,” *Proc. ACM Program. Lang.*, vol. 4, no.
+OOPSLA, Nov. 2020, doi: [10.1145/3428197].</span>
+
+</div>
+
+</div>
+
+ [10.1145/3428197]: https://doi.org/10.1145/3428197
diff --git a/content/zettel/1c2e.md b/content/zettel/1c2e.md
new file mode 100644
index 0000000..10c178b
--- /dev/null
+++ b/content/zettel/1c2e.md
@@ -0,0 +1,28 @@
++++
+title = "Iterative Modulo Scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2g", "1c2d"]
+forwardlinks = ["1c2f"]
+zettelid = "1c2e"
++++
+
+Iterative modulo scheduling \[1\] is an algorithm that can be used to
+schedule loops as optimally as possible, without being intractable. The
+other problem that is faced is that the loop bounds are often only known
+at run time. This means that the loop cannot be completely unrolled.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-rau96_iterat_modul_sched" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">B. R. Rau, “Iterative modulo
+scheduling,” *International Journal of Parallel Programming*, vol. 24,
+no. 1, pp. 3–64, Feb. 1996, Available:
+<https://doi.org/10.1007/BF03356742></span>
+
+</div>
+
+</div>
diff --git a/content/zettel/1c2f.md b/content/zettel/1c2f.md
new file mode 100644
index 0000000..098273d
--- /dev/null
+++ b/content/zettel/1c2f.md
@@ -0,0 +1,55 @@
++++
+title = "Trace scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2h", "1c2e"]
+forwardlinks = ["1c2b", "1c2g"]
+zettelid = "1c2f"
++++
+
+Trace scheduling \[1\], \[2\], is a technique used to do global
+scheduling of the code. This is different to list scheduling ([\#1c2b]),
+where the scheduling is only done for one basic block.
+
+The way this is done is by looking through the most common trace through
+a program and then regarding that path as not containing any control
+flow. Normal scheduling techniques can then be applied on this path,
+which is now straight-line code, such as placing instructions into
+larger instruction words, like with VLIW processors.
+
+However, to counteract this, instructions need to also be placed into
+other possible execution paths apart from the trace, as the above
+scheduling might have gotten rid of instructions and changed the
+behaviour of the program.
+
+After the correctness has been restored, another trace is taken and the
+same operation is performed.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-fisher81_trace_sched" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">J. A. Fisher, “Trace scheduling: A
+technique for global microcode compaction,” *IEEE Transactions on
+Computers*, vol. C–30, no. 7, pp. 478–490, 1981, doi:
+[10.1109/TC.1981.1675827].</span>
+
+</div>
+
+<div id="ref-colwell88_vliw" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">R. P. Colwell, R. P. Nix, J. J.
+O’Donnell, D. B. Papworth, and P. K. Rodman, “A VLIW architecture for a
+trace scheduling compiler,” *IEEE Transactions on Computers*, vol. 37,
+no. 8, pp. 967–979, 1988, doi: [10.1109/12.2247].</span>
+
+</div>
+
+</div>
+
+ [\#1c2b]: /zettel/1c2b
+ [10.1109/TC.1981.1675827]: https://doi.org/10.1109/TC.1981.1675827
+ [10.1109/12.2247]: https://doi.org/10.1109/12.2247
diff --git a/content/zettel/1c2g.md b/content/zettel/1c2g.md
new file mode 100644
index 0000000..2152f51
--- /dev/null
+++ b/content/zettel/1c2g.md
@@ -0,0 +1,56 @@
++++
+title = "Soft scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2f"]
+forwardlinks = ["1c2", "1c5", "1c2e", "1c2h"]
+zettelid = "1c2g"
++++
+
+Soft scheduling \[1\] is when scheduling optimisations are performed
+before the actual scheduling, to be able to better schedule the code.
+These can include register pressure optimisations such as shown in
+Beidas et al. \[1\], which can improve scheduling ([\#1c2]) and register
+allocation ([\#1c5]) but also maybe modulo scheduling ([\#1c2e]) without
+having to really place the instructions into proper clock cycles.
+
+This is contrary to how normal HLS tools perform their optimisations,
+because they normally do the scheduling in one step, so that all the
+information is available at once. However, I do not think that this
+works that well, because with too much information one has to analyse
+too much to be able to effectively perform each optimisation correctly.
+Instead, if each pass is separated, they can also be improved
+separately, instead of having to change one large scheduling algorithm.
+However, SDC Scheduling \[2\] does seem to be an interesting idea as it
+provides a general framework to express constraints in.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-beidas11_regis" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">R. Beidas, W. S. Mong, and J. Zhu,
+“Register pressure aware scheduling for high level synthesis,” in *16th
+asia and south pacific design automation conference (ASP-DAC 2011)*,
+Jan. 2011, pp. 461–466. doi: [10.1109/ASPDAC.2011.5722234].</span>
+
+</div>
+
+<div id="ref-cong06_sdc" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">J. Cong and Z. Zhang, “An
+efficient and versatile scheduling algorithm based on SDC formulation,”
+in *2006 43rd ACM/IEEE design automation conference*, Jul. 2006, pp.
+433–438. doi: [10.1145/1146909.1147025].</span>
+
+</div>
+
+</div>
+
+ [\#1c2]: /zettel/1c2
+ [\#1c5]: /zettel/1c5
+ [\#1c2e]: /zettel/1c2e
+ [10.1109/ASPDAC.2011.5722234]: https://doi.org/10.1109/ASPDAC.2011.5722234
+ [10.1145/1146909.1147025]: https://doi.org/10.1145/1146909.1147025
diff --git a/content/zettel/1c2h.md b/content/zettel/1c2h.md
new file mode 100644
index 0000000..8c821e1
--- /dev/null
+++ b/content/zettel/1c2h.md
@@ -0,0 +1,37 @@
++++
+title = "Branch Prediction for Scheduling"
+date = "2022-05-11"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1d1", "1c2g"]
+forwardlinks = ["1c2f", "1c8", "2b1d1", "1c2h1"]
+zettelid = "1c2h"
++++
+
+Trace scheduling ([\#1c2f]) especially needs some heuristics to get the
+best performance out of your code. The best heuristics will be from
+profiling the binary, however, there are also some good static
+indicators for which branch is most likely to be taken. The following
+heuristics are taken from \[1\]. This is also tied to how if-conversion
+([\#1c8], [\#2b1d1]) should handle these heuristics.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-ball93_branc_predic_free" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">T. Ball and J. R. Larus, “Branch
+prediction for free,” in *Proceedings of the ACM SIGPLAN 1993 conference
+on programming language design and implementation*, in PLDI ’93. New
+York, NY, USA: Association for Computing Machinery, 1993, pp. 300–313.
+doi: [10.1145/155090.155119].</span>
+
+</div>
+
+</div>
+
+ [\#1c2f]: /zettel/1c2f
+ [\#1c8]: /zettel/1c8
+ [\#2b1d1]: /zettel/2b1d1
+ [10.1145/155090.155119]: https://doi.org/10.1145/155090.155119
diff --git a/content/zettel/1c2h1.md b/content/zettel/1c2h1.md
new file mode 100644
index 0000000..bb60e21
--- /dev/null
+++ b/content/zettel/1c2h1.md
@@ -0,0 +1,18 @@
++++
+title = "Loop Heuristic"
+date = "2022-05-11"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2h"]
+forwardlinks = ["1c2h2"]
+zettelid = "1c2h1"
++++
+
+> The successor does not postdominate the branch and is either a loop
+> head or a loop preheader (i.e., passes control unconditionally to a
+> loop head which it dominates). If the heuristic applies, predict the
+> successor *with* the property.
+
+This property says that if one is at a loop head, then one should
+predict that one takes the back-edge and does not exit the loop.
diff --git a/content/zettel/1c2h2.md b/content/zettel/1c2h2.md
new file mode 100644
index 0000000..d1c0abc
--- /dev/null
+++ b/content/zettel/1c2h2.md
@@ -0,0 +1,18 @@
++++
+title = "Call Heuristic"
+date = "2022-05-11"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2h1"]
+forwardlinks = ["1c2h3"]
+zettelid = "1c2h2"
++++
+
+> The successor block contains a call or unconditionally passes control
+> to a block with a call that it dominates, and the successor block does
+> not postdominate the branch. If the heuristic applies, predict the
+> successor *without* the property.
+
+This property states that one should not predict a function call, and
+should try and avoid that if possible.
diff --git a/content/zettel/1c2h3.md b/content/zettel/1c2h3.md
new file mode 100644
index 0000000..5e9df7b
--- /dev/null
+++ b/content/zettel/1c2h3.md
@@ -0,0 +1,17 @@
++++
+title = "Return Heuristic"
+date = "2022-05-11"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2h2"]
+forwardlinks = ["1c2h4"]
+zettelid = "1c2h3"
++++
+
+> The successor block contains a return or unconditionally passes
+> control to a block that contains a return. If the heuristic applies,
+> predict the successor without the property.
+
+In addition to that, this property says that one should avoid predicting
+a return instruction.
diff --git a/content/zettel/1c2h4.md b/content/zettel/1c2h4.md
new file mode 100644
index 0000000..18a624c
--- /dev/null
+++ b/content/zettel/1c2h4.md
@@ -0,0 +1,18 @@
++++
+title = "Guard Heuristic"
+date = "2022-05-11"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2h3"]
+forwardlinks = ["1c2h5"]
+zettelid = "1c2h4"
++++
+
+> Register *r* is an operand of the branch instruction, register *r* is
+> used in the successor block before it is defined, and the successor
+> block does not postdominate the branch. If the heuristic applies,
+> predict the successor *with* the property.
+
+This property predicts that one will not take the branch that contains a
+guard condition, and instead will follow along in the code.
diff --git a/content/zettel/1c2h5.md b/content/zettel/1c2h5.md
new file mode 100644
index 0000000..ff2fa05
--- /dev/null
+++ b/content/zettel/1c2h5.md
@@ -0,0 +1,18 @@
++++
+title = "Store Heuristic"
+date = "2022-05-11"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2h4"]
+forwardlinks = ["1c2h6"]
+zettelid = "1c2h5"
++++
+
+> The successor block contains a store instruction and does not
+> postdominate the branch. If the heuristic applies, predict the
+> successor *without* the property.
+
+This says that one should not predict that one will perform a store.
+This is in general not such a great heuristic, but apparently it does
+work well for floating point intensive applications.
diff --git a/content/zettel/1c2h6.md b/content/zettel/1c2h6.md
new file mode 100644
index 0000000..e60689c
--- /dev/null
+++ b/content/zettel/1c2h6.md
@@ -0,0 +1,18 @@
++++
+title = "Order of Heuristics"
+date = "2022-05-11"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2h5"]
+forwardlinks = []
+zettelid = "1c2h6"
++++
+
+The following was the order of the heuristics that were applied and
+tested:
+
+`Point -> Call -> Opcode -> Return -> Store -> Loop -> Guard`
+
+To use this order, every one of these properties is tried until one
+applies.
diff --git a/content/zettel/1c3.md b/content/zettel/1c3.md
new file mode 100644
index 0000000..5436bf5
--- /dev/null
+++ b/content/zettel/1c3.md
@@ -0,0 +1,56 @@
++++
+title = "Dynamic Scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c2", "1c1", "1c"]
+forwardlinks = ["1a2", "3c3", "1c4", "1c3a"]
+zettelid = "1c3"
++++
+
+Dynamic scheduling \[1\] is a really interesting way of generating
+circuits, because one does not have to perform as much static analysis,
+and one does not have to really place operations into specific clock
+cycles.
+
+Dynamic scheduling can be modelled using petri-nets ([\#1a2]), where
+only a few basic components are enough to model any computation which is
+scheduled dynamically, meaning the actual execution times of each
+section are not important to the correctness.
+
+In terms of proving scheduling correct ([\#3c3]), it might actually be
+simpler with dynamic scheduling, assuming that each of the components
+are assumed to be correct. However, if the components need to be proven
+correct as well, then it might be more difficult, as it is not quite
+clear yet if proving components in the Verilog semantics \[2\] is
+feasible, or if a separate translation is needed.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-carmona09_elast_circuit" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">J. Carmona, J. Cortadella, M.
+Kishinevsky, and A. Taubin, “Elastic circuits,” *IEEE Transactions on
+Computer-Aided Design of Integrated Circuits and Systems*, vol. 28, no.
+10, pp. 1437–1455, 2009.</span>
+
+</div>
+
+<div id="ref-loeoew19_proof_trans_veril_devel_hol" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">A. Lööw and M. O. Myreen, “A
+proof-producing translator for verilog development in HOL,” in
+*Proceedings of the 7th international workshop on formal methods in
+software engineering*, in FormaliSE ’19. Montreal, Quebec, Canada: IEEE
+Press, 2019, pp. 99–108. doi: [10.1109/FormaliSE.2019.00020].</span>
+
+</div>
+
+</div>
+
+ [\#1a2]: /zettel/1a2
+ [\#3c3]: /zettel/3c3
+ [10.1109/FormaliSE.2019.00020]: https://doi.org/10.1109/FormaliSE.2019.00020
diff --git a/content/zettel/1c3a.md b/content/zettel/1c3a.md
new file mode 100644
index 0000000..0ae84cf
--- /dev/null
+++ b/content/zettel/1c3a.md
@@ -0,0 +1,34 @@
++++
+title = "Dynamic Scheduling with Static Scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c3"]
+forwardlinks = ["1c3b"]
+zettelid = "1c3a"
++++
+
+Combining static and dynamic scheduling is really useful to get the best
+benefits from both \[1\]. This is mainly because dynamic scheduling has
+better performance when memory access patterns are strange and
+unpredictable, however, static scheduling is much more efficient in
+terms of performance and area when the access patterns are predictable.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-cheng20_combin_dynam_static_sched_high_level_synth"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">J. Cheng, L. Josipovic, G. A.
+Constantinides, P. Ienne, and J. Wickerson, “Combining dynamic & static
+scheduling in high-level synthesis,” in *The 2020 ACM/SIGDA
+international symposium on field-programmable gate arrays*, in FPGA ’20.
+Seaside, CA, USA: Association for Computing Machinery, 2020, pp.
+288–298. doi: [10.1145/3373087.3375297].</span>
+
+</div>
+
+</div>
+
+ [10.1145/3373087.3375297]: https://doi.org/10.1145/3373087.3375297
diff --git a/content/zettel/1c3b.md b/content/zettel/1c3b.md
new file mode 100644
index 0000000..4963389
--- /dev/null
+++ b/content/zettel/1c3b.md
@@ -0,0 +1,14 @@
++++
+title = "Dynamic Scheduling with GSA"
+date = "2022-10-04"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c3a"]
+forwardlinks = ["2e1b"]
+zettelid = "1c3b"
++++
+
+GSA ([\#2e1b])
+
+ [\#2e1b]: /zettel/2e1b
diff --git a/content/zettel/1c4.md b/content/zettel/1c4.md
new file mode 100644
index 0000000..31241d2
--- /dev/null
+++ b/content/zettel/1c4.md
@@ -0,0 +1,22 @@
++++
+title = "Operation Chaining"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c3", "1c"]
+forwardlinks = ["1c5", "1c4a"]
+zettelid = "1c4"
++++
+
+Data-flow dependencies that can be discovered using a
+<span class="spurious-link" target="*Data-flow Graph">*data-flow
+graph*</span> are useful for high-level synthesis optimisations, as
+instructions that do not depend on each other can be scheduled into the
+same clock cycle. However, in addition to that, even if there is a data
+dependency between two operations, but the operations can both be
+completed faster than the time required for the clock cycle, then these
+can be scheduled into the same clock cycle as well.
+
+This allows for fewer registers in the hardware that need to store
+intermediate results, and can also lead to fewer states and increase the
+throughput of the circuit.
diff --git a/content/zettel/1c4a.md b/content/zettel/1c4a.md
new file mode 100644
index 0000000..afc2a6a
--- /dev/null
+++ b/content/zettel/1c4a.md
@@ -0,0 +1,19 @@
++++
+title = "Problem with chaining in general"
+date = "2022-05-17"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c4"]
+forwardlinks = ["1c4b"]
+zettelid = "1c4a"
++++
+
+The main problem with operation chaining in the current implementation
+of scheduling is that intermediate instructions limit the use of
+multiply and add (MAC) operations that are present in DSPs. This is
+because the synthesis tool cannot know if the register is used again,
+and therefore can't use a MAC operation directly, but instead has to do
+a multiply, store the result in a register, and then do an add, which
+will probably be in logic. This introduces a long delay and therefore
+heavily slows down the clock.
diff --git a/content/zettel/1c4b.md b/content/zettel/1c4b.md
new file mode 100644
index 0000000..2c89511
--- /dev/null
+++ b/content/zettel/1c4b.md
@@ -0,0 +1,15 @@
++++
+title = "Solution in Vericert"
+date = "2022-05-17"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c4a"]
+forwardlinks = ["1c4b1"]
+zettelid = "1c4b"
++++
+
+One possible solution to this in Vericert is to add the MAC operation as
+a valid operation in the Verilog instruction. This means that one can
+then have a peephole optimisation pass (maybe as part of the scheduling
+pass), which will then introduce the MAC operations into the graph.
diff --git a/content/zettel/1c4b1.md b/content/zettel/1c4b1.md
new file mode 100644
index 0000000..63fddf5
--- /dev/null
+++ b/content/zettel/1c4b1.md
@@ -0,0 +1,20 @@
++++
+title = "Stability of MAC optimisation in Vericert"
+date = "2022-11-09"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c4b"]
+forwardlinks = ["5b3"]
+zettelid = "1c4b1"
++++
+
+The MAC optimisation ([\#5b3]) for a fused multiply-add operation is not
+very stable, because if the intermediate register is used anywhere else,
+then it will not be able to fuse the two instructions. This means that
+from a local perspective it looks quite random whether or not a series
+of multipliers and adds are actually fused. The decision needs a global
+property of whether that register is used anywhere, which means that it
+is quite unreliable if it will fire or not.
+
+ [\#5b3]: /zettel/5b3
diff --git a/content/zettel/1c5.md b/content/zettel/1c5.md
new file mode 100644
index 0000000..9a94646
--- /dev/null
+++ b/content/zettel/1c5.md
@@ -0,0 +1,17 @@
++++
+title = "Register Allocation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c4", "1c2g", "1c"]
+forwardlinks = ["1c6", "1c5a"]
+zettelid = "1c5"
++++
+
+Register allocation is an optimisation that is vital to fit the program
+onto a CPU, by allocating the registers that will be used for each
+virtual register. This was mostly taken from Wikipedia
+
+[^1]
+
+[^1]: <https://en.wikipedia.org/wiki/Register_allocation>
diff --git a/content/zettel/1c5a.md b/content/zettel/1c5a.md
new file mode 100644
index 0000000..112bc3d
--- /dev/null
+++ b/content/zettel/1c5a.md
@@ -0,0 +1,30 @@
++++
+title = "Components of Register Allocation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c5"]
+forwardlinks = ["1c5b"]
+zettelid = "1c5a"
++++
+
+Move insertion
+
+: This consists of increasing the number of moves so that the variable
+ can live in various registers over its lifetime. This occurs in the
+ split live range approach.
+
+Spilling
+
+: This consists of storing a variable to memory because there aren't
+ enough registers available.
+
+Assignment
+
+: This consists of assigning a register to a variable.
+
+Coalescing
+: This consists of limiting the number of move instructions, thereby
+ reducing the total number of instructions. This can be done by
+ identifying variables that are live over various blocks and storing
+ it constantly in one variable.
diff --git a/content/zettel/1c5b.md b/content/zettel/1c5b.md
new file mode 100644
index 0000000..e4ff827
--- /dev/null
+++ b/content/zettel/1c5b.md
@@ -0,0 +1,25 @@
++++
+title = "Common Problems"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c5a"]
+forwardlinks = ["1c5c"]
+zettelid = "1c5b"
++++
+
+Aliasing
+
+: In some architectures, assigning a value to one register can affect
+    the value of other registers.
+
+Pre-coloring
+
+: This is the problem that forces variables to be assigned to some
+ specific registers. For example, calling conventions may force a
+ register to be assigned in a specific range of registers.
+
+NP-Problem
+: Register allocation is an NP-complete problem, however there are
+ quite efficient ways to actually perform it by reducing it to graph
+ coloring.
diff --git a/content/zettel/1c5c.md b/content/zettel/1c5c.md
new file mode 100644
index 0000000..de4f6ee
--- /dev/null
+++ b/content/zettel/1c5c.md
@@ -0,0 +1,25 @@
++++
+title = "Graph-Colouring Allocation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c5b"]
+forwardlinks = ["1c5d"]
+zettelid = "1c5c"
++++
+
+The following are the main phases of graph-colouring:
+
+1. **Renumber**: discover the live range information in the source
+ program.
+2. **Build**: build the interference graph.
+3. **Coalesce**: merge the live ranges of non-interfering variables
+ related by copy instructions.
+4. **Spill cost**: compute the spill cost of each variable. This
+ assesses the impact of mapping a variable to memory on the speed of
+ the final program.
+5. **Simplify**: construct an ordering of the nodes in the interference
+ graph.
+6. **Spill code**: insert spill instructions, i.e. loads and stores to
+ commute values between registers and memory.
+7. **Select**: assign a register to each variable.
diff --git a/content/zettel/1c5d.md b/content/zettel/1c5d.md
new file mode 100644
index 0000000..c69012e
--- /dev/null
+++ b/content/zettel/1c5d.md
@@ -0,0 +1,19 @@
++++
+title = "Before or after scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c5c"]
+forwardlinks = ["1c5e"]
+zettelid = "1c5d"
++++
+
+Register allocation is a tricky optimisation to get right, because there
+are a lot of trade-offs on where to place the optimisation. Especially
+in high-level synthesis, it's tricky to know where to place these, as
+one can target really any architecture.
+
+In high-level synthesis particularly, it seems like it would be better
+to perform the optimisation after scheduling, as one would want as much
+freedom for scheduling as possible. This is to get the largest possible
+instruction level parallelism.
diff --git a/content/zettel/1c5e.md b/content/zettel/1c5e.md
new file mode 100644
index 0000000..17ff5fd
--- /dev/null
+++ b/content/zettel/1c5e.md
@@ -0,0 +1,21 @@
++++
+title = "Rotating Register file"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c6a2", "1c5d"]
+forwardlinks = ["3c3a"]
+zettelid = "1c5e"
++++
+
+A rotating register file is a set of registers that are basically made
+up of FIFOs where all the values can be read again. This can be very
+useful for a number of reasons, for example, for expressing SSA over
+loops (where you are technically modifying registers every loop
+iteration), so that one is never modifying previous values and these
+could technically be read again.
+
+It would therefore be interesting to implement these rotating register
+files in RTLBlock and RTLPar ([\#3c3a]).
+
+ [\#3c3a]: /zettel/3c3a
diff --git a/content/zettel/1c6.md b/content/zettel/1c6.md
new file mode 100644
index 0000000..c1f835d
--- /dev/null
+++ b/content/zettel/1c6.md
@@ -0,0 +1,18 @@
++++
+title = "Loop pipelining"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c1", "3a8", "2e1f", "2b2", "1c5", "1c"]
+forwardlinks = ["3c1", "1c7", "1c6a"]
+zettelid = "1c6"
++++
+
+Loop pipelining is a great optimisation for VLIW processors that have
+parallel constructs. The main idea is to identify loops where reordering
+the instructions would improve the instruction parallelism inside of the
+loops.
+
+Notes on verifying loop pipelining can be found in [\#3c1].
+
+ [\#3c1]: /zettel/3c1
diff --git a/content/zettel/1c6a.md b/content/zettel/1c6a.md
new file mode 100644
index 0000000..2f7dd71
--- /dev/null
+++ b/content/zettel/1c6a.md
@@ -0,0 +1,54 @@
++++
+title = "Difference between hardware and software loop scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c1", "1c6"]
+forwardlinks = ["1c6b", "1c6a1"]
+zettelid = "1c6a"
++++
+
+Loop scheduling is another name for loop pipelining, and maybe more
+accurate. There are two main types of this, hardware loop scheduling as
+in LegUp \[1\] and software loop scheduling as in Rau et al. \[2\].
+
+Even though both of these use the same algorithm to perform the loop
+scheduling, the main differences between the two are the following:
+
+Constraints
+: The constraints for both methods will be quite different, because
+ software loop scheduling does not have as much freedom as hardware
+ loop scheduling, as the instructions are still quite linear and
+ executed one after another.
+
+Final representation
+: The output of the loop scheduling algorithm is the reservation table
+ which places instructions into specific clock cycles. In hardware
+    scheduling, this can be directly translated into a hardware
+    pipeline.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-canis14_modul_sdc" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">A. Canis, S. D. Brown, and J. H.
+Anderson, “Modulo SDC scheduling with recurrence minimization in
+high-level synthesis,” in *2014 24th international conference on field
+programmable logic and applications (FPL)*, Sep. 2014, pp. 1–8. doi:
+[10.1109/FPL.2014.6927490].</span>
+
+</div>
+
+<div id="ref-rau96_iterat_modul_sched" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">B. R. Rau, “Iterative modulo
+scheduling,” *International Journal of Parallel Programming*, vol. 24,
+no. 1, pp. 3–64, Feb. 1996, Available:
+<https://doi.org/10.1007/BF03356742></span>
+
+</div>
+
+</div>
+
+ [10.1109/FPL.2014.6927490]: https://doi.org/10.1109/FPL.2014.6927490
diff --git a/content/zettel/1c6a1.md b/content/zettel/1c6a1.md
new file mode 100644
index 0000000..326712f
--- /dev/null
+++ b/content/zettel/1c6a1.md
@@ -0,0 +1,21 @@
++++
+title = "Implementation of a Hardware Pipeline"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c6a"]
+forwardlinks = ["1c6a2"]
+zettelid = "1c6a1"
++++
+
+A hardware pipeline can be directly inferred from the schedule that is
+output by the modulo scheduling algorithm. This can be done by taking
+the amounts of states, and generating a state machine inside of that
+pipeline that will go from one state to the other. A controller can then
+be added to handle the loop iterator and feed the pipeline with the
+correct II.
+
+However, this can also be handled inside of the pipeline itself, by
+using the `ready` and `valid` signals. When the pipeline is in a state
+that it can't accept another input in, then the `ready` signal is set to
+0.
diff --git a/content/zettel/1c6a2.md b/content/zettel/1c6a2.md
new file mode 100644
index 0000000..18e3169
--- /dev/null
+++ b/content/zettel/1c6a2.md
@@ -0,0 +1,25 @@
++++
+title = "Resource usage differences"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c6a1"]
+forwardlinks = ["1b8", "1c5e"]
+zettelid = "1c6a2"
++++
+
+In terms of resources it seems like by default, even though the final
+pipelines will be the same between software and hardware pipelining, the
+resources of software pipelining will be significantly higher than
+hardware pipelining. This is mainly due to the fact that many hacks are
+needed to support the arbitrary pipelines. The first is duplication of
+the loop body due to register allocation, and the second is generating
+the epilogue and the prologue to start the pipeline.
+
+However, these can both be eliminated by using predicated instructions
+([\#1b8]) and rotating registers ([\#1c5e]). You could then just
+implement the body directly using the rotating register files, and
+control the execution of the loop by using the predicated instructions.
+
+ [\#1b8]: /zettel/1b8
+ [\#1c5e]: /zettel/1c5e
diff --git a/content/zettel/1c6b.md b/content/zettel/1c6b.md
new file mode 100644
index 0000000..a20fe01
--- /dev/null
+++ b/content/zettel/1c6b.md
@@ -0,0 +1,23 @@
++++
+title = "Software pipelining needs MVE"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c2", "1c6a"]
+forwardlinks = ["2b2", "1c6c"]
+zettelid = "1c6b"
++++
+
+Modulo variable expansion (MVE) is needed because sometimes the
+lifetimes of variables exceed the II, meaning they get overwritten after
+II clock cycles even though they are needed afterwards. This is because
+the next iteration of the loop is already executing and overwriting the
+previous value of the register that was stored there. Hardware support
+for rotating registers ([\#2b2]) could prevent this from happening,
+otherwise, modulo variable expansion needs to be added to have the
+correct lifetimes for each variable. For example, if the II is 3 and the
+maximum lifetime of a variable is 12 cycles, then the loop needs to be
+unrolled 4 times, as after 12 cycles the initial register can finally be
+reused. Otherwise, a new register needs to be used for each value.
+
+ [\#2b2]: /zettel/2b2
diff --git a/content/zettel/1c6c.md b/content/zettel/1c6c.md
new file mode 100644
index 0000000..de60867
--- /dev/null
+++ b/content/zettel/1c6c.md
@@ -0,0 +1,18 @@
++++
+title = "Kernel only loop scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c2", "1c6b"]
+forwardlinks = ["2b1", "1c6d"]
+zettelid = "1c6c"
++++
+
+Using predicated execution ([\#2b1]), one does not have to create a
+prologue or an epilogue when trying to execute a loop, and apparently
+also benefits from a large performance increase by using predicated
+execution. This is because each instruction in the kernel can be
+predicated in such a way that the pipeline gets filled correctly, and
+then in such a way so that it gets drained correctly.
+
+ [\#2b1]: /zettel/2b1
diff --git a/content/zettel/1c6d.md b/content/zettel/1c6d.md
new file mode 100644
index 0000000..32ff6f7
--- /dev/null
+++ b/content/zettel/1c6d.md
@@ -0,0 +1,49 @@
++++
+title = "Speculative loop pipelining using GSA"
+date = "2022-04-29"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c6c"]
+forwardlinks = ["2e1b", "1c6e"]
+zettelid = "1c6d"
++++
+
+This is work by Steven Derrien from Irisa Rennes \[1\].
+
+Using GSA ([\#2e1b]), one can generate loop pipelines with speculative
+execution support that can then be passed to a high-level synthesis tool
+(as this is a source-to-source translation). This is done by generating
+the GSA representation then using the predicates in the γ-functions to
+generate an FSM controlling the selection of the values for the
+γ-functions and control whether a rollback of the state is necessary, as
+well as generating an entry block and a commit block at the end to
+choose a correct value that is not speculated.
+
+The main idea is to perform a source-to-source translation which can
+still take advantage of static scheduling tools, therefore, this dynamic
+scheduling is introduced into the source code, in addition to the FSM
+that actually picks the state that should be gone into (if one has to
+perform a rollback or if everything is OK). Then, the actual memory
+accesses are transformed into accesses that the static scheduling tool
+can understand well, as affine expressions so that the distances are
+clear.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-derrien20_towar_specul_loop_pipel_high_level_synth"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">S. Derrien, T. Marty, S. Rokicki,
+and T. Yuki, “Toward speculative loop pipelining for high-level
+synthesis,” *IEEE Transactions on Computer-Aided Design of Integrated
+Circuits and Systems*, vol. 39, no. 11, pp. 4229–4239, Nov. 2020, doi:
+[10.1109/tcad.2020.3012866].</span>
+
+</div>
+
+</div>
+
+ [\#2e1b]: /zettel/2e1b
+ [10.1109/tcad.2020.3012866]: https://doi.org/10.1109/tcad.2020.3012866
diff --git a/content/zettel/1c6e.md b/content/zettel/1c6e.md
new file mode 100644
index 0000000..ac07ade
--- /dev/null
+++ b/content/zettel/1c6e.md
@@ -0,0 +1,24 @@
++++
+title = "Speculation does not have a cost"
+date = "2022-04-29"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c6d"]
+forwardlinks = []
+zettelid = "1c6e"
++++
+
+It is interesting to note that speculating on an instruction does not
+really incur a cost in itself. The idea is that if one is always wrong
+with the speculation, one really only reverts back to the non-pipelined
+instructions. So in the worst case one is only sequential. However, if
+one speculates, then it is highly unlikely that one is always wrong, and
+one will therefore always benefit from it.
+
+Therefore, in the speculative loop pipelining, it does not really matter
+in the end which instructions are picked to speculate on, and often the
+most efficient will be the shortest path in the pipeline. For example,
+Steven showed that even when one speculates on a binary search algorithm
+to two iterations (where one then has a 25% chance of being right), one
+still benefits from speculating going into one of the directions.
diff --git a/content/zettel/1c7.md b/content/zettel/1c7.md
new file mode 100644
index 0000000..bd5d8c2
--- /dev/null
+++ b/content/zettel/1c7.md
@@ -0,0 +1,12 @@
++++
+title = "Polyhedral analysis"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c6", "1c2b1", "1c"]
+forwardlinks = ["1c8", "1c7a"]
+zettelid = "1c7"
++++
+
+Polyhedral analysis is the analysis of loops and memory indexes to prove
+that memory reads and writes are independent of each other.
diff --git a/content/zettel/1c7a.md b/content/zettel/1c7a.md
new file mode 100644
index 0000000..baa2015
--- /dev/null
+++ b/content/zettel/1c7a.md
@@ -0,0 +1,9 @@
++++
+title = "Steps in polyhedral analysis"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c7"]
+forwardlinks = []
+zettelid = "1c7a"
++++
diff --git a/content/zettel/1c8.md b/content/zettel/1c8.md
new file mode 100644
index 0000000..9997554
--- /dev/null
+++ b/content/zettel/1c8.md
@@ -0,0 +1,29 @@
++++
+title = "If-conversion"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1e", "1c7", "1c2h", "1c", "1b8"]
+forwardlinks = ["1b6", "1b8", "1c9"]
+zettelid = "1c8"
++++
+
+If-conversion is an optimisation which is very common in HLS tools. It
+transforms basic blocks ([\#1b6]) that do not contain any loops, into
+single hyperblocks ([\#1b8]), which use predicated instructions instead.
+However, to support such an optimisation in a verified high-level
+synthesis tool, the verification algorithm needs to support SAT checking
+of the predicates. It is the conversion that introduces predicated
+instructions which can make use of the hyperblocks. It converts
+conditional statements into predicated instructions. This transformation
+has a few limitations on the kind of conditional statements that it can
+translate. First, only conditional statements without loops can be
+translated, therefore, one must identify cycles in the control-flow
+before performing the if-conversion. Secondly, if-conversion will not
+always result in more efficient code, so it should not be applied to any
+conditional statements. Instead, it is best applied to conditional
+statements where each branch will take a similar amount of time to
+execute.
+
+ [\#1b6]: /zettel/1b6
+ [\#1b8]: /zettel/1b8
diff --git a/content/zettel/1c9.md b/content/zettel/1c9.md
new file mode 100644
index 0000000..e2ca725
--- /dev/null
+++ b/content/zettel/1c9.md
@@ -0,0 +1,18 @@
++++
+title = "Resource Sharing"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c6", "1c8"]
+forwardlinks = ["1c10"]
+zettelid = "1c9"
++++
+
+Resource sharing is an important optimisation in HLS, because some
+operations can be expensive to implement directly in hardware, and it is
+therefore more efficient to share the hardware between multiple
+different uses of the hardware.
+
+This is also an important metric during scheduling, because if one can
+get a minimal schedule while reusing as much of the hardware as
+possible, that is a much better hardware design in general.
diff --git a/content/zettel/1d.md b/content/zettel/1d.md
new file mode 100644
index 0000000..50bcab3
--- /dev/null
+++ b/content/zettel/1d.md
@@ -0,0 +1,9 @@
++++
+title = "Alternatives"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1c"]
+forwardlinks = ["1e", "1d1"]
+zettelid = "1d"
++++
diff --git a/content/zettel/1d1.md b/content/zettel/1d1.md
new file mode 100644
index 0000000..dcd131c
--- /dev/null
+++ b/content/zettel/1d1.md
@@ -0,0 +1,102 @@
++++
+title = "Bluespec"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1d"]
+forwardlinks = ["1b2", "3a", "1b1"]
+zettelid = "1d1"
++++
+
+Bluespec is a higher-level hardware description language, based on
+circuit generation instead of circuit description.
+
+{{< transclude-1 zettel="1b2" >}}
+
+Tail calls are an optimisation that can be applied in software, which
+detects that a recursive call only happens at the tail of the function.
+If that is the case, then the recursive call can happen without having
+to preserve the stack at every call, as it can effectively be translated
+to an equivalent version that only uses loops. This optimisation is
+important to improve the efficiency of recursive functions if the stack
+is not needed, and is especially important in functional languages where
+recursion might be more widely used.
+
+As tail calls can be efficiently translated to loops, this also means
+that they can be efficiently encoded in hardware when the code is passed
+through high-level synthesis. This is straightforward if the tail calls
+are detected and automatically translated to loops, however, what
+happens if that is not the case and we only have a tail call construct
+in the intermediate language.
+
+The CompCert ([\#3a]) \[1\] register transfer language (RTL)
+intermediate language, for example, has an explicit tail call construct
+(`Itailcall`), which is used if it can detect that a function contains a
+tail call. Apart from that, it only has a jump command, which can be
+used for loops. Therefore, supporting the tail call construct allows us
+to support a subset of possible recursive functions. Even though these
+are as powerful as loops, it may be more natural to write their
+definitions as a recursive function instead of a loop.
+
+{{< transclude-2 zettel="1b1" >}}
+
+Guarded commands \[2\] are an interesting construct which can be added
+to languages. They look similar to `case` statements, but behave in a
+parallel and nondeterministic way. Each guard has a boolean value
+followed by a program which may be executed if the guard evaluates to
+true. The following shows the main syntax that guards may have.
+
+``` grammar
+e ::= if gc fi | do gc od ...
+
+gc ::= (b e) || gc
+```
+
+One reason these are interesting language constructs, is because they
+allow for the encoding of commands that may be executed when a condition
+is true, but that it isn't necessary. Often, when giving instructions,
+one does not really specify the order, just the commands that should
+eventually be executed.
+
+The guarded commands `gc` will either return a match if a boolean
+evaluates to true, or `abort`. There are two constructs that are built
+around guarded commands which adds more functionality to them.
+`if gc fi` matches one rule in the guarded statement and executes it. If
+it does not match a rule, it then acts like `abort`. `do gc od` loops
+over the guarded commands while any rule matches. If no match is
+found, it acts like `skip`.
+
+These allow for nice encoding of common algorithms, using two other
+constructs that use the guarded commands.
+
+{{< /transclude-2 >}}
+
+{{< /transclude-1 >}}
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-leroy06_formal_certif_compil_back_end" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">X. Leroy, “Formal certification of
+a compiler back-end or: Programming a compiler with a proof assistant,”
+in *Conference record of the 33rd ACM SIGPLAN-SIGACT symposium on
+principles of programming languages*, in POPL ’06. Charleston, South
+Carolina, USA: Association for Computing Machinery, 2006, pp. 42–54.
+doi: [10.1145/1111037.1111042].</span>
+
+</div>
+
+<div id="ref-winskel93" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">G. Winskel, *The formal semantics
+of programming languages: An introduction*. MIT press, 1993.</span>
+
+</div>
+
+</div>
+
+ [\#3a]: /zettel/3a
+ [10.1145/1111037.1111042]: https://doi.org/10.1145/1111037.1111042
diff --git a/content/zettel/1e.md b/content/zettel/1e.md
new file mode 100644
index 0000000..312ecd7
--- /dev/null
+++ b/content/zettel/1e.md
@@ -0,0 +1,9 @@
++++
+title = "Main ideas"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a2d", "1d"]
+forwardlinks = ["1f", "1e1"]
+zettelid = "1e"
++++
diff --git a/content/zettel/1e1.md b/content/zettel/1e1.md
new file mode 100644
index 0000000..eba95c0
--- /dev/null
+++ b/content/zettel/1e1.md
@@ -0,0 +1,21 @@
++++
+title = "How to run C on spatial hardware"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1e"]
+forwardlinks = []
+zettelid = "1e1"
++++
+
+- Key is that hardware has unlimited parallelism
+- Want to take away the unnecessary ordering that is imposed in
+ software
+- Loads ask for memory and can then block to retrieve the memory
+- For if-statements, the branch is removed and is instead replaced by
+ a multiplexer
+- Any stores in the if-statement need to be anded with the condition
+ of that branch
+- If the execution time is very different in two branches, they should
+ be separated into different control flow statements, so that the
+ delay is only experienced when that branch is actually taken.
diff --git a/content/zettel/1f.md b/content/zettel/1f.md
new file mode 100644
index 0000000..84f4607
--- /dev/null
+++ b/content/zettel/1f.md
@@ -0,0 +1,9 @@
++++
+title = "Reliability"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1e"]
+forwardlinks = ["1f1"]
+zettelid = "1f"
++++
diff --git a/content/zettel/1f1.md b/content/zettel/1f1.md
new file mode 100644
index 0000000..137edd7
--- /dev/null
+++ b/content/zettel/1f1.md
@@ -0,0 +1,11 @@
++++
+title = "What exactly is unreliability"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1f"]
+forwardlinks = ["1f2"]
+zettelid = "1f1"
++++
+
+HLS tools are often seen as quite flaky, but what does that even mean?
diff --git a/content/zettel/1f2.md b/content/zettel/1f2.md
new file mode 100644
index 0000000..a0cef03
--- /dev/null
+++ b/content/zettel/1f2.md
@@ -0,0 +1,29 @@
++++
+title = "Input language specification"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1f3a", "1f1"]
+forwardlinks = ["1f3", "1f2a"]
+zettelid = "1f2"
++++
+
+The first reason a HLS tool might be seen as unreliable is because when
+coming from C, the behaviour of the input is often unspecified. This
+means that it can be defined by the compiler, resulting in different
+behaviour in different compilers. HLS tools take great advantage of
+this, to allow for hardware specific optimisations. In addition to that,
+HLS tools will define various different subsets of the language that
+will get compiled correctly. One example of this is that if a pointer is
+used multiple times it may not be valid anymore:
+
+> using pointers which are updated multiple times is not recommended.
+> HLS would optimize the redundant assignments. Please look at this
+> section in UG902.
+>
+> Using pointers which are accessed multiple times can introduce
+> unexpected behavior after synthesis. In the following example pointer
+> d~i~ is read four times and pointer d~o~ is written to twice: the
+> pointers perform multiple accesses. — nithink, Xilinx forum[^1]
+
+[^1]: <https://forums.xilinx.com/t5/High-Level-Synthesis-HLS/Pointer-synthesis-in-Vivado-HLS-v20-1/m-p/1117009>
diff --git a/content/zettel/1f2a.md b/content/zettel/1f2a.md
new file mode 100644
index 0000000..00f5816
--- /dev/null
+++ b/content/zettel/1f2a.md
@@ -0,0 +1,33 @@
++++
+title = "How to solve this specification problem"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1f2"]
+forwardlinks = ["3a"]
+zettelid = "1f2a"
++++
+
+This specification problem can be solved informally, by basing the HLS
+tool on the C semantics of existing compilers that have been well-tested
+and are well-understood. One example of such a compiler is CompCert
+([\#3a]), which has a formal specification of the Clight \[1\] language,
+which defines a subset of C that is formally verified.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-blazy09_mechan_seman_cligh_subset_c_languag"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">S. Blazy and X. Leroy, “Mechanized
+semantics for the Clight subset of the C language,” *Journal of
+Automated Reasoning*, vol. 43, no. 3, pp. 263–288, Oct. 2009, doi:
+[10.1007/s10817-009-9148-3].</span>
+
+</div>
+
+</div>
+
+ [\#3a]: /zettel/3a
+ [10.1007/s10817-009-9148-3]: https://doi.org/10.1007/s10817-009-9148-3
diff --git a/content/zettel/1f3.md b/content/zettel/1f3.md
new file mode 100644
index 0000000..3eab866
--- /dev/null
+++ b/content/zettel/1f3.md
@@ -0,0 +1,45 @@
++++
+title = "Functional correctness"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1f2"]
+forwardlinks = ["1f4", "1f3a"]
+zettelid = "1f3"
++++
+
+Functional correctness is also somewhat of a problem in HLS \[1\],
+\[2\], even though this is probably not the first property of flakiness
+that people would point to.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-lidbury15_many_core_compil_fuzzin" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">C. Lidbury, A. Lascu, N. Chong,
+and A. F. Donaldson, “Many-core compiler fuzzing,” in *Proceedings of
+the 36th ACM SIGPLAN conference on programming language design and
+implementation*, in PLDI ’15. Portland, OR, USA: Association for
+Computing Machinery, 2015, pp. 65–76. doi:
+[10.1145/2737924.2737986].</span>
+
+</div>
+
+<div id="ref-herklotz21_empir_study_reliab_high_level_synth_tools"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">Y. Herklotz, Z. Du, N. Ramanathan,
+and J. Wickerson, “An empirical study of the reliability of high-level
+synthesis tools,” in *2021 IEEE 29th annual international symposium on
+field-programmable custom computing machines (FCCM)*, 2021, pp. 219–223.
+doi: [10.1109/FCCM51124.2021.00034].</span>
+
+</div>
+
+</div>
+
+ [10.1145/2737924.2737986]: https://doi.org/10.1145/2737924.2737986
+ [10.1109/FCCM51124.2021.00034]: https://doi.org/10.1109/FCCM51124.2021.00034
diff --git a/content/zettel/1f3a.md b/content/zettel/1f3a.md
new file mode 100644
index 0000000..65ebc42
--- /dev/null
+++ b/content/zettel/1f3a.md
@@ -0,0 +1,17 @@
++++
+title = "How to solve functional correctness"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1f3"]
+forwardlinks = ["3c", "1f2"]
+zettelid = "1f3a"
++++
+
+One solution to functional correctness is to write the HLS tool in Coq,
+as we are doing with Vericert ([\#3c]). This provides guarantees that
+assuming the C semantics ([\#1f2]) are correct, and the output semantics
+are trusted, the translation can be trusted.
+
+ [\#3c]: /zettel/3c
+ [\#1f2]: /zettel/1f2
diff --git a/content/zettel/1f4.md b/content/zettel/1f4.md
new file mode 100644
index 0000000..1f1184f
--- /dev/null
+++ b/content/zettel/1f4.md
@@ -0,0 +1,28 @@
++++
+title = "Complex optimisation effects"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1f3"]
+forwardlinks = ["1f4a"]
+zettelid = "1f4"
++++
+
+Finally, the last flakiness property that HLS tools have is that they
+apply many optimisations, which may or may not have linear effects on
+the performance and area of the result.
+
+> Because of Amdahl's law arguments, most optimizations don't have a
+> linear effect. They appear to have a linear effect sometimes, but
+> eventually hit some diminishing returns. Some other effects are really
+> discontinuous, for instance, unrolling accumulation loops may not
+> change critical recurrences. — Stephen Neuendorffer ([email])
+
+It is nearly impossible to predict what a series of optimisations will
+do to the performance. The user will implicitly try to predict the
+performance with a simplified model as reference, and as the HLS tool
+has to take everything into account, and will therefore sometimes not be
+able to optimise the loop as the user would want it to be, leading to
+discontinuous optimisations.
+
+ [email]: notmuch:id:BYAPR02MB3910A2FA9F954031FC11A232A8479@BYAPR02MB3910.namprd02.prod.outlook.com
diff --git a/content/zettel/1f4a.md b/content/zettel/1f4a.md
new file mode 100644
index 0000000..27a1367
--- /dev/null
+++ b/content/zettel/1f4a.md
@@ -0,0 +1,57 @@
++++
+title = "How to make optimisations more predictable"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1f4b", "1f4"]
+forwardlinks = ["3a7", "1f4b"]
+zettelid = "1f4a"
++++
+
+The way to address this issue is to make optimisations more predictable.
+This is quite tough though, because it's hard to even imagine what this
+means. One way this could be approached is by formalising the
+optimisations, so that properties can be proven about them. For example,
+one could formalise loop pipelining in polyhedral analysis \[1\], \[2\]
+to understand more about the algorithm. However, this would probably be
+done using translation validation ([\#3a7]), therefore not being that
+perfect for understanding the algorithm.
+
+However, in addition to that, it might be possible to define a theorem
+that proves predictability of the algorithms. For example, one might
+imagine a situation where adding no-ops to the input of an algorithm
+drastically changes the output. One might therefore want to define a
+theorem that states that adding a no-op will either not change the
+hardware in any way, or weaken it by saying that adding a no-op
+should always increase the size of the hardware if there are any changes
+to it.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-liu18_polyh_based_dynam_loop_pipel" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">J. Liu, J. Wickerson, S. Bayliss,
+and G. A. Constantinides, “Polyhedral-based dynamic loop pipelining for
+high-level synthesis,” *IEEE Transactions on Computer-Aided Design of
+Integrated Circuits and Systems*, vol. 37, no. 9, pp. 1802–1815, Sep.
+2018, doi: [10.1109/TCAD.2017.2783363].</span>
+
+</div>
+
+<div id="ref-courant21_verif_code_gener_polyh_model" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">N. Courant and X. Leroy, “Verified
+code generation for the polyhedral model,” *Proc. ACM Program. Lang.*,
+vol. 5, no. POPL, Jan. 2021, doi: [10.1145/3434321].</span>
+
+</div>
+
+</div>
+
+ [\#3a7]: /zettel/3a7
+ [10.1109/TCAD.2017.2783363]: https://doi.org/10.1109/TCAD.2017.2783363
+ [10.1145/3434321]: https://doi.org/10.1145/3434321
diff --git a/content/zettel/1f4b.md b/content/zettel/1f4b.md
new file mode 100644
index 0000000..1ffd459
--- /dev/null
+++ b/content/zettel/1f4b.md
@@ -0,0 +1,20 @@
++++
+title = "Fuzz testing predictability of HLS tools"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["1f4a"]
+forwardlinks = ["1f4a"]
+zettelid = "1f4b"
++++
+
+On this note, one might be able to fuzz-test the predictability of HLS
+tools. One simple property that one might want to show always holds is
+the one noted in ([\#1f4a]). This could be done by adding dead-code to
+hardware designs or input to HLS tools, and noting if the HLS tool ever
+manages to optimise the hardware more due to these changes. This should
+not happen if the HLS tools should be predictable, because that means
+that adding random dead-code could actually lead to better designs, even
+though the HLS should be designing that hardware in the first place.
+
+ [\#1f4a]: /zettel/1f4a
diff --git a/content/zettel/2a.md b/content/zettel/2a.md
new file mode 100644
index 0000000..79c8096
--- /dev/null
+++ b/content/zettel/2a.md
@@ -0,0 +1,9 @@
++++
+title = "Types of types"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = []
+forwardlinks = ["2b", "2a1"]
+zettelid = "2a"
++++
diff --git a/content/zettel/2a1.md b/content/zettel/2a1.md
new file mode 100644
index 0000000..8ad4419
--- /dev/null
+++ b/content/zettel/2a1.md
@@ -0,0 +1,35 @@
++++
+title = "Nominal Typing"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2a"]
+forwardlinks = ["2ab"]
+zettelid = "2a1"
++++
+
+Distinguish types by their names. For example, an `Integer` could be a
+`Count` or an `Offset`. Operations based on those types should interact
+in a different way. This is different to structured typing ([\#2ab]),
+which bases the difference of the type not on the name, but the
+structure. For example [^1]:
+
+- `Count` + `Count` is valid, and should give a `Count`.
+- `Offset` + `Count` is also likely valid, and will give an `Offset`,
+ because we can have an `Offset` and some count from a previous
+ value, and offset by the amount and the count of that object. The
+ new offset is likely to be valid as well.
+- `Count` + `Offset` should probably not be valid, because it is maybe
+ expected that it returns a `Count`, which would however not be valid
+  (adding a count and an offset does not mean anything). However, to keep
+ associativity, it might make sense to allow this and return an
+ `Offset` in that case.
+- `Offset` + `Offset` should not be valid, because this would allow us
+ to add offsets which may lead to a new offset that is out of range,
+ meaning we could access memory that is not valid.
+
+[^1]: Lelechenko, Andrew. *Haskell for mathematical libraries*. lambda
+ DAλS talk (13-14 Feb 2020). Kraków, Poland. \[Online\]
+ <https://www.youtube.com/watch?v=qaPdg0mZavM>.
+
+ [\#2ab]: /zettel/2ab
diff --git a/content/zettel/2ab.md b/content/zettel/2ab.md
new file mode 100644
index 0000000..6c8b73a
--- /dev/null
+++ b/content/zettel/2ab.md
@@ -0,0 +1,13 @@
++++
+title = "Structured Typing"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2a1"]
+forwardlinks = ["2c"]
+zettelid = "2ab"
++++
+
+Structured typing, on the other hand, distinguishes types based on their
+structure instead of their name. This means that types with different
+names but the same structure would be deemed equivalent.
diff --git a/content/zettel/2b.md b/content/zettel/2b.md
new file mode 100644
index 0000000..9c6e55b
--- /dev/null
+++ b/content/zettel/2b.md
@@ -0,0 +1,9 @@
++++
+title = "Computer Architecture"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2a"]
+forwardlinks = ["2c", "2b1"]
+zettelid = "2b"
++++
diff --git a/content/zettel/2b1.md b/content/zettel/2b1.md
new file mode 100644
index 0000000..0ff9f00
--- /dev/null
+++ b/content/zettel/2b1.md
@@ -0,0 +1,20 @@
++++
+title = "Predicated execution"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c2", "2b", "1c6c", "1b8", "1b7"]
+forwardlinks = ["1a1", "1c", "2b2", "2b1a"]
+zettelid = "2b1"
++++
+
+Predicated execution is the addition of conditional execution to normal
+instructions, which is much more efficient than using conditionals to
+separate the control flow. In addition to that, predicated execution can
+also be used to transform control flow into straight-line code, which
+then allows for optimisations that can only work on straight-line code,
+such as optimisations that use a data-flow graph ([\#1a1]) such as HLS
+optimisations ([\#1c]).
+
+ [\#1a1]: /zettel/1a1
+ [\#1c]: /zettel/1c
diff --git a/content/zettel/2b1a.md b/content/zettel/2b1a.md
new file mode 100644
index 0000000..d80e636
--- /dev/null
+++ b/content/zettel/2b1a.md
@@ -0,0 +1,37 @@
++++
+title = "Phi-predication"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1d1", "2b1"]
+forwardlinks = ["2b1b"]
+zettelid = "2b1a"
++++
+
+Phi-predication \[1\] is a lightweight predication implementation where
+phi nodes are inserted in optimal positions instead of having predicated
+instructions. This makes it much better for out-of-order processors with
+deep pipelines, as there will be less pipeline stalls. This is because
+the predicate only needs to be evaluated at the time the phi-instruction
+is executed and not when the individual instructions are executed.
+
+For example, if there are two `mov` that are in two separate branches of
+a conditional statement, then the compare can first be executed,
+followed by the two `mov`, and then finally a phi instruction is
+executed which assigns the right value to the register.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-chuang03_phi" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">W. Chuang, B. Calder, and J.
+Ferrante, “Phi-predication for light-weight if-conversion,” in
+*International symposium on code generation and optimization, 2003. CGO
+2003.*, Mar. 2003, pp. 179–190. doi: [10.1109/CGO.2003.1191544].</span>
+
+</div>
+
+</div>
+
+ [10.1109/CGO.2003.1191544]: https://doi.org/10.1109/CGO.2003.1191544
diff --git a/content/zettel/2b1b.md b/content/zettel/2b1b.md
new file mode 100644
index 0000000..1d060fa
--- /dev/null
+++ b/content/zettel/2b1b.md
@@ -0,0 +1,31 @@
++++
+title = "Classes of phi-predicated instructions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1a"]
+forwardlinks = ["2b1c"]
+zettelid = "2b1b"
++++
+
+There are various types of instructions that are needed.
+
+Phi function
+: This is used to select the right value of the current register from
+ the two alternatives that were generated by predicated instructions.
+
+Predicated memory
+: Memory operations need to be predicated, as these cannot have
+ side-effects, and therefore have to stall the pipeline if the
+ predicate is not evaluated yet.
+
+Unconditional compares
+: These are the operations that update the values of the predicates
+ for the instructions, and also have to have the predicate updated
+ immediately.
+
+Internal join
+: This instruction is needed to update the predicates to a join block,
+ and performs the join of all of the input predicates. This means
+ that if any of the control flow of those instructions was taken,
+ that the predicate will be set to true.
diff --git a/content/zettel/2b1c.md b/content/zettel/2b1c.md
new file mode 100644
index 0000000..42f73c5
--- /dev/null
+++ b/content/zettel/2b1c.md
@@ -0,0 +1,17 @@
++++
+title = "Predicated instructions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1b"]
+forwardlinks = ["2b1d", "2b1c1"]
+zettelid = "2b1c"
++++
+
+The standard solution to predicated execution is to provide optional
+predicates to the instructions themselves. This is normally undesirable
+in VLIW processors with deep pipelines, as the predicate needs to be
+known in advance to be able to execute the instruction properly.
+However, without this pipeline, it is beneficial to use this
+over the phi-predicated instructions, as it requires less analysis at the
+compilation phase and does not require a static single assignment form.
diff --git a/content/zettel/2b1c1.md b/content/zettel/2b1c1.md
new file mode 100644
index 0000000..23e8128
--- /dev/null
+++ b/content/zettel/2b1c1.md
@@ -0,0 +1,36 @@
++++
+title = "Lightweight speculation and predication in HLS"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1c"]
+forwardlinks = []
+zettelid = "2b1c1"
++++
+
+In high-level synthesis, there is no penalty for performing jumps, and
+one can therefore implement a lightweight predicated execution, where
+less predication is used (only in writes) and jumps are used in the
+shorter branches \[1\].
+
+This is done by adding a jump statement in the shorter branch which
+jumps to the end of the if-statement, therefore allowing predication to
+work as efficiently as jumps. This is more efficient for high-level
+synthesis, as there is no cost when jumping.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-nane12" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">R. Nane, V. Sima, and K. Bertels,
+“A lightweight speculative and predicative scheme for hardware
+execution,” in *2012 international conference on reconfigurable
+computing and FPGAs*, Dec. 2012, pp. 1–6. doi:
+[10.1109/ReConFig.2012.6416721].</span>
+
+</div>
+
+</div>
+
+ [10.1109/ReConFig.2012.6416721]: https://doi.org/10.1109/ReConFig.2012.6416721
diff --git a/content/zettel/2b1d.md b/content/zettel/2b1d.md
new file mode 100644
index 0000000..6f63820
--- /dev/null
+++ b/content/zettel/2b1d.md
@@ -0,0 +1,17 @@
++++
+title = "Heuristic for if-conversion"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1c"]
+forwardlinks = ["2b1e", "2b1d1"]
+zettelid = "2b1d"
++++
+
+Various heuristics can be used in if-conversion when generating
+predicated instructions. These are needed because technically any path
+without backward edges could be transformed into predicated execution
+passes. However, there are various disadvantages with this, because it
+increases the complexity of the predicates as these will depend on other
+predicates, and makes optimisations of these large blocks more
+problematic.
diff --git a/content/zettel/2b1d1.md b/content/zettel/2b1d1.md
new file mode 100644
index 0000000..08d4c4e
--- /dev/null
+++ b/content/zettel/2b1d1.md
@@ -0,0 +1,20 @@
++++
+title = "Short Static Region Heuristic"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1d", "1c2h"]
+forwardlinks = ["2b1a", "1c2h", "2b1d2"]
+zettelid = "2b1d1"
++++
+
+This strategy involves only converting small regions to reduce the
+length of phi-chains in the case of phi-predication ([\#2b1a]). This
+means that regions are only formed if these only take four clock cycles
+to execute, for example. One can also put the restriction on the longest
+and the shortest path to keep these as close as possible, meaning that
+the difference between the shortest and the longest path is three
+cycles. More heuristics can be found in ([\#1c2h]).
+
+ [\#2b1a]: /zettel/2b1a
+ [\#1c2h]: /zettel/1c2h
diff --git a/content/zettel/2b1d2.md b/content/zettel/2b1d2.md
new file mode 100644
index 0000000..f6ebb51
--- /dev/null
+++ b/content/zettel/2b1d2.md
@@ -0,0 +1,17 @@
++++
+title = "Profile-based heuristics"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b1d1"]
+forwardlinks = []
+zettelid = "2b1d2"
++++
+
+These are probably the most common. A test set of inputs can be used to
+check which are the most likely paths for the program to take, which
+guides the formation of the regions to be if-converted. This gives good
+performance as paths that are often executed can be placed into one
+region, whereas paths that are not executed are placed outside of the
+blocks so that the if-converted instructions do not take up time in the
+critical path.
diff --git a/content/zettel/2b1e.md b/content/zettel/2b1e.md
new file mode 100644
index 0000000..401d1ea
--- /dev/null
+++ b/content/zettel/2b1e.md
@@ -0,0 +1,21 @@
++++
+title = "Reverse if-conversion"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f", "2b1d"]
+forwardlinks = ["1c8", "1b8", "3c3f"]
+zettelid = "2b1e"
++++
+
+To be able to target standard processors, reverse if-conversion can be
+used to create if-statements and branches based on the predicated
+instructions. Scheduling over multiple blocks can then be performed by
+adding predicates, doing if-conversion ([\#1c8]) and hyperblock
+scheduling ([\#1b8]). Then, to convert back to a basic-block view of the
+code, reverse if-conversion can be used. This therefore has a similar
+effect to scheduling instructions using trace scheduling ([\#3c3f]).
+
+ [\#1c8]: /zettel/1c8
+ [\#1b8]: /zettel/1b8
+ [\#3c3f]: /zettel/3c3f
diff --git a/content/zettel/2b2.md b/content/zettel/2b2.md
new file mode 100644
index 0000000..459f191
--- /dev/null
+++ b/content/zettel/2b2.md
@@ -0,0 +1,18 @@
++++
+title = "Rotating registers"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c2", "2b1", "1c6b"]
+forwardlinks = ["1c6"]
+zettelid = "2b2"
++++
+
+Rotating registers are an implementation of hardware registers that are
+especially beneficial to loop pipelining ([\#1c6]), as they are indexed
+based on the current iteration that the loop is in, which means that
+each loop iteration can just write to the same registers, but still
+create a pipelined loop, even if there are dependencies between the
+pipeline stages.
+
+ [\#1c6]: /zettel/1c6
diff --git a/content/zettel/2c.md b/content/zettel/2c.md
new file mode 100644
index 0000000..45a51e2
--- /dev/null
+++ b/content/zettel/2c.md
@@ -0,0 +1,15 @@
++++
+title = "Blockchain"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2b", "2ab"]
+forwardlinks = ["2d", "2c1"]
+zettelid = "2c"
++++
+
+I never thought I would write about this, but 3blue1brown's video[^1]
+explained the concept really well and made it very interesting.
+
+[^1]: Grant Sanderson. *But how does bitcoin actually work?*. Youtube.
+ \[Online\] <https://www.youtube.com/watch?v=bBC-nXj3Ng4>.
diff --git a/content/zettel/2c1.md b/content/zettel/2c1.md
new file mode 100644
index 0000000..96d10f1
--- /dev/null
+++ b/content/zettel/2c1.md
@@ -0,0 +1,21 @@
++++
+title = "Distributed Ledger"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2c"]
+forwardlinks = []
+zettelid = "2c1"
++++
+
+The main idea behind crypto-currencies such as Bitcoin is that they are
+just a record of who paid who how much. This is a public ledger that
+anyone can read, however, it is supposed to be append only, meaning new
+items only get added to the end of it, and the ledger in general can not
+be modified.
+
+However, to have a ledger it needs to be located somewhere and
+controlled by a central entity. This can be solved by actually having a
+distributed ledger, so each user actually has their own version of the
+ledger, and checks it periodically against the other ledgers that other
+people are keeping.
diff --git a/content/zettel/2d.md b/content/zettel/2d.md
new file mode 100644
index 0000000..6745002
--- /dev/null
+++ b/content/zettel/2d.md
@@ -0,0 +1,9 @@
++++
+title = "Logic"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2c"]
+forwardlinks = ["2e", "2d1"]
+zettelid = "2d"
++++
diff --git a/content/zettel/2d1.md b/content/zettel/2d1.md
new file mode 100644
index 0000000..eabdcfe
--- /dev/null
+++ b/content/zettel/2d1.md
@@ -0,0 +1,19 @@
++++
+title = "SAT Solvers"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2d"]
+forwardlinks = ["2d2"]
+zettelid = "2d1"
++++
+
+SAT is a mathematical problem whereby the question is asked whether a
+logical formula is satisfiable, meaning there is a combination of inputs
+that will satisfy the sentence. This logical formula is often expressed
+in conjunctive normal form (CNF).
+
+SAT solvers are programs that can solve these problems automatically.
+Even though it is an intractable problem and is in O(2^n^) for the
+number of variables present in the sentence, there are efficient
+algorithms that will work well most of the time.
diff --git a/content/zettel/2d2.md b/content/zettel/2d2.md
new file mode 100644
index 0000000..83d335a
--- /dev/null
+++ b/content/zettel/2d2.md
@@ -0,0 +1,25 @@
++++
+title = "Davis–Putnam–Logemann–Loveland (DPLL) algorithm"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2d1"]
+forwardlinks = ["2d3"]
+zettelid = "2d2"
++++
+
+This is a backtracking algorithm that is used to solve SAT problems, and
+is still used in the most efficient SAT solvers. The two main rules of
+reduction are the following:
+
+Unit propagation
+: This consists of removing every clause that contains a unit clause's
+ literal and discarding the complement of the unit clause's literal.
+ This can be repeated for a large reduction in the formula.
+
+Pure literal elimination
+: If a pure propositional variable (one where its complement is not
+ present anywhere in the formula), is present in a clause, this
+ clause can also be removed as it can trivially be set to true using
+ the pure variable. It is therefore not a constraint on the
+ satisfiability of the system.
diff --git a/content/zettel/2d3.md b/content/zettel/2d3.md
new file mode 100644
index 0000000..77e5324
--- /dev/null
+++ b/content/zettel/2d3.md
@@ -0,0 +1,25 @@
++++
+title = "Converting a formula to CNF"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2d2"]
+forwardlinks = ["2d4"]
+zettelid = "2d3"
++++
+
+This can be quite easy, as each construct can be converted to CNF
+recursively.
+
+``` text
+convert(P & Q) -> convert(P) & convert(Q)
+convert(P | Q) -> foreach convert(Q) -> q_i | convert(P)
+```
+
+However, this can result in an exponential growth of the formula. There
+are other techniques that will transform the formula into a
+equisatisfiable formula, which is not equivalent to the original, but
+enough so for the purpose of a satisfiability check. These can be
+transformations such as the Tseitin transformation ([\#2d4]).
+
+ [\#2d4]: /zettel/2d4
diff --git a/content/zettel/2d4.md b/content/zettel/2d4.md
new file mode 100644
index 0000000..f444552
--- /dev/null
+++ b/content/zettel/2d4.md
@@ -0,0 +1,39 @@
++++
+title = "Tseitin transformation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2d3"]
+forwardlinks = ["2d5"]
+zettelid = "2d4"
++++
+
+The Tseitin transformation is a way to transform a general formula into
+CNF form, by basically taking each sub-formula and assigning it to be
+equivalent to the original formula. Therefore the following formula can
+be converted into the following CNF form, which only grows linearly in the
+number of clauses compared to the input.
+
+$$\phi = (a \land b) \lor c$$
+
+We can then set $x_1 \leftrightarrow a \land b$ and
+$x_2 \leftrightarrow x_1\lor c$. We can then transform the equivalence
+to the following form in terms of $\land$ and $\lor$.
+
+$$ x_1 \leftrightarrow a \land b \equiv (x_1 \rightarrow a \land b) \land (a\land b \rightarrow x_1) $$
+$$\equiv (\neg x_1 \lor (a \land b)) \land (\neg (a\land b) \lor x_1)$$
+$$\equiv (\neg x_1 \lor a) \land ( \neg x_1 \lor b) \land(\neg a \lor \neg b \lor x_1)$$
+
+For $\lor$ we can proceed in the same way:
+
+$$ x_2 \leftrightarrow x_1 \lor c \equiv (\neg x_2 \lor (x_1 \lor c)) \land(\neg (x_1 \lor c) \lor x_2) $$
+$$\equiv (\neg x_2 \lor x_1 \lor c) \land (\neg x_1 \lor x_2) \land (\neg c \lor x_2)$$
+
+Then we can transform the formula in the following way:
+
+$$T(\phi) = x_2 \land (x_2 \leftrightarrow x_1 \lor c) \land (x_1\leftrightarrow a \land b) $$
+$$ = x_2 \land (\neg x_2 \lor x_1 \lor c) \land(\neg x_1 \lor x_2) \land (\neg c \lor x_2) \land (\neg x_1 \lor a) \land ( \neg x_1 \lor b) \land (\neg a \lor \neg b \lor x_1) $$
+
+These two formulas are therefore equisatisfiable, as whenever the input
+is satisfiable, the output will also be satisfiable. These are, however,
+not equivalent, because the second formula has additional variables.
diff --git a/content/zettel/2d5.md b/content/zettel/2d5.md
new file mode 100644
index 0000000..87f79c2
--- /dev/null
+++ b/content/zettel/2d5.md
@@ -0,0 +1,21 @@
++++
+title = "Using a realistic SAT solver"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2d4"]
+forwardlinks = []
+zettelid = "2d5"
++++
+
+I have now implemented usage of a real SMT solver (Z3) using OCaml. It
+is surprisingly easy to use this SMT solver from the OCaml code, much
+easier than using the external Z3 executable. Expressions can easily be
+added as various assertions, and expressions themselves can also be
+built quite easily and recursively from given predicates. This makes it
+simple to add a custom solver to custom predicates.
+
+It should therefore be possible to also use the SAT solver to simplify
+arbitrary predicates as well, however, there are still various problems
+with extracting the expressions from the SAT solver again, as they are
+in the internal expression format of the solver.
diff --git a/content/zettel/2e.md b/content/zettel/2e.md
new file mode 100644
index 0000000..f3461f0
--- /dev/null
+++ b/content/zettel/2e.md
@@ -0,0 +1,9 @@
++++
+title = "Compilers"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2d"]
+forwardlinks = ["2f", "2e1"]
+zettelid = "2e"
++++
diff --git a/content/zettel/2e1.md b/content/zettel/2e1.md
new file mode 100644
index 0000000..503724d
--- /dev/null
+++ b/content/zettel/2e1.md
@@ -0,0 +1,9 @@
++++
+title = "Optimisations"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e"]
+forwardlinks = ["2e2", "2e1a"]
+zettelid = "2e1"
++++
diff --git a/content/zettel/2e1a.md b/content/zettel/2e1a.md
new file mode 100644
index 0000000..d5b21d7
--- /dev/null
+++ b/content/zettel/2e1a.md
@@ -0,0 +1,42 @@
++++
+title = "Kildall's algorithm"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1"]
+forwardlinks = ["2e1b", "2e1a1"]
+zettelid = "2e1a"
++++
+
+- Each instruction has a pool, which represents information that the
+ compiler knows at the time that function is executed.
+- The algorithm has a "work set" which at the start consists of at
+ least one instruction
+- The algorithm returns a map from instructions to pools
+- Goes through the control flow graph and updates the pool for each
+ instruction
+- At every iteration, one instruction is taken from the "work set",
+ and associated with the pool that is the meet of the pool in the map
+ and the pool it was associated with in the "work set".
+- Then the optimisation function is called on the current instruction
+ and its pool
+
+\[1\]
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-kildall73_unified_approac_global_progr_optim"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">G. A. Kildall, “A unified approach
+to global program optimization,” in *Proceedings of the 1st annual ACM
+SIGACT-SIGPLAN symposium on principles of programming languages*, in
+POPL ’73. Boston, Massachusetts: Association for Computing Machinery,
+1973, pp. 194–206. doi: [10.1145/512927.512945].</span>
+
+</div>
+
+</div>
+
+ [10.1145/512927.512945]: https://doi.org/10.1145/512927.512945
diff --git a/content/zettel/2e1a1.md b/content/zettel/2e1a1.md
new file mode 100644
index 0000000..c6ec0d9
--- /dev/null
+++ b/content/zettel/2e1a1.md
@@ -0,0 +1,17 @@
++++
+title = "Semilattice"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1a"]
+forwardlinks = []
+zettelid = "2e1a1"
++++
+
+- partially ordered set that has either a join or a meet
+- There are join-semilattice (least upper bound, supremum)
+- There are meet-semilattice (greatest lower bound, infimum)
+- a lattice is both a meet-semilattice and a join-semilattice
+- A set with finite elements is always a lattice, because if we take
+  two distinct elements, one will be the least element (join) and the
+  other will be the greatest element (meet)
diff --git a/content/zettel/2e1b.md b/content/zettel/2e1b.md
new file mode 100644
index 0000000..394a564
--- /dev/null
+++ b/content/zettel/2e1b.md
@@ -0,0 +1,36 @@
++++
+title = "Gated-SSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g", "3a8f", "3a7d", "2f1", "2e1a", "1c6d", "1c3b"]
+forwardlinks = ["2e1c", "2e1b1"]
+zettelid = "2e1b"
++++
+
+Gated-SSA (GSA) \[1\] is an extension to SSA. Whereby in SSA we only
+have φ-functions to choose between expressions after a join from two
+different expressions, gated-SSA extends it with μ, γ and η functions,
+which include the predicate that chooses the expressions, which the
+φ-functions does not include.
+
+This allows for better symbolic analysis of code that contains loops, as
+the function predicates can be compared to each other as well.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-tu95_gated_ssa_based_deman_driven" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">P. Tu and D. Padua, “Gated
+SSA-based demand-driven symbolic analysis for parallelizing compilers,”
+in *Proceedings of the 9th international conference on supercomputing*,
+in ICS ’95. Barcelona, Spain: Association for Computing Machinery, 1995,
+pp. 414–423. doi: [10.1145/224538.224648].</span>
+
+</div>
+
+</div>
+
+ [10.1145/224538.224648]: https://doi.org/10.1145/224538.224648
diff --git a/content/zettel/2e1b1.md b/content/zettel/2e1b1.md
new file mode 100644
index 0000000..b64f522
--- /dev/null
+++ b/content/zettel/2e1b1.md
@@ -0,0 +1,13 @@
++++
+title = "Constructing gated-SSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g1", "2e1b"]
+forwardlinks = ["2e1b2", "2e1b1a"]
+zettelid = "2e1b1"
++++
+
+There are multiple ways to construct gated SSA, either directly from a
+CFG, or, which is a bit more inefficient, going to SSA first and then
+transforming that to gated-SSA.
diff --git a/content/zettel/2e1b1a.md b/content/zettel/2e1b1a.md
new file mode 100644
index 0000000..1e684fe
--- /dev/null
+++ b/content/zettel/2e1b1a.md
@@ -0,0 +1,15 @@
++++
+title = "Keeping φ functions around"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b1"]
+forwardlinks = ["2e1b1b"]
+zettelid = "2e1b1a"
++++
+
+One possibility is to actually keep φ nodes around in gated-SSA, because
+that means that with code where the predicates cannot be found, or in
+irreducible loops, the gated-SSA generation will not fail. However, this
+does mean that sometimes one will have code that contains φ functions,
+instead of just the pure GSA functions.
diff --git a/content/zettel/2e1b1b.md b/content/zettel/2e1b1b.md
new file mode 100644
index 0000000..a212bb6
--- /dev/null
+++ b/content/zettel/2e1b1b.md
@@ -0,0 +1,25 @@
++++
+title = "Main simple explanation of GSA construction"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b1a"]
+forwardlinks = ["3a8"]
+zettelid = "2e1b1b"
++++
+
+The main explanation is quite simple, however, the details are not that
+easy. Once one has constructed SSA with φ functions, the translation to
+GSA is as follows:
+
+- Loops have to be identified, and these are normalised using
+ pre-header (PH), post-body (PB), post-exit (PE) nodes, which in the
+ case of CompCertSSA ([\#3a8]), means that these are populated with
+ extra Inop instructions.
+- Then, PH nodes are populated with μ functions, and for each variable
+ that has a μ function assignment we also generate a η function
+ assignments for those variables.
+- Finally, the other φ functions are replaced by γ functions, where
+ the predicate needs to be found first.
+
+ [\#3a8]: /zettel/3a8
diff --git a/content/zettel/2e1b2.md b/content/zettel/2e1b2.md
new file mode 100644
index 0000000..51ebdaa
--- /dev/null
+++ b/content/zettel/2e1b2.md
@@ -0,0 +1,20 @@
++++
+title = "μ function"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b5a", "2e1b1"]
+forwardlinks = ["2e1b3", "2e1b2a"]
+zettelid = "2e1b2"
++++
+
+The μ function only appears in loop headers, and it selects the initial
+and loop carried values for the variables in the loop. It does not come
+with a condition, and instead works exactly like a φ function, where the
+left hand value is the initial one, and the right hand value is the
+loop-carried value.
+
+ x2 = μ(x1, x3)
+
+This means that the initial value for x2 is x1, and that the subsequent
+values of x2 are given by the value of x3.
diff --git a/content/zettel/2e1b2a.md b/content/zettel/2e1b2a.md
new file mode 100644
index 0000000..5212f28
--- /dev/null
+++ b/content/zettel/2e1b2a.md
@@ -0,0 +1,12 @@
++++
+title = "Equivalent to phi function"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b2"]
+forwardlinks = []
+zettelid = "2e1b2a"
++++
+
+The μ function is equivalent to the φ function, as there is no predicate
+available, and the value is picked based on the join point.
diff --git a/content/zettel/2e1b3.md b/content/zettel/2e1b3.md
new file mode 100644
index 0000000..ed94ef6
--- /dev/null
+++ b/content/zettel/2e1b3.md
@@ -0,0 +1,17 @@
++++
+title = "γ function"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b2"]
+forwardlinks = ["2e1b4", "2e1b3a"]
+zettelid = "2e1b3"
++++
+
+This is an if-then-else function, which takes in a boolean predicate and
+picks one of the possible values from it. For example, the following:
+
+ x2 = γ(b, x1, x3)
+
+will pick either `x1` or `x3` based on the predicate value of `b`, where
+`b` is an expression.
diff --git a/content/zettel/2e1b3a.md b/content/zettel/2e1b3a.md
new file mode 100644
index 0000000..f2b651d
--- /dev/null
+++ b/content/zettel/2e1b3a.md
@@ -0,0 +1,14 @@
++++
+title = "Representation of predicates"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b3"]
+forwardlinks = ["2e1b3b"]
+zettelid = "2e1b3a"
++++
+
+The predicates represent what path was followed getting to the γ
+function, which will then be used to select the right variable. However,
+there are various different representations of the predicate in the γ
+function.
diff --git a/content/zettel/2e1b3b.md b/content/zettel/2e1b3b.md
new file mode 100644
index 0000000..2694ee2
--- /dev/null
+++ b/content/zettel/2e1b3b.md
@@ -0,0 +1,14 @@
++++
+title = "Representation for two argument functions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b3c1", "2e1b3a"]
+forwardlinks = ["2e1b3c"]
+zettelid = "2e1b3b"
++++
+
+If there are only two possibilities, the representation of the predicate
+is quite simple. As there are only two choices, and one of them has to
+be picked, then one single predicate is enough to make the choice, and
+pick between the two options.
diff --git a/content/zettel/2e1b3c.md b/content/zettel/2e1b3c.md
new file mode 100644
index 0000000..2846cd5
--- /dev/null
+++ b/content/zettel/2e1b3c.md
@@ -0,0 +1,13 @@
++++
+title = "Representation of multiple arguments"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b3b"]
+forwardlinks = ["2e1b3c1"]
+zettelid = "2e1b3c"
++++
+
+However, if there are more arguments in the γ function, then there are
+multiple possible representations of the predicates, which each have
+their benefits and disadvantages.
diff --git a/content/zettel/2e1b3c1.md b/content/zettel/2e1b3c1.md
new file mode 100644
index 0000000..167092b
--- /dev/null
+++ b/content/zettel/2e1b3c1.md
@@ -0,0 +1,29 @@
++++
+title = "Predicate with integer evaluation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b3c2", "2e1b3c"]
+forwardlinks = ["2e1b3b", "2e1b3c2"]
+zettelid = "2e1b3c1"
++++
+
+The first possibility is to extend the notion of predicates with two
+arguments ([\#2e1b3b]), to having multiple arguments. This can be done
+by having a predicate that evaluates to a natural number, which is then
+used to select the right variable.
+
+However, this has multiple downsides, one being that the predicate
+basically becomes a function that is opaque to the outside definitions.
+It would therefore have to carry around many proofs about properties
+that need to hold for the function, such as it being injective and
+getting the right property.
+
+It being a function would also make it difficult to analyse from the
+outside, and it would then also be difficult to do symbolic analysis on
+the predicates. Having a syntactic representation of this kind of
+function would also be quite complex, and would come with its own
+problems. The predicate would have to be quite complex, reducing the
+amount of analysis that one can do.
+
+ [\#2e1b3b]: /zettel/2e1b3b
diff --git a/content/zettel/2e1b3c2.md b/content/zettel/2e1b3c2.md
new file mode 100644
index 0000000..015352d
--- /dev/null
+++ b/content/zettel/2e1b3c2.md
@@ -0,0 +1,25 @@
++++
+title = "Pairs of predicates and expressions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b3c1"]
+forwardlinks = ["2e1b3c1"]
+zettelid = "2e1b3c2"
++++
+
+Another solution is to basically deconstruct the function that is
+proposed as a general predicate in ([\#2e1c3c1]), by assigning a predicate
+to each expression. The proof that needs to accompany this γ function,
+is that for any possible input value:
+
+$$\exists! p \in P(\gamma(...)), p$$
+
+Where $P$ is a function that retrieves the set of predicates from the
+gamma function.
+
+The benefit of this representation, is that the predicates only have to
+be a simple logic over the conditionals, instead of having to evaluate
+to a value.
+
+ [\#2e1b3c1]: /zettel/2e1b3c1
diff --git a/content/zettel/2e1b4.md b/content/zettel/2e1b4.md
new file mode 100644
index 0000000..544ddca
--- /dev/null
+++ b/content/zettel/2e1b4.md
@@ -0,0 +1,30 @@
++++
+title = "η function"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b3"]
+forwardlinks = ["2e1b5"]
+zettelid = "2e1b4"
++++
+
+The η function determines the value of a variable at the end of a loop.
+This also doesn't seem like it takes a condition variable. In thinned
+GSA \[1\], the η function does not have a predicate of when the
+assignment will be performed, however, in the original formulation, the
+η function did have the predicate.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-havlak94_const" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">P. Havlak, “Construction of
+thinned gated single-assignment form,” in *Languages and compilers for
+parallel computing*, U. Banerjee, D. Gelernter, A. Nicolau, and D.
+Padua, Eds., Berlin, Heidelberg: Springer Berlin Heidelberg, 1994, pp.
+477–499.</span>
+
+</div>
+
+</div>
diff --git a/content/zettel/2e1b5.md b/content/zettel/2e1b5.md
new file mode 100644
index 0000000..04ad12a
--- /dev/null
+++ b/content/zettel/2e1b5.md
@@ -0,0 +1,13 @@
++++
+title = "Non-standard extensions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b4"]
+forwardlinks = ["2e1b5a"]
+zettelid = "2e1b5"
++++
+
+There are quite a few differences between all the different dialects of
+GSA, and there does not really seem to be a standard interpretation for
+them.
diff --git a/content/zettel/2e1b5a.md b/content/zettel/2e1b5a.md
new file mode 100644
index 0000000..8f23f42
--- /dev/null
+++ b/content/zettel/2e1b5a.md
@@ -0,0 +1,27 @@
++++
+title = "β function"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b5"]
+forwardlinks = ["2e1b2", "2e1b5b"]
+zettelid = "2e1b5a"
++++
+
+The β function is a replacement for the μ function ([\#2e1b2]), which is
+mentioned in \[1\]
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-campbell93_refin" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">P. L. Campbell, K. Krishna, and R.
+A. Ballance, “Refining and defining the program dependence web,”
+*Cs93-6, University of New Mexico, Albuquerque*, 1993.</span>
+
+</div>
+
+</div>
+
+ [\#2e1b2]: /zettel/2e1b2
diff --git a/content/zettel/2e1b5b.md b/content/zettel/2e1b5b.md
new file mode 100644
index 0000000..a95922a
--- /dev/null
+++ b/content/zettel/2e1b5b.md
@@ -0,0 +1,26 @@
++++
+title = "Switch^F^"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b5a"]
+forwardlinks = []
+zettelid = "2e1b5b"
++++
+
+The $\text{Switch}^\text{F}$ node is also a node in \[1\], which acts
+like a $\gamma^{\text{F}}$ node, except that it will not generate
+demands for values during demand-driven interpretation.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-campbell93_refin" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">P. L. Campbell, K. Krishna, and R.
+A. Ballance, “Refining and defining the program dependence web,”
+*Cs93-6, University of New Mexico, Albuquerque*, 1993.</span>
+
+</div>
+
+</div>
diff --git a/content/zettel/2e1c.md b/content/zettel/2e1c.md
new file mode 100644
index 0000000..4aecb93
--- /dev/null
+++ b/content/zettel/2e1c.md
@@ -0,0 +1,9 @@
++++
+title = "Symbolic execution"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1b"]
+forwardlinks = ["2e1d", "2e1c1"]
+zettelid = "2e1c"
++++
diff --git a/content/zettel/2e1c1.md b/content/zettel/2e1c1.md
new file mode 100644
index 0000000..9754992
--- /dev/null
+++ b/content/zettel/2e1c1.md
@@ -0,0 +1,36 @@
++++
+title = "Comparing symbolic evaluations with conditionals"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3", "3a7", "2e1c"]
+forwardlinks = ["2e1c2", "2e1c1a"]
+zettelid = "2e1c1"
++++
+
+``` c
+if (P) { x = 1; }
+else if (Q) { x = 2; }
+else if (R) { x = 3; }
+else { x = 4; }
+```
+
+is equivalent to the following if $P \land Q$ is *unsatisfiable*:
+
+``` c
+if (Q) { x = 2; }
+else if (P) { x = 1; }
+else if (R) { x = 3; }
+else { x = 4; }
+```
+
+If $Q \land R$ is *satisfiable*, then these can never change in order,
+however, if $P \land R$ is *unsatisfiable*, then it would also be
+equivalent to the following:
+
+``` c
+if (Q) { x = 2; }
+else if (R) { x = 3; }
+else if (P) { x = 1; }
+else { x = 4; }
+```
diff --git a/content/zettel/2e1c1a.md b/content/zettel/2e1c1a.md
new file mode 100644
index 0000000..3ea908b
--- /dev/null
+++ b/content/zettel/2e1c1a.md
@@ -0,0 +1,18 @@
++++
+title = "Proof carrying scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c1"]
+forwardlinks = []
+zettelid = "2e1c1a"
++++
+
+Another option to prove this theorem would be to have proof carrying
+scheduling, which gives information to the checker about which
+transformations took place, so that these can be reversed, if possible,
+to get a syntactically equal representation.
+
+However, the main problem with this, is that it is quite difficult to
+even identify the different translations that were done to the
+predicates, and it may not really help the proof.
diff --git a/content/zettel/2e1c2.md b/content/zettel/2e1c2.md
new file mode 100644
index 0000000..86f6ffc
--- /dev/null
+++ b/content/zettel/2e1c2.md
@@ -0,0 +1,26 @@
++++
+title = "Ordering the predicates to normalise them"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c1"]
+forwardlinks = ["2e1c3"]
+zettelid = "2e1c2"
++++
+
+Maybe you can just order all the connectives under some defined strict
+order ($R< Q < P$):
+
+``` c
+if (R) { x = 3; }
+else if (Q) { x = 2; }
+else if (P) { x = 1; }
+else { x = 4; }
+```
+
+The problem is, this does not behave the same as the original property,
+it will be hard to prove that the two statements that produced this code
+will behave the same as each other if this representation is equivalent
+in some way.
+
+The original behaviour of the code disappears.
diff --git a/content/zettel/2e1c3.md b/content/zettel/2e1c3.md
new file mode 100644
index 0000000..1a35438
--- /dev/null
+++ b/content/zettel/2e1c3.md
@@ -0,0 +1,20 @@
++++
+title = "Partial strict order of predicates"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c2"]
+forwardlinks = ["2e1c4", "2e1c3a"]
+zettelid = "2e1c3"
++++
+
+Therefore, the only solution I can think of is to perform strict ordering
+only on conditions that are *unsatisfiable*, but this seems
+computationally expensive.
+
+``` c
+if (Q) { x = 2; }
+else if (R) { x = 3; }
+else if (P) { x = 1; }
+else { x = 4; }
+```
diff --git a/content/zettel/2e1c3a.md b/content/zettel/2e1c3a.md
new file mode 100644
index 0000000..027d1c4
--- /dev/null
+++ b/content/zettel/2e1c3a.md
@@ -0,0 +1,21 @@
++++
+title = "Abstract the nested if-statements into a list"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c3"]
+forwardlinks = ["2e1c5", "2e1c3b"]
+zettelid = "2e1c3a"
++++
+
+The main problem with the nested if-statements is that they are ordered,
+meaning that one cannot simply reorder them and end up with the same
+behaviour. Whether two branches can be switched depends on whether their
+predicates are simultaneously satisfiable.
+
+This can be solved with a similar solution to the full SAT solution
+([\#2e1c5]), whereby the if-statements are collapsed into a list of
+conditionals, out of which only one can be active at a time. This means
+that their order does not matter, and that they can then be sorted.
+
+ [\#2e1c5]: /zettel/2e1c5
diff --git a/content/zettel/2e1c3b.md b/content/zettel/2e1c3b.md
new file mode 100644
index 0000000..6a87270
--- /dev/null
+++ b/content/zettel/2e1c3b.md
@@ -0,0 +1,19 @@
++++
+title = "Sorting list of if-statements"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c3a"]
+forwardlinks = []
+zettelid = "2e1c3b"
++++
+
+The sorting of the statements can be done by sorting the list according
+to the expressions inside the predicates, and merging the predicates
+(using $\lor$) for the expressions that are equivalent. Then, it should
+be possible to just sequentially prove the equivalence of each of the
+predicates in the list that have the same symbolic values.
+
+For expressions that do not appear in either of the lists, the predicate
+on them should evaluate to being false, which would be the only valid
+reason for it to not appear as one of the possibilities.
diff --git a/content/zettel/2e1c4.md b/content/zettel/2e1c4.md
new file mode 100644
index 0000000..a8a3f31
--- /dev/null
+++ b/content/zettel/2e1c4.md
@@ -0,0 +1,40 @@
++++
+title = "Application-specific SAT solver"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c3"]
+forwardlinks = ["2e1c5", "2e1c4a"]
+zettelid = "2e1c4"
++++
+
+Another solution might be to go down one of the branches, and basically
+implement a slow and application-specific SAT solver.
+
+``` c
+else if (Q) { x = 2; }
+else if (R) { x = 3; }
+else { x = 4; }
+```
+
+``` c
+P == false;
+if (Q) { x = 2; }
+else if (P) { x = 1; }
+else if (R) { x = 3; }
+else { x = 4; }
+```
+
+and:
+
+``` c
+x = 1
+```
+
+``` c
+P == true;
+if (Q) { x = 2; }
+else if (P) { x = 1; }
+else if (R) { x = 3; }
+else { x = 4; }
+```
diff --git a/content/zettel/2e1c4a.md b/content/zettel/2e1c4a.md
new file mode 100644
index 0000000..af35c95
--- /dev/null
+++ b/content/zettel/2e1c4a.md
@@ -0,0 +1,22 @@
++++
+title = "Special SAT solving of the predicates"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c4"]
+forwardlinks = ["2e1c4b"]
+zettelid = "2e1c4a"
++++
+
+The predicates cannot be turned on and off that easily, as these are
+formulas, and not simple predicate values. There are multiple solutions
+to this. First, I could use some kind of data-structure such as a list
+to keep track of the values assigned to the predicates, and do a
+structural equality check to see if the current predicate being examined
+is part of the list. However, this is quite inefficient, as negated
+predicates will be handled as separate values to non-negated predicates,
+even though the values could be inferred from either. Doing it the
+proper SAT solving way would also make it possible to change the
+predicates that are being examined in the case that they can be
+simplified, although it's not something that is really planned, so this
+might not be too bad of a limitation.
diff --git a/content/zettel/2e1c4b.md b/content/zettel/2e1c4b.md
new file mode 100644
index 0000000..a31fc00
--- /dev/null
+++ b/content/zettel/2e1c4b.md
@@ -0,0 +1,11 @@
++++
+title = "Assumed invariant predicates"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c4a"]
+forwardlinks = []
+zettelid = "2e1c4b"
++++
+
+This assumes the invariance of the predicates
diff --git a/content/zettel/2e1c5.md b/content/zettel/2e1c5.md
new file mode 100644
index 0000000..dcfaffd
--- /dev/null
+++ b/content/zettel/2e1c5.md
@@ -0,0 +1,17 @@
++++
+title = "Proper SAT solution"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c4", "2e1c3a"]
+forwardlinks = ["2e1c6"]
+zettelid = "2e1c5"
++++
+
+One could also encode the following SAT query, which should be
+*unsatisfiable*, in the case where the SAT solver supports simple
+arithmetic.
+
+$$(P \rightarrow x = 1) \land (\neg P \land Q \rightarrow x = 2) \land (\neg P\land \neg Q \land R \rightarrow x = 3) \land (\neg P \land \neg Q \land \neg R\rightarrow x = 4) \land (Q \rightarrow x' = 2) \land (\neg Q \land P\rightarrow x' = 1) \land (\neg Q \land \neg P \land R \rightarrow x' = 3) \land(\neg Q \land \neg P \land \neg R \rightarrow x' = 4) \land \neg (x = x') $$
+
+This solution was proposed by George during the group meeting.
diff --git a/content/zettel/2e1c6.md b/content/zettel/2e1c6.md
new file mode 100644
index 0000000..1467cda
--- /dev/null
+++ b/content/zettel/2e1c6.md
@@ -0,0 +1,24 @@
++++
+title = "Using SAT with hashed expressions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c5"]
+forwardlinks = ["3a7", "2e1c7", "2e1c6a"]
+zettelid = "2e1c6"
++++
+
+The pure SAT solution is the nicest, as it just has to be passed to a
+SAT solver, however, this SAT solver needs to support features such as
+arithmetic. However, comparisons in the translation validation of
+scheduling ([\#3a7]) can also be done using hash-consed terms, which
+reduces the structural comparison of the objects by pointer equality
+checks. The assumption is made that if the pointer equality succeeds,
+that value of the pointers are structurally equal.
+
+Because only a comparison of numbers is needed for this, it is possible
+to do this comparison quite simply using a standard SAT problem. For
+this I will have to look into how hash-consing is actually performed and
+how the behaviour can be expressed from it.
+
+ [\#3a7]: /zettel/3a7
diff --git a/content/zettel/2e1c6a.md b/content/zettel/2e1c6a.md
new file mode 100644
index 0000000..740231f
--- /dev/null
+++ b/content/zettel/2e1c6a.md
@@ -0,0 +1,25 @@
++++
+title = "Using hash-consing with `PTree`"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g1", "2e1c7", "2e1c6"]
+forwardlinks = ["3c3b", "3a8g5", "3a8g5e", "2e1c6b"]
+zettelid = "2e1c6a"
++++
+
+One solution to the hash-consing problem is to use a `PTree` to store
+the values and to define an equality check between the SAT expressions.
+That way, one can assign a unique ID to each of the predicates, and can
+therefore add them into the SAT expression. Then the logical expression
+can take into account the expressions. This greatly simplifies the
+heuristic algorithm to do the comparison, as instead a single SAT
+expression can be used to do the same thing.
+
+This technique can be used to solve the proof of the scheduling
+algorithm with hyperblocks ([\#3c3b]), as well as the proof of the
+predicates in CompCertGSA ([\#3a8g5], [\#3a8g5e]).
+
+ [\#3c3b]: /zettel/3c3b
+ [\#3a8g5]: /zettel/3a8g5
+ [\#3a8g5e]: /zettel/3a8g5e
diff --git a/content/zettel/2e1c6b.md b/content/zettel/2e1c6b.md
new file mode 100644
index 0000000..29c3966
--- /dev/null
+++ b/content/zettel/2e1c6b.md
@@ -0,0 +1,34 @@
++++
+title = "Similarity of hashing for SAT and for performance"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c6a"]
+forwardlinks = ["3c3c3"]
+zettelid = "2e1c6b"
++++
+
+In the case of SAT solving, the expressions actually do need to be
+hashed just so that they are evaluatable by the SAT solver. This is
+different to doing hash-consing for performance reasons, which is what
+Six et al. \[1\] are doing ([\#3c3c3]). In the Six et al. case, you care
+a lot about performance, so you want to do the hash-consing in OCaml,
+however, in our case you don't care too much about the hash-consing, but
+more about the hashing algorithm being unique and "easy" to work with.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-six21_verif_super_sched_relat_optim" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">C. Six, L. Gourdin, S. Boulmé, and
+D. Monniaux, “<span class="nocase">Verified Superblock Scheduling with
+Related Optimizations</span>,” Apr. 2021. Available:
+<https://hal.archives-ouvertes.fr/hal-03200774></span>
+
+</div>
+
+</div>
+
+ [\#3c3c3]: /zettel/3c3c3
diff --git a/content/zettel/2e1c7.md b/content/zettel/2e1c7.md
new file mode 100644
index 0000000..eea31fb
--- /dev/null
+++ b/content/zettel/2e1c7.md
@@ -0,0 +1,40 @@
++++
+title = "Hashing symbolic expressions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2b1", "2e1c6"]
+forwardlinks = ["2e1c6a", "3c3c3"]
+zettelid = "2e1c7"
++++
+
+We want to be able to transform symbolic expressions into a
+representation that can be passed to the SAT solver. For this, we need
+to turn two different symbolic expressions, that are structurally equal,
+into the same value. Using IDs from hash-consing is maybe not the best
+solution for this, because one would need to add axioms to Coq to be
+able to access the actual IDs of the hashes. These are needed to do the
+fundamental comparison.
+
+In addition to that, it might be necessary to be able to reify the IDs
+back into expressions, although this is maybe not the most important,
+just might be a function that is needed to show the uniqueness of the
+outputs, and the injectivity of the function.
+
+The first idea would be to uniquely identify the constructors with an ID,
+and then recursively translate the IDs of the other expressions as well.
+However, these lists should then be translated to a unique value to be
+used in the SAT solver. The easiest way to do that would be to add them
+all up as powers of two, but that might result in IDs that are much too
+large.
+
+Using hash-consing with the use of a map and pointers to the value, that
+would make things easier, but as stated earlier, actually accessing the
+value of the map requires some axioms, so it would be nice to not use
+them directly, but to maybe only use them as an optimisation, and a
+back-up way to generate the ids.
+
+A solution to this might be ([\#2e1c6a] or [\#3c3c3]).
+
+ [\#2e1c6a]: /zettel/2e1c6a
+ [\#3c3c3]: /zettel/3c3c3
diff --git a/content/zettel/2e1d.md b/content/zettel/2e1d.md
new file mode 100644
index 0000000..f8107b0
--- /dev/null
+++ b/content/zettel/2e1d.md
@@ -0,0 +1,35 @@
++++
+title = "Equality saturation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1c"]
+forwardlinks = ["2e1e", "2e1d1"]
+zettelid = "2e1d"
++++
+
+Often, ordering of optimisations is difficult, as they can produce
+different results, and one sometimes doesn't know beforehand which order
+will produce better results. Equality saturation \[1\] instead enriches
+the program with equivalent versions of it, which can then be later used
+to optimise different areas of it with different equal representations.
+This is performed for each optimisation, thereby enriching the tree and
+saturating it with equalities.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-tate09_equal_satur" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">R. Tate, M. Stepp, Z. Tatlock, and
+S. Lerner, “Equality saturation: A new approach to optimization,” in
+*Proceedings of the 36th annual ACM SIGPLAN-SIGACT symposium on
+principles of programming languages*, in POPL ’09. Savannah, GA, USA:
+Association for Computing Machinery, 2009, pp. 264–276. doi:
+[10.1145/1480881.1480915].</span>
+
+</div>
+
+</div>
+
+ [10.1145/1480881.1480915]: https://doi.org/10.1145/1480881.1480915
diff --git a/content/zettel/2e1d1.md b/content/zettel/2e1d1.md
new file mode 100644
index 0000000..902af9a
--- /dev/null
+++ b/content/zettel/2e1d1.md
@@ -0,0 +1,14 @@
++++
+title = "Program expression graphs"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1d"]
+forwardlinks = ["2e1d2"]
+zettelid = "2e1d1"
++++
+
+These are representations of programs in an expression form, which
+allows one to only work in this representation, and think of programs
+more like equations that can simply be transformed and saturated with
+equalities.
diff --git a/content/zettel/2e1d2.md b/content/zettel/2e1d2.md
new file mode 100644
index 0000000..f06aac2
--- /dev/null
+++ b/content/zettel/2e1d2.md
@@ -0,0 +1,16 @@
++++
+title = "Using equality saturation for translation validation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1d1"]
+forwardlinks = ["3a7"]
+zettelid = "2e1d2"
++++
+
+Equality saturation can therefore also be used to verify other
+translations ([\#3a7]), as these can be proposed as equivalent
+transformations, and then checked using the equality comparison
+algorithm for program expression graphs.
+
+ [\#3a7]: /zettel/3a7
diff --git a/content/zettel/2e1e.md b/content/zettel/2e1e.md
new file mode 100644
index 0000000..5c0802b
--- /dev/null
+++ b/content/zettel/2e1e.md
@@ -0,0 +1,20 @@
++++
+title = "Static single assignment"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1d"]
+forwardlinks = ["2e1f"]
+zettelid = "2e1e"
++++
+
+Static single assignment (SSA) is an intermediate language that is very
+useful for the analysis and transformation for various optimisations
+that require data-flow analysis. In standard intermediate languages,
+registers can be reused, which means it can be quite tricky to analyse
+the liveness and current value of each register. However, if only one
+assignment can be made to a register, then it is much easier to perform
+symbolic analysis on the intermediate representation, because all the
+registers are fresh. Therefore no sequential substitution needs to be
+done when performing the evaluation, and it's only necessary to
+substitute in the values for the registers directly.
diff --git a/content/zettel/2e1f.md b/content/zettel/2e1f.md
new file mode 100644
index 0000000..fe960b7
--- /dev/null
+++ b/content/zettel/2e1f.md
@@ -0,0 +1,24 @@
++++
+title = "Software loop pipelining"
+date = "2022-05-03"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1e"]
+forwardlinks = ["1c6", "2e1f1"]
+zettelid = "2e1f"
++++
+
+Software loop pipelining is described here as opposed to hardware loop
+pipelining ([\#1c6]). This is also an interesting question to think
+about. Even though the answer seems quite straightforward, thinking
+about the details is quite complicated. It's not clear that modulo
+scheduling followed by standard scheduling will give the same result as
+hardware scheduling, especially because when generating hardware
+directly one has much finer control over the execution.
+
+In software, code is inherently sequential, and compared to hardware
+where one can have a lot of data flowing through the circuit
+simultaneously, in software this has to be simulated.
+
+ [\#1c6]: /zettel/1c6
diff --git a/content/zettel/2e1f1.md b/content/zettel/2e1f1.md
new file mode 100644
index 0000000..c09ca89
--- /dev/null
+++ b/content/zettel/2e1f1.md
@@ -0,0 +1,16 @@
++++
+title = "Idempotency of loop pipelining"
+date = "2022-05-03"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1f"]
+forwardlinks = []
+zettelid = "2e1f1"
++++
+
+It is interesting to think about if loop pipelining is idempotent. My
+first guess would be no, as in most cases the pipelining algorithm will
+try and transform the loop into something more efficient. However, maybe
+it does reach a steady state where it will not apply any more
+optimisations.
diff --git a/content/zettel/2e2.md b/content/zettel/2e2.md
new file mode 100644
index 0000000..0965eb8
--- /dev/null
+++ b/content/zettel/2e2.md
@@ -0,0 +1,12 @@
++++
+title = "Fuzzing Compilers"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e1"]
+forwardlinks = ["2e3", "2e2a"]
+zettelid = "2e2"
++++
+
+Fuzzing is the act of randomly generating an input, and checking in
+various ways if the output is correct.
diff --git a/content/zettel/2e2a.md b/content/zettel/2e2a.md
new file mode 100644
index 0000000..e8885a1
--- /dev/null
+++ b/content/zettel/2e2a.md
@@ -0,0 +1,14 @@
++++
+title = "Equivalence Modulo Inputs"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e2"]
+forwardlinks = ["2e2b"]
+zettelid = "2e2a"
++++
+
+Equivalence modulo inputs testing (EMI) is a version of fuzzing which
+modifies existing test cases in a way so that the inputs define which
+pieces of code should be turned on or off. The resulting test-cases
+should therefore be equivalent according to the inputs.
diff --git a/content/zettel/2e2b.md b/content/zettel/2e2b.md
new file mode 100644
index 0000000..ac06612
--- /dev/null
+++ b/content/zettel/2e2b.md
@@ -0,0 +1,15 @@
++++
+title = "Fuzzing equivalence checking tools"
+date = "2023-04-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e2a"]
+forwardlinks = []
+zettelid = "2e2b"
++++
+
+The interesting thing about fuzzing equivalence checking tools is that
+one actually wants to generate two designs or pieces of code that are
+actually not equivalent to each other, but should still be similar
+enough to try and trick the equivalence checker.
diff --git a/content/zettel/2e3.md b/content/zettel/2e3.md
new file mode 100644
index 0000000..fbc3869
--- /dev/null
+++ b/content/zettel/2e3.md
@@ -0,0 +1,9 @@
++++
+title = "Dataflow Languages"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e2"]
+forwardlinks = []
+zettelid = "2e3"
++++
diff --git a/content/zettel/2e3b.md b/content/zettel/2e3b.md
new file mode 100644
index 0000000..cae70cb
--- /dev/null
+++ b/content/zettel/2e3b.md
@@ -0,0 +1,9 @@
++++
+title = "Functional Languages"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = []
+forwardlinks = []
+zettelid = "2e3b"
++++
diff --git a/content/zettel/2e4a.md b/content/zettel/2e4a.md
new file mode 100644
index 0000000..ed270f4
--- /dev/null
+++ b/content/zettel/2e4a.md
@@ -0,0 +1,9 @@
++++
+title = "Destructive updates"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = []
+forwardlinks = ["2e4a1"]
+zettelid = "2e4a"
++++
diff --git a/content/zettel/2e4a1.md b/content/zettel/2e4a1.md
new file mode 100644
index 0000000..42ec8dc
--- /dev/null
+++ b/content/zettel/2e4a1.md
@@ -0,0 +1,13 @@
++++
+title = "Type system to enforce"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e4a"]
+forwardlinks = ["2e4a2"]
+zettelid = "2e4a1"
++++
+
+The first solution is to use linear types to enforce the resource usage
+constraints at compile time, which guarantees that destructive updates
+can be performed on the data structure, as it will never be read again.
diff --git a/content/zettel/2e4a2.md b/content/zettel/2e4a2.md
new file mode 100644
index 0000000..98d8bcb
--- /dev/null
+++ b/content/zettel/2e4a2.md
@@ -0,0 +1,13 @@
++++
+title = "Runtime solutions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e4a1"]
+forwardlinks = ["2e4a3"]
+zettelid = "2e4a2"
++++
+
+The second solution is to check at run-time whether data is still being
+used, however, this will require a garbage collector to keep track of
+the orphaned objects.
diff --git a/content/zettel/2e4a3.md b/content/zettel/2e4a3.md
new file mode 100644
index 0000000..a5445a5
--- /dev/null
+++ b/content/zettel/2e4a3.md
@@ -0,0 +1,14 @@
++++
+title = "Static code analysis"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e4a2"]
+forwardlinks = ["2e4a4"]
+zettelid = "2e4a3"
++++
+
+The third method is to use static analysis to determine if a data
+structure can be modified in place. This often requires the schedule of
+the code to be rewritten to allow for the data structure to actually be
+modified in place.
diff --git a/content/zettel/2e4a4.md b/content/zettel/2e4a4.md
new file mode 100644
index 0000000..004c850
--- /dev/null
+++ b/content/zettel/2e4a4.md
@@ -0,0 +1,15 @@
++++
+title = "Downsides of the techniques"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e4a3"]
+forwardlinks = []
+zettelid = "2e4a4"
++++
+
+The downside of the first, is that especially in specification
+languages, the programmer does not want to think about the
+implementation details and detailed accesses to the data structures. It
+would therefore be good to either use the static analysis or runtime
+solutions instead, as they place less burden on the programmer.
diff --git a/content/zettel/2f.md b/content/zettel/2f.md
new file mode 100644
index 0000000..8998abf
--- /dev/null
+++ b/content/zettel/2f.md
@@ -0,0 +1,9 @@
++++
+title = "Graphics"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2e"]
+forwardlinks = ["2f1"]
+zettelid = "2f"
++++
diff --git a/content/zettel/2f1.md b/content/zettel/2f1.md
new file mode 100644
index 0000000..29e8cc4
--- /dev/null
+++ b/content/zettel/2f1.md
@@ -0,0 +1,38 @@
++++
+title = "Optimisation of vectorised code"
+date = "2022-04-29"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["2f"]
+forwardlinks = ["3a8g", "2e1b"]
+zettelid = "2f1"
++++
+
+This is work by Caroline in Irisa/Inria Rennes. This is based on the
+idea of single program multiple data (SPMD), where you have a single
+program which is run on different threads and on different data. The
+idea then is that you can actually combine the threads again into one
+vectorised instruction (so multiple threads actually are executing one
+common vectorised instruction).
+
+You can use GSA ([\#3a8g], [\#2e1b]) to vectorise instructions by using
+a blend instruction which is based on the predicate. This basically
+means that if you are creating many threads and are somehow branching on
+the actual threads themselves (this could be the colour of the
+fragment), then you can still generate the same instructions for all the
+threads, but use blend instructions to select the correct results after
+the fact. In some way this is also speculation, but you just redo the
+result in case you took the wrong branch for a few of the threads.
+
+Then you can add skips by comparing the vector using `any` and `all`
+checks, which can always be checks to all 0s by performing the same
+check for all the threads.
+
+This is a really interesting use-case for GSA, because you are not
+really using the predicates that GSA generates to actually analyse the
+code, but you are using it dynamically to be able to vectorise as many
+of the instructions as possible.
+
+ [\#3a8g]: /zettel/3a8g
+ [\#2e1b]: /zettel/2e1b
diff --git a/content/zettel/3a.md b/content/zettel/3a.md
new file mode 100644
index 0000000..90c33ec
--- /dev/null
+++ b/content/zettel/3a.md
@@ -0,0 +1,36 @@
++++
+title = "CompCert "
+date = "2020-12-10"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b6", "1f2a", "1d1", "1b2"]
+forwardlinks = ["3a4", "3b", "3a1"]
+zettelid = "3a"
++++
+
+CompCert \[1\] is a formally verified C compiler, meaning it has been
+proven to always generate machine code that behaves in the same way as
+the original C code. It therefore cannot have any bugs, as every
+translation step has a proof that it is correct. This proof is encoded
+in a theorem prover called Coq, and unlike many other proofs, the
+compiler itself is also written in Coq, so the proof corresponds
+directly to the algorithms. The proofs that are performed in the
+compiler are described in ([\#3a4]).
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-leroy09_formal_verif_realis_compil" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">X. Leroy, “Formal verification of
+a realistic compiler,” *Commun. ACM*, vol. 52, no. 7, pp. 107–115, Jul.
+2009, doi: [10.1145/1538788.1538814].</span>
+
+</div>
+
+</div>
+
+ [\#3a4]: /zettel/3a4
+ [10.1145/1538788.1538814]: https://doi.org/10.1145/1538788.1538814
diff --git a/content/zettel/3a1.md b/content/zettel/3a1.md
new file mode 100644
index 0000000..cea7c3b
--- /dev/null
+++ b/content/zettel/3a1.md
@@ -0,0 +1,15 @@
++++
+title = "CompCert Internals"
+date = "2020-12-10"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a"]
+forwardlinks = ["3a2", "3a1a"]
+zettelid = "3a1"
++++
+
+CompCert has 11 intermediate languages, that are used to prove different
+properties at each stage, as the higher level programming language is
+translated to assembly. The main two parts are the front end and the
+back end of the compiler.
diff --git a/content/zettel/3a10.md b/content/zettel/3a10.md
new file mode 100644
index 0000000..287afd3
--- /dev/null
+++ b/content/zettel/3a10.md
@@ -0,0 +1,13 @@
++++
+title = "Finite Memory CompCert "
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a9"]
+forwardlinks = ["3a10a"]
+zettelid = "3a10"
++++
+
+One main problem with CompCert is that it does not have a finite memory
+model, as at its base it is infinite. However, there have been various
+works to try and remedy this.
diff --git a/content/zettel/3a10a.md b/content/zettel/3a10a.md
new file mode 100644
index 0000000..34e03f5
--- /dev/null
+++ b/content/zettel/3a10a.md
@@ -0,0 +1,30 @@
++++
+title = "Stack Aware CompCert"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a10b", "3a10"]
+forwardlinks = ["3a10b"]
+zettelid = "3a10a"
++++
+
+The first is stack aware CompCert \[1\], which was continued in
+CompCertELF ([\#3a10b]).
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-wang19_abstr_stack_based_approac_verif" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">Y. Wang, P. Wilke, and Z. Shao,
+“An abstract stack based approach to verified compositional compilation
+to machine code,” *Proc. ACM Program. Lang.*, vol. 3, no. POPL, Jan.
+2019, doi: [10.1145/3290375].</span>
+
+</div>
+
+</div>
+
+ [\#3a10b]: /zettel/3a10b
+ [10.1145/3290375]: https://doi.org/10.1145/3290375
diff --git a/content/zettel/3a10b.md b/content/zettel/3a10b.md
new file mode 100644
index 0000000..7301fe5
--- /dev/null
+++ b/content/zettel/3a10b.md
@@ -0,0 +1,28 @@
++++
+title = "CompCertELF"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a10a"]
+forwardlinks = ["3a10a", "3a10c"]
+zettelid = "3a10b"
++++
+
+CompCertELF is a refinement of stack aware CompCert ([\#3a10a]) \[1\].
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-wang20_compc" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">Y. Wang, X. Xu, P. Wilke, and Z.
+Shao, “CompCertELF: Verified separate compilation of c programs into ELF
+object files,” *Proc. ACM Program. Lang.*, vol. 4, no. OOPSLA, Nov.
+2020, doi: [10.1145/3428265].</span>
+
+</div>
+
+</div>
+
+ [\#3a10a]: /zettel/3a10a
+ [10.1145/3428265]: https://doi.org/10.1145/3428265
diff --git a/content/zettel/3a10c.md b/content/zettel/3a10c.md
new file mode 100644
index 0000000..6e30cd2
--- /dev/null
+++ b/content/zettel/3a10c.md
@@ -0,0 +1,28 @@
++++
+title = "CompCertS"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a10b"]
+forwardlinks = ["3a10d"]
+zettelid = "3a10c"
++++
+
+CompCertS \[1\] extends CompCert to support bit manipulation of pointers
+in a finite memory model, which modifies all passes in CompCert.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-besson18_compc" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">F. Besson, S. Blazy, and P. Wilke,
+“CompCertS: A memory-aware verified c compiler using a pointer as
+integer semantics,” *Journal of Automated Reasoning*, vol. 63, no. 2,
+pp. 369–392, Nov. 2018, doi: [10.1007/s10817-018-9496-y].</span>
+
+</div>
+
+</div>
+
+ [10.1007/s10817-018-9496-y]: https://doi.org/10.1007/s10817-018-9496-y
diff --git a/content/zettel/3a10d.md b/content/zettel/3a10d.md
new file mode 100644
index 0000000..ec94b48
--- /dev/null
+++ b/content/zettel/3a10d.md
@@ -0,0 +1,28 @@
++++
+title = "CompCertTSO"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a10c"]
+forwardlinks = []
+zettelid = "3a10d"
++++
+
+CompCertTSO \[1\] builds a notion of finite memory model to support the
+TSO shared memory model.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-sevcik13_compc" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">J. Ševčı́k, V. Vafeiadis, F. Zappa
+Nardelli, S. Jagannathan, and P. Sewell, “CompCertTSO: A verified
+compiler for relaxed-memory concurrency,” *J. ACM*, vol. 60, no. 3, Jun.
+2013, doi: [10.1145/2487241.2487248].</span>
+
+</div>
+
+</div>
+
+ [10.1145/2487241.2487248]: https://doi.org/10.1145/2487241.2487248
diff --git a/content/zettel/3a1a.md b/content/zettel/3a1a.md
new file mode 100644
index 0000000..f3f6198
--- /dev/null
+++ b/content/zettel/3a1a.md
@@ -0,0 +1,33 @@
++++
+title = "Issue with Main"
+date = "2021-02-17"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a1"]
+forwardlinks = []
+zettelid = "3a1a"
++++
+
+The `main` function in CompCert cannot take any arguments. In the
+rules for programs going wrong initially, if the initial state cannot be
+constructed, then the program will go wrong, which means that none of
+the correctness proofs can be said to hold.
+
+``` coq
+| program_goes_initially_wrong:
+ (forall s, ~initial_state L s) ->
+ program_behaves (Goes_wrong E0).
+```
+
+The initial state can also only be constructed if the signature of the
+first function that is called is of `signature_main`, which can only be
+of the function type without arguments.
+
+``` coq
+Inductive initial_state (p: program): state -> Prop :=
+ | initial_state_intro: forall b f m0,
+ ...
+ funsig f = signature_main ->
+ initial_state p (Callstate nil f nil m0).
+```
diff --git a/content/zettel/3a2.md b/content/zettel/3a2.md
new file mode 100644
index 0000000..7188095
--- /dev/null
+++ b/content/zettel/3a2.md
@@ -0,0 +1,16 @@
++++
+title = "Front End"
+date = "2020-12-10"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a1"]
+forwardlinks = ["3a3"]
+zettelid = "3a2"
++++
+
+The front end of the compiler translates Clight (a subset of C that
+CompCert supports) into a static single assignment (SSA) language called
+register transfer language (RTL). This translation mainly converts
+higher level constructs from the rich Clight language, into a language
+that models a control-flow graph (CFG) with eight instructions.
diff --git a/content/zettel/3a3.md b/content/zettel/3a3.md
new file mode 100644
index 0000000..ec8d9a5
--- /dev/null
+++ b/content/zettel/3a3.md
@@ -0,0 +1,47 @@
++++
+title = "Back End"
+date = "2020-12-10"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a2"]
+forwardlinks = ["3a4"]
+zettelid = "3a3"
++++
+
+This is where most of the optimisations happen, and where the
+intermediate CFG language is finally converted to proper assembly. The
+main optimisations are done in RTL, which is a language that models a
+CFG and has infinite registers it can choose from, therefore not using a
+stack. Optimisations such as inlining, constant propagation and dead
+code elimination are all performed at this level, using Kildall's
+algorithm to get the necessary analysis to perform these optimisations
+\[1\].
+
+RTL is then translated to LTL, which is where physical registers are
+assigned, and where the rest of the variables are put into the stack. At
+this level, the CFG is also translated into a CFG that uses basic blocks
+instead of instructions. This is an interesting addition, and I am not
+sure why that is not a property of RTL, as basic blocks can make some
+optimisations simpler. Optimisations that would affect basic blocks,
+such as lazy code motion (LCM) optimisations would be harder if the
+language contained basic blocks, as these would have to be changed. As
+these optimisations are performed at the RTL level, it does make sense
+to have a simpler representation of code at that level and make these
+transformations simpler.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-bertot06_struc_approac_provin_compil_optim"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">Y. Bertot, B. Grégoire, and X.
+Leroy, “A structured approach to proving compiler optimizations based on
+dataflow analysis,” in *Types for proofs and programs*, J.-C. Filliâtre,
+C. Paulin-Mohring, and B. Werner, Eds., Berlin, Heidelberg: Springer
+Berlin Heidelberg, 2006, pp. 66–81.</span>
+
+</div>
+
+</div>
diff --git a/content/zettel/3a4.md b/content/zettel/3a4.md
new file mode 100644
index 0000000..017cee6
--- /dev/null
+++ b/content/zettel/3a4.md
@@ -0,0 +1,55 @@
++++
+title = "Hacking CompCert"
+date = "2020-12-10"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a3", "3a"]
+forwardlinks = ["3a5", "3a4a"]
+zettelid = "3a4"
++++
+
+CompCert is built in a way which makes it very easy to hack, meaning one
+can insert extra passes and build on the proofs that they developed.
+There have been many examples of this, for example Compositional
+CompCert \[1\] or CompCertM \[2\].
+
+The main reason that CompCert facilitates this is because they have
+designed the proofs for each of their language translations in a similar
+way, meaning one can adopt the same translation style and get a similar
+proof for one's own intermediate language. In addition to that, CompCert
+contains many helper theorems for proofs of simulation, which are the
+main arguments that are used to prove equivalence. Finally, all the
+proofs of the different languages are composed at various levels,
+meaning that if they are changed, one can compose the proof in a custom
+way that still proves the original property that the behaviour stays the
+same throughout the translations.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-stewart15_compos_compc" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">G. Stewart, L. Beringer, S.
+Cuellar, and A. W. Appel, “Compositional CompCert,” in *Proceedings of
+the 42nd annual ACM SIGPLAN-SIGACT symposium on principles of
+programming languages*, in POPL ’15. Mumbai, India: Association for
+Computing Machinery, 2015, pp. 275–287. doi:
+[10.1145/2676726.2676985].</span>
+
+</div>
+
+<div id="ref-song19_compc" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">Y. Song, M. Cho, D. Kim, Y. Kim,
+J. Kang, and C.-K. Hur, “CompCertM: CompCert with C-assembly linking and
+lightweight modular verification,” *Proc. ACM Program. Lang.*, vol. 4,
+no. POPL, Dec. 2019, doi: [10.1145/3371091].</span>
+
+</div>
+
+</div>
+
+ [10.1145/2676726.2676985]: https://doi.org/10.1145/2676726.2676985
+ [10.1145/3371091]: https://doi.org/10.1145/3371091
diff --git a/content/zettel/3a4a.md b/content/zettel/3a4a.md
new file mode 100644
index 0000000..2907812
--- /dev/null
+++ b/content/zettel/3a4a.md
@@ -0,0 +1,27 @@
++++
+title = "Adding a new CompCert pass"
+date = "2020-12-10"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a4"]
+forwardlinks = ["3a4b"]
+zettelid = "3a4a"
++++
+
+The main stages that are necessary when adding a compiler pass in
+CompCert are the following:
+
+- Specify the operational semantics and syntax of the language.
+- Create the specification of the translation algorithm.
+- Create the algorithm that corresponds to the specification.
+- Prove that the algorithm implements the specification.
+- Prove that the specification retains the behaviour of the program
+ according to the operational semantics of the original language and
+ the target language.
+
+These steps are all necessary when specifying complex translations,
+however, when proving a simple transformation, it may not always be
+necessary to provide a specification of the translation algorithm.
+Instead, if it is simple enough, the algorithm itself can be used for the
+proof.
diff --git a/content/zettel/3a4b.md b/content/zettel/3a4b.md
new file mode 100644
index 0000000..faa053d
--- /dev/null
+++ b/content/zettel/3a4b.md
@@ -0,0 +1,13 @@
++++
+title = "Compiling CompCert as an external library"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a4a"]
+forwardlinks = []
+zettelid = "3a4b"
++++
+
+This is fine when only using the CompCert proofs. However, when
+extending CompCert, the OCaml files are also needed, which are normally
+not available after the CompCert development is installed.
diff --git a/content/zettel/3a5.md b/content/zettel/3a5.md
new file mode 100644
index 0000000..eaeacc3
--- /dev/null
+++ b/content/zettel/3a5.md
@@ -0,0 +1,25 @@
++++
+title = "Proofs "
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a4"]
+forwardlinks = ["3a6", "3a5a"]
+zettelid = "3a5"
++++
+
+Assuming that there are semantics for the source program S and the
+compiled program C, these semantics associate a behaviour to the
+execution of the program, which can be behaviours like termination,
+divergence or "going wrong" when undefined behaviour is executed.
+
+There are several ways this proof could be done; the relationships
+between them are shown below.
+
+![][1]
+
+The diagram above shows the relationships between different kinds of
+proofs that could be used to prove different properties about the
+compiler between the source language and the compiled output.
+
+ [1]: attachment:simulations.png
diff --git a/content/zettel/3a5a.md b/content/zettel/3a5a.md
new file mode 100644
index 0000000..7974512
--- /dev/null
+++ b/content/zettel/3a5a.md
@@ -0,0 +1,18 @@
++++
+title = "Bisimulation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5"]
+forwardlinks = ["3a5b"]
+zettelid = "3a5a"
++++
+
+The strongest property to prove is bisimulation, meaning the following
+property holds for some behaviour *B*.
+
+$\forall B,\ S \Downarrow B \iff C \Downarrow B$
+
+However, this is too strong for the notion of semantic preservation of
+the compiler, because that means that both the source and compiled
+output languages have to be deterministic.
diff --git a/content/zettel/3a5b.md b/content/zettel/3a5b.md
new file mode 100644
index 0000000..a07a469
--- /dev/null
+++ b/content/zettel/3a5b.md
@@ -0,0 +1,25 @@
++++
+title = "Backward Simulation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5a"]
+forwardlinks = ["3a5c"]
+zettelid = "3a5b"
++++
+
+A more relaxed version to prove semantic preservation of the compiler is
+to use backward simulation.
+
+$\forall B,\ C \Downarrow B \implies S \Downarrow B$
+
+However, this is also too strict, as some important optimisations
+violate this property. One example is dead code elimination, meaning if
+*S* contains dead code that also contains undefined behaviour, then C
+will not contain that undefined behaviour anymore and will therefore
+behave in a different way. To restrict that, we can therefore first
+assume that if the behaviour of the program is safe, then the backward
+simulation will hold.
+
+Therefore, assuming that the behaviour of the source is safe, we can
+then prove the same backward simulation.
diff --git a/content/zettel/3a5c.md b/content/zettel/3a5c.md
new file mode 100644
index 0000000..7148e8d
--- /dev/null
+++ b/content/zettel/3a5c.md
@@ -0,0 +1,36 @@
++++
+title = "Forward Simulation"
+date = "2020-12-10"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5b"]
+forwardlinks = ["3a5d", "3a5c1"]
+zettelid = "3a5c"
++++
+
+Forward simulation is a bit less informative, because it only says that
+if the source has some behaviour *B*, then the output will have the same
+behaviour. However, it might be that the output has some other behaviour
+in addition to that, which would still satisfy the property of forward
+simulation. These behaviours would be unwanted behaviours of *C* which
+are on top of the behaviours that *S* has.
+
+$\forall B,\ S \Downarrow B \implies C \Downarrow B$
+
+This is not possible when *C* is deterministic though, meaning that
+there is only one observable behaviour of *C*
+($C \Downarrow B_1 \land C \Downarrow B_2\implies B_1 = B_2$). If that
+is the case, then the forward simulation also implies the backwards
+simulation, as there is no other behaviour that *C* could have.
+
+As forward simulation is simpler to prove, it is used instead of a
+backward simulation together with a proof that the target language is
+deterministic. The latter proof of determinism is especially simple in
+CompCert, because the intermediate languages are single-threaded
+assembly languages that are inherently deterministic. In the case of
+compilation to hardware, this might be more difficult, because hardware
+is inherently parallel and nondeterministic. It would therefore have to
+conform to the specification no matter what path was taken. It might
+therefore also not be possible to follow the same proof, and one would
+have to prove the backward simulation directly.
diff --git a/content/zettel/3a5c1.md b/content/zettel/3a5c1.md
new file mode 100644
index 0000000..857e31d
--- /dev/null
+++ b/content/zettel/3a5c1.md
@@ -0,0 +1,64 @@
++++
+title = "Mutliple to one step simulation"
+date = "2022-04-24"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5c"]
+forwardlinks = ["3c3h1", "1b6", "3c3", "3a5c1a"]
+zettelid = "3a5c1"
++++
+
+This has been implemented in CompCert-KVX ([\#3c3h1]) \[1\] (Figure 12).
+
+The main problem that is often encountered is that you want to do
+multiple steps in the source language, and only one step in the output
+language. One example of this is when adding basic blocks ([\#1b6]) to
+the target language. In this case, one will often define the execution
+of the basic block using big-step semantics, as otherwise there would
+not be a great benefit of using these semantics. These semantics are
+useful for proving scheduling ([\#3c3]), for example. However, this
+means that the semantics are not in a one to one correspondence anymore.
+
+The traditional way to prove a forward simulation between a source and a
+target language is by using a simulation in lock-step, where one step in
+the input corresponds to one step in the output. However, a
+generalisation of this is to allow for the fact that one step in the
+input could match one or more steps in the output semantics. This is
+directly supported by the small step semantics framework inside of
+CompCert. Finally, another generalisation is that one step in the input
+could correspond to zero or more steps in the output. This comes with
+the limitation that one has to show that the semantics will not get
+stuck, meaning there is some constantly decreasing metric when one is
+not performing a step.
+
+However, none of these things directly solve the issue of having to
+perform multiple steps in the input to be able to even do one step in the
+output. To do this, one will have to abuse the star simulation (zero or
+more steps), but keep track of the previous steps that one has already
+performed to be able to reconstruct the proof from the start of the
+output step to the end of the output step. This can be done by always
+performing a `star_refl` step (don't perform a step), but still create a
+way to match states until one reaches the end of the output step.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-six22_formal_verif_super_sched" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">C. Six, L. Gourdin, S. Boulmé, D.
+Monniaux, J. Fasse, and N. Nardino, “Formally verified superblock
+scheduling,” in *Proceedings of the 11th ACM SIGPLAN international
+conference on certified programs and proofs*, in CPP 2022. Philadelphia,
+PA, USA: Association for Computing Machinery, 2022, pp. 40–54. doi:
+[10.1145/3497775.3503679].</span>
+
+</div>
+
+</div>
+
+ [\#3c3h1]: /zettel/3c3h1
+ [\#1b6]: /zettel/1b6
+ [\#3c3]: /zettel/3c3
+ [10.1145/3497775.3503679]: https://doi.org/10.1145/3497775.3503679
diff --git a/content/zettel/3a5c1a.md b/content/zettel/3a5c1a.md
new file mode 100644
index 0000000..a986dab
--- /dev/null
+++ b/content/zettel/3a5c1a.md
@@ -0,0 +1,24 @@
++++
+title = "Continuously matching states without stepping"
+date = "2022-04-24"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5c1"]
+forwardlinks = []
+zettelid = "3a5c1a"
++++
+
+The `match_states` predicate is supposed to represent a similarity of a
+state in the source language and a state in the target language. It
+requires a bit of a different view point to be able to implement the
+star simulation proof that was described earlier. In essence, one is
+still comparing similarities between the two different states, however,
+one is mostly using that comparison to also carry proofs about the
+previous executions one has performed. In addition to that, the matching
+predicate will always match the currently executing instruction, to the
+state at the start of the output transition. This requires that one is
+able to find the state of the output transition, which in the case of
+basic blocks is not that hard as it can actually be reliably computed,
+but in cases where it's more difficult, one could use some additional
+information that is returned by the generation of the output program.
diff --git a/content/zettel/3a5d.md b/content/zettel/3a5d.md
new file mode 100644
index 0000000..c5b8d3f
--- /dev/null
+++ b/content/zettel/3a5d.md
@@ -0,0 +1,18 @@
++++
+title = "Proving correctness of the specification"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5c"]
+forwardlinks = ["3a5e"]
+zettelid = "3a5d"
++++
+
+Having a backwards simulation implies that the output *C* will follow the
+same specification as the source program. This can be seen by the fact
+that the backwards simulation is just a stronger statement about
+correctness compared to following a simulation. For example, if a
+specification says that a program should output a number greater than
+10, the source could output 11 and the compiled program could output 12.
+These both follow the specification, but would not hold under backward
+simulation, where the two values would have to be the same.
diff --git a/content/zettel/3a5e.md b/content/zettel/3a5e.md
new file mode 100644
index 0000000..5c6bab6
--- /dev/null
+++ b/content/zettel/3a5e.md
@@ -0,0 +1,15 @@
++++
+title = "Preservation of safety"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5d"]
+forwardlinks = ["3a5f"]
+zettelid = "3a5e"
++++
+
+This is the preservation of types and memory safety throughout the
+compilation. This is weaker than proving that a specification holds, and
+therefore also weaker than proving the backward simulation between the
+languages. Therefore, this is also implied by having the backward
+simulation.
diff --git a/content/zettel/3a5f.md b/content/zettel/3a5f.md
new file mode 100644
index 0000000..4ffb49d
--- /dev/null
+++ b/content/zettel/3a5f.md
@@ -0,0 +1,29 @@
++++
+title = "Specification for proofs"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5e"]
+forwardlinks = []
+zettelid = "3a5f"
++++
+
+To build a proof of an algorithm, it can be useful to have a
+specification of the algorithm first, and then prove that this
+specification has the property that one wants to prove. This can be
+useful because the specification is often at a higher level than the
+algorithm itself, and it can therefore be easier to identify its
+behaviour in the proofs.
+
+However, one more step is needed if this approach is taken, because it
+also has to be proven that the algorithm that one designed does indeed
+implement the specification that was designed. However, this only has to
+be done once, and thereafter the specification can be used to prove any
+other useful properties.
+
+A concrete example for this is the proof from Cminor to RTL in CompCert.
+At the lowest level, there is an implementation of an algorithm that
+does this translation. However, then there is a specification of this
+algorithm using `Inductive` types. The latter is then used to prove that
+the semantics before the translation are equivalent to the semantics
+after the translation.
diff --git a/content/zettel/3a6.md b/content/zettel/3a6.md
new file mode 100644
index 0000000..87aa7b4
--- /dev/null
+++ b/content/zettel/3a6.md
@@ -0,0 +1,16 @@
++++
+title = "Traces"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a5"]
+forwardlinks = ["3a7"]
+zettelid = "3a6"
++++
+
+In CompCert proofs, observable behaviours are expressed as traces of
+input-output events. These events correspond to external function calls
+that interact with the outside environment in some way. For example,
+printing to the console requires some system calls and would be
+classified as calling an external function. Handling these in HLS is
+still not quite clear, but it might become clearer eventually.
diff --git a/content/zettel/3a7.md b/content/zettel/3a7.md
new file mode 100644
index 0000000..9752273
--- /dev/null
+++ b/content/zettel/3a7.md
@@ -0,0 +1,27 @@
++++
+title = "Translation validation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3", "3a6", "2e1d2", "2e1c6", "1f4a"]
+forwardlinks = ["2e1c1", "3a8", "3a7a"]
+zettelid = "3a7"
++++
+
+Translation validation is a proof technique whereby one can perform an
+unverified translation and verify that the results match after the
+translation using a verified validator. This gives exactly the same
+guarantees as normal verification for a verified compiler, for example,
+because the compiler is allowed to fail. The correctness proofs only
+need to hold if the compiler succeeds.
+
+This means that a proven validator which errors out if it cannot prove
+the equivalence between the input and output of the verified pass also
+allows for a verified pass. One therefore has to show that if the
+verified validator says that the input and output are equivalent, that
+the semantics of the input and output are as well.
+
+Translation validation is often performed by using symbolic execution
+([\#2e1c1]).
+
+ [\#2e1c1]: /zettel/2e1c1
diff --git a/content/zettel/3a7a.md b/content/zettel/3a7a.md
new file mode 100644
index 0000000..6ea573e
--- /dev/null
+++ b/content/zettel/3a7a.md
@@ -0,0 +1,37 @@
++++
+title = "Proving compiler optimisations"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a7"]
+forwardlinks = ["3a7b"]
+zettelid = "3a7a"
++++
+
+\[1\]
+
+- Module for general Analysis information
+- Use Kildall's algorithm to get information about what the compiler
+ knows at that instruction
+- Algorithm is implemented generally in Module AnalysisEntries (AE)
+- Module for general transformations (Transfer)
+- Implement transformations based on module TransferEntries, which can
+ prove other properties about the transformation for you
+- Transformations are then done by applying a functor which takes
+ TransferEntries and returns the Transfer module.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-bertot06_struc_approac_provin_compil_optim"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">Y. Bertot, B. Grégoire, and X.
+Leroy, “A structured approach to proving compiler optimizations based on
+dataflow analysis,” in *Types for proofs and programs*, J.-C. Filliâtre,
+C. Paulin-Mohring, and B. Werner, Eds., Berlin, Heidelberg: Springer
+Berlin Heidelberg, 2006, pp. 66–81.</span>
+
+</div>
+
+</div>
diff --git a/content/zettel/3a7b.md b/content/zettel/3a7b.md
new file mode 100644
index 0000000..e256c82
--- /dev/null
+++ b/content/zettel/3a7b.md
@@ -0,0 +1,26 @@
++++
+title = "Correctness argument for translation validation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a7a"]
+forwardlinks = ["3a7c"]
+zettelid = "3a7b"
++++
+
+Why is translation validation correct?
+
+If you have two languages $S$ and $T$ that you are translating in
+between, you are first going to evaluate both languages using abstract
+evaluation. This is a translation to a third intermediate language that
+consists of a forest of registers $\mathcal{A}$.
+
+The first step in the correctness of the translation from the input
+language $S$ to $\mathcal{A}$ is semantics preserving. This can be done
+because semantics have to be defined for the abstract interpretation.
+
+Then, using the same semantics for $\mathcal{A}$, one can prove that the
+translation from $T$ to $\mathcal{A}$ is also semantics preserving. Then
+one can prove the following:
+
+$$ \frac{s : S \rightarrow_s \alpha : \mathcal{A} \quad t : T \rightarrow_t\alpha' : \mathcal{A} \quad \alpha \sim \alpha'}{s \sim t} $$
diff --git a/content/zettel/3a7c.md b/content/zettel/3a7c.md
new file mode 100644
index 0000000..0b05b13
--- /dev/null
+++ b/content/zettel/3a7c.md
@@ -0,0 +1,27 @@
++++
+title = "Proof of translation validation theorem"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a7b"]
+forwardlinks = ["3a7d"]
+zettelid = "3a7c"
++++
+
+The proof follows from the fact that the translation to the abstract
+interpretation is semantics preserving. As the abstract interpretation
+of both languages is equal, this means that
+
+$$ \frac{\Sigma_s, s \downarrow_S \Sigma_s' \quad \Sigma_t, t \downarrow_T\Sigma_t' \quad s \sim t \quad \Sigma_s \sim \Sigma_t}{\Sigma_s' \sim \Sigma_t'}$$
+
+This is because:
+
+$$ \frac{\Sigma_s, \alpha \downarrow_{\mathcal{A}} \Sigma_s' \quad \Sigma_t,\alpha' \downarrow_{\mathcal{A}} \Sigma_t' \quad \alpha \sim \alpha' \quad\Sigma_s \sim \Sigma_t}{\Sigma_s' \sim \Sigma_t'} $$
+
+And the latter can be used because:
+
+$$ s \rightarrow_s \alpha \land t \rightarrow_t \alpha' $$
+
+We can therefore get the following:
+
+$$ \cfrac{\cfrac{}{\alpha \sim \alpha'} \quad \cfrac{\cfrac{}{\Sigma_s, s\downarrow_S \Sigma_s'} \quad \cfrac{}{s \rightarrow_s \alpha}}{\Sigma_s, \alpha\downarrow_{\mathcal{A}} \Sigma_s'} \quad \cfrac{\cfrac{}{\Sigma_t, t\downarrow_T \Sigma_t'} \quad \cfrac{}{t \rightarrow_t \alpha'}}{\Sigma_t,\alpha' \downarrow_{\mathcal{A}} \Sigma_t'} \quad \cfrac{}{\Sigma_s \sim\Sigma_t}}{\Sigma_s' \sim \Sigma_t' \land s \sim t} $$
diff --git a/content/zettel/3a7d.md b/content/zettel/3a7d.md
new file mode 100644
index 0000000..0ca7da1
--- /dev/null
+++ b/content/zettel/3a7d.md
@@ -0,0 +1,50 @@
++++
+title = "Adding predicates to symbolic execution"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a7c"]
+forwardlinks = ["1b8", "3c3f", "2e1b", "3a7e"]
+zettelid = "3a7d"
++++
+
+Adding predicates to the symbolic execution and abstract interpretation
+lets us compare the executions of hyperblocks ([\#1b8]), and is linked
+to the formal verification of HLS ([\#3c3f]). However, this seems to add
+a lot of complexity, because predicates take exponential time to check.
+My main argument is though that often you will not have that many
+predicates that are active within a block, and that it should therefore
+not take that long to check.
+
+In general, the idea is that each instruction can be conditionally
+executed, meaning the results of the registers should also contain
+conditionals. When checking the equivalences, it is therefore necessary
+to evaluate the predicates somehow, to be able to then evaluate the
+equivalence of the expressions. I believe that one first has to reduce
+the expressions to a form which does not include the predicates. Then
+one can compare bare expressions again.
+
+This is similar to how the comparison of predicates in gated-SSA
+([\#2e1b]) is performed \[1\], where extra functions are added to SSA
+which contain the predicate that the choice functions act upon.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-tu95_gated_ssa_based_deman_driven" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">P. Tu and D. Padua, “Gated
+SSA-based demand-driven symbolic analysis for parallelizing compilers,”
+in *Proceedings of the 9th international conference on supercomputing*,
+in ICS ’95. Barcelona, Spain: Association for Computing Machinery, 1995,
+pp. 414–423. doi: [10.1145/224538.224648].</span>
+
+</div>
+
+</div>
+
+ [\#1b8]: /zettel/1b8
+ [\#3c3f]: /zettel/3c3f
+ [\#2e1b]: /zettel/2e1b
+ [10.1145/224538.224648]: https://doi.org/10.1145/224538.224648
diff --git a/content/zettel/3a7e.md b/content/zettel/3a7e.md
new file mode 100644
index 0000000..c3e7cb8
--- /dev/null
+++ b/content/zettel/3a7e.md
@@ -0,0 +1,17 @@
++++
+title = "Simple comparisons of predicated instructions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a7d"]
+forwardlinks = []
+zettelid = "3a7e"
++++
+
+In practice, only a simple comparison of the predicates and values in
+the registers should be enough. In addition to that, we also need to
+keep track of the current state of the predicate register, which should
+always match through the symbolic execution. The predicate registers
+could just contain a simplistic overview of predicate operations that
+are allowed, such as performing an "or" between two predicates, or
+negating the predicate.
diff --git a/content/zettel/3a8.md b/content/zettel/3a8.md
new file mode 100644
index 0000000..09ece36
--- /dev/null
+++ b/content/zettel/3a8.md
@@ -0,0 +1,37 @@
++++
+title = "CompcertSSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a9", "3a8g", "3a7", "2e1b1b"]
+forwardlinks = ["3c", "1c6", "3a9", "3a8a"]
+zettelid = "3a8"
++++
+
+CompcertSSA[^1] \[1\] is a version of CompCert that integrates a static
+single assignment (SSA) form as a middle-end optimisation platform in
+CompCert. The main workflow is to go from RTL to SSA and then back to
+RTL, which means that it could be easily added onto Vericert ([\#3c]).
+This would allow for more optimisations that may benefit from a static
+single assignment, such as modulo scheduling even ([\#1c6]).
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-barthe14_formal_verif_ssa_based_middl_end_compc"
+class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">G. Barthe, D. Demange, and D.
+Pichardie, “Formal verification of an SSA-based middle-end for
+CompCert,” *ACM Trans. Program. Lang. Syst.*, vol. 36, no. 1, Mar. 2014,
+doi: [10.1145/2579080].</span>
+
+</div>
+
+</div>
+
+[^1]: //gitlab.inria.fr/compcertssa/compcertssa
+
+ [\#3c]: /zettel/3c
+ [\#1c6]: /zettel/1c6
+ [10.1145/2579080]: https://doi.org/10.1145/2579080
diff --git a/content/zettel/3a8a.md b/content/zettel/3a8a.md
new file mode 100644
index 0000000..4a51e13
--- /dev/null
+++ b/content/zettel/3a8a.md
@@ -0,0 +1,25 @@
++++
+title = "Phi instructions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8"]
+forwardlinks = ["3a8b", "3a8a1"]
+zettelid = "3a8a"
++++
+
+One main difference between normal intermediate languages and SSA forms
+are phi nodes, which are needed to transform branching statements. By
+default, the translation from a non-SSA form to an SSA form can just be
+done by labelling each variable with an index, and then each new
+assignment to that variable increments the index by one. The last index
+is then used by each instruction that uses that variable, which means
+that the transformation is semantics preserving.
+
+However, this is not the case when one can have multiple branches from
+multiple locations to a block. In this case, one needs to add phi
+instructions to assign the right value to the register. The phi
+instruction is defined as a function which returns the argument
+according to which predecessor executed the instruction.
+
+This means that branching instructions can be executed correctly.
diff --git a/content/zettel/3a8a1.md b/content/zettel/3a8a1.md
new file mode 100644
index 0000000..aafc677
--- /dev/null
+++ b/content/zettel/3a8a1.md
@@ -0,0 +1,27 @@
++++
+title = "Representations of phi functions in CompCertSSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8a"]
+forwardlinks = ["3a8a2"]
+zettelid = "3a8a1"
++++
+
+**Question**: How are phi functions represented, as they have their own
+program tree?
+
+They are represented using their own tree so that these are separate to
+the standard RTL instructions, which alleviates the equivalence checking
+between SSA and RTL.
+
+The way that the phi instructions are then executed, is that after a
+`Inop` branch, if it is a branching instruction, then the phi
+instructions for that block are all executed in parallel, which means
+the correct variables are chosen for each destination variable in the
+phi instruction.
+
+The way we know which value to take from the phi node, is that we know
+which predecessor we are coming from. So if it's the \$k\$th
+predecessor, then we take the \$k\$th index of the phi instruction
+arguments.
diff --git a/content/zettel/3a8a2.md b/content/zettel/3a8a2.md
new file mode 100644
index 0000000..a48882b
--- /dev/null
+++ b/content/zettel/3a8a2.md
@@ -0,0 +1,20 @@
++++
+title = "Parallel semantics of phi functions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8a1"]
+forwardlinks = ["3a8a3"]
+zettelid = "3a8a2"
++++
+
+The semantics of the phi instructions are parallel, which means that
+they all use the same starting state as input, and then update the
+registers based on that phi instruction. Modelling of the phi
+instructions as a sequential update is not performed.
+
+There are some cases, however, where the sequential execution of phi
+instructions does not lead to equivalent behaviour as the parallel
+execution of the instructions. There are then optimisations that can
+take advantage of this, and that the inputs and outputs of the phi
+instructions are independent.
diff --git a/content/zettel/3a8a3.md b/content/zettel/3a8a3.md
new file mode 100644
index 0000000..091aab7
--- /dev/null
+++ b/content/zettel/3a8a3.md
@@ -0,0 +1,15 @@
++++
+title = "When are phi functions executed"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8a2"]
+forwardlinks = []
+zettelid = "3a8a3"
++++
+
+Phi functions are executed directly after a `Inop` instruction that
+leads to a branching instruction, otherwise the standard execution of
+RTL instructions is performed. This means that it is much easier to
+define the semantics of the execution of the phi instructions, as these
+only have to be performed after an `Inop` instruction.
diff --git a/content/zettel/3a8b.md b/content/zettel/3a8b.md
new file mode 100644
index 0000000..bbd07ec
--- /dev/null
+++ b/content/zettel/3a8b.md
@@ -0,0 +1,19 @@
++++
+title = "Maximal and minimal phi"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8a"]
+forwardlinks = ["3a8c"]
+zettelid = "3a8b"
++++
+
+There are several ways in which one can generate correct SSA programs
+with phi functions. One can either just generate them with phi nodes at
+each entrance to a block, or generate them only when they are necessary.
+In the former case, the number of phi functions means that many
+optimisations are hindered and will not work optimally.
+
+To generate phi functions only when they are necessary, we only need to
+generate them where the join point can be reached by two different
+control flows with distinct definition points of the variable.
diff --git a/content/zettel/3a8c.md b/content/zettel/3a8c.md
new file mode 100644
index 0000000..852daba
--- /dev/null
+++ b/content/zettel/3a8c.md
@@ -0,0 +1,17 @@
++++
+title = "Type system for SSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8b"]
+forwardlinks = ["3a8d"]
+zettelid = "3a8c"
++++
+
+The type system for SSA tracks the most recent definitions of each
+variable. This technique could be quite interesting in maybe
+implementing better pointer support in Vericert, or different integer
+types in Vericert as well.
+
+To do this, it uses the liveness information for each code point to
+track the current version of the variable that is live at this point.
diff --git a/content/zettel/3a8d.md b/content/zettel/3a8d.md
new file mode 100644
index 0000000..f03cbfa
--- /dev/null
+++ b/content/zettel/3a8d.md
@@ -0,0 +1,13 @@
++++
+title = "Register definition"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8c"]
+forwardlinks = ["3a8e"]
+zettelid = "3a8d"
++++
+
+CompCertSSA redefines the instructions and registers to act on SSA
+registers instead, which are a combination of names and indices, which
+thereby create a new register every time an assignment is performed.
diff --git a/content/zettel/3a8e.md b/content/zettel/3a8e.md
new file mode 100644
index 0000000..4a1e710
--- /dev/null
+++ b/content/zettel/3a8e.md
@@ -0,0 +1,36 @@
++++
+title = "Equations on SSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5c", "3a8g5b", "3a8d"]
+forwardlinks = ["3a8f", "3a8e1"]
+zettelid = "3a8e"
++++
+
+There is also a notion of equations in CompCertSSA, which means that one
+can mathematically express the values that are stored in some registers.
+
+``` coq
+Lemma equation_lemma :
+ forall prog d op args x succ f m rs sp pc s,
+ wf_ssa_program prog ->
+ reachable prog (State s f sp pc rs m) ->
+ fn_code f d = Some (Iop op args x succ) ->
+ sdom f d pc ->
+ eval_operation sp op (rs##args) m = Some (rs#x).
+```
+
+Using the above Lemma, it is therefore possible to show that if
+operation at `d` is an assignment of an `Iop` instruction to `x`, then
+one can show that the evaluation of the operation with the arguments at
+the current register set will be equal to the value that is stored in
+the register set at `x`.
+
+**Question**: One question I have though, is can the args of the current
+register set not be different to the register set that was initially
+used to evaluate the initial `Iop` instruction?
+
+**Answer**: Actually, one possible answer to this is that the registers
+already encode the index, so the args will actually be exactly the same
+that were used in the original evaluation of the `Iop` instruction.
diff --git a/content/zettel/3a8e1.md b/content/zettel/3a8e1.md
new file mode 100644
index 0000000..7b8dcad
--- /dev/null
+++ b/content/zettel/3a8e1.md
@@ -0,0 +1,14 @@
++++
+title = "Using equations to prove copy propagation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8e"]
+forwardlinks = ["3a8e2"]
+zettelid = "3a8e1"
++++
+
+Copy propagation is when a direct assignment is propagated. This can be
+proven using the equations Lemma directly, because at each point the
+replacement can easily be justified by the argument that `rs#y = rs#x`
+at the current point when `y` is being used instead of `x`.
diff --git a/content/zettel/3a8e2.md b/content/zettel/3a8e2.md
new file mode 100644
index 0000000..65ee9fd
--- /dev/null
+++ b/content/zettel/3a8e2.md
@@ -0,0 +1,16 @@
++++
+title = "Replacing strict dominator by use"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8e1"]
+forwardlinks = ["3a8e3"]
+zettelid = "3a8e2"
++++
+
+The strict dominator property `sdom` is a weaker precondition to the
+`use` property, which determines when a variable is being used, which
+implies that it has to be strictly dominated by the assignment. This is
+because to use a variable, it must have been assigned beforehand, which
+in SSA means it has to have been a strict dominator in the control-flow
+graph.
diff --git a/content/zettel/3a8e3.md b/content/zettel/3a8e3.md
new file mode 100644
index 0000000..92affd0
--- /dev/null
+++ b/content/zettel/3a8e3.md
@@ -0,0 +1,28 @@
++++
+title = "Equation lemma proof"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8e2"]
+forwardlinks = ["3a8g"]
+zettelid = "3a8e3"
++++
+
+The equation lemma is proven by defining a constant property over the
+semantics, and proving that correct. This is proven in two parts:
+
+1. Prove the property using the initial entry point of the control flow
+ graph.
+2. Prove the property for an arbitrary point, assuming that the
+ property holds for the previous value in the control flow graph.
+
+This therefore results in a kind of induction over the semantics
+themselves, and then allows one to use the property anywhere in the
+semantics preservation proof.
+
+This can be used to prove the equation lemma, because the proof of
+preservation of the registers can be proven that way, but it can also be
+used for other proofs, such as proving the CompCertGSA ([\#3a8g])
+generation correct.
+
+ [\#3a8g]: /zettel/3a8g
diff --git a/content/zettel/3a8f.md b/content/zettel/3a8f.md
new file mode 100644
index 0000000..e49673e
--- /dev/null
+++ b/content/zettel/3a8f.md
@@ -0,0 +1,21 @@
++++
+title = "Dominator calculations in CompCertSSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8e"]
+forwardlinks = ["2e1b", "3a8g"]
+zettelid = "3a8f"
++++
+
+CompCertSSA currently does not have the completeness proof of the
+dominator trees, but does have dominator trees itself. These can be
+calculated using the `compute_test_dom` function, which includes a
+validator for the correct generation of the dominator tree.
+
+This generates the whole structure of the dominator hierarchy, which can
+then be used to do various analysis, such as finding out if one is in a
+loop, and where the loop header is. This can be useful for generating
+gated-SSA for example ([\#2e1b]).
+
+ [\#2e1b]: /zettel/2e1b
diff --git a/content/zettel/3a8g.md b/content/zettel/3a8g.md
new file mode 100644
index 0000000..b868394
--- /dev/null
+++ b/content/zettel/3a8g.md
@@ -0,0 +1,17 @@
++++
+title = "CompCertGSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8f", "3a8e3", "2f1"]
+forwardlinks = ["2e1b", "3a8", "3a8g1"]
+zettelid = "3a8g"
++++
+
+This version of CompCert introduces gated-SSA ([\#2e1b]) into
+CompCertSSA ([\#3a8]). The translation goes from SSA to GSA, and then
+GSA gets deconstructed into either SSA or RTL, depending on which will
+be easier to implement.
+
+ [\#2e1b]: /zettel/2e1b
+ [\#3a8]: /zettel/3a8
diff --git a/content/zettel/3a8g1.md b/content/zettel/3a8g1.md
new file mode 100644
index 0000000..3e9ba34
--- /dev/null
+++ b/content/zettel/3a8g1.md
@@ -0,0 +1,38 @@
++++
+title = "Transformation of η functions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g"]
+forwardlinks = ["2e1b1", "3a8g1a", "3a8g2"]
+zettelid = "3a8g1"
++++
+
+Construction of GSA can be done in various ways ([\#2e1b1]), however,
+there are some methods that are specifically useful for CompCertSSA and
+a certified compiler in general. First of all, rewriting in the language
+is a big hassle already, and is something that the SSA generation
+already does quite well, as that is one of the requirements of SSA
+generation.
+
+Rewriting is needed for η functions in particular, because during the
+conversion from SSA to GSA, there is no φ function there to replace. It
+is therefore necessary to insert another assignment, which in SSA means
+that all the subsequent uses of that variable need to be replaced by the
+new variable that was introduced.
+
+Instead of having to implement the rewriting again for GSA generation,
+one can reuse the rewriting that SSA already has, by inserting a simple
+identity assignment anywhere an η function is needed. This will then
+correctly be rewritten in SSA to be a fresh variable, which then can be
+replaced by the η function (This is actually not quite possible
+([\#3a8g1a])).
+
+The main problem with this is that the predicate in the η function needs
+to be shown to always hold at that point in the program, but in general
+this shouldn't really be a problem because the loop condition can be
+used for that. In the case that the η function directly follows the
+false branch of the loop condition, this is trivial.
+
+ [\#2e1b1]: /zettel/2e1b1
+ [\#3a8g1a]: /zettel/3a8g1a
diff --git a/content/zettel/3a8g1a.md b/content/zettel/3a8g1a.md
new file mode 100644
index 0000000..e728da8
--- /dev/null
+++ b/content/zettel/3a8g1a.md
@@ -0,0 +1,15 @@
++++
+title = "Cannot add moves too early"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g1"]
+forwardlinks = ["3a8g1b"]
+zettelid = "3a8g1a"
++++
+
+Unfortunately, the η instructions actually cannot really be identified
+before the SSA is actually generated, which means that the renaming
+cannot really be done by the SSA pass. This is because one would
+otherwise need some liveness analysis to be able to figure out which
+variables one would have to rename at that point.
diff --git a/content/zettel/3a8g1b.md b/content/zettel/3a8g1b.md
new file mode 100644
index 0000000..eaaa087
--- /dev/null
+++ b/content/zettel/3a8g1b.md
@@ -0,0 +1,19 @@
++++
+title = "Insertion of multiple η's"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g1a"]
+forwardlinks = ["3a8g5i"]
+zettelid = "3a8g1b"
++++
+
+As mentioned in the discussion on the renaming of the eta functions
+([\#3a8g5i]), the renaming of the η functions is not actually that
+straightforward either. As there can be multiple instances of η
+functions for the same variable, this means that one has to insert
+multiple new variables for the current variable. This means that the
+renaming has to take that into account, and may rename the registers to
+various different names.
+
+ [\#3a8g5i]: /zettel/3a8g5i
diff --git a/content/zettel/3a8g2.md b/content/zettel/3a8g2.md
new file mode 100644
index 0000000..e69f592
--- /dev/null
+++ b/content/zettel/3a8g2.md
@@ -0,0 +1,16 @@
++++
+title = "Deconstructing GSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g1"]
+forwardlinks = ["3a8g3", "3a8g2a"]
+zettelid = "3a8g2"
++++
+
+After generating GSA, one problem is deconstructing the GSA into SSA
+again, so that compilation can take place again. The main issue with
+this translation is the conversion of the eta functions, as they will be
+required to be transformed into various move instructions, which will
+need to be added to the control-flow graph. However, there are various
+SSA restrictions that need to be respected.
diff --git a/content/zettel/3a8g2a.md b/content/zettel/3a8g2a.md
new file mode 100644
index 0000000..ee613b4
--- /dev/null
+++ b/content/zettel/3a8g2a.md
@@ -0,0 +1,27 @@
++++
+title = "Detailed implementation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g2"]
+forwardlinks = ["3a8g2b"]
+zettelid = "3a8g2a"
++++
+
+Destruction of GSA is actually quite tricky to do correctly, as a lot of
+information was lost during its construction, and there are currently
+not enough strong well-formedness conditions about the predicates in the
+γ and η functions. Especially the γ functions need to be turned into φ
+functions, which essentially means reconstructing control-flow
+information just from the predicates.
+
+These predicates should be correct, which is hard to quantify itself,
+and actually does not relate to the control-flow in any way without
+having additional restrictions. The only thing one knows for certain is
+that the semantics ensure that the program will not block, meaning there
+will always be a predicate that is true, which allows the semantics to
+advance. This does not help for the destruction though, as this does not
+guarantee that the predicates talk about the control-flow paths in any
+way. For that one will have to add a well-formedness condition
+specifically for the destruction which will be true when the predicates
+do relate to control-flow in some way.
diff --git a/content/zettel/3a8g2b.md b/content/zettel/3a8g2b.md
new file mode 100644
index 0000000..c017128
--- /dev/null
+++ b/content/zettel/3a8g2b.md
@@ -0,0 +1,25 @@
++++
+title = "Simplifying predicates over paths"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g2a"]
+forwardlinks = ["3a8g2c"]
+zettelid = "3a8g2b"
++++
+
+One way to achieve this is to talk about how predicates are simplified
+symbolically when traversing a path from a dominator of the predicate to
+the predecessor of the γ function that the predicate should choose.
+Incidentally, it does not even make sense to really talk about a
+predecessor belonging to a predicate, because technically a predicate
+could execute to true for two independent predecessors or none at all.
+
+However, in a version of GSA where the predicates should be structurally
+correct, as is the case right after the construction of GSA, then one
+can actually formulate a property about the predicates which should hold
+for them when symbolically executed on the path. This could say that all
+predicates should simplify to a true value when executed along any of
+the paths in the control-flow graph. This might be quite a strong
+property, but it allows for a simple reconstruction of the control-flow
+paths that lead to each predicate.
diff --git a/content/zettel/3a8g2c.md b/content/zettel/3a8g2c.md
new file mode 100644
index 0000000..351ea9e
--- /dev/null
+++ b/content/zettel/3a8g2c.md
@@ -0,0 +1,16 @@
++++
+title = "Main difficulty in destruction proof"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g2b"]
+forwardlinks = ["3a8g2d", "3a8g2c1"]
+zettelid = "3a8g2c"
++++
+
+The main difficulty is relating dynamic evaluation behaviours of
+predicates to the static properties of the predicates. And without a SAT
+solver. In the proof, one needs to show that given a predicate $P$ which
+evaluates to true, and a predicate $P'$ which simplifies to true along
+a single path, that these are actually the same predicate, and therefore
+must refer to the same register.
diff --git a/content/zettel/3a8g2c1.md b/content/zettel/3a8g2c1.md
new file mode 100644
index 0000000..f66340a
--- /dev/null
+++ b/content/zettel/3a8g2c1.md
@@ -0,0 +1,16 @@
++++
+title = "Generalising the Simplification"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g2c"]
+forwardlinks = []
+zettelid = "3a8g2c1"
++++
+
+The first step seems to be to generalise the simplification argument so
+that it talks about all possible paths. However, this can only be done
+if one knows how the predicates are constructed. As otherwise one cannot
+reason about the predicates at all. This in turn is quite strong, as
+this definitely will not hold after optimisations have been performed in
+GSA.
diff --git a/content/zettel/3a8g2d.md b/content/zettel/3a8g2d.md
new file mode 100644
index 0000000..48b5a44
--- /dev/null
+++ b/content/zettel/3a8g2d.md
@@ -0,0 +1,44 @@
++++
+title = "A Possible Destruction Proof"
+date = "2022-06-28"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g2c"]
+forwardlinks = ["3a8g2b1", "3a8g2e"]
+zettelid = "3a8g2d"
++++
+
+The idea hinges on the following lemma, which talks about a path $p$
+from nodes $n$ to $m$, an arbitrary predicate $\mathcal{P}$ which gets
+simplified over that path into $\mathcal{P}'$, and finally an execution
+along a path from $n$ with register state $R$ to $m$ with register state
+$R'$. The execution of the predicate $\mathcal{P}$ with the final state
+$R'$ should give the same result as executing the simplified predicate
+$\mathcal{P}'$ with the initial state $R$.
+
+```{=latex}
+\begin{gather*}
+\frac{\begin{array}{c}
+ S^p(\mathcal{P}) = \mathcal{P}' \\
+ n, R \overset{p}{\longrightarrow^*} m, R'
+\end{array}}{
+ R', \mathcal{P} \Downarrow \lfloor b \rfloor \Longleftrightarrow R, \mathcal{P}'
+\Downarrow \lfloor b \rfloor
+}
+\end{gather*}
+```
+However, in addition to that we will also have to prove the
+generalisation of the simplification ([\#3a8g2b1]) to show that this has
+been done along the path of execution.
+
+The idea, finally, is that during the proof of semantic preservation, a
+path $n\rightsquigarrow_p m$, as well as the execution along that path
+$n, R\overset{p}{\longrightarrow^*} m, R'$ are remembered, so that once
+a γ function is reached, the lemma above can be applied. It might also
+be true that:
+
+$$ \frac{n, R \overset{p}{\longrightarrow^*} m, R'}{n \rightsquigarrow_p m} $$
+
+In which case the lemma can be simplified.
+
+ [\#3a8g2b1]: /zettel/3a8g2b1
diff --git a/content/zettel/3a8g2e.md b/content/zettel/3a8g2e.md
new file mode 100644
index 0000000..bfad0ea
--- /dev/null
+++ b/content/zettel/3a8g2e.md
@@ -0,0 +1,20 @@
++++
+title = "Need for Recursion in the Proof"
+date = "2022-06-28"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g2d"]
+forwardlinks = []
+zettelid = "3a8g2e"
++++
+
+When using the above lemmas, each gamma needs to be paired with a
+dominator. If one is at a dominator, then one needs to start the
+collection of the execution along the path
+$n, R \overset{p}{\longrightarrow^*} m, R'$. If one is then at a γ
+function, one can use that path. However, one could also come across
+the following edge case.
+
+![][1]
+
+ [1]: attachment:proof-edge-case.svg
diff --git a/content/zettel/3a8g3.md b/content/zettel/3a8g3.md
new file mode 100644
index 0000000..9a69da7
--- /dev/null
+++ b/content/zettel/3a8g3.md
@@ -0,0 +1,27 @@
++++
+title = "Inserting nodes and ensuring SSA and CSSA well-formedness"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g2"]
+forwardlinks = ["3a8g4"]
+zettelid = "3a8g3"
++++
+
+There are various issues that are encountered when generating SSA code,
+not only for the SSA properties, but also to allow the SSA to be
+destructured properly to RTL code.
+
+Therefore, when adding an arbitrary node, which has to be done for the
+conversion of η functions ([\#3a8g1]), one has to be extremely careful
+what nodes one adds. The first problem is that η functions might
+actually lead to a junction point, or be right after a junction point,
+in which case the previous and next instructions must be Inop
+instructions according to the well-formedness property that CSSA
+requires.
+
+This well-formedness property can be ensured by adding an Inop
+instruction before and after the moves introduced during the translation
+of the η function.
+
+ [\#3a8g1]: /zettel/3a8g1
diff --git a/content/zettel/3a8g4.md b/content/zettel/3a8g4.md
new file mode 100644
index 0000000..d06546b
--- /dev/null
+++ b/content/zettel/3a8g4.md
@@ -0,0 +1,27 @@
++++
+title = "Issues caused by adding new nodes"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g3"]
+forwardlinks = ["3a8g5", "3a8g4a"]
+zettelid = "3a8g4"
++++
+
+However, more issues are caused by adding new nodes to the control-flow
+graph, which are specific to how CompCertSSA represents and handles φ
+functions. The predecessors function is directly dependent on the order
+in which the predecessors are visited, and it is therefore quite
+dependent on the control-flow graph layout. Even small changes in the
+nodes of the graph can change the order of the predecessors. This means
+that the predecessors for the nodes need to be recalculated.
+
+Previously, SSA optimisations did not really edit the control-flow graph
+much, and therefore never ran into this problem of having to rebuild the
+predecessors of phi functions. However, as we add moves into the
+control-flow graph to eliminate the η functions, this means that it does
+change, and that there is a chance it could move around the predecessors
+(this was found to be the case in the `clause.c` test case). In addition
+to that, GSA in general should not be dependent on the order of the
+arguments of the γ function, so these should be able to be reordered
+without any issues.
diff --git a/content/zettel/3a8g4a.md b/content/zettel/3a8g4a.md
new file mode 100644
index 0000000..6c3973f
--- /dev/null
+++ b/content/zettel/3a8g4a.md
@@ -0,0 +1,35 @@
++++
+title = "Rebuilding the correct order of arguments"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g4"]
+forwardlinks = ["3a8g4b"]
+zettelid = "3a8g4a"
++++
+
+This has been corrected in another note ([\#3a8g4b]).
+
+We therefore have to recreate the correct order of the arguments based
+on the predecessors. The correctness of this might be quite hard to
+show, however, the idea of the algorithm is the following:
+
+1. Find all the points of definition of the variables inside the φ
+ function, and get all the predecessors. The main idea is that the
+ definition points of the variables must necessarily dominate the
+ predecessor that should be used to select that variable. However,
+ the difficulty is that it might not uniquely dominate the
+ predecessor of the variable.
+2. Find all the points of definition that dominate the minimum number
+ of predecessors. If there are more than one, this means that there
+ must be duplicate versions of the variable inside the φ function.
+3. Take all the points where the number of predecessors that are
+ dominated by the point of definition also coincides with the number
+ of uses of that variable inside of the φ function.
+4. Take the list of predecessors and map through it, always looking up
+ the variables that were assigned to that predecessor in the previous
+ step, and replace the predecessor in the list.
+5. The result should be a correctly ordered list for the phi function,
+ which will work with an arbitrary order in the GSA input.
+
+ [\#3a8g4b]: /zettel/3a8g4b
diff --git a/content/zettel/3a8g4b.md b/content/zettel/3a8g4b.md
new file mode 100644
index 0000000..f5186f8
--- /dev/null
+++ b/content/zettel/3a8g4b.md
@@ -0,0 +1,34 @@
++++
+title = "Better way of rebuilding the order"
+date = "2022-04-29"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g4a"]
+forwardlinks = []
+zettelid = "3a8g4b"
++++
+
+The previous way of rebuilding the order is actually not correct. It's
+completely valid to have control-flow graphs where the variables in the
+argument list are declared at the same kind of domination point. This
+would lead to ambiguities and make it impossible to correctly rebuild
+the code using the domination method above, as there is only one order
+that is actually correct.
+
+For μ-functions this is not a problem, because their execution is
+dependent on predecessors, which is the same as for φ-functions. In that
+case, we only have to look at the previous predecessors and the current
+predecessors after having inserted the nodes to convert the η-functions.
+If the predecessors are the same, then the registers are switched, and
+otherwise they are kept the same, as that means the predecessors have
+not changed.
+
+However, for γ-functions this is a bit more complicated, because one
+does not have the correct predecessors available anymore. In that case,
+however, it's interesting to note that the only correctness argument
+that is really necessary to find the right predecessor is to show that
+there exists a path from the dominator to the program counter for which
+the chosen predicate simplifies syntactically to the value T. In that
+case, one knows that no other predicate will be true, and so this must be
+the correct register to pick for that predecessor.
diff --git a/content/zettel/3a8g5.md b/content/zettel/3a8g5.md
new file mode 100644
index 0000000..06a4bd8
--- /dev/null
+++ b/content/zettel/3a8g5.md
@@ -0,0 +1,15 @@
++++
+title = "Proof of CompCertGSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g4", "2e1c6a"]
+forwardlinks = ["3a8g5a"]
+zettelid = "3a8g5"
++++
+
+The proof of CompCertGSA is quite complicated, especially the proof of
+the correctness of the predicates. The predicates are generated using
+path expressions, so the proof of correctness of the translation
+requires a proof of correctness of the path expressions, of which there
+can be various different forms.
diff --git a/content/zettel/3a8g5a.md b/content/zettel/3a8g5a.md
new file mode 100644
index 0000000..58c1326
--- /dev/null
+++ b/content/zettel/3a8g5a.md
@@ -0,0 +1,50 @@
++++
+title = "Proof of path expressions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5"]
+forwardlinks = ["3a8g5b"]
+zettelid = "3a8g5a"
++++
+
+The base of the proof relies on the path expressions being correct. What
+does this mean?
+
+Well, there are various correctness properties one could try and come up
+with. The base structure of path expressions is a regex, which can match
+a possible path \[1\]. Therefore, the basic correctness property of the
+path expressions is that it should only match paths that are actually
+valid:
+
+$$ \forall a p b R_{a \rightarrow b}, p \in \sigma(R_{a \rightarrow b}) \implies\textit{path}\ a\ p\ b $$
+
+where $p$ is a path from $a$ to $b$ and $R$ is a path expression from
+$a$ to $b$. $\sigma$ is a function that returns the set of all the paths
+that were matched.
+
+However, that is not the correctness theorem that is needed during the
+proof. In actuality, we want to ensure that all the possible paths from
+$a$ to $b$ will be matched by the path expression $R$, which leads to
+the following correctness property that needs to be proven:
+
+$$ \forall a p b R_{a \rightarrow b}, \textit{path}\ a\ p\ b \implies p \in\sigma(R_{a \rightarrow b}) $$
+
+This allows us to show that if there is a certain path in the graph,
+that it will definitely be in the set of $\sigma(R)$.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-tarjan81_fast_algor_solvin_path_probl" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">R. E. Tarjan, “Fast algorithms for
+solving path problems,” *J. ACM*, vol. 28, no. 3, pp. 594–614, Jul.
+1981, doi: [10.1145/322261.322273].</span>
+
+</div>
+
+</div>
+
+ [10.1145/322261.322273]: https://doi.org/10.1145/322261.322273
diff --git a/content/zettel/3a8g5b.md b/content/zettel/3a8g5b.md
new file mode 100644
index 0000000..37938de
--- /dev/null
+++ b/content/zettel/3a8g5b.md
@@ -0,0 +1,31 @@
++++
+title = "Proving correctness by reducing to correctness of paths"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5d", "3a8g5a"]
+forwardlinks = ["3a8e", "3a8g5c"]
+zettelid = "3a8g5b"
++++
+
+The problem of correctness of path expressions can therefore be reduced
+to the correctness of paths themselves, using the following theorem (if
+it can be proven):
+
+```{=latex}
+\begin{align}
+\forall a\ &b\ P\ P'\ R\ s,\ \exists p,\\
+ &\textit{path}\ a\ p\ b \\
+ &\implies t_p\ p\ P \\
+ &\implies t\ R_{a \rightarrow b}\ P' \\
+ &\implies P \Downarrow s = T \\
+ &\implies P' \Downarrow s = T
+\end{align}
+```
+This therefore reduces the proof to a proof that there is at least one
+path that converts to a predicate which will then finally evaluate to
+true. This property is therefore useful to prove the induction of the
+semantics of SSA to prove the proper invariant, similar to the equations
+Lemma ([\#3a8e]).
+
+ [\#3a8e]: /zettel/3a8e
diff --git a/content/zettel/3a8g5c.md b/content/zettel/3a8g5c.md
new file mode 100644
index 0000000..722f8d5
--- /dev/null
+++ b/content/zettel/3a8g5c.md
@@ -0,0 +1,39 @@
++++
+title = "Semantic invariance property"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5b"]
+forwardlinks = ["3a8e", "3a8g5d", "3a8g5c1"]
+zettelid = "3a8g5c"
++++
+
+A semantic invariance property needs to be found, which can be proven to
+be true at any point in the graph. This then makes it easier to prove
+the correctness of the predicates, as the semantic property can be used
+instead if all the preconditions are met.
+
+The other benefit is that this property can be proven by induction on
+the semantics themselves, which means that one can assume that it is
+true for the previous state, and prove that it still holds for the next
+state. Afterwards, one also needs to prove that it holds for the initial
+state of the semantics, just like how the equations lemma was proven
+([\#3a8e]).
+
+The invariance property that needs to be proven for the correctness
+proof between the predicates and the path expressions is the following:
+
+```{=latex}
+\begin{align}
+\forall p_c\ r_s\ &m,\\
+(\forall d\ &R_{d\ \rightarrow p_c}\ P,\\
+&(\forall p, \text{path}\ d\ p\ p_c \implies
+ p \in \sigma(R_{d \rightarrow p_c}))\\
+&\implies t\ R_{d\ \rightarrow p_c}\ P \\
+&\implies \text{sdom}\ d\ p_c \\
+&\implies P \Downarrow (r_s, m) = T)\\
+\implies &\text{path\_prop}\ p_c\ r_s\ m
+\end{align}
+```
+
+ [\#3a8e]: /zettel/3a8e
diff --git a/content/zettel/3a8g5c1.md b/content/zettel/3a8g5c1.md
new file mode 100644
index 0000000..af27b61
--- /dev/null
+++ b/content/zettel/3a8g5c1.md
@@ -0,0 +1,22 @@
++++
+title = "Semantic invariance on the evaluation of conditions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5c"]
+forwardlinks = ["3a8g5c2"]
+zettelid = "3a8g5c1"
++++
+
+We can design a semantic invariance on the evaluation of conditions,
+showing that if we have a point that has predecessors, for any two
+predecessors, there exists a condition such that the successors of that
+condition dominate the two predecessors.
+
+This property should allow you to prove that in the region that is
+dominated by a successor of a condition, that condition will always
+evaluate to either true or false, depending on which successor was
+chosen.
+
+This can be proven independently of the predicates themselves, which
+makes the proof much easier.
diff --git a/content/zettel/3a8g5c2.md b/content/zettel/3a8g5c2.md
new file mode 100644
index 0000000..d9bed78
--- /dev/null
+++ b/content/zettel/3a8g5c2.md
@@ -0,0 +1,27 @@
++++
+title = "Linking the invariance of conditions to predicates"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5c1"]
+forwardlinks = []
+zettelid = "3a8g5c2"
++++
+
+However, linking the invariance of conditions to the predicates is more
+difficult than it originally seems, mainly because of disjunctions and
+because the form of the predicates is not well defined. The only structural
+property one has of those predicates is the dynamic evaluation
+equivalence of that predicate to the disjunction of the previous
+predicates together with their path condition.
+
+The random disjunctions in the predicate severely complicate the
+argument of why a predicate will evaluate to false, because it is not
+local anymore. As soon as one encounters a disjunction, one has to look
+at the predecessors and one must find a condition which separates each
+path from at least one other path from all the other predicates.
+
+Otherwise, it would have been enough to say that if the condition whose
+branch dominates the current point evaluates to false, that it implies
+that the whole predicate evaluates to false. That is not the case though
+with disjunctions, and a more complicated algorithm would be needed.
diff --git a/content/zettel/3a8g5d.md b/content/zettel/3a8g5d.md
new file mode 100644
index 0000000..00b4d44
--- /dev/null
+++ b/content/zettel/3a8g5d.md
@@ -0,0 +1,44 @@
++++
+title = "Why this proof is tricky"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5c"]
+forwardlinks = ["3a8g5b", "3a8g5e"]
+zettelid = "3a8g5d"
++++
+
+However, one problem is that this proof is quite difficult, as there are
+many independent parts that need to be combined to show the correctness
+of the algorithm. One example is that the $t$ function, which translates
+a path expression to a predicate is completely independent from the
+correctness argument, which involves a separate function $\sigma$.
+Combining these parts all requires quite a lot of proofs of correctness
+between the different functions and how these interact with each other.
+
+For example, proving the path property ([\#3a8g5b]) requires
+interactions between $t$ and $\sigma$ already, proving that the
+predicates obtained from translating a list of paths compared to the
+regex are equivalent, which is not straightforward.
+
+In addition to that, even proving that the path expressions are indeed
+correct and follow the correctness property is non-trivial, because the
+algorithm by Tarjan \[1\] was followed, which uses Gaussian elimination
+for matrices to solve the path equation.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-tarjan81_fast_algor_solvin_path_probl" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">R. E. Tarjan, “Fast algorithms for
+solving path problems,” *J. ACM*, vol. 28, no. 3, pp. 594–614, Jul.
+1981, doi: [10.1145/322261.322273].</span>
+
+</div>
+
+</div>
+
+ [\#3a8g5b]: /zettel/3a8g5b
+ [10.1145/322261.322273]: https://doi.org/10.1145/322261.322273
diff --git a/content/zettel/3a8g5e.md b/content/zettel/3a8g5e.md
new file mode 100644
index 0000000..d15ab65
--- /dev/null
+++ b/content/zettel/3a8g5e.md
@@ -0,0 +1,29 @@
++++
+title = "Designing a proof with validation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3m2", "3a8g5g", "3a8g5e1a", "3a8g5d", "2e1c6a"]
+forwardlinks = ["3a8g5f", "3a8g5e1"]
+zettelid = "3a8g5e"
++++
+
+Because of the trickiness of the proof, it would therefore be good to
+integrate some validation into the algorithm, however, it's not quite
+clear where to do this check. One possibility, which might allow for
+quite a few optimisations to not make it too inefficient, is to prove
+the following invariant on the semantics:
+
+$$ \forall d\ s,\ t\ R_{d \rightarrow p_c} \Downarrow s \Longleftrightarrow t\left( \bigcup_{i \in \text{preds}(p_c)} R_{d \rightarrow i} \cdot c_i \right)\Downarrow s. $$
+
+However, this can be simplified even further by actually only verifying
+the predicates that were generated from the path expressions instead,
+giving the following invariant:
+
+$$ \forall d\ s,\ P_{d \rightarrow p_c} \Downarrow s \Longleftrightarrow \left(\bigvee_{i \in \text{preds}(p_c)} P_{d \rightarrow i} \land c_i \right)\Downarrow s. $$
+
+This property allows us to properly prove the correctness of the
+predicates, because one can use this property to prove that if one of the
+predecessors evaluates to true (which we can assume for the induction),
+then due to the predicate being equivalent to the ors of all the
+predecessors, it means that the current predicate must also be correct.
diff --git a/content/zettel/3a8g5e1.md b/content/zettel/3a8g5e1.md
new file mode 100644
index 0000000..1234d40
--- /dev/null
+++ b/content/zettel/3a8g5e1.md
@@ -0,0 +1,21 @@
++++
+title = "Removing variables that cannot be evaluated"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5e"]
+forwardlinks = ["3a8g5e2", "3a8g5e1a"]
+zettelid = "3a8g5e1"
++++
+
+In SSA, at each program point, only variables whose definition point
+dominates the current program point can be evaluated using the SSA
+equations. It is therefore important that all the variables inside of
+the SSA form should always dominate the current program point, and
+especially with predicates that are propagated, these predicates will
+need to be adjusted to only contain the correct variables.
+
+This can be done by replacing any predicate, whether it's the condition
+or it's negation, by T. This will definitely be correct for the
+propagation of the truth values of predicates, but might not be
+sufficient for the value of ɣ functions.
diff --git a/content/zettel/3a8g5e1a.md b/content/zettel/3a8g5e1a.md
new file mode 100644
index 0000000..5dd7043
--- /dev/null
+++ b/content/zettel/3a8g5e1a.md
@@ -0,0 +1,55 @@
++++
+title = "PTrue preservation for γ-functions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5e1"]
+forwardlinks = ["3a8g5e"]
+zettelid = "3a8g5e1a"
++++
+
+When using γ-functions, one needs to prove that for a join-point, the
+`PTrue` property is preserved across the semantics. For the η-function
+when there is no join-point, this was quite straightforward as no
+previous nodes had to be examined, there was only one. However, when
+proving the γ-function, there are multiple predecessors. Even though we
+have the `pred_correct` property, which has been validated by the SAT
+solver ([\#3a8g5e]), there is no guarantee that each of the predecessors
+will be evaluated to a `Some` value. Therefore, when we do the global
+`or` of all the predecessors, and say that this implies the correctness
+of the predicate at this point, we cannot be sure that we are evaluating
+that predicate to a value.
+
+Furthermore, it is actually most likely that we will not evaluate that
+predicate to a value, because there are various conditions and variables
+that remain undefined at that point. This means that various conditions
+will evaluate to a `None` value.
+
+There are three different possible solutions to this:
+
+1. Evaluate each predecessor with the original `eval_predicate`
+ function, but then or them together lazily using a slightly
+ different evaluation function. This evaluation function will take
+ into account the fact that some predicates will be able to return
+ `None`. The main problem with this is that this doesn't make it easy
+ to guarantee that the current predicate will return a `Some` value
+ with the same state. Instead, the current predicate would probably
+ also have to be evaluated lazily.
+2. Evaluate the whole predicate lazily, and only care about the truth
+ value of correctness at the output. This is definitely a better
+ solution than the previous, even though it probably requires more
+ changes. One main problem with this solution is that it may not be
+ possible to prove the required correctness proof using the SAT
+ solver.
+3. Finally, another solution would be to keep track of the evaluation
+ of each of the conditions, and therefore show that each condition is
+ evaluating to a `Some` value. Then, proving that when we encounter a
+ predicate that we must have traversed it previously, and due to the
+ fact that SSA guarantees that if the definition of a point dominates
+    the current point, then its value is known to be the
+ same as at the definition point, one can show that the condition at
+ the current point will also evaluate to a `Some` value. This is
+ probably the most realistic, but quite a difficult solution with a
+ lot of work involved to get it to work.
+
+ [\#3a8g5e]: /zettel/3a8g5e
diff --git a/content/zettel/3a8g5e2.md b/content/zettel/3a8g5e2.md
new file mode 100644
index 0000000..2387f9b
--- /dev/null
+++ b/content/zettel/3a8g5e2.md
@@ -0,0 +1,25 @@
++++
+title = "Double implication does not hold"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5e1"]
+forwardlinks = ["3a8g5g", "3a8g5e3"]
+zettelid = "3a8g5e2"
++++
+
+However, one of the downsides of eliminating these variables from the
+formulas means that the equivalence mentioned in the validation section
+([\#3a8g5g]) does not hold anymore, and one can only really prove the
+backward implication. However, maybe if the variables are also
+eliminated for the formulas from the previous predicates, then an actual
+equivalence can be shown between the two.
+
+However, this equivalence might not be useful enough though. One of the
+main problems faced with the predicates is that paths are completely
+removed and some kind of proofs on the paths need to be reinstated. This
+means that a lot of metadata needs to accompany the proof to show that
+if one formula is true, then all the other formulas must be false. This
+is the selection criterion for γ functions and therefore needs to hold.
+
+ [\#3a8g5g]: /zettel/3a8g5g
diff --git a/content/zettel/3a8g5e3.md b/content/zettel/3a8g5e3.md
new file mode 100644
index 0000000..9ac35b2
--- /dev/null
+++ b/content/zettel/3a8g5e3.md
@@ -0,0 +1,24 @@
++++
+title = "PTrue predicate under dominance"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5e2"]
+forwardlinks = ["3a8g5e4", "3a8g5e3a"]
+zettelid = "3a8g5e3"
++++
+
+The global predicate correct property is only true under dominance of
+the header (even though in reality this seems to be an optimisation
+actually). This means that not every predicate needs to be checked, but
+only the predicates that are actually dominated by the header node.
+Because of this optimisation, however, we actually need to preserve the
+fact that if the successor is dominated by the original header, that the
+current node is dominated too.
+
+This is correct, because if the successor is strictly dominated by the
+node, then all the previous nodes have to be dominated too, or one of
+the previous nodes must be the dominator, and there must be a path from
+that node to the other predecessors of the current node. Otherwise, not
+all paths from the input to the current node would pass through the
+dominator.
diff --git a/content/zettel/3a8g5e3a.md b/content/zettel/3a8g5e3a.md
new file mode 100644
index 0000000..331eba8
--- /dev/null
+++ b/content/zettel/3a8g5e3a.md
@@ -0,0 +1,15 @@
++++
+title = "Proving `PTrue` through the states"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5e3"]
+forwardlinks = []
+zettelid = "3a8g5e3a"
++++
+
+`PTrue` is a lemma that should be proven to be preserved throughout the
+execution of the semantics. This means that at each proof of semantic
+preservation, one must prove that assuming `PTrue pc` is true, that
+means that `PTrue pc'` is true. `PTrue pc` always refers to the current
+predicate that is assigned to `pc` from the map.
diff --git a/content/zettel/3a8g5e4.md b/content/zettel/3a8g5e4.md
new file mode 100644
index 0000000..f22bdec
--- /dev/null
+++ b/content/zettel/3a8g5e4.md
@@ -0,0 +1,15 @@
++++
+title = "Another problem with predicate elimination"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5e3"]
+forwardlinks = []
+zettelid = "3a8g5e4"
++++
+
+There is an additional problem when parts of the predicate are
+eliminated, which is that the predicates are actually not independent
+anymore. This means that the predicates are actually incorrect, even
+when domination based elimination is used, because conditions are
+eliminated that are still useful during the checking of the predicate.
diff --git a/content/zettel/3a8g5f.md b/content/zettel/3a8g5f.md
new file mode 100644
index 0000000..e983ff4
--- /dev/null
+++ b/content/zettel/3a8g5f.md
@@ -0,0 +1,30 @@
++++
+title = "Proving correctness of renaming"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5e"]
+forwardlinks = ["3a8g5g"]
+zettelid = "3a8g5f"
++++
+
+To prove the correctness in general, one has to prove that the result of
+the register in `rs'` is equivalent to the register in `rs`, where `rs'`
+is the register state in GSA and `rs` is the register state in SSA.
+However, the problem is how to map the registers in GSA to the registers
+in SSA, because they may have been renamed.
+
+The first thing to note is that renaming is only performed for η
+functions, and the special feature of η functions is that the argument
+can only be the assignment of a μ function. Therefore, the argument to
+the η function has to be a value that is less than the max register in
+the SSA function. It also means that the renaming succeeded, and
+therefore indexing the map with the SSA register gave the GSA register.
+
+This cannot continue recursively, therefore the map will always go from
+SSA registers which are less than the max register, and return a
+register that is greater than the max register.
+
+The agree function should therefore have the following form:
+
+$$ (\forall r,\ r \leq m \Rightarrow rs \mathbin{\#} r = rs' \mathbin{\#} r)\lor (\forall r\ r',\ r > m \Rightarrow t_r\ !\ r' = \texttt{Some } r\Rightarrow rs' \mathbin{\#} r = rs \mathbin{\#} r' \land r' \leq m) $$
diff --git a/content/zettel/3a8g5g.md b/content/zettel/3a8g5g.md
new file mode 100644
index 0000000..e258e49
--- /dev/null
+++ b/content/zettel/3a8g5g.md
@@ -0,0 +1,19 @@
++++
+title = "Using a SAT solver to prove correctness of predicates"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g6a", "3a8g5f", "3a8g5e2"]
+forwardlinks = ["3a8g5e", "3a8g5h", "3a8g5g1"]
+zettelid = "3a8g5g"
++++
+
+As mentioned in the proof of validation ([\#3a8g5e]), a SAT solver can
+be used to prove the predicate property for all of the program points.
+However, the main problem is that the predicates can sometimes be large,
+which makes the checking slow, especially with a naïve SAT solver that
+is proven correct. However, it should be possible to use a SAT solver
+like Z3 to simplify the predicate when it is constructed, so that the
+checking time is minimised.
+
+ [\#3a8g5e]: /zettel/3a8g5e
diff --git a/content/zettel/3a8g5g1.md b/content/zettel/3a8g5g1.md
new file mode 100644
index 0000000..2b8f5db
--- /dev/null
+++ b/content/zettel/3a8g5g1.md
@@ -0,0 +1,23 @@
++++
+title = "Implementing a three-valued solver"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3m2", "3a8g5g"]
+forwardlinks = ["3a8g5h3", "3a8g5h5"]
+zettelid = "3a8g5g1"
++++
+
+In the end, the current conversion to GSA uses a three-valued logic
+solver, as described in the third solution ([\#3a8g5h3]). The main
+problem with this check is that it is quite slow, and needs to be
+performed a lot of times. Even using a solver like Z3, it takes quite a
+bit of time to get a solution, and with large constructs like case
+statements, the translation problem becomes even harder.
+
+One way to approach this is to properly reason about the predicates
+instead of validating them, which is similar to the solution proposed in
+([\#3a8g5h5]).
+
+ [\#3a8g5h3]: /zettel/3a8g5h3
+ [\#3a8g5h5]: /zettel/3a8g5h5
diff --git a/content/zettel/3a8g5h.md b/content/zettel/3a8g5h.md
new file mode 100644
index 0000000..31ab96c
--- /dev/null
+++ b/content/zettel/3a8g5h.md
@@ -0,0 +1,34 @@
++++
+title = "Problem with SAT verification "
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5g"]
+forwardlinks = ["3a8g5i", "3a8g5h1"]
+zettelid = "3a8g5h"
++++
+
+In the current implementation, instead of changing the GSA semantics to
+match the SSA semantics for the γ-function, I have added validation that
+the predicates are indeed independent (if one predicate is true, all the
+others should be false) in `check_pred_independent` which should then
+prove that `pred_independent` holds.
+
+However, this actually discovered a bug in the compilation of
+`knucleotide.c`, where the simplification of predicates according to the
+dominance of the definition of the variables is actually not correct.
+One ends up having predicates that can both be true at the same time:
+
+![][1]
+
+The example above goes over this, and it is similar to the examples we
+discussed before, but we thought that dominance of variables was
+conservative enough, which actually isn't the case. In the example, `x2`
+is defined under the false branch, but is necessary to correctly
+identify the difference between the two predicates
+$1 \lor (\neg 1 \land 2)$ and $\neg 1 \land \neg 2$, which are logically
+independent before the simplification to $1 \lor \neg 1$ and
+$\neg 1 \land \neg 2$, where the left-hand branch will actually always be
+true.
+
+ [1]: attachment:gsa-simplification-problem.png
diff --git a/content/zettel/3a8g5h1.md b/content/zettel/3a8g5h1.md
new file mode 100644
index 0000000..cf3c1cf
--- /dev/null
+++ b/content/zettel/3a8g5h1.md
@@ -0,0 +1,20 @@
++++
+title = "First possible solution: Quantifiers"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5h"]
+forwardlinks = ["3a8g5h2"]
+zettelid = "3a8g5h1"
++++
+
+The first solution to this problem would be to add quantifiers to the
+atoms that cannot always be evaluated at the current spot. However, this
+would significantly increase the complexity of the SAT solver, as well
+as make predicates unevaluatable in general, because they might contain
+quantifiers that don't evaluate to one specific value.
+
+However, with a suitably strong SAT solver, it should be possible to
+show the independence between the different predicates. However, the
+correctness property should probably be correct as well and should be
+checkable with the SAT solver.
diff --git a/content/zettel/3a8g5h2.md b/content/zettel/3a8g5h2.md
new file mode 100644
index 0000000..b8facdd
--- /dev/null
+++ b/content/zettel/3a8g5h2.md
@@ -0,0 +1,18 @@
++++
+title = "Second possible solution: Craig interpolation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5h1"]
+forwardlinks = ["4e1", "3a8g5h3"]
+zettelid = "3a8g5h2"
++++
+
+The second possible solution could be to use Craig interpolation
+([\#4e1]), which was a suggestion by John. This is close to what we
+currently already do, however a bit more sound (replacing positive
+literals by $\top$ and replacing negative literals by $\perp$). This does not
+seem to completely solve the problem, because it only really works for
+implication, and for the independence property that's not enough.
+
+ [\#4e1]: /zettel/4e1
diff --git a/content/zettel/3a8g5h3.md b/content/zettel/3a8g5h3.md
new file mode 100644
index 0000000..73576e5
--- /dev/null
+++ b/content/zettel/3a8g5h3.md
@@ -0,0 +1,17 @@
++++
+title = "Third possible solution: Tri-state logic"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5h2", "3a8g5g1"]
+forwardlinks = ["3a8g5h4"]
+zettelid = "3a8g5h3"
++++
+
+Tri-state logic is also an interesting solution and it goes back to what
+we had initially thought about implementing, however, the problem with
+that is again the SAT solver, as we want to be able to assume that we do
+get an answer out of it. However, maybe it's possible to use the SAT
+solver natively on 3-state logic. We would then be able to construct the
+evaluation rules for our gates so that they follow the correctness
+property that we are hoping for.
diff --git a/content/zettel/3a8g5h4.md b/content/zettel/3a8g5h4.md
new file mode 100644
index 0000000..9e044b8
--- /dev/null
+++ b/content/zettel/3a8g5h4.md
@@ -0,0 +1,15 @@
++++
+title = "Fourth possible solution: Ordering of predicates"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5h3"]
+forwardlinks = ["3a8g5h5"]
+zettelid = "3a8g5h4"
++++
+
+Finally, the fourth solution could be to have lazy evaluation of the
+expressions, and order the predicates so that the more general property
+comes first, followed by the more specific properties. However, the main
+problem with this is that we don't really know which predicate came from
+which node, and what the order should be for those predicates.
diff --git a/content/zettel/3a8g5h5.md b/content/zettel/3a8g5h5.md
new file mode 100644
index 0000000..63b4610
--- /dev/null
+++ b/content/zettel/3a8g5h5.md
@@ -0,0 +1,18 @@
++++
+title = "Syntactic checks"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5h4", "3a8g5g1"]
+forwardlinks = ["3a8g5h6"]
+zettelid = "3a8g5h5"
++++
+
+Prove the Tarjan algorithm and get a syntactical proof that the predicates
+will be independent and correct. This will then allow one to make syntactic
+statements about the predicates, which can be much more useful than the
+abstract statements that are currently made.
+
+In addition to that, one problem is that currently the proof does not
+have a notion of post-dominance, which seems to be necessary to reason
+about these predicates in a syntactic manner.
diff --git a/content/zettel/3a8g5h6.md b/content/zettel/3a8g5h6.md
new file mode 100644
index 0000000..03a4660
--- /dev/null
+++ b/content/zettel/3a8g5h6.md
@@ -0,0 +1,15 @@
++++
+title = "Add check of correctness for Iop"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5h5"]
+forwardlinks = []
+zettelid = "3a8g5h6"
++++
+
+At each Iop, the defining variable should not be in the predicate. Once
+the simplification is not present anymore, it will be difficult to prove
+that the Iop instruction does not modify the value of the current
+predicate. Because of this, the current development always removes all
+the variables from the predicate that are currently being defined there.
diff --git a/content/zettel/3a8g5i.md b/content/zettel/3a8g5i.md
new file mode 100644
index 0000000..8e99f27
--- /dev/null
+++ b/content/zettel/3a8g5i.md
@@ -0,0 +1,32 @@
++++
+title = "Problem with renaming in GSA"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8g5h", "3a8g1b"]
+forwardlinks = []
+zettelid = "3a8g5i"
++++
+
+There is an additional problem that had to be fixed with respect to the
+renaming of registers. Initially, this was just a map from registers to
+a rename of that register. However, the problem is that this actually
+only allows for a single rename, whereas each η variable can
+be renamed to multiple other η variables, as we are instantiating a
+new variable for each η, and one may have multiple η for each μ.
+Therefore, we actually need to keep track of multiple possible renames,
+by having a list of tuples, containing the new name and the initial node
+where the register is initially assigned. Then, during the rename, every
+possible node in the rename list is checked to see if the assigning node
+dominates the current point, and if it does, we can rename the current
+register.
+
+One extra thing to take care of is that a property that should be true
+in general is that in a list of possible renames for a register, there
+will only ever be one whose definition point dominates the current
+point, because otherwise the insertion of the etas did not complete
+correctly. This is something that doesn't really affect the correctness
+of the generation, because if this situation ever occurs, then it should
+be safe to take either rename. However, when one needs properties about
+the GSA language, then it might be necessary to prove those things about
+it.
diff --git a/content/zettel/3a9.md b/content/zettel/3a9.md
new file mode 100644
index 0000000..936da67
--- /dev/null
+++ b/content/zettel/3a9.md
@@ -0,0 +1,24 @@
++++
+title = "Teams working on CompCert"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a8"]
+forwardlinks = ["3a8", "3a10"]
+zettelid = "3a9"
++++
+
+[Verimag]
+: Works on CompCert-KVX, Hash consing in CompCert, Impure computations
+ in Coq.
+
+[Celtique]
+: Works on constant time CompCert and CompCertSSA ([\#3a8]).
+
+[Xavier Leroy]
+: Works on original CompCert.
+
+ [Verimag]: https://www-verimag.imag.fr/FormalProofs-members.html
+ [Celtique]: https://team.inria.fr/celtique/
+ [\#3a8]: /zettel/3a8
+ [Xavier Leroy]: https://xavierleroy.com/
diff --git a/content/zettel/3b.md b/content/zettel/3b.md
new file mode 100644
index 0000000..617e2b9
--- /dev/null
+++ b/content/zettel/3b.md
@@ -0,0 +1,14 @@
++++
+title = "Coq"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3a"]
+forwardlinks = ["3c", "3b1"]
+zettelid = "3b"
++++
+
+Some notes regarding Adam Chlipala's book "Certified Programming with
+Dependent Types".[^1]
+
+[^1]: <http://adam.chlipala.net/cpdt/html/toc.html>
diff --git a/content/zettel/3b1.md b/content/zettel/3b1.md
new file mode 100644
index 0000000..133b638
--- /dev/null
+++ b/content/zettel/3b1.md
@@ -0,0 +1,25 @@
++++
+title = "Dependent Types"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b"]
+forwardlinks = ["3b2"]
+zettelid = "3b1"
++++
+
+A language with *dependent types* may include programs inside of the
+types. For arrays, this could, for example, be an array which has a
+program expression in the type which specifies the size of the array.
+
+The kernel proof language is what underlies the theorem proving. The "de
+Bruijn criterion" is satisfied by a theorem prover if the kernel
+language in which the proof is expressed in is expressed in a small
+kernel language. This is satisfied by most theorem provers including
+Coq. Only this small kernel language has to be trusted, as any searches
+for proofs will result in this kernel language and then be checked
+independently. That is why the search itself does not actually need to
+be trusted. Coq and Isabelle/HOL allow the programmer to define proof
+manipulations that cannot end up in the acceptance of invalid proofs.
+This can either be done in OCaml for Coq, but also in Ltac, which is a
+language in Coq designed for that purpose.
diff --git a/content/zettel/3b2.md b/content/zettel/3b2.md
new file mode 100644
index 0000000..387ac8a
--- /dev/null
+++ b/content/zettel/3b2.md
@@ -0,0 +1,17 @@
++++
+title = "Stack Machine"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b1"]
+forwardlinks = ["3b3"]
+zettelid = "3b2"
++++
+
+The theoretical foundation of Coq is called Calculus of Inductive
+Constructions (CIC), an extension to the older Calculus of Construction
+(CoC). Gallina is actually an extension of CIC which is all the code
+that comes after the `:=` and before the period. Next there is LTac,
+which is a domain-specific language for writing proofs and decision
+procedures. Finally, commands like `Inductive` and `Definition` are the
+Vernacular, which supports many queries to the Coq system.
diff --git a/content/zettel/3b3.md b/content/zettel/3b3.md
new file mode 100644
index 0000000..c5dfb27
--- /dev/null
+++ b/content/zettel/3b3.md
@@ -0,0 +1,9 @@
++++
+title = "Coq Types"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b2"]
+forwardlinks = ["3b4", "3b3a"]
+zettelid = "3b3"
++++
diff --git a/content/zettel/3b3a.md b/content/zettel/3b3a.md
new file mode 100644
index 0000000..3bc44f6
--- /dev/null
+++ b/content/zettel/3b3a.md
@@ -0,0 +1,20 @@
++++
+title = "Inductive Types"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b3"]
+forwardlinks = ["3b3b"]
+zettelid = "3b3a"
++++
+
+CIC is built only on two straightforward features, function types and
+inductive types, which can be used to reason about all theorems in math.
+In Coq, implication and function types are actually the same thing, and
+both therefore use the `->` operator. There is no overloading at play as
+they are the same according to the Curry-Howard isomorphism.
+
+Inductive and recursive types are the same because of the Curry-Howard
+isomorphism, except for the `Prop` and `Set` distinction. Inductive
+types would be easier to prove theorems with whereas recursive types
+would actually be computable and can be extracted to OCaml code.
diff --git a/content/zettel/3b3b.md b/content/zettel/3b3b.md
new file mode 100644
index 0000000..63e06ee
--- /dev/null
+++ b/content/zettel/3b3b.md
@@ -0,0 +1,19 @@
++++
+title = "Reflexive Types"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b3a"]
+forwardlinks = ["3b3c"]
+zettelid = "3b3b"
++++
+
+Reflexive types are of the nature of taking an argument which is a
+function and returning the Inductive type. Some of these are not legal
+in Coq, even though they would be legal in Haskell/ML, as they might
+produce computations that run forever. This would destroy all the
+confidence that one could have in the proof system, as that would mean
+that one could produce a proof for any theorem using an infinite loop.
+This can be done because proofs are combined with functions. For
+example, a reflexive type that takes the type it defines as an argument
+could recurse indefinitely, which should not be allowed to be defined.
diff --git a/content/zettel/3b3c.md b/content/zettel/3b3c.md
new file mode 100644
index 0000000..159fbad
--- /dev/null
+++ b/content/zettel/3b3c.md
@@ -0,0 +1,26 @@
++++
+title = "Inductive Predicates"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b3b"]
+forwardlinks = ["3b3d"]
+zettelid = "3b3c"
++++
+
+`tauto` is a complete decision procedure to solve propositional proofs,
+however, if other theorems are needed, then intuition can be used to
+prove more about those theorems.
+
+Coq implements *constructive*, or *intuitionistic* logic. Therefore,
+simple theorems like $\neg \neg P \rightarrow P$ do not always hold, and
+can only be proven if *P* is decidable.
+
+Therefore, the difference between `Prop` and `bool` is that `Prop` is
+often not decidable and cannot be constructed, whereas `bool` terms can
+be constructed by default.
+
+We can also think of `forall` as the type constructor for dependent
+function types. This is because implication and `forall` are
+actually the same, for example we can define `forall x : P, Q`, where x
+does not appear in `Q`, which is equivalent to `P -> Q`.
diff --git a/content/zettel/3b3d.md b/content/zettel/3b3d.md
new file mode 100644
index 0000000..0b85ee0
--- /dev/null
+++ b/content/zettel/3b3d.md
@@ -0,0 +1,32 @@
++++
+title = "Subset Types and Variations"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b3c"]
+forwardlinks = []
+zettelid = "3b3d"
++++
+
+`refine` can be used to define functions that take in and return subset
+types, by allowing you to replace placeholders with underscores, which
+can be proven afterwards. Together with powerful proof automation, this
+can be a nice way to define these dependent functions in a more similar
+way that they would be defined in Haskell or ML.
+
+However, `Program` is a term in Gallina which can be used to prove these
+without `refine`, however, `refine` gives more control over how the
+proof is done.
+
+The `sumbool` type is the type that can return a proof of either type in
+its constructor.
+
+`if then else` is actually overloaded in Coq and can work for any
+two-constructor `Inductive` type. This means it can be used with
+`sumbool` as well. When extracting `sumbool` to OCaml code, it uses
+`Left` and `Right` by default, however, it is better to use `true` and
+`false` in `bool` instead, which are built into OCaml. This can easily
+be done by telling Coq how it should extract the `sumbool` type.
+
+`sumor` is another type, which can either return a value of type A, or a
+proof of type B, which is written as the following: `A + {B}`.
diff --git a/content/zettel/3b4.md b/content/zettel/3b4.md
new file mode 100644
index 0000000..76eb7d8
--- /dev/null
+++ b/content/zettel/3b4.md
@@ -0,0 +1,19 @@
++++
+title = "General Recursion"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b3"]
+forwardlinks = ["3b5"]
+zettelid = "3b4"
++++
+
+One can define a well-formed relation on a recursive function and
+therefore prove that it terminates by using the `Fix` inductive.
+However, one can also define a termination monad which can represent
+functions that terminate and that don't terminate. Using it, one can
+define recursive functions without having to prove if they terminate or
+not, and one can still reason about them if the arguments terminate.
+
+Finally, one can also use co-inductive types to represent termination in
+a nicer way.
diff --git a/content/zettel/3b5.md b/content/zettel/3b5.md
new file mode 100644
index 0000000..82551fd
--- /dev/null
+++ b/content/zettel/3b5.md
@@ -0,0 +1,45 @@
++++
+title = "Proof search by logic programming"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b4"]
+forwardlinks = ["3b6", "3b5a"]
+zettelid = "3b5"
++++
+
+Proof search is simpler than automatic programming because any proof is
+good enough for the current goal, whereas not every program is good
+enough for the specification. That is why proof search is useful and
+should be used to find proofs in theorem provers.
+
+The `Hint Constructors` construct can be used to add hints for
+constructors for inductive types, so that auto can solve these
+automatically.
+
+There are two main important functions that are performed during proof
+search, unification and backtracking. Backtracking is performed by
+`auto`, where it will go back and try different values for a tactic.
+However, it does not perform unification. This is done by the `eauto`
+tactic instead, as it can lead to an explosion of proof terms that have
+to be considered. Unifications of variables are, for example, placing a
+unification variable into an existential and then proceeding.
+
+`Hint Immediate` can be used for `auto` to consider lemmas at the leaf
+nodes of a tree. To let `auto` consider them at any level in the search
+tree, `Hint Resolve` can be used
+instead.
+
+One problem that can greatly increase the search tree to find a possible
+solution is the use of transitivity, which can break existing proofs or
+increase the time taken to prove them dramatically.
+
+If hints such as transitivity are needed in `auto` to solve a particular
+proof, they can be added using `eauto with trans`. This way it does not
+pollute the hint database in cases this lemma is not needed.
+
+`Hint Extern` can be used to define custom matching rules with a
+priority in which they are attempted. Lower numbers will be attempted
+before higher numbers. For example,
+`Hint Extern 1 (sum _ = _) => simpl.` will run `simpl` whenever the
+pattern before the `=>` is encountered.
diff --git a/content/zettel/3b5a.md b/content/zettel/3b5a.md
new file mode 100644
index 0000000..0166a10
--- /dev/null
+++ b/content/zettel/3b5a.md
@@ -0,0 +1,18 @@
++++
+title = "Continuation passing style for `Ltac` functions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b5"]
+forwardlinks = []
+zettelid = "3b5a"
++++
+
+`Ltac` functions behave a lot like monads in Haskell, whereby they will
+be pure if no tactic has been invoked in them, but will be monadic if a
+tactic was executed before evaluating a `constr:()` construct. This is
+normally not a problem, as in Haskell there is the `return` function to
+lift any value into the monad.
+
+However, this is not defined for `Ltac`, and there is no `return`
+available.
diff --git a/content/zettel/3b6.md b/content/zettel/3b6.md
new file mode 100644
index 0000000..18e4874
--- /dev/null
+++ b/content/zettel/3b6.md
@@ -0,0 +1,50 @@
++++
+title = "Unsoundness of Coq FFI"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b6a", "3b5"]
+forwardlinks = ["3a", "3b7", "3b6a"]
+zettelid = "3b6"
++++
+
+There are examples where reasoning about the Coq FFI by using the
+`Parameter` declaration can actually result in unsoundness of the proof
+\[1\]. This happens in CompCert ([\#3a]) quite a lot when something is
+proven by translation validation. For example, in the following code
+there are quite a few possibilities that could introduce unsoundness.
+
+``` coq
+Definition one: nat := (S O).
+Axiom test: nat -> bool.
+Extract Constant test => "oracle".
+
+Lemma cong: test one = test (S O).
+ auto.
+Qed.
+```
+
+The first option which could introduce this unsoundness is that the
+`cong` lemma might not hold if equality is implemented using `==` in
+OCaml, which does equality on pointers.
+
+Second, if one relies on the fact that $\forall x, f(x) = f(x)$, this
+might not be true because the function might rely on external state and
+therefore might not be pure.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-boulmé19_embed_untrus_imper_ml_oracl" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">S. Boulmé and T. Vandendorpe,
+“<span class="nocase">Embedding Untrusted Imperative ML Oracles into Coq
+Verified Code</span>,” Jul. 2019. Available:
+<https://hal.archives-ouvertes.fr/hal-02062288></span>
+
+</div>
+
+</div>
+
+ [\#3a]: /zettel/3a
diff --git a/content/zettel/3b6a.md b/content/zettel/3b6a.md
new file mode 100644
index 0000000..d9faa24
--- /dev/null
+++ b/content/zettel/3b6a.md
@@ -0,0 +1,24 @@
++++
+title = "Represent external functions as non-deterministic"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b6"]
+forwardlinks = ["3b6"]
+zettelid = "3b6a"
++++
+
+By representing external function as being non-deterministic, one can
+better reason about the basic properties that the OCaml function gives,
+such as not allowing the property proven in `cong` in ([\#3b6]).
+
+To do this in the Coq type-system, one can represent return values as
+the type of all predicates of the function type. The return value of
+non-deterministic computations which may return a value `A` can be
+represented as $\mathcal{P}(A)$, which represents `A -> Prop`, all the
+predicates of `A`.
+
+This *may-return* monad is encoded in the [Impure Library].
+
+ [\#3b6]: /zettel/3b6
+ [Impure Library]: https://github.com/boulme/Impure
diff --git a/content/zettel/3b7.md b/content/zettel/3b7.md
new file mode 100644
index 0000000..9a6e659
--- /dev/null
+++ b/content/zettel/3b7.md
@@ -0,0 +1,38 @@
++++
+title = "Proof by reflection"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b6"]
+forwardlinks = ["3b8", "3b7a"]
+zettelid = "3b7"
++++
+
+Proofs by reflection \[1\] are extremely interesting, as they allow one
+to write decision procedures in Gallina itself, which is the typed
+language of Coq. The idea is that one can define inductive types which
+represent the inputs to the decision procedure, and can then prove
+statements about this structure using the decision procedure. Then, to
+prove theorems about actual Coq terms, one can use reification to
+convert Coq propositions and statements into the defined inductive type,
+and then perform the proof by reflection on the type. This then results
+in a proof for the original statement, as the decision procedure is
+verified to be correct.
+
+One then also has to define a function that evaluates (denotes) a Coq
+value from the data-structure. This then has to be proven to be correct
+according to the assigned semantics to the inductive structure. This
+evaluation provides the reflection for the procedure.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-chlipala13_certif" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">A. Chlipala, *Certified
+programming with dependent types: A pragmatic introduction to the coq
+proof assistant*. MIT Press, 2013.</span>
+
+</div>
+
+</div>
diff --git a/content/zettel/3b7a.md b/content/zettel/3b7a.md
new file mode 100644
index 0000000..af90b26
--- /dev/null
+++ b/content/zettel/3b7a.md
@@ -0,0 +1,21 @@
++++
+title = "Benefits of proofs by reflection"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b7"]
+forwardlinks = []
+zettelid = "3b7a"
++++
+
+The two main benefits of proof by reflection are that:
+
+- The size of the proof is small, because the statement of the
+ function is enough as a proof for it, meaning that it is often only
+ linear in the size of the number being proven, for example.
+- Proofs by reflection also "just work", because the dependent type of
+ the function gives a good indication about what proofs it will
+ return. As it has been verified to be correct, one knows that the
+ decision procedure will also work correctly. This is much better
+ than defining a recursive `Ltac` function as that does not have any
+ guarantees at all.
diff --git a/content/zettel/3b8.md b/content/zettel/3b8.md
new file mode 100644
index 0000000..2fa9ea4
--- /dev/null
+++ b/content/zettel/3b8.md
@@ -0,0 +1,12 @@
++++
+title = "Equality in Coq"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b7"]
+forwardlinks = []
+zettelid = "3b8"
++++
+
+Equality is quite important in Coq, as it allows you to rewrite the
+terms.
diff --git a/content/zettel/3c.md b/content/zettel/3c.md
new file mode 100644
index 0000000..ce45470
--- /dev/null
+++ b/content/zettel/3c.md
@@ -0,0 +1,9 @@
++++
+title = "Verification of HLS "
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3b", "3a8", "1f3a"]
+forwardlinks = ["3d", "3c1"]
+zettelid = "3c"
++++
diff --git a/content/zettel/3c1.md b/content/zettel/3c1.md
new file mode 100644
index 0000000..6c9450a
--- /dev/null
+++ b/content/zettel/3c1.md
@@ -0,0 +1,43 @@
++++
+title = "Combining Loop Pipelining and Scheduling "
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c", "1c6"]
+forwardlinks = ["1c6", "1c6a", "3c2"]
+zettelid = "3c1"
++++
+
+A [high-level synthesis] tool needs to perform various optimisations.
+
+The idea is to simulate full resource constrained scheduling with loop
+pipelining ([\#1c6]) by splitting up the transformation into two steps.
+One thing to consider is that this is software loop scheduling
+([\#1c6a]).
+
+1. First, resource constrained iterative modulo scheduling \[1\] can be
+ performed, which only affects loops and schedules each instruction
+ into a clock cycle. This can move instructions over loop boundaries
+ to create a more compact schedule and pipeline the instructions.
+2. Second, resource constrained scheduling can be performed on all the
+ instructions in the program, by limiting each schedule to a basic
+ block at a time. This will parallelise the epilogue that was
+ generated in the iterative modulo scheduling step.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-rau96_iterat_modul_sched" class="csl-entry" markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">B. R. Rau, “Iterative modulo
+scheduling,” *International Journal of Parallel Programming*, vol. 24,
+no. 1, pp. 3–64, Feb. 1996, Available:
+<https://doi.org/10.1007/BF03356742></span>
+
+</div>
+
+</div>
+
+ [high-level synthesis]: hls.org
+ [\#1c6]: /zettel/1c6
+ [\#1c6a]: /zettel/1c6a
diff --git a/content/zettel/3c2.md b/content/zettel/3c2.md
new file mode 100644
index 0000000..ad9070f
--- /dev/null
+++ b/content/zettel/3c2.md
@@ -0,0 +1,23 @@
++++
+title = "The need for duplicate loops"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c1"]
+forwardlinks = ["2b1", "1c6c", "1c6b", "2b2", "3c3"]
+zettelid = "3c2"
++++
+
+One problem with loop pipelining is that it may require duplicate loops,
+because if the loop iteration number is dynamic, the loop might only be
+executed once, which would not work in the pipelined loop version if a
+prologue and an epilogue are used. However, maybe with predicated
+execution ([\#2b1]) and not generating a prologue and an epilogue
+([\#1c6c]), this might actually still be possible. However, the loop
+cannot be unrolled, which means that rotating registers ([\#1c6b],
+[\#2b2]) would also have to be supported.
+
+ [\#2b1]: /zettel/2b1
+ [\#1c6c]: /zettel/1c6c
+ [\#1c6b]: /zettel/1c6b
+ [\#2b2]: /zettel/2b2
diff --git a/content/zettel/3c3.md b/content/zettel/3c3.md
new file mode 100644
index 0000000..83c3b36
--- /dev/null
+++ b/content/zettel/3c3.md
@@ -0,0 +1,16 @@
++++
+title = "Verification of scheduling "
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c2", "3a5c1", "1c3", "1c2a2b"]
+forwardlinks = ["1c1", "3a7", "2e1c1", "3c4", "3c3a"]
+zettelid = "3c3"
++++
+
+This covers notes on verification of scheduling ([\#1c1]) using
+translation validation ([\#3a7]) and symbolic execution ([\#2e1c1]).
+
+ [\#1c1]: /zettel/1c1
+ [\#3a7]: /zettel/3a7
+ [\#2e1c1]: /zettel/2e1c1
diff --git a/content/zettel/3c3a.md b/content/zettel/3c3a.md
new file mode 100644
index 0000000..c4e9d5b
--- /dev/null
+++ b/content/zettel/3c3a.md
@@ -0,0 +1,14 @@
++++
+title = "Semantics for both languages"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3", "1c5e"]
+forwardlinks = ["3c3b", "3c3a1"]
+zettelid = "3c3a"
++++
+
+The first thing that is needed is to define semantics for the input and
+output languages of the scheduling pass. In our case, this is
+`RTLBlock`, a sequential language, and `RTLPar`, a language which
+includes some parallelism.
diff --git a/content/zettel/3c3a1.md b/content/zettel/3c3a1.md
new file mode 100644
index 0000000..3fddadd
--- /dev/null
+++ b/content/zettel/3c3a1.md
@@ -0,0 +1,13 @@
++++
+title = "Designing a proper CDFG intermediate language"
+date = "2022-05-09"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3a"]
+forwardlinks = []
+zettelid = "3c3a1"
++++
+
+- Talk about DFG semantics for a language.
+- Diagram that John drew.
diff --git a/content/zettel/3c3b.md b/content/zettel/3c3b.md
new file mode 100644
index 0000000..7b68517
--- /dev/null
+++ b/content/zettel/3c3b.md
@@ -0,0 +1,30 @@
++++
+title = "General overview"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3a", "2e1c6a"]
+forwardlinks = ["1b8", "3c3c", "3c3b1"]
+zettelid = "3c3b"
++++
+
+The following two intermediate languages were created for scheduling:
+
+- **RTLPar**: A basic block version of RTL with parallel blocks inside
+ a basic block.
+- **RTLBlock**: A standard basic block.
+
+It should be possible to extend these with conditional execution to
+support hyperblocks ([\#1b8]), however the translation validation might
+have to change for that.
+
+- **RTL** is translated to **RTLBlock** using translation validation
+ to prove the correctness of the generation of basic blocks.
+- **RTLBlock** is translated to **RTLPar** by scheduling, which is
+ also proven using translation validation.
+- **RTLPar** is finally translated to **HTL** in a verified manner, as
+ no optimisations need to be performed.
+
+This should replace the verified **RTL** to **HTL** translation.
+
+ [\#1b8]: /zettel/1b8
diff --git a/content/zettel/3c3b1.md b/content/zettel/3c3b1.md
new file mode 100644
index 0000000..f4f8ce2
--- /dev/null
+++ b/content/zettel/3c3b1.md
@@ -0,0 +1,25 @@
++++
+title = "Matching states in RTLBlock and RTLPar"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3b"]
+forwardlinks = ["3c3b2"]
+zettelid = "3c3b1"
++++
+
+One issue is how to match states from the translation from RTLBlock to
+RTLPar. Even though the translation is quite straightforward, matching
+the states is still quite tricky because of issues with missing
+intensional equality between memories and registers.
+
+To match states, we therefore have to ensure that the RTLPar memory is
+*extended* from the RTLBlock memory, so that all loads and stores still
+result in the same values. In addition to that we need to check that the
+register file is *less defined* than the RTLBlock register file.
+Finally, stack frames also need to match, which also only have to be
+*less defined*.
+
+Finally, we also need to assert that the function was translated using
+the translation function that ensures that all the basic blocks succeed
+in the validation.
diff --git a/content/zettel/3c3b2.md b/content/zettel/3c3b2.md
new file mode 100644
index 0000000..ce7c1f7
--- /dev/null
+++ b/content/zettel/3c3b2.md
@@ -0,0 +1,17 @@
++++
+title = "Defining states"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3b1"]
+forwardlinks = ["3c3b3"]
+zettelid = "3c3b2"
++++
+
+One important property is to define the states of the RTLBlock and
+RTLPar language, as these will define how the semantics are defined.
+Currently they are defined using the same state as RTL and HTL, as it is
+possible to execute a basic block directly within a `State`. However,
+when in RTL or HTL, that is not the case anymore and therefore a
+different semantics might be needed that can execute one instruction at
+a time.
diff --git a/content/zettel/3c3b3.md b/content/zettel/3c3b3.md
new file mode 100644
index 0000000..703fcb9
--- /dev/null
+++ b/content/zettel/3c3b3.md
@@ -0,0 +1,26 @@
++++
+title = "New semantics to prove equivalent to RTL and HTL"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3b2"]
+forwardlinks = []
+zettelid = "3c3b3"
++++
+
+Maybe new semantics are needed to prove the equivalence between RTL
+$\to$ RTLBlock and RTLPar $\to$ HTL. These would have to then be proven
+equivalent to the current semantics, so that finally each translation is
+formally proven.
+
+However, if it is possible then the same semantics could be used to
+prove the equivalence between the languages, which might be more tricky
+but would not need another equivalent semantics. It probably needs to be
+investigated first to be able to tell which technique would be faster
+and simpler.
+
+It seems that from RTLPar to HTL there should not be such a big gap in
+the semantics, however, it does need to be shown that no states will be
+overwritten and therefore every state will still be reachable. It might
+therefore be necessary to do some renaming to ensure this property
+before continuing.
diff --git a/content/zettel/3c3c.md b/content/zettel/3c3c.md
new file mode 100644
index 0000000..2751f65
--- /dev/null
+++ b/content/zettel/3c3c.md
@@ -0,0 +1,40 @@
++++
+title = "Hash consing"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3b"]
+forwardlinks = ["3c3d", "3c3c1"]
+zettelid = "3c3c"
++++
+
+Hash consing is a property that can be used to perform faster equality
+checks between abstract terms, without having to go through each term
+sequentially. This is basically done by having a hash table that gets
+assigned terms of the same structure. This means that each term of the
+same structure will get the same pointer, which means that one just has
+to compare pointers to confirm structural equality.
+
+The following property has, however, not been formally proven in \[1\],
+and is instead left as an unproven assumption:
+
+$$ p = q \rightarrow *p = *q $$
+
+This should be true for hash-consed terms.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-six20_certif_effic_instr_sched" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">C. Six, S. Boulmé, and D.
+Monniaux, “Certified and efficient instruction scheduling: Application
+to interlocked VLIW processors,” *Proc. ACM Program. Lang.*, vol. 4, no.
+OOPSLA, Nov. 2020, doi: [10.1145/3428197].</span>
+
+</div>
+
+</div>
+
+ [10.1145/3428197]: https://doi.org/10.1145/3428197
diff --git a/content/zettel/3c3c1.md b/content/zettel/3c3c1.md
new file mode 100644
index 0000000..05fb1fa
--- /dev/null
+++ b/content/zettel/3c3c1.md
@@ -0,0 +1,20 @@
++++
+title = "Using the State monad for hash consing"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3c"]
+forwardlinks = ["3c3c2"]
+zettelid = "3c3c1"
++++
+
+One example is to use the State monad to remember inputs and their
+results. However, there are many down sides to doing it this way. First,
+this means that every function that should be memoized needs to use a
+monadic type. Therefore `A -> B` is converted to `A -> M B`, where `M`
+is the State monad.
+
+This then means that every other call site needs to be changed to work
+in the State monad, which is quite difficult, and makes it even harder
+to reason about the code. This is because it is a deep embedding of
+memoization in Coq.
diff --git a/content/zettel/3c3c2.md b/content/zettel/3c3c2.md
new file mode 100644
index 0000000..bfb08ba
--- /dev/null
+++ b/content/zettel/3c3c2.md
@@ -0,0 +1,15 @@
++++
+title = "Weakly embedded memoization"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3c1"]
+forwardlinks = ["3c3c3"]
+zettelid = "3c3c2"
++++
+
+Memoization can also be weakly embedded in Coq by using co-inductive
+datatypes. This works extremely well for functions that take Peano
+numbers as input, as a lazy data structure can be defined using
+co-inductive types, which represents unevaluated thunks. Once these are
+evaluated, they can just be read back normally.
diff --git a/content/zettel/3c3c3.md b/content/zettel/3c3c3.md
new file mode 100644
index 0000000..ba62c7c
--- /dev/null
+++ b/content/zettel/3c3c3.md
@@ -0,0 +1,64 @@
++++
+title = "Hash-consing for proof of scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3c2", "2e1c7", "2e1c6b"]
+forwardlinks = []
+zettelid = "3c3c3"
++++
+
+Hash-consing is used by Six et al. \[1\] to prove scheduling
+efficiently, as that was proposed by Tristan et al. \[2\] as one way to
+improve the scheduling algorithm. However, I don't think that this is a
+necessary improvement for the kinds of scheduling that was done in that
+paper, as in Tristan et al., the note about hash-consing was mostly
+meant about their trace-scheduling equivalence checker, as that does
+equivalence checking of a tree without any redundancy help.
+
+However, the work on superblock scheduling \[3\] might actually benefit
+from that, as it also does equivalence checking of trees in some way, I
+think.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-six20_certif_effic_instr_sched" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">C. Six, S. Boulmé, and D.
+Monniaux, “Certified and efficient instruction scheduling: Application
+to interlocked VLIW processors,” *Proc. ACM Program. Lang.*, vol. 4, no.
+OOPSLA, Nov. 2020, doi: [10.1145/3428197].</span>
+
+</div>
+
+<div id="ref-tristan08_formal_verif_trans_valid" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[2\]
+</span><span class="csl-right-inline">J.-B. Tristan and X. Leroy,
+“Formal verification of translation validators: A case study on
+instruction scheduling optimizations,” in *Proceedings of the 35th
+annual ACM SIGPLAN-SIGACT symposium on principles of programming
+languages*, in POPL ’08. San Francisco, California, USA: Association for
+Computing Machinery, 2008, pp. 17–27. doi:
+[10.1145/1328438.1328444].</span>
+
+</div>
+
+<div id="ref-six21_verif_super_sched_relat_optim" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[3\]
+</span><span class="csl-right-inline">C. Six, L. Gourdin, S. Boulmé, and
+D. Monniaux, “<span class="nocase">Verified Superblock Scheduling with
+Related Optimizations</span>,” Apr. 2021. Available:
+<https://hal.archives-ouvertes.fr/hal-03200774></span>
+
+</div>
+
+</div>
+
+ [10.1145/3428197]: https://doi.org/10.1145/3428197
+ [10.1145/1328438.1328444]: https://doi.org/10.1145/1328438.1328444
diff --git a/content/zettel/3c3d.md b/content/zettel/3c3d.md
new file mode 100644
index 0000000..527a6a6
--- /dev/null
+++ b/content/zettel/3c3d.md
@@ -0,0 +1,15 @@
++++
+title = "Normalisation of terms"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3c"]
+forwardlinks = ["3c3e"]
+zettelid = "3c3d"
++++
+
+Another property of the abstract interpretation is that the abstract
+interpretation should be normalised before it is compared, which then
+means that different rewrite optimisations can also be proven about the
+code. For example, additions and multiplications should be rewritten in
+a common form.
diff --git a/content/zettel/3c3e.md b/content/zettel/3c3e.md
new file mode 100644
index 0000000..ecea682
--- /dev/null
+++ b/content/zettel/3c3e.md
@@ -0,0 +1,28 @@
++++
+title = "Benefits of explicit basic blocks for scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3d"]
+forwardlinks = ["3c3f", "3c3e1"]
+zettelid = "3c3e"
++++
+
+Explicit basic blocks for scheduling means that there is a much more
+structured approach to the scheduling algorithm. But it also means that
+the intermediate languages cannot be directly translated to RTL again,
+as that language is unaware of basic blocks.
+
+The benefit of having support of parallel execution in the target
+language is that the scheduling can be much more powerful as well, as it
+can express the parallel execution of instructions directly. This means
+that any back end that can take advantage of these parallel instructions
+can directly use their parallel nature, without having to do that in a
+post-scheduling step.
+
+In the worst case, this can then be translated back into a sequential
+language anyways, if the goal was just to schedule instructions for a
+sequential processor, which would have gotten rid of any inefficiencies
+due to a back end specific scheduling algorithm. This would mean that
+the back end can be taken advantage of for all the following back ends
+that compcert has.
diff --git a/content/zettel/3c3e1.md b/content/zettel/3c3e1.md
new file mode 100644
index 0000000..9ba020c
--- /dev/null
+++ b/content/zettel/3c3e1.md
@@ -0,0 +1,16 @@
++++
+title = "Alternative trace scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3e"]
+forwardlinks = ["3c3e2"]
+zettelid = "3c3e1"
++++
+
+An alternative to the specific basic block representation is maybe trace
+scheduling, or superblock scheduling. For this representation, you
+actually do not need to change the structure of the code in general, you
+just need to add some extra information which designates the different
+traces inside of the current code. These will just be lists of paths,
+that designate the sets
diff --git a/content/zettel/3c3e2.md b/content/zettel/3c3e2.md
new file mode 100644
index 0000000..6b40f19
--- /dev/null
+++ b/content/zettel/3c3e2.md
@@ -0,0 +1,19 @@
++++
+title = "Superblock scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3e1"]
+forwardlinks = ["1b7"]
+zettelid = "3c3e2"
++++
+
+OOPSLA '21 paper to appear.
+
+An alternative is superblock ([\#1b7]) scheduling, where a block is
+composed of a path for each entry node. The scheduling algorithm then
+converts the path into a different path, and also returns a mapping of
+the old path to the original. This mapping is then used during the proof
+to select the paths to be compared to each other.
+
+ [\#1b7]: /zettel/1b7
diff --git a/content/zettel/3c3f.md b/content/zettel/3c3f.md
new file mode 100644
index 0000000..ee05072
--- /dev/null
+++ b/content/zettel/3c3f.md
@@ -0,0 +1,24 @@
++++
+title = "Using if-conversion on other CompCert languages"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3e", "3a7d", "2b1e"]
+forwardlinks = ["2b1e", "3c3g", "3c3f1"]
+zettelid = "3c3f"
++++
+
+If-conversion would also be natively supported by the intermediate
+language implemented in Vericert. However, the problem is that RTL
+itself does not support if-conversion, so it would therefore require a
+reverse if-conversion pass to get rid of the predicated instructions
+([\#2b1e]). This should be possible, as predicated instructions can just
+be placed into conditional statements again.
+
+I am not yet sure if this would actually improve the performance of the
+generated code though, but it does seem intuitive that this should be
+possible. If the scheduler is aware of the if-conversion and the reverse
+if-conversion pass, then it should be possible to guide it so that it
+converts the code in a favourable way for reverse if-conversion.
+
+ [\#2b1e]: /zettel/2b1e
diff --git a/content/zettel/3c3f1.md b/content/zettel/3c3f1.md
new file mode 100644
index 0000000..77ea3d7
--- /dev/null
+++ b/content/zettel/3c3f1.md
@@ -0,0 +1,21 @@
++++
+title = "Semantics of predicates in the abstract"
+date = "2023-02-04"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f"]
+forwardlinks = ["3c3f2"]
+zettelid = "3c3f1"
++++
+
+In the abstract language, which is used to verify the schedules produced
+by the scheduler, it also has to verify the equivalence of the
+predicates. However, these predicates have rich semantics, because they
+include expressions that are taken from the code during the symbolic
+evaluation. This, however, means that these predicates do not always
+evaluate to a value, as the context might not contain enough information
+to evaluate an expression. In addition to that, it's not certain that
+the expressions are free of undefined behaviour when they are evaluated
+with an arbitrary context, which would mean that a division that was
+safe in the input could suddenly become a division by 0.
diff --git a/content/zettel/3c3f2.md b/content/zettel/3c3f2.md
new file mode 100644
index 0000000..83b7b03
--- /dev/null
+++ b/content/zettel/3c3f2.md
@@ -0,0 +1,18 @@
++++
+title = "Properly reasoning about these predicates"
+date = "2023-02-04"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f1"]
+forwardlinks = ["3c3f3"]
+zettelid = "3c3f2"
++++
+
+To properly reason about these predicates, one would normally have to
+use three-valued logic. This allows one to reason about the cases where
+an atom is not evaluable, which would lead to the final formula to be
+non-evaluable. This allows one to be precise about the result of
+evaluating a predicate, with the downside of it being difficult to
+reason about all the combinations in proofs and having to use a powerful
+solver.
diff --git a/content/zettel/3c3f3.md b/content/zettel/3c3f3.md
new file mode 100644
index 0000000..856b100
--- /dev/null
+++ b/content/zettel/3c3f3.md
@@ -0,0 +1,27 @@
++++
+title = "Proving that all atoms are evaluable"
+date = "2023-02-04"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f2"]
+forwardlinks = ["3c3f4"]
+zettelid = "3c3f3"
++++
+
+Instead, if one can show that all the atoms of formulas are evaluable,
+then one should be able to show that the whole formula will also be
+evaluable. Then, one can simply reason about the formulas using binary
+logic. This simplifies the solver used to check these predicates, and
+reasoning about the predicates in general.
+
+The main proof-burden of this method is that it requires one proves that
+every atom is evaluable, and if one uses a context to evaluate some
+predicates, one has to carry around a proof that every predicate in that
+context is also evaluable. In addition to that, predicates that could
+technically evaluate to a result using short-circuiting will not be
+expressible in general in these formulas. For example, a predicate like
+the following: `p1 \/ p2`, where `p1` always evaluates to true, and `p2`
+might sometimes not be evaluable, is not expressible. This is because in
+general one would have to evaluate a predicate `p2` to three possible
+values, which would then have to be handled in the `\/` construct.
diff --git a/content/zettel/3c3f4.md b/content/zettel/3c3f4.md
new file mode 100644
index 0000000..db3eefd
--- /dev/null
+++ b/content/zettel/3c3f4.md
@@ -0,0 +1,28 @@
++++
+title = "Using lazy evaluation by construction"
+date = "2023-02-04"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f3"]
+forwardlinks = ["3c3f5"]
+zettelid = "3c3f4"
++++
+
+One possible solution to this is to write down lazy evaluation semantics
+for the predicate. These are equivalent to the standard evaluation
+semantics in the case where all the atoms are evaluable, however, they
+can help in the case where one of the atoms may not be evaluable. Then,
+as long as one can show that in the cases where this predicate has to be
+evaluated, it can be, one can show that the whole predicate will also
+always result in a value.
+
+This can be done in the case of the symbolic evaluation of the basic
+blocks, because the only cases where one is not certain if one can
+evaluate a predicate is when the predicate itself is predicated, and
+that predicate evaluates to false. This link is captured by an
+implication, and if a lazy implication is used instead, this means that
+one can evaluate the predicate eagerly in all cases, even when one
+cannot evaluate every one of the atoms. In addition to that, this can be
+done using binary logic and without having to resort to three-valued
+logic.
diff --git a/content/zettel/3c3f5.md b/content/zettel/3c3f5.md
new file mode 100644
index 0000000..4e30c35
--- /dev/null
+++ b/content/zettel/3c3f5.md
@@ -0,0 +1,25 @@
++++
+title = "On evaluability of predicates"
+date = "2023-02-05"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f4"]
+forwardlinks = ["3c3f6"]
+zettelid = "3c3f5"
++++
+
+1. First, I need to check if I can have lazy evaluation of predicates
+ with respect to the equivalence check. I need to verify that
+ equivalent formulas imply equivalent behaviour with lazy evaluation
+ of predicates. This might come down to a statement of "There exists
+ a way to evaluate the current predicate so that it evaluates to the
+ same version of the other predicate."
+2. I currently need to show that all predicates in the forest are
+ evaluable. This may not be the case in practice, but currently I use
+ it when merging. However, I think that with the lazy evaluation of
+ predicates, this might not be needed.
+3. Check what the difference is between evaluable and the `sem_pred_pred`
+ that is in the forest, as that says that one assigns the result of
+ evaluating the predicates to a predicate register map. This should
+ imply that they are all evaluable.
diff --git a/content/zettel/3c3f6.md b/content/zettel/3c3f6.md
new file mode 100644
index 0000000..c4e4a1e
--- /dev/null
+++ b/content/zettel/3c3f6.md
@@ -0,0 +1,33 @@
++++
+title = "Forest evaluable not needed in the end"
+date = "2023-02-14"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f5"]
+forwardlinks = ["3c3f7", "3c3f6a"]
+zettelid = "3c3f6"
++++
+
+At first I had a definition of `forest_evaluable` which said that the
+whole tree of predicate expressions could always be evaluated. However,
+in addition to that I had strict semantics of predicates, which is why
+to evaluate a predicate I had to know that I could evaluate both sides
+of the predicate before actually giving it a value. There were two
+problems:
+
+1. With the strict evaluation of predicates, the evaluability of the
+ predicates was actually not right, because there were some cases
+ where the predicates would not be evaluable by design. For example,
+ when we have a predicate assignment that itself is gated:
+ `if (p) p2 = c`. In this case, when the predicate `p` evaluates to
+ `false`, then one cannot know if the condition `c` can be evaluated.
+ This means that one cannot actually evaluate the predicate
+ $p \implies p_2 := c$. Strict semantics dictate that one has to be
+ able to get a value for $p_2 := c$ though.
+2. Many problems go away when one does not have strict evaluation
+anymore, because one will always be able to evaluate one of the
+predicates and therefore know the result of the predicate. This also
+ solves the previous problem, which is actually quite subtle, that
+ when a predicate is maybe not evaluable, it is hidden behind an
+ implication.
diff --git a/content/zettel/3c3f6a.md b/content/zettel/3c3f6a.md
new file mode 100644
index 0000000..e5029b4
--- /dev/null
+++ b/content/zettel/3c3f6a.md
@@ -0,0 +1,23 @@
++++
+title = "Evaluability inside of a current context"
+date = "2023-02-14"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f6"]
+forwardlinks = []
+zettelid = "3c3f6a"
++++
+
+The main important property is also not that one can always find an
+execution of a predicate in any context, but that in the current context
+that we are proving things in, that one can evaluate it only there. This
+is a much easier property to maintain (and is maintained automatically
+by the proof), because the semantic interpretation of the forest ensures
+that one evaluates all the predicates to some boolean already. This
+means that given the current context which can interpret the forest, we
+can also interpret any predicate that is inside the forest already.
+
+This also means that if we want to add a new predicate to the forest,
+that we need to show we can evaluate it, where the lazy predicate
+evaluation is important again.
diff --git a/content/zettel/3c3f7.md b/content/zettel/3c3f7.md
new file mode 100644
index 0000000..830df58
--- /dev/null
+++ b/content/zettel/3c3f7.md
@@ -0,0 +1,23 @@
++++
+title = "Strict evaluation or lazy evaluation of predicates"
+date = "2023-02-14"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f6"]
+forwardlinks = ["3c3f7a"]
+zettelid = "3c3f7"
++++
+
+Strict evaluation of predicates is the following:
+
+$$\frac{\theta \vdash p_1 \Downarrow b_1 \qquad \theta \vdash p_2 \Downarrow b_2}{\theta \vdash p_1 \land p_2 \Downarrow b_1 \mathbin{\texttt{\&}} b_2}$$
+
+Lazy evaluation of predicates will instead have multiple rules that take
+into account when we know the result of the computation:
+
+$$\frac{\theta \vdash p_1 \Downarrow \perp}{\theta \vdash p_1 \land p_2 \Downarrow \perp}$$
+
+$$\frac{\theta \vdash p_2 \Downarrow \perp}{\theta \vdash p_1 \land p_2 \Downarrow \perp}$$
+
+$$\frac{\theta \vdash p_1 \Downarrow \top \qquad \theta \vdash p_2 \Downarrow \top}{\theta \vdash p_1 \land p_2 \Downarrow \top}$$
diff --git a/content/zettel/3c3f7a.md b/content/zettel/3c3f7a.md
new file mode 100644
index 0000000..e5e02fc
--- /dev/null
+++ b/content/zettel/3c3f7a.md
@@ -0,0 +1,24 @@
++++
+title = "Strict evaluation and lazy evaluation are equivalent"
+date = "2023-02-14"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f7"]
+forwardlinks = []
+zettelid = "3c3f7a"
++++
+
+The main downside of the lazy evaluation approach is actually that there
+are additional constructors, and that the evaluation is less automatic.
+However, unintuitively, they should actually be equivalent from an
+evaluation perspective if one knows that one can evaluate all the
+branches. However, with richer predicates this is not the case anymore,
+which is why we have to use the lazy evaluation approach.
+
+However, when doing a SAT solve, it should be provable that given the
+evaluation of one of the predicates, and that the predicates used are
+the same (or maybe a subset), then one knows that one can evaluate both
+predicates. This is not as clear as I thought it would be though because
+I forgot that executing the leaves is not the same in boolean logic and
+in the rich predicate logic I use in the end.
diff --git a/content/zettel/3c3g.md b/content/zettel/3c3g.md
new file mode 100644
index 0000000..00c2775
--- /dev/null
+++ b/content/zettel/3c3g.md
@@ -0,0 +1,25 @@
++++
+title = "Defining the abstract language for symbolic evaluation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3f"]
+forwardlinks = ["1b8", "3c3h", "3c3g1"]
+zettelid = "3c3g"
++++
+
+To perform symbolic evaluation, we need to define a language that will
+syntactically produce equivalent programs if translated from RTLBlock or
+RTLPar if the schedule is correct. The language is represented as a
+tree, where each register maps to it's symbolic representation, that
+being the entry register by default. The difficulty comes because of the
+introduction of predicates because of the use of hyperblocks ([\#1b8]).
+
+The main idea of this abstract language is that the symbolic expressions
+that are assigned to each of the registers needs to be self-contained,
+and contain the full expression that will be assigned to the register.
+Especially with symbolic instructions, one therefore has to make the
+decision whether to have recursive predicated expressions, or linear
+predicated expressions.
+
+ [\#1b8]: /zettel/1b8
diff --git a/content/zettel/3c3g1.md b/content/zettel/3c3g1.md
new file mode 100644
index 0000000..eb5ae64
--- /dev/null
+++ b/content/zettel/3c3g1.md
@@ -0,0 +1,25 @@
++++
+title = "Recursive predicated expressions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2b1", "3c3g2", "3c3g"]
+forwardlinks = ["2e1c6a", "3c3g2"]
+zettelid = "3c3g1"
++++
+
+These are expressions of the following type:
+
+$$ P ::= [p?, P] \ |\ e $$
+
+This means that one can either have a list of predicates, linked to more
+predicated expressions, or be an expression. The one difficulty with
+dealing with such an expression is that the comparison using a SAT
+solver becomes basically impossible. This is because one cannot perform
+hashing of the predicates, as described in `Ptree` hashing ([\#2e1c6a]).
+The hashing of the expressions is necessary to be able to pass the
+expressions to the SAT solver in the first place, and because one
+doesn't have pure expressions anymore, one cannot define the syntactic
+equality operator anymore to compare (and therefore hash) expressions.
+
+ [\#2e1c6a]: /zettel/2e1c6a
diff --git a/content/zettel/3c3g2.md b/content/zettel/3c3g2.md
new file mode 100644
index 0000000..a0ce841
--- /dev/null
+++ b/content/zettel/3c3g2.md
@@ -0,0 +1,23 @@
++++
+title = "Linear predicated expressions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g1"]
+forwardlinks = ["3c3g1", "3c3g3"]
+zettelid = "3c3g2"
++++
+
+Instead of the recursive predicated expressions ([\#3c3g1]), one can
+create linear versions of those expressions, by basically flattening the
+trees into a single, non-recursive list.
+
+$$ P ::= [p?, e] $$
+
+There is always a direct possible translation between the recursive and
+linear representations, the backwards translation being trivial, as a
+linear representation is also a recursive one. The other direction is a
+bit more difficult, because one has to effectively multiply each of the
+expressions as one moves one level up.
+
+ [\#3c3g1]: /zettel/3c3g1
diff --git a/content/zettel/3c3g3.md b/content/zettel/3c3g3.md
new file mode 100644
index 0000000..bc08fd7
--- /dev/null
+++ b/content/zettel/3c3g3.md
@@ -0,0 +1,20 @@
++++
+title = "Combining linear predicated expressions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g2"]
+forwardlinks = ["3c3g4"]
+zettelid = "3c3g3"
++++
+
+$$ P_{1} \otimes P_{2} \equiv \texttt{map } (\lambda ((p_{1}, e_{1}), (p_{2},e_{2})) . (p_{1} \land p_{2}, f\ e_{1}\ e_{2}))\ P_{1} \times P_{2} $$
+
+The expressions are then constructed using a function which updates the
+symbolic expressions assigned for each resource. This is done using
+multiple primitives which act on predicated types, which are made up of
+a list of pairs of predicates and the element of that type. The first
+important primitive is multiplication of two predicated types, which is
+implemented as performing a cartesian multiplication between the
+predicated types in the two lists, anding the two predicates and joining
+the two types of each list using a function.
diff --git a/content/zettel/3c3g4.md b/content/zettel/3c3g4.md
new file mode 100644
index 0000000..be2326a
--- /dev/null
+++ b/content/zettel/3c3g4.md
@@ -0,0 +1,23 @@
++++
+title = "Appending predicated expressions to existing expressions"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g3"]
+forwardlinks = ["3c3g5"]
+zettelid = "3c3g4"
++++
+
+In addition to that, another primitive that is needed is the following
+append operation, which will negate the combination of all predicates in
+the second predicated type, conjoin it with the first predicated type,
+and append the first predicated type to the second:
+
+$$ \mu(p, P) \equiv \texttt{map } (\lambda (p', e') . (p \land p', e'))\ P $$
+
+$$ P_{1} \oplus_{p} P_{2} \equiv \mu(\neg p, P_{1}) \mathbin{++} \mu(p, P_{2})$$
+
+The append operation, denoted by $\oplus$, will take two predicated
+expressions $P_1$ and $P_2$ and negates all the predicates in the second
+and anding them to the predicates in the first, while then appending the
+whole expression to the previous one.
diff --git a/content/zettel/3c3g5.md b/content/zettel/3c3g5.md
new file mode 100644
index 0000000..33f3523
--- /dev/null
+++ b/content/zettel/3c3g5.md
@@ -0,0 +1,21 @@
++++
+title = "Update function for operators"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g4"]
+forwardlinks = ["3c3g6"]
+zettelid = "3c3g5"
++++
+
+Updating an existing abstract tree with an operation, which has the
+following syntax $i =$ `Iop` $p$ $\mathit{op}$ $\vec{r}$ $d$, can then
+be implemented as follows, where `Eop` $\mathit{op}$ $\vec{r}$ is the
+equivalent abstract expression used in the abstract language. This is
+done by using the following update function.
+
+$$ \upsilon (f, i) \equiv f \# d \leftarrow (f \# d) \oplus_{p} ((\top,\texttt{Eop } \mathit{op}) \otimes_f (\texttt{fold} \otimes_l (f\ \#\#\ \vec{r})\ (\top, []))) $$
+
+This update function $\upsilon$ will update the correct node in the
+abstract evaluation tree with the correct symbolic value, which is a
+combination of the original symbolic value and the next.
diff --git a/content/zettel/3c3g6.md b/content/zettel/3c3g6.md
new file mode 100644
index 0000000..cbdf84f
--- /dev/null
+++ b/content/zettel/3c3g6.md
@@ -0,0 +1,26 @@
++++
+title = "The need for abstract predicates"
+date = "2022-07-19"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g5"]
+forwardlinks = ["3c3g7", "3c3g6a"]
+zettelid = "3c3g6"
++++
+
+In the previous version of the abstract language with predicated
+expressions, the predicates were concrete, which is wrong because they
+will then always be executed with respect to the original context. The
+way this can be fixed is by also keeping track of abstract predicates,
+however, then one ends up with predicates that might themselves be
+predicated. Currently, the idea is to recombine these into a predicate
+whenever this is needed, by anding all the elements in the list
+together. This generates massive predicates because at each exit and
+each predicate assignment, the predicates are multiplied together. Most
+of the time, these predicates are very easy to solve, however, after
+using the Tseytin transformation, many additional variables are added
+which makes it much harder to solve.
+
+In general though, it seems like this analysis is at least correct,
+meaning it can find bugs hopefully.
diff --git a/content/zettel/3c3g6a.md b/content/zettel/3c3g6a.md
new file mode 100644
index 0000000..e98773e
--- /dev/null
+++ b/content/zettel/3c3g6a.md
@@ -0,0 +1,27 @@
++++
+title = "Abstract Predicates being Unevaluable"
+date = "2022-11-17"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g6"]
+forwardlinks = ["3a8g5g"]
+zettelid = "3c3g6a"
++++
+
+One issue with having abstract predicates is that they could just not be
+evaluable, because the semantics of abstract predicates is so much more
+rich than standard predicates. For example, one must assume that one can
+evaluate the whole predicate, which means that one must show that all
+the computations will terminate and not get stuck. Another solution
+would be to use three-valued logic to perform the equivalence check,
+which would be able to reason about cases that are unevaluable. This
+would be possible because a three-valued logic checker has now been
+implemented in CompCertGSA ([\#3a8g5g]). However, the main problem that
+I am anticipating is that when doing the reverse proof, one has to be
+able to show that the original program that generated the symbolic state
+is also evaluable, which would require reasoning about the evaluability
+explicitly anyways. If that is the case, then using three-valued logic
+only adds complexity to the proof.
+
+ [\#3a8g5g]: /zettel/3a8g5g
diff --git a/content/zettel/3c3g7.md b/content/zettel/3c3g7.md
new file mode 100644
index 0000000..51880ea
--- /dev/null
+++ b/content/zettel/3c3g7.md
@@ -0,0 +1,17 @@
++++
+title = "Another possible solution making assumptions about predicates"
+date = "2022-07-20"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g6"]
+forwardlinks = []
+zettelid = "3c3g7"
++++
+
+The main problem is the intractability of having predicated
+set-predicate operations, as well as the exit commands. This is because
+they both make the global predicate much larger meaning the expressions
+grow exponentially with it. When one has an exit command, one will have
+to exit from the block, meaning one needs to predicate all the remaining
+expressions with the negation of the exit predicate.
diff --git a/content/zettel/3c3h.md b/content/zettel/3c3h.md
new file mode 100644
index 0000000..ef7529d
--- /dev/null
+++ b/content/zettel/3c3h.md
@@ -0,0 +1,9 @@
++++
+title = "Practical comparison against superblock scheduling"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3g"]
+forwardlinks = ["3c3i", "3c3h1"]
+zettelid = "3c3h"
++++
diff --git a/content/zettel/3c3h1.md b/content/zettel/3c3h1.md
new file mode 100644
index 0000000..3d2f146
--- /dev/null
+++ b/content/zettel/3c3h1.md
@@ -0,0 +1,16 @@
++++
+title = "Problems with comparing against CompCert-KVX"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3h", "3a5c1"]
+forwardlinks = ["3c3h2"]
+zettelid = "3c3h1"
++++
+
+CompCert-KVX is the only other implementation of advanced scheduling
+techniques, however, it is hard to directly compare against it. The main
+reason for all these difficulties is that they have modified CompCert
+itself quite heavily. Secondly, they also have only implemented the
+superblock scheduling oracle for a subset of back end languages, such as
+their KVX back end, RISC-V and AArch64.
diff --git a/content/zettel/3c3h2.md b/content/zettel/3c3h2.md
new file mode 100644
index 0000000..e9605bc
--- /dev/null
+++ b/content/zettel/3c3h2.md
@@ -0,0 +1,32 @@
++++
+title = "Implementing hyperblock scheduling in CompCert-KVX"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3h1"]
+forwardlinks = ["3c3h3"]
+zettelid = "3c3h2"
++++
+
+One way to compare hyperblock scheduling to superblock scheduling would
+be to implement it in CompCert-KVX, and therefore show the difference
+between the two types of scheduling directly. However, the main issue
+with this comparison is that this requires quite a lot of work to do,
+but might also not provide any useful comparison.
+
+Firstly, hyperblock scheduling is quite different to superblock
+scheduling, and if the processor does not support predicated execution
+natively, then it might be much more efficient to use superblock
+scheduling to approximate a trace schedule. However, for high-level
+synthesis, where we have full control over the hardware that is
+generated, it might be better to use hyperblock scheduling because we
+could have predicated expressions.
+
+Secondly, to even use hyperblock scheduling in an environment that does
+not have predicated execution would mean that one would have to
+implement reverse if-conversion. Naïve if-conversion would be easy to
+implement, however, to not lose any of the performance that was gained
+by using the scheduling, one would have to implement reverse
+if-conversion based on some heuristics. This means that one would be
+comparing the implementation of reverse if-conversion as well as the
+implementation of the scheduling against superblock scheduling.
diff --git a/content/zettel/3c3h3.md b/content/zettel/3c3h3.md
new file mode 100644
index 0000000..00710ce
--- /dev/null
+++ b/content/zettel/3c3h3.md
@@ -0,0 +1,20 @@
++++
+title = "Implementing superblock scheduling in Vericert"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3h2"]
+forwardlinks = ["3c3h4"]
+zettelid = "3c3h3"
++++
+
+The other way one could compare hyperblock scheduling and superblock
+scheduling, with a focus on how they compare in HLS specifically, would
+be to take the superblock scheduling and implement it inside of
+Vericert. However, the main problem is that CompCert-KVX is a heavily
+modified version of CompCert, and therefore cannot be integrated easily
+into CompCert itself on its own. In addition to that, trying to compile
+Vericert only with CompCert-KVX runs into more issues, mainly being that
+Vericert uses the x86 backend in CompCert, whereas CompCert-KVX has only
+implemented the RISC-V, AArch64 and KVX back ends for superblock
+scheduling.
diff --git a/content/zettel/3c3h4.md b/content/zettel/3c3h4.md
new file mode 100644
index 0000000..dbbbd43
--- /dev/null
+++ b/content/zettel/3c3h4.md
@@ -0,0 +1,25 @@
++++
+title = "Adding to the BTL language"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3h3"]
+forwardlinks = []
+zettelid = "3c3h4"
++++
+
+Implementing scheduling seems to be more difficult than initially
+thought. Proving the translation to basic blocks is not simple, however,
+CompCert-KVX seems to have a working solution.
+
+However, their BTL language is quite special, it uses recursive blocks,
+which I had thought of initially, instead of list of instructions.
+However, this makes scheduling harder (and reasoning about scheduling
+harder I think). But it seems like CompCert-KVX has already implemented
+all of this. If I reuse their work, then I would at least have the
+proofs available for most of the scheduling, but it also means I have to
+understand it again and also reimplement the scheduling algorithm.
+
+The other option is just to reimplement their proofs and use it as
+inspiration. The differences in the scheduling and the proofs at a high
+level are still quite substantial.
diff --git a/content/zettel/3c3i.md b/content/zettel/3c3i.md
new file mode 100644
index 0000000..94955fb
--- /dev/null
+++ b/content/zettel/3c3i.md
@@ -0,0 +1,18 @@
++++
+title = "Proving if-conversion correct"
+date = "2022-06-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3h"]
+forwardlinks = ["3c3j", "3c3i1"]
+zettelid = "3c3i"
++++
+
+The main problem is that one has two duplicate versions of the block. At
+every point, one may be executing the copy, or the original body. The
+only thing that I care about in the end is that the **behaviour** is the
+same. The problem with that is that I need to be executing one or two
+steps in the input to do this. However, this can still be proven using
+just one step in the input by saving the intermediate proof state for
+when I need to execute two steps.
diff --git a/content/zettel/3c3i1.md b/content/zettel/3c3i1.md
new file mode 100644
index 0000000..284b63b
--- /dev/null
+++ b/content/zettel/3c3i1.md
@@ -0,0 +1,29 @@
++++
+title = "Repeated application of a translation pass in CompCert"
+date = "2022-10-04"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3i"]
+forwardlinks = []
+zettelid = "3c3i1"
++++
+
+Currently it's true that if-conversion is quite tricky to prove, because
+the recursive nature of the translation is quite complicated. However,
+if one limits the translation to just modifying parts of the code in a
+non-recursive way, then the proof becomes simpler. At every point one
+only has to reason about two possibilities, either the current point has
+been converted, meaning another block that exists in the graph has been
+chosen and has been inlined, or the current block has not changed at
+all.
+
+Once one has defined a translation like that, one can just repeat it
+multiple times with different rewrites to get any kind of
+transformations one wants. To prove the correctness of the multiple
+applications one should be able to prove that the `match_prog` property
+holds for any reflexive and transitive closure. In addition to that, an
+instance of the linker type class also needs to be implemented to show
+that separate compilation is still supported as well, however that
+should also just work because the `match_prog` property is very similar
+to other `match_prog` properties.
diff --git a/content/zettel/3c3j.md b/content/zettel/3c3j.md
new file mode 100644
index 0000000..1bbe9a9
--- /dev/null
+++ b/content/zettel/3c3j.md
@@ -0,0 +1,24 @@
++++
+title = "Design Trade-offs for the Scheduling Proof"
+date = "2022-11-16"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3i"]
+forwardlinks = ["3c3k"]
+zettelid = "3c3j"
++++
+
+There are a few trade-offs that were made during the scheduling proof,
+such as:
+
+- deciding to use flat predicates instead of a tree-like structure
+- if-conversion, which is a more self-contained way to prove the
+ translation without having to worry about the performance of the
+ code
+- The representation of the predicates is also an interesting
+ trade-off, because there are various ways to do this.
+- Using if-statements or constraints to prove the backwards simulation
+ in the abstract interpretation.
+
+**\*\***
diff --git a/content/zettel/3c3k.md b/content/zettel/3c3k.md
new file mode 100644
index 0000000..50a5910
--- /dev/null
+++ b/content/zettel/3c3k.md
@@ -0,0 +1,41 @@
++++
+title = "Making the Proof Tractable using the Identity Semantics"
+date = "2022-11-16"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3j"]
+forwardlinks = ["3c3l", "3c3k1"]
+zettelid = "3c3k"
++++
+
+A trick that also had to be pulled to make the proofs of the abstract
+interpretation tractable was using an identity semantics to prove basic
+properties about `sem_pred_expr`. For some reason, when using the
+identity semantics one can nicely compose behaviours of all the
+`Applicative` functions.
+
+Essentially we have a function which implements the semantics for our
+`predicated A` type, which happens to at least be an Applicative. The
+function itself is implemented similarly to a foldMap, but instead of
+using a general monoid instance to combine the elements, it just keeps
+the element that evaluates to true. This could be the monoid instance,
+but it requires to introspect into the current context:
+
+``` coq
+sem_pred_expr
+ : forall A B,
+ PTree.t pred_pexpr -> (Abstr.ctx -> A -> B -> Prop) ->
+ Abstr.ctx -> predicated A -> B -> Prop
+```
+
+Then, once the predicates are evaluated, it's not quite a monoid
+instance, because you are getting an element out of a list based on the
+evaluation of the predicate.
+
+However, you could also see it as reducing a list of things to the list
+of things that evaluated to true, in which case the monoid instance
+would still hold. Then, we are just converting the monoid to a maybe and
+checking that the list only contained one element. That would mean that
+the evaluation should take in predicated instructions and to the
+predicate evaluation, but also evaluate the expression.
diff --git a/content/zettel/3c3k1.md b/content/zettel/3c3k1.md
new file mode 100644
index 0000000..358910d
--- /dev/null
+++ b/content/zettel/3c3k1.md
@@ -0,0 +1,22 @@
++++
+title = "Theory Around `sem_pred_expr`"
+date = "2022-11-16"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3k"]
+forwardlinks = []
+zettelid = "3c3k1"
++++
+
+The function `sem_pred_expr` has the same type signature as a `foldMap`
+function, maybe also the same signature as `foldMapM`, where the monad
+is the `Reader` monad. The only difference seems to be with how the
+elements are combined. Instead of being combined using the monoid, they
+are combined using the evaluation of the predicates, because this is
+only implemented for predicated objects.
+
+In addition to that, I feel like there should be a similarity between
+sending the identity semantics into the predicated semantics and the
+general **Free Applicative**. This is because we are essentially
+extracting the structure of the object that is inside of the predicates.
diff --git a/content/zettel/3c3l.md b/content/zettel/3c3l.md
new file mode 100644
index 0000000..817dc09
--- /dev/null
+++ b/content/zettel/3c3l.md
@@ -0,0 +1,17 @@
++++
+title = "Verifying the decidability of SAT"
+date = "2023-04-28"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3k"]
+forwardlinks = ["3c3m", "3c3l1"]
+zettelid = "3c3l"
++++
+
+One proof that is necessary for the current scheduling proof development
+is the proof that SAT is decidable, meaning a proof that the algorithm
+terminates in a definite answer. This allows for various simplifications
+in the proof itself, such as being able to define an equivalence
+relation around semantic equivalence of formulas. These can then
+drastically simplify the proofs themselves.
diff --git a/content/zettel/3c3l1.md b/content/zettel/3c3l1.md
new file mode 100644
index 0000000..05ce6ae
--- /dev/null
+++ b/content/zettel/3c3l1.md
@@ -0,0 +1,25 @@
++++
+title = "Initial SAT algorithm was not terminating"
+date = "2023-04-28"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3l"]
+forwardlinks = []
+zettelid = "3c3l1"
++++
+
+One interesting property of the initial SAT algorithm was that it was
+not actually terminating in some cases, and would therefore hit the
+bound limit. This was because of `setFormula` in the case where
+`unitPropagation` failed. The root cause of this was because if
+`unitPropagation` failed, it would pick the first clause and propagate
+the first variable that occurs inside of it. However, when it could not
+find a variable, it would instead return a default variable to
+propagate, which might not actually be present in the formula. This
+would lead to the size of variables not actually reducing on every
+recursive call.
+
+The solution to this is to abort the algorithm early if there are any
+empty clauses in the formula. This means that the formula is
+unsatisfiable, so there is no need to continue the analysis.
diff --git a/content/zettel/3c3m.md b/content/zettel/3c3m.md
new file mode 100644
index 0000000..6fbbe73
--- /dev/null
+++ b/content/zettel/3c3m.md
@@ -0,0 +1,31 @@
++++
+title = "Interesting properties of predicate execution"
+date = "2023-05-01"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3l"]
+forwardlinks = ["3c3n", "3c3m1"]
+zettelid = "3c3m"
++++
+
+The execution semantics of predicates are tricky to get right. There is
+a balance between having flexible execution semantics and being able to
+prove the equivalence between two of these predicates. In this context,
+I mean that flexible (non-deterministic) semantics of predicates are
+ones where:
+
+``` text
+a || true == true
+```
+
+regardless of if the value of a is in the map or not, and strict
+execution of semantics need to be able to show that:
+
+```{=latex}
+\begin{equation*}
+\exists b\ldotp m \mathbin{!} a = b
+\end{equation*}
+```
+This is an important distinction, as one allows for *lazy* evaluation of
+predicates, but this complicates reasoning about the predicates.
diff --git a/content/zettel/3c3m1.md b/content/zettel/3c3m1.md
new file mode 100644
index 0000000..7d47f76
--- /dev/null
+++ b/content/zettel/3c3m1.md
@@ -0,0 +1,24 @@
++++
+title = "Needing flexible evaluation for symbolic execution"
+date = "2023-05-01"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3m"]
+forwardlinks = ["3c3m2"]
+zettelid = "3c3m1"
++++
+
+When performing symbolic execution, we want flexible predicate
+semantics, because we will encounter various situations in which
+predicates are actually not evaluable by design. For example, we will
+conditionally assign a predicate based on the value of another
+predicate. In that case, we can only deduce what the evaluation is if
+the original predicate was true. Otherwise, we do not even know whether
+the expression assigned to the predicate is even evaluable, and it is
+likely that that is not the case.
+
+Therefore, when we generate a predicate, we can use the flexible
+evaluation semantics for a predicate to encode this information, and
+still end up with a predicate that is evaluable, even though there are
+parts of it that are not evaluable.
diff --git a/content/zettel/3c3m2.md b/content/zettel/3c3m2.md
new file mode 100644
index 0000000..13705ef
--- /dev/null
+++ b/content/zettel/3c3m2.md
@@ -0,0 +1,26 @@
++++
+title = "Needing strict evaluation for equivalence of predicates"
+date = "2023-05-01"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3m1"]
+forwardlinks = ["3a8g5e", "3a8g5g1", "3c3m3"]
+zettelid = "3c3m2"
++++
+
+Strict evaluation of predicates is important when one wants to prove the
+equivalence between two predicates, and therefore be able to conclude
+that these predicates will always behave the same given the same inputs.
+However, if the evaluation semantics of the predicate relies on the
+laziness for evaluation, meaning there are truly paths for which the
+return value is unknown, then one might have to resort to three-valued
+logic to prove equivalence of two predicates. This is because one needs
+to show that if one predicate does not get stuck, then the other
+predicate will also not get stuck. This is very similar to the problem
+that was encountered during the proof of correctness for GSA
+([\#3a8g5e]) and for which we needed to use three-valued logic
+([\#3a8g5g1]).
+
+ [\#3a8g5e]: /zettel/3a8g5e
+ [\#3a8g5g1]: /zettel/3a8g5g1
diff --git a/content/zettel/3c3m3.md b/content/zettel/3c3m3.md
new file mode 100644
index 0000000..6ededd5
--- /dev/null
+++ b/content/zettel/3c3m3.md
@@ -0,0 +1,20 @@
++++
+title = "Converting between the strict and lazy predicate evaluation"
+date = "2023-05-01"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3m2"]
+forwardlinks = []
+zettelid = "3c3m3"
++++
+
+When simply dealing with predicates that are evaluated given a function
+from literals to values, then the flexible semantics of a predicate are
+actually equivalent to the strict semantics of evaluating the predicate.
+So in simple cases, there is essentially no difference between the two,
+and one can be exchanged for the other.
+
+However, as soon as the laziness is actually needed to be able to
+evaluate the predicate, then it can be difficult to convert between the
+strict and lazy structures.
diff --git a/content/zettel/3c3n.md b/content/zettel/3c3n.md
new file mode 100644
index 0000000..d757174
--- /dev/null
+++ b/content/zettel/3c3n.md
@@ -0,0 +1,14 @@
++++
+title = "The need for both types of semantics"
+date = "2023-05-01"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3m"]
+forwardlinks = []
+zettelid = "3c3n"
++++
+
+- Interestingly, we somehow need both types of these semantics, to be
+ able to symbolically execute a program, as well as use the
+ predicates later on to perform equivalence checks.
diff --git a/content/zettel/3c4.md b/content/zettel/3c4.md
new file mode 100644
index 0000000..f117e1b
--- /dev/null
+++ b/content/zettel/3c4.md
@@ -0,0 +1,21 @@
++++
+title = "Some assumptions that were made"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c3"]
+forwardlinks = ["3c5"]
+zettelid = "3c4"
++++
+
+Currently, the assumption is made that only the reset value is used in
+the module from the inputs. In addition to that, the clock is never used
+either, and it is assumed that all the always blocks in the code are
+`always_ff` with a `posedge clk`. All the other assumptions that are
+valid for the semantics paper are also applicable to our subset.
+Finally, the assumption is made that all the variables are initialised
+to 0, because undefined is currently not supported. This is a severe
+limitation and should probably be supported at some point.
+
+Currently `values` cannot be undefined, therefore, at the moment when a
+register is uninitialised, it is assumed to be 0.
diff --git a/content/zettel/3c5.md b/content/zettel/3c5.md
new file mode 100644
index 0000000..ca0367a
--- /dev/null
+++ b/content/zettel/3c5.md
@@ -0,0 +1,14 @@
++++
+title = "Novel ideas"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c4"]
+forwardlinks = ["3c6"]
+zettelid = "3c5"
++++
+
+One main novel idea might be how the proof between the RTL and the
+optimised Verilog is done, as it will be a multiple steps to one step
+comparison, instead of the other way round. Normally all the comparisons
+are one to many or one to one.
diff --git a/content/zettel/3c6.md b/content/zettel/3c6.md
new file mode 100644
index 0000000..1c69eaa
--- /dev/null
+++ b/content/zettel/3c6.md
@@ -0,0 +1,18 @@
++++
+title = "Verification of function calls in HTL"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c5"]
+forwardlinks = ["1c9", "1c2c", "3c7", "3c6a"]
+zettelid = "3c6"
++++
+
+Verification of function calls is a move towards supporting verified
+resource sharing ([\#1c9]), because one can use a proper function call
+to hide the hardware duplication. The main problem is also supporting
+pipelining ([\#1c2c]) of such functions, which would mean that they
+have the ability to execute in parallel.
+
+ [\#1c9]: /zettel/1c9
+ [\#1c2c]: /zettel/1c2c
diff --git a/content/zettel/3c6a.md b/content/zettel/3c6a.md
new file mode 100644
index 0000000..85e8ba4
--- /dev/null
+++ b/content/zettel/3c6a.md
@@ -0,0 +1,18 @@
++++
+title = "Base pointer of main memory"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c6"]
+forwardlinks = ["3c6b"]
+zettelid = "3c6a"
++++
+
+One of the current limitations of function calls in HTL is that they
+don't support any loads or stores in these functions. This is because
+Vericert currently only generates one global memory. Therefore, there
+should not be any pointers in the program that have a base pointer that
+is different to the base pointer of the main function's stack.
+
+One possible solution to this is to find what the base of the stack is,
+and then prove if we ever have a base pointer.
diff --git a/content/zettel/3c6b.md b/content/zettel/3c6b.md
new file mode 100644
index 0000000..a602a5a
--- /dev/null
+++ b/content/zettel/3c6b.md
@@ -0,0 +1,36 @@
++++
+title = "Semantics of function calls"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c6a"]
+forwardlinks = []
+zettelid = "3c6b"
++++
+
+I believe that your concern was that because we match on the value of
+the reset signal to start the function call, and that this is defined in
+the specification as well as in the implementation, that this means that
+we basically could replace it by any other value (not just 1), and the
+proof would still pass.
+
+I think that this is actually true, but similar to a different problem
+that is already present in the original version of Vericert from OOPSLA.
+In RTL, everything is started with a call to main, whereas in HTL we
+make necessary assumptions about this. The main one is a kind of calling
+convention, that says that to start the module one has to assert the
+reset signal for one clock cycle, and then set it to low until the
+module finishes execution. This is an assumption that is just in the
+semantics of Verilog and HTL directly.
+
+So for HTL from VericertFun, it's basically a similar problem and
+solution. We basically define a certain calling convention. The
+semantics then say that if a reset signal of one of the modules is now
+**low**, we will go into a call state. I think that another assumption
+that is kind of implicit to the semantics (and which will need to be
+proven during the translation to Verilog), is that the current state
+machine will indeed completely stop and relinquish control to the other
+state machine. Even though the current code does do that, it is actually
+not required to prove this until we combine the state machines into one
+large Verilog module. HTL semantics basically still have a magic jump
+where it will go from the current state machine into the new one.
diff --git a/content/zettel/3c7.md b/content/zettel/3c7.md
new file mode 100644
index 0000000..2cd5857
--- /dev/null
+++ b/content/zettel/3c7.md
@@ -0,0 +1,10 @@
++++
+title = "Verification of loop pipelining"
+date = "2022-05-01"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c6"]
+forwardlinks = ["3c8", "3c7a"]
+zettelid = "3c7"
++++
diff --git a/content/zettel/3c7a.md b/content/zettel/3c7a.md
new file mode 100644
index 0000000..bcd0923
--- /dev/null
+++ b/content/zettel/3c7a.md
@@ -0,0 +1,71 @@
++++
+title = "Notes to Tristan and Leroy's paper"
+date = "2022-05-01"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c7"]
+forwardlinks = []
+zettelid = "3c7a"
++++
+
+These are notes to the following paper \[1\].
+
+The proof mainly relies on creating the pipelined version of the loop,
+using $\mathcal{P}$, $\mathcal{S}$ and the epilogue $\mathcal{E}$, which
+is then followed by final iterations of the loop $\mathcal{B}$ to prove
+the correctness.
+
+Then, the proof shows the equivalence of all the variables, up to the
+equivalence of temporary variables introduced in the pipelined loop to
+add the pipeline stages.
+
+The actual reasoning of the proof is the following, where the idea is
+that it changes the undecidable property of:
+
+$$ \forall N, \alpha(X_N) = \alpha(Y_N) $$
+
+for two pieces of code $X_N$ and $Y_N$.
+
+The first insight is that the following property holds:
+
+$$ \alpha(\mathcal{E}; \mathcal{B}^{\delta}) = \alpha(\mathcal{S}; \mathcal{E})$$
+
+This property says that one always has the choice of doing two things.
+
+- Either one leaves the steady state immediately by executing the
+ epilogue $\mathcal{E}$, and then executing $\delta$ iterations of
+ the initial loop $\mathcal{B}$,
+- or one executes one more iteration of the steady state followed by
+ the epilogue.
+
+Secondly, the following property is also true:
+
+$$ \alpha(\mathcal{B}^{\mu}) = \alpha(\mathcal{P}; \mathcal{E}) $$
+
+This says that if the original code performs exactly $\mu$ iterations,
+then the initial loop performed $\mathcal{B}^{\mu}$ whereas the
+transformed and pipelined loop performed exactly
+$\mathcal{P}; \mathcal{E}$.
+
+These two properties are now checkable statically, because $\delta$ and
+$\mu$ are already known.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-tristan10_simpl_verif_valid_softw_pipel" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">J.-B. Tristan and X. Leroy, “A
+simple, verified validator for software pipelining,” in *Proceedings of
+the 37th annual ACM SIGPLAN-SIGACT symposium on principles of
+programming languages*, in POPL ’10. Madrid, Spain: Association for
+Computing Machinery, 2010, pp. 83–92. doi:
+[10.1145/1706299.1706311].</span>
+
+</div>
+
+</div>
+
+ [10.1145/1706299.1706311]: https://doi.org/10.1145/1706299.1706311
diff --git a/content/zettel/3c8.md b/content/zettel/3c8.md
new file mode 100644
index 0000000..76cad56
--- /dev/null
+++ b/content/zettel/3c8.md
@@ -0,0 +1,26 @@
++++
+title = "Verifying Stability Conditions on HLS"
+date = "2022-10-04"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c7"]
+forwardlinks = ["3c8a"]
+zettelid = "3c8"
++++
+
+Currently, the correctness proof for HLS only takes into account the
+actual correctness of the final result that is computed. Instead, it
+would be useful to also have a notion of stability of the output. This
+means that changes in the input to a different, equivalent program
+should not result in a large difference in the design of the output.
+However, even there, there are two possibilities:
+
+- Stability of performance metrics, such as latency, throughput and
+ area.
+- Stability of architecture, which implies the above but may be
+ restrictive.
+
+It might be easiest to start by proving stability of the architecture
+and see how restrictive that metric is, and how many optimisations do
+not actually have that property.
diff --git a/content/zettel/3c8a.md b/content/zettel/3c8a.md
new file mode 100644
index 0000000..fcf77f5
--- /dev/null
+++ b/content/zettel/3c8a.md
@@ -0,0 +1,23 @@
++++
+title = "Testing for stability in current HLS tools"
+date = "2022-10-04"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c8"]
+forwardlinks = []
+zettelid = "3c8a"
++++
+
+One thing that might be useful to motivate the work above would be to
+test existing HLS tools for stability by changing the metrics that
+measured at the output of the HLS tools. For this, one would have to
+reuse the script from Zewei and change the conditions that trigger a
+bug. It should be expected that the size and latency of the designs
+would change quite a lot though.
+
+For example, synthesis tools already change quite a lot because of the
+randomness that is introduced in the various stages. This can make
+designs fluctuate by 10-15%, which means that optimisations that do not
+add significantly more performance or area improvements might actually
+not be contributing significantly.
diff --git a/content/zettel/3d.md b/content/zettel/3d.md
new file mode 100644
index 0000000..f25eba2
--- /dev/null
+++ b/content/zettel/3d.md
@@ -0,0 +1,9 @@
++++
+title = "Verification tools"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3c"]
+forwardlinks = ["3d1"]
+zettelid = "3d"
++++
diff --git a/content/zettel/3d1.md b/content/zettel/3d1.md
new file mode 100644
index 0000000..ff0a159
--- /dev/null
+++ b/content/zettel/3d1.md
@@ -0,0 +1,19 @@
++++
+title = "Dafny"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d"]
+forwardlinks = ["3d2"]
+zettelid = "3d1"
++++
+
+Dafny is a tool that was created by Rustan Leino, and is a language
+that was designed specifically for the purposes of verification. The
+main idea is that you can have preconditions and postconditions that
+describe the functionality of your program. These are then proven
+automatically by Dafny, which uses the implementation to try and prove
+the postcondition assuming that the precondition holds.
+
+This cannot always be the case though, and one can therefore help the
+theorem prover by adding assertions in different places.
diff --git a/content/zettel/3d2.md b/content/zettel/3d2.md
new file mode 100644
index 0000000..4d34b52
--- /dev/null
+++ b/content/zettel/3d2.md
@@ -0,0 +1,9 @@
++++
+title = "Automatic theorem provers"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d1"]
+forwardlinks = ["3d2a"]
+zettelid = "3d2"
++++
diff --git a/content/zettel/3d2a.md b/content/zettel/3d2a.md
new file mode 100644
index 0000000..d79e596
--- /dev/null
+++ b/content/zettel/3d2a.md
@@ -0,0 +1,20 @@
++++
+title = "Validating SAT and SMT proofs"
+date = "2022-08-02"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2"]
+forwardlinks = ["3d2b"]
+zettelid = "3d2a"
++++
+
+Validating a satisfiable result is quite easy, it's just a matter of
+plugging it into the formula. However, a more difficult question is a
+proof that a result is unsatisfiable. The main idea is that if a formula
+is unsatisfiable, then it should be possible to simplify it to $\perp$.
+Therefore, one can prove unsatisfiability by producing a trace of
+rewriting rules that simplify the goal into $\perp$. If one can show
+that the rewriting rules preserve satisfiability, one can show that
+applying them will produce a $\perp$ result and therefore must be
+equivalent to $\perp$.
diff --git a/content/zettel/3d2b.md b/content/zettel/3d2b.md
new file mode 100644
index 0000000..f4898aa
--- /dev/null
+++ b/content/zettel/3d2b.md
@@ -0,0 +1,38 @@
++++
+title = "SMTCoq as a formalisation in Coq"
+date = "2022-08-02"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2a"]
+forwardlinks = ["3d2b1"]
+zettelid = "3d2b"
++++
+
+SMTCoq \[1\] is a tool that formalises these rewriting traces in Coq, so
+that traces from various tools (veriT, cvc4 and zchaff) can be read and
+then checked according to the input formula. This actually does not
+require any trust in the parsing and pretty printing to pass the input
+to the SAT solver, because the initial formula is already represented in
+Coq, then the trace will also be represented in Coq, and so the checker
+can just check that this arbitrary trace present in Coq does indeed
+simplify the formula to $\perp$.
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-armand11_modul_integ_sat_smt_solver" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">M. Armand, G. Faure, B. Grégoire,
+C. Keller, L. Théry, and B. Werner, “A modular integration of SAT/SMT
+solvers to coq through proof witnesses,” in *Certified programs and
+proofs*, J.-P. Jouannaud and Z. Shao, Eds., Berlin, Heidelberg: Springer
+Berlin Heidelberg, 2011, pp. 135–150. doi:
+[10.1007/978-3-642-25379-9_12].</span>
+
+</div>
+
+</div>
+
+ [10.1007/978-3-642-25379-9_12]: https://doi.org/10.1007/978-3-642-25379-9_12
diff --git a/content/zettel/3d2b1.md b/content/zettel/3d2b1.md
new file mode 100644
index 0000000..f7b04af
--- /dev/null
+++ b/content/zettel/3d2b1.md
@@ -0,0 +1,26 @@
++++
+title = "Contents of Different Tables in SMTCoq"
+date = "2022-08-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2b"]
+forwardlinks = ["2e1c7", "3c3g1", "3d2b1a"]
+zettelid = "3d2b1"
++++
+
+In SMTCoq the formula needs to be represented somehow, which is easy to
+interpret as well as efficient to represent. For this they go with a
+multi-table approach, which is similar to the hashing that we are doing
+as well ([\#2e1c7], [\#3c3g1]). There are multiple different tables
+which are going to be presented in the next sections. At the top-level,
+the main representation of the logical formula is done by a list of
+literals, which refer to formulas inside of the formula table. This is
+called the state. Every formula in the state needs to be true for the
+whole system to be valid. These variables referring to formulas take the
+shape of an int, like `x`, where `x>>1` will refer to the location of
+the formula in the formula table, and `x&1` will be true if the formula
+should be negated and false otherwise.
+
+ [\#2e1c7]: /zettel/2e1c7
+ [\#3c3g1]: /zettel/3c3g1
diff --git a/content/zettel/3d2b1a.md b/content/zettel/3d2b1a.md
new file mode 100644
index 0000000..9c89310
--- /dev/null
+++ b/content/zettel/3d2b1a.md
@@ -0,0 +1,15 @@
++++
+title = "Formula Table"
+date = "2022-08-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2b1"]
+forwardlinks = ["3d2b1b"]
+zettelid = "3d2b1a"
++++
+
+The main, top-level, table is the one of formulas, also called `ftable`,
+which is easy to confuse with a function table. This table contains the
+main operations on formulas, for example, an `and` operation between
+multiple literals/atoms.
diff --git a/content/zettel/3d2b1b.md b/content/zettel/3d2b1b.md
new file mode 100644
index 0000000..35cf0c9
--- /dev/null
+++ b/content/zettel/3d2b1b.md
@@ -0,0 +1,23 @@
++++
+title = "Atom Table"
+date = "2022-08-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2b1a"]
+forwardlinks = ["3d2b1c", "3d2b1d"]
+zettelid = "3d2b1b"
++++
+
+To be able to support various theories, atoms are used instead of
+literals so that additional operations in different theories can be
+represented. This allows for application of a function to arguments, for
+example an `OZlt` operation, as well as simple variables or constants of
+different types. The atoms can either contain these concrete operations
+that are part of a theory directly, or they will refer to uninterpreted
+functions that are declared in the operation table ([\#3d2b1c]).
+Variables can be the same and will refer to variables declared in the
+variable table ([\#3d2b1d]).
+
+ [\#3d2b1c]: /zettel/3d2b1c
+ [\#3d2b1d]: /zettel/3d2b1d
diff --git a/content/zettel/3d2b1c.md b/content/zettel/3d2b1c.md
new file mode 100644
index 0000000..9d5b8b0
--- /dev/null
+++ b/content/zettel/3d2b1c.md
@@ -0,0 +1,13 @@
++++
+title = "Operation Table"
+date = "2022-08-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2b1b"]
+forwardlinks = ["3d2b1d"]
+zettelid = "3d2b1c"
++++
+
+This table consists of various uninterpreted functions that are used
+within the theories, together with their types.
diff --git a/content/zettel/3d2b1d.md b/content/zettel/3d2b1d.md
new file mode 100644
index 0000000..a04eed6
--- /dev/null
+++ b/content/zettel/3d2b1d.md
@@ -0,0 +1,12 @@
++++
+title = "Variable Table"
+date = "2022-08-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2b1c", "3d2b1b"]
+forwardlinks = ["3d2b1e"]
+zettelid = "3d2b1d"
++++
+
+This contains various uninterpreted variables together with their type.
diff --git a/content/zettel/3d2b1e.md b/content/zettel/3d2b1e.md
new file mode 100644
index 0000000..b08dfd0
--- /dev/null
+++ b/content/zettel/3d2b1e.md
@@ -0,0 +1,16 @@
++++
+title = "Type Table"
+date = "2022-08-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["3d2b1d"]
+forwardlinks = []
+zettelid = "3d2b1e"
++++
+
+Finally, this table consists of uninterpreted types that are used in the
+formulas, which is probably not something that I will have to use, which
+is good. This can therefore just be kept empty (although I don't really
+know how to construct an empty table because they all need some kind of
+default variable to be assigned to them).
diff --git a/content/zettel/4a.md b/content/zettel/4a.md
new file mode 100644
index 0000000..d103e38
--- /dev/null
+++ b/content/zettel/4a.md
@@ -0,0 +1,13 @@
++++
+title = "Riemann Zeta Function"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = []
+forwardlinks = ["4b"]
+zettelid = "4a"
++++
+
+The Riemann zeta function can be defined as follows:
+
+$$\zeta(s) = \sum_{n = 1}^{\infty} \frac{1}{n^s}$$
diff --git a/content/zettel/4b.md b/content/zettel/4b.md
new file mode 100644
index 0000000..a957852
--- /dev/null
+++ b/content/zettel/4b.md
@@ -0,0 +1,12 @@
++++
+title = "Set Theory"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d1", "4a"]
+forwardlinks = ["4c", "4b1"]
+zettelid = "4b"
++++
+
+Set theory is one of the fundamental theories of maths, others being
+type-theory or cartesian closed category theory.
diff --git a/content/zettel/4b1.md b/content/zettel/4b1.md
new file mode 100644
index 0000000..093084d
--- /dev/null
+++ b/content/zettel/4b1.md
@@ -0,0 +1,27 @@
++++
+title = "Presburger Sets"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4b"]
+forwardlinks = ["4b2"]
+zettelid = "4b1"
++++
+
+These are sets that define relations on Presburger numbers, meaning only
+addition is defined.
+
+For example:
+
+``` example
+x = { [i] : forall i, 0 <= i /\ i <= 10 }
+```
+
+Is actually an empty formula, because there is no instance where this is
+satisfied for all numbers. However, the following is defined:
+
+``` example
+x = { [i] : 0 <= i /\ i <= 10 }
+```
+
+And will be equal to the single tuples of numbers in the range (0,10).
diff --git a/content/zettel/4b2.md b/content/zettel/4b2.md
new file mode 100644
index 0000000..7a68b86
--- /dev/null
+++ b/content/zettel/4b2.md
@@ -0,0 +1,21 @@
++++
+title = "Arithmetic Hierarchy"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4b1"]
+forwardlinks = []
+zettelid = "4b2"
++++
+
+This is a hierarchy constructed in the following way, where $k$ is the
+type. $k=0$ is the type of $\mathbb{N}$, whereas $k=1$ gives the type of
+$\mathbb{N}\rightarrow \mathbb{N}$:
+
+- the formula is $\Sigma^k_{n+1}$ if the outermost quantifier is
+ $\exists^k$, and there are $n$ alternations between blocks of
+ $\exists^k$ and $\forall^k$ at the front of the formula.
+
+- the formula is $\Pi^k_{n+1}$ if the outermost quantifier is
+ $\forall^k$, and there are $n$ alternations between blocks of
+ $\exists^k$ and $\forall^k$ at the front of the formula.
diff --git a/content/zettel/4c.md b/content/zettel/4c.md
new file mode 100644
index 0000000..2f8488a
--- /dev/null
+++ b/content/zettel/4c.md
@@ -0,0 +1,29 @@
++++
+title = "Proof Theory"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4b"]
+forwardlinks = ["4d", "4c1"]
+zettelid = "4c"
++++
+
+Proof theory is the theory of formal proofs, which differ from the
+social proofs that are normally done. One therefore views the proofs from
+a purely syntactic point of view.
+
+The main book that is being followed for this is \[1\].
+
+<div id="refs" class="references csl-bib-body" markdown="1">
+
+<div id="ref-gerard87_proof_theor_logic_compl" class="csl-entry"
+markdown="1">
+
+<span class="csl-left-margin">\[1\]
+</span><span class="csl-right-inline">J.-Y. Girard, *Proof theory and
+logical complexity*. Napoli, via Arangio Ruiz 83: Bibliopolis,
+1987.</span>
+
+</div>
+
+</div>
diff --git a/content/zettel/4c1.md b/content/zettel/4c1.md
new file mode 100644
index 0000000..4274bd4
--- /dev/null
+++ b/content/zettel/4c1.md
@@ -0,0 +1,19 @@
++++
+title = "Different logical foundations"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4c"]
+forwardlinks = ["4c2"]
+zettelid = "4c1"
++++
+
+There are different logical foundations to mathematics. The main one
+that is used is Zermelo-Fraenkel set theory with the axiom of choice
+(ZFC). However, there can be alternatives. For example, type theory
+provides an alternative basis for the theory of mathematics, and also
+results in a different logic.
+
+Propositional logic always needs set theory as a base to work with,
+however, logical systems like intuitionistic logic can be built up using
+type theory without ever needing set theory.
diff --git a/content/zettel/4c2.md b/content/zettel/4c2.md
new file mode 100644
index 0000000..1548494
--- /dev/null
+++ b/content/zettel/4c2.md
@@ -0,0 +1,9 @@
++++
+title = "Gentzen's Sequent Calculus"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4c1"]
+forwardlinks = ["4c3", "4c2a"]
+zettelid = "4c2"
++++
diff --git a/content/zettel/4c2a.md b/content/zettel/4c2a.md
new file mode 100644
index 0000000..b9b6be6
--- /dev/null
+++ b/content/zettel/4c2a.md
@@ -0,0 +1,14 @@
++++
+title = "Cut elimination"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4c2"]
+forwardlinks = ["4c2b"]
+zettelid = "4c2a"
++++
+
+Cut elimination is a rule that is present in Gentzen's sequent calculus
+(LK).
+
+$$ \frac{\Gamma \vdash \Delta, A \quad A, \Sigma \vdash \Lambda}{\Gamma, \Sigma\vdash \Delta, \Lambda} $$
diff --git a/content/zettel/4c2b.md b/content/zettel/4c2b.md
new file mode 100644
index 0000000..c670e08
--- /dev/null
+++ b/content/zettel/4c2b.md
@@ -0,0 +1,13 @@
++++
+title = "Sequent definition"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4c2a"]
+forwardlinks = []
+zettelid = "4c2b"
++++
+
+The sequent $\vdash$ can be interpreted in classical logic as follows:
+
+$$ A_1, A_2, ..., A_n \vdash B_1, B_2, ..., B_m \equiv (A_1 \land A_2\ \land... \land\ A_n) \rightarrow (B_1 \lor B_2\ \lor ... \lor\ B_m) $$
diff --git a/content/zettel/4c3.md b/content/zettel/4c3.md
new file mode 100644
index 0000000..64a9b2c
--- /dev/null
+++ b/content/zettel/4c3.md
@@ -0,0 +1,44 @@
++++
+title = "Hilbert Program"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4c2"]
+forwardlinks = []
+zettelid = "4c3"
++++
+
+Hilbert was under the impression that mathematics should be completely
+formalised in a minimal mathematical model (elementary methods), instead
+of the standard abstract methods that are used (axiom of choice). All
+proofs should be translated to this formalised mathematical
+representation, which differs from the social proofs that are normally
+made. This is the start of the proof-theory field of research.
+
+Hilbert considers these real formulas to be numerical equations, which
+can also be defined as $\Pi_1^0$-formulas (formulas that only have
+numbers in them and are preceded by a set amount of $\forall$). The idea
+is then that any elementary formula can be proven by an elementary
+proof, and that elementary mathematics is therefore complete.
+
+The idea is therefore that the Hilbert program states that any
+mathematical property could be expressed in elementary mathematics, and
+that it is therefore true iff there is an elementary proof of it. The
+program also defines the following concepts that must hold[^1]:
+
+Completeness
+: Proof that all true elementary mathematical statements are provable.
+
+Consistency
+: No contradiction can be obtained in elementary mathematics.
+
+Conservation
+: A proof that was obtained for real objects using ideal objects (such
+ as uncountable sets), can be proven without the use of ideal
+ objects.
+
+Decidability
+: That there exists an algorithm that can determine the truth or
+ falseness of any elementary statement.
+
+[^1]: <https://en.wikipedia.org/wiki/Hilbert%27s_program>
diff --git a/content/zettel/4d.md b/content/zettel/4d.md
new file mode 100644
index 0000000..764053b
--- /dev/null
+++ b/content/zettel/4d.md
@@ -0,0 +1,9 @@
++++
+title = "Category Theory"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4c"]
+forwardlinks = ["4e", "4d1"]
+zettelid = "4d"
++++
diff --git a/content/zettel/4d1.md b/content/zettel/4d1.md
new file mode 100644
index 0000000..564ccd2
--- /dev/null
+++ b/content/zettel/4d1.md
@@ -0,0 +1,22 @@
++++
+title = "Solutions to Set of all {Sets, Groups, Top. Spaces} does not exist"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d"]
+forwardlinks = ["4b", "4d2"]
+zettelid = "4d1"
++++
+
+The problem in Set Theory ([\#4b]) and the other spaces and collections,
+is that one cannot build a collection of all collections. In Category
+Theory, one has the same problem, not being able to have a Category of
+all Categories. There are various possible solutions to this:
+
+- Bound the size of objects by cardinal κ.
+- Use Classes (from Set Theory: Sets that satisfy first order
+ formula).
+- Grothendieck Universes (similar to bounding the size).
+- Ignore the problem (possible for simple category theory).
+
+ [\#4b]: /zettel/4b
diff --git a/content/zettel/4d2.md b/content/zettel/4d2.md
new file mode 100644
index 0000000..a46fe85
--- /dev/null
+++ b/content/zettel/4d2.md
@@ -0,0 +1,21 @@
++++
+title = "Objects"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d1"]
+forwardlinks = ["4d3", "4d2a"]
+zettelid = "4d2"
++++
+
+Objects have to have the following properties:
+
+- For objects A, B, we have "Set" of morphisms: $A \rightarrow_f B$
+- Composition:
+  $\mathit{Mor}(A, B) \times \mathit{Mor}(B, C) \rightarrow \mathit{Mor}(A, C)$
+- Identity morphisms: $\mathit{Mor}(A, A)$
+
+And morphisms have to follow the following axioms:
+
+- Association: $(f \circ g) \circ h = f \circ (g \circ h)$
+- Identity: $f \circ I_B = f$, $I_A \circ f = f$
diff --git a/content/zettel/4d2a.md b/content/zettel/4d2a.md
new file mode 100644
index 0000000..1747d38
--- /dev/null
+++ b/content/zettel/4d2a.md
@@ -0,0 +1,18 @@
++++
+title = "Monomorphisms"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d2"]
+forwardlinks = ["4d2b"]
+zettelid = "4d2a"
++++
+
+$g: B \rightarrow C$
+$A \underset{f_2}{\overset{f_1}{\rightrightarrows}} B\overset{g}{\rightarrow} C$
+
+Monomorphism: $g \circ f_1 = g \circ f_2 \implies f_1 = f_2$.
+
+This can be compared to injective maps in Sets, however, one does not
+have to define it in terms of the actual objects, but just on the
+morphisms ($g(b_1) =g(b_2) \implies b_1 = b_2$)
diff --git a/content/zettel/4d2b.md b/content/zettel/4d2b.md
new file mode 100644
index 0000000..3fcc41b
--- /dev/null
+++ b/content/zettel/4d2b.md
@@ -0,0 +1,14 @@
++++
+title = "Epimorphisms"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d2a"]
+forwardlinks = ["4d2c"]
+zettelid = "4d2b"
++++
+
+$g: B \leftarrow C$
+$A \underset{f_2}{\overset{f_1}{\leftleftarrows}} B\overset{g}{\leftarrow} C$
+
+Epimorphisms: $f_1 \circ g = f_2 \circ g \implies f_1 = f_2$.
diff --git a/content/zettel/4d2c.md b/content/zettel/4d2c.md
new file mode 100644
index 0000000..b046d43
--- /dev/null
+++ b/content/zettel/4d2c.md
@@ -0,0 +1,16 @@
++++
+title = "Isomorphism"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d2b"]
+forwardlinks = ["4d2d"]
+zettelid = "4d2c"
++++
+
+An Isomorphism is a very strong property, which is quite rare as it
+states that two morphisms have to create an identity morphism. In
+practice, equivalence classes are often used instead, where there must
+be an Isomorphism between the composition and the identity.
+
+$A \underset{g}{\overset{f}{\rightleftarrows}} B \implies f \circ g = I_B \land g \circ f = I_A$
diff --git a/content/zettel/4d2d.md b/content/zettel/4d2d.md
new file mode 100644
index 0000000..7fcc138
--- /dev/null
+++ b/content/zettel/4d2d.md
@@ -0,0 +1,14 @@
++++
+title = "Presheaf"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d2c"]
+forwardlinks = []
+zettelid = "4d2d"
++++
+
+A presheaf is a functor from the opposite category to the category of sets.
+This means that it can be expressed as the following:
+
+$C^{\mathit{op}}\rightarrow \mathit{Set}$
diff --git a/content/zettel/4d3.md b/content/zettel/4d3.md
new file mode 100644
index 0000000..efd0374
--- /dev/null
+++ b/content/zettel/4d3.md
@@ -0,0 +1,9 @@
++++
+title = "Examples of Categories"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d2"]
+forwardlinks = ["4d3a"]
+zettelid = "4d3"
++++
diff --git a/content/zettel/4d3a.md b/content/zettel/4d3a.md
new file mode 100644
index 0000000..283f524
--- /dev/null
+++ b/content/zettel/4d3a.md
@@ -0,0 +1,15 @@
++++
+title = "Single Group (Monoid)"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d3"]
+forwardlinks = ["4d3b"]
+zettelid = "4d3a"
++++
+
+This is a Monoid because we do not consider any inverses.
+
+- Category: 1 object.
+ - Morphisms: Elements of G.
+ - Composition: product in G.
diff --git a/content/zettel/4d3b.md b/content/zettel/4d3b.md
new file mode 100644
index 0000000..108cf60
--- /dev/null
+++ b/content/zettel/4d3b.md
@@ -0,0 +1,14 @@
++++
+title = "Poset P"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d3a"]
+forwardlinks = []
+zettelid = "4d3b"
++++
+
+- Category: Elements of P.
+ - Morphisms: 1 morphism $a \rightarrow b$ if $a \leq b$, 0
+ morphism otherwise.
+ - Composition: straightforward.
diff --git a/content/zettel/4e.md b/content/zettel/4e.md
new file mode 100644
index 0000000..e62bfe3
--- /dev/null
+++ b/content/zettel/4e.md
@@ -0,0 +1,9 @@
++++
+title = "Logic"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4d"]
+forwardlinks = ["4f", "4e1"]
+zettelid = "4e"
++++
diff --git a/content/zettel/4e1.md b/content/zettel/4e1.md
new file mode 100644
index 0000000..71407e9
--- /dev/null
+++ b/content/zettel/4e1.md
@@ -0,0 +1,19 @@
++++
+title = "Craig interpolation"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e", "3a8g5h2"]
+forwardlinks = ["4e2"]
+zettelid = "4e1"
++++
+
+Craig interpolation is an interesting method used in model checkers to
+generate intermediate propositions that only contain the atoms of the
+intersection between the other propositions in the implication.
+
+The idea is that if one has $p \rightarrow q$, then one can generate
+$p\rightarrow c \rightarrow q$, where $a(c) \subseteq a(p) \cap a(q)$. This
+means that one can generate a more minimal proposition that still
+captures the important information. However, without adding quantifiers,
+it's not possible to remove atoms without breaking strict equivalence.
diff --git a/content/zettel/4e2.md b/content/zettel/4e2.md
new file mode 100644
index 0000000..8531a35
--- /dev/null
+++ b/content/zettel/4e2.md
@@ -0,0 +1,22 @@
++++
+title = "Three-valued logic"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e1"]
+forwardlinks = ["4e3", "4e2a"]
+zettelid = "4e2"
++++
+
+Three-valued logic, and its extensions into infinitely-valued logic, is
+an interesting expansion on propositional logic, where more properties
+can be expressed. Especially when one has an evaluation function which
+can fail, one will have a result that is either true or false, or
+unevaluatable/undefined.
+
+There are many different possible definitions of a three-valued logic,
+each with its positives and its downsides. It depends on the
+application which one should be chosen. Each different logic will have
+slightly different definitions of and/or/implication. Three-valued logic
+can also have many additional useful connectives, such as weak and
+strong and/or.
diff --git a/content/zettel/4e2a.md b/content/zettel/4e2a.md
new file mode 100644
index 0000000..ccdaf48
--- /dev/null
+++ b/content/zettel/4e2a.md
@@ -0,0 +1,35 @@
++++
+title = "Kleene and Priest logics"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e2b", "4e2"]
+forwardlinks = ["4e2b", "4e2a1"]
+zettelid = "4e2a"
++++
+
+These logics are the more "standard", and seem to be the most
+straightforward definition of three-valued logic. For example, it has a
+lazy or and lazy and, becoming true and false when the answer could be
+nothing else. Otherwise, the result can also be undefined.
+
+| AND | F | U | T |
+|-----|-----|-----|-----|
+| F | F | F | F |
+| U | F | U | U |
+| T | F | U | T |
+
+| OR | F | U | T |
+|-----|-----|-----|-----|
+| F | F | U | T |
+| U | U | U | T |
+| T | T | T | T |
+
+Then, implication can be defined in terms of AND and OR. In addition to
+that NEG(U) = U.
+
+| A-\>B | F | U | T |
+|-------|-----|-----|-----|
+| F | T | T | T |
+| U | U | U | T |
+| T | F | U | T |
diff --git a/content/zettel/4e2a1.md b/content/zettel/4e2a1.md
new file mode 100644
index 0000000..b352a5c
--- /dev/null
+++ b/content/zettel/4e2a1.md
@@ -0,0 +1,14 @@
++++
+title = "Defining in terms of Min/Max"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e2a"]
+forwardlinks = []
+zettelid = "4e2a1"
++++
+
+`T` and `F` can be expressed in terms of 1 and -1 respectively, which
+means that AND and OR can be represented by MIN and MAX respectively,
+which also works when `U` is 0. This makes it quite easy to generalise
+the logic to infinite values.
diff --git a/content/zettel/4e2b.md b/content/zettel/4e2b.md
new file mode 100644
index 0000000..54b731a
--- /dev/null
+++ b/content/zettel/4e2b.md
@@ -0,0 +1,47 @@
++++
+title = "Łukasiewicz logic"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e2a"]
+forwardlinks = ["4e2a"]
+zettelid = "4e2b"
++++
+
+This logic has a different definition of implication. This has many
+benefits, especially when one wants to differentiate between undefined
+values and truth/false values. The main benefit is that one can actually
+have tautologies in this logic, even with undefined values, whereas in
+the Kleene logic ([\#4e2a]) there can be no tautologies, because
+assigning all variables to `U` will end up with `U` every time.
+
+| A-\>B | F | U | T |
+|-------|-----|-----|-----|
+| F | T | T | T |
+| U | U | T | T |
+| T | F | U | T |
+
+This logic has the exact same definition of AND and OR as Kleene logic
+([\#4e2a]), and these connectives can be expressed in terms of Ł3
+implication.
+
+```{=latex}
+\begin{align}
+ A \lor B &= (A \rightarrow B) \rightarrow B \\
+ A \land B &= \neg (\neg A \lor \neg B) \\
+ A \Leftrightarrow B &= (A \rightarrow B) \land (B \rightarrow A)
+\end{align}
+```
+We can then define additional unary operators using the following:
+
+```{=latex}
+\begin{align}
+ \mathcal{M} A &= \neg A \rightarrow A \\
+ \mathcal{L} A &= \neg \mathcal{M} \neg A \\
+ \mathcal{I} A &= \mathcal{M} A \land \neg \mathcal{L} A
+\end{align}
+```
+Especially the last, $\mathcal{I} A$, has interesting properties,
+because it will be 1 iff A is 0, and will be -1 otherwise.
+
+ [\#4e2a]: /zettel/4e2a
diff --git a/content/zettel/4e3.md b/content/zettel/4e3.md
new file mode 100644
index 0000000..6f11e82
--- /dev/null
+++ b/content/zettel/4e3.md
@@ -0,0 +1,36 @@
++++
+title = "Models in propositional logic"
+date = "2022-04-11"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e2"]
+forwardlinks = ["4e4"]
+zettelid = "4e3"
++++
+
+When $\Gamma \vDash A$ ($\Gamma$ models $A$), it means that semantically
+$A$ will hold in the environment of $\Gamma$. This semantics approach is
+quite different to what $\Gamma \vdash A$ ($A$ can be derived from
+$\Gamma$) says, which is that there is a derivation (which is inherently
+finite), that proves $A$ given the environment $\Gamma$.
+
+This means that to reason about the semantics of the proof framework
+requires a stronger, external model, to be able to manipulate the
+$\sigma$ in $M, \sigma\vDash A$, where $M$ is the model and $\sigma$ is
+the state of all the variables.
+
+Finally, using the semantic meaning of $A$ and the syntactic meaning of
+$A$, one can express soundness and completeness by the following:
+
+soundness
+: a formula for which one can derive a proof is also true:
+  $\Gamma \vdash A \implies \Gamma \vDash A$.
+
+completeness
+: if a formula is true, then there exists a derivation of its proof:
+  $\Gamma \vDash A \implies \Gamma \vdash A$.
+
+As these talk about the semantics of A, they also need to be reasoned
+about in an external logic, which is often just assumed to exist and is
+normally stronger than the current logic.
diff --git a/content/zettel/4e4.md b/content/zettel/4e4.md
new file mode 100644
index 0000000..d443be1
--- /dev/null
+++ b/content/zettel/4e4.md
@@ -0,0 +1,21 @@
++++
+title = "Realizability"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e3"]
+forwardlinks = ["4e5"]
+zettelid = "4e4"
++++
+
+Mathematicians like to work with abstract notions when proving theorems.
+However, often we would like to compute with that same abstract notion,
+so we want to find an equivalent model, or realizer, that will allow us
+to do that. This realizer is special though, in that it is an element of
+the set of partial combinator algebras (pca) ($r \in \mathbf{A}$).
+
+One can then create an Assembly $S$, which is composed of a type $|S|$
+and realizers which are part of $\mathbf{A}$. Then, it's necessary that
+for each element in $|S|$ there is a realizer:
+
+$r \Vdash p$ for $p \in |S|$ and $r \in \mathbf{A}$.
diff --git a/content/zettel/4e5.md b/content/zettel/4e5.md
new file mode 100644
index 0000000..78c116b
--- /dev/null
+++ b/content/zettel/4e5.md
@@ -0,0 +1,16 @@
++++
+title = "Homotopy Type Theory (HOTT)"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e4"]
+forwardlinks = ["4e6"]
+zettelid = "4e5"
++++
+
+Homotopy type theory is essentially extended Martin-Löf Type Theory
+(MLTT), with a better formulation of equality which allows it to express
+equality of types in a tower of equalities. This notion of equality
+comes from homotopy, but doesn't really need homotopy to understand the
+formulation of equality. Instead, it is just a different formulation,
+using the univalence axiom (UA) as the basis to the system.
diff --git a/content/zettel/4e6.md b/content/zettel/4e6.md
new file mode 100644
index 0000000..382b4ef
--- /dev/null
+++ b/content/zettel/4e6.md
@@ -0,0 +1,9 @@
++++
+title = "Proof Theory"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e5"]
+forwardlinks = []
+zettelid = "4e6"
++++
diff --git a/content/zettel/4f.md b/content/zettel/4f.md
new file mode 100644
index 0000000..eae8eb9
--- /dev/null
+++ b/content/zettel/4f.md
@@ -0,0 +1,23 @@
++++
+title = "Informal Mathematics"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["4e"]
+forwardlinks = []
+zettelid = "4f"
++++
+
+Often when I write the mathematics seem to be informal. For example, the
+following is meant to describe a **well-founded** relation:
+
+> some metric stored in the $\sim$ relation is decreasing
+
+Then, in many definitions I also do not seem to define things precisely.
+I think this is mostly because there is so much content in the Coq proof
+that cannot all be translated into words, and needs to be reduced
+instead.
+
+Another example is when describing hashed expressions, I think I can be
+a bit more precise about what these are semantically, instead of
+syntactically in the formalisation.
diff --git a/content/zettel/5a.md b/content/zettel/5a.md
new file mode 100644
index 0000000..34ad34e
--- /dev/null
+++ b/content/zettel/5a.md
@@ -0,0 +1,9 @@
++++
+title = "Circuits"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = []
+forwardlinks = ["5b", "5a1"]
+zettelid = "5a"
++++
diff --git a/content/zettel/5a1.md b/content/zettel/5a1.md
new file mode 100644
index 0000000..5737d52
--- /dev/null
+++ b/content/zettel/5a1.md
@@ -0,0 +1,14 @@
++++
+title = "Keyboard Circuit"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a"]
+forwardlinks = ["5a2", "5a1a"]
+zettelid = "5a1"
++++
+
+I just built my split keyboard and was therefore curious as to
+how the circuit in a keyboard actually worked. I had to solder various
+components onto the keyboard and wanted to know how the circuit then
+meant that it worked.
diff --git a/content/zettel/5a1a.md b/content/zettel/5a1a.md
new file mode 100644
index 0000000..aca3fe7
--- /dev/null
+++ b/content/zettel/5a1a.md
@@ -0,0 +1,35 @@
++++
+title = "Matrix scanning"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a1"]
+forwardlinks = ["5a1b"]
+zettelid = "5a1a"
++++
+
+The main idea of how the keyboard circuit works together with the
+embedded processor, is that instead of requiring an IO pin for each of
+the keys on the keyboard, it instead really only needs one IO pin for
+all the keys in the matrix. The keys are placed in a matrix circuit such
+that when a key is pressed, the columns of the matrix can be scanned
+sequentially to figure out where the key is.
+
+This can be done, for example, by connecting all the keys, which are
+also switches, into one column with the other keys, and then also
+connecting the keys in the same row together. To then identify if a key
+has been pressed, the micro-controller has to scan all the rows and
+columns. This is done by setting all the columns to high sequentially,
+and then scanning which rows receive that signal. This means that one
+can then get a matrix representation with a bit set for each key that
+has been set.
+
+However, there are situations when keys can be ghosted, where pressing
+three keys connects all the lines in the matrix, which means that it
+then is not possible to distinguish a key from the others. The larger
+the matrix, the more often keys can be ghosted. To solve this issue, we
+therefore have to stop current from flowing backwards in the matrix,
+which is the original cause of the ghosted keys. This can be done by
+adding diodes after each key, before the key connects to the row. This
+means that ghosted keys are therefore not possible, as the only way to
+go from one row to the other is if the switch in the current row is set.
diff --git a/content/zettel/5a1b.md b/content/zettel/5a1b.md
new file mode 100644
index 0000000..f7690d7
--- /dev/null
+++ b/content/zettel/5a1b.md
@@ -0,0 +1,18 @@
++++
+title = "Split keyboard communication"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a1a"]
+forwardlinks = []
+zettelid = "5a1b"
++++
+
+For a split keyboard, the two sides need to communicate with each other
+as keys can be pressed on either half and are identified by the
+micro-controller on each side. One micro-controller acts as a master and
+the other is the slave. These are connected by four cables, often in a
+TRRS cable, which can then communicate serially between each other.
+Power is also communicated through that wire. Then, when a key is
+pressed, it is sent to the master controller, which then sends the right
+keycodes to the computer.
diff --git a/content/zettel/5a2.md b/content/zettel/5a2.md
new file mode 100644
index 0000000..4880931
--- /dev/null
+++ b/content/zettel/5a2.md
@@ -0,0 +1,19 @@
++++
+title = "Hardware pipelining"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a1"]
+forwardlinks = ["5a2a"]
+zettelid = "5a2"
++++
+
+Pipelining is a common way to optimise hardware and use up
+more resources in the FPGA. There are various ways in which this can be
+done from an arbitrary circuit, so instead of representing the circuit
+as a single state machine, which can only execute one state at a time,
+pipelined hardware will process different parts of the input at
+different iterations. As a source for a lot of this information, I am
+using a blog post by ZipCPU [^1].
+
+[^1]: <https://zipcpu.com/blog/2017/08/14/strategies-for-pipelining.html>
diff --git a/content/zettel/5a2a.md b/content/zettel/5a2a.md
new file mode 100644
index 0000000..839eca0
--- /dev/null
+++ b/content/zettel/5a2a.md
@@ -0,0 +1,23 @@
++++
+title = "The global valid signal"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a2"]
+forwardlinks = ["5a2b"]
+zettelid = "5a2a"
++++
+
+This is the simplest way to design pipelined hardware, and can be useful
+for one important scenario, when the rate of the input data is constant.
+This allows the enable signal to be asserted at the rate at which new
+data will enter the pipeline, therefore advancing the data to the next
+stage. The logic for a pipeline stage using a global enable looks
+something like the following:
+
+``` verilog
+always @(posedge clk) if (CE) out <= $compute(in);
+```
+
+One main use-case of such an application is DSP, as signals will come in
+and exit at the rate that the ADC or DAC is sampling at.
diff --git a/content/zettel/5a2b.md b/content/zettel/5a2b.md
new file mode 100644
index 0000000..4bde16c
--- /dev/null
+++ b/content/zettel/5a2b.md
@@ -0,0 +1,25 @@
++++
+title = "Travelling CE"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a2a"]
+forwardlinks = ["5a2c"]
+zettelid = "5a2b"
++++
+
+One solution to the requirement of constant rate of the input data, is
+to have a travelling enable signal that sets each stage to be true
+sequentially. This can be formulated as the following:
+
+``` verilog
+initial o_ce = 1'b0;
+always @(posedge i_clk)
+ if (i_reset)
+ o_ce <= 1'b0;
+ else
+ o_ce <= i_ce;
+always @(posedge i_clk)
+ if (i_ce)
+ o_output <= $func(i_input);
+```
diff --git a/content/zettel/5a2c.md b/content/zettel/5a2c.md
new file mode 100644
index 0000000..33942ac
--- /dev/null
+++ b/content/zettel/5a2c.md
@@ -0,0 +1,16 @@
++++
+title = "Handshaking signals"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a2b"]
+forwardlinks = ["5a2d"]
+zettelid = "5a2c"
++++
+
+The problem with travelling CE, is that it is only useful when one
+single input signal is enough to implement the pipelining. In addition
+to that, the travelling CE does not allow for the next pipeline stage to
+communicate that it is busy with an operation. The idea, therefore, is
+to have a `STB` signal, and a `BUSY` signal, the former saying that the
+data is ready, and the latter saying that the module is busy.
diff --git a/content/zettel/5a2d.md b/content/zettel/5a2d.md
new file mode 100644
index 0000000..85e0f17
--- /dev/null
+++ b/content/zettel/5a2d.md
@@ -0,0 +1,13 @@
++++
+title = "Pipeline design in Cλash"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a2c"]
+forwardlinks = ["1e"]
+zettelid = "5a2d"
++++
+
+Cλash is a mixture of HLS ([\#1e]) and Chisel like design.
+
+ [\#1e]: /zettel/1e
diff --git a/content/zettel/5b.md b/content/zettel/5b.md
new file mode 100644
index 0000000..efce74a
--- /dev/null
+++ b/content/zettel/5b.md
@@ -0,0 +1,10 @@
++++
+title = "Arithmetic"
+date = "2022-11-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5a"]
+forwardlinks = ["5b1"]
+zettelid = "5b"
++++
diff --git a/content/zettel/5b1.md b/content/zettel/5b1.md
new file mode 100644
index 0000000..3563b0e
--- /dev/null
+++ b/content/zettel/5b1.md
@@ -0,0 +1,10 @@
++++
+title = "Adder Circuits"
+date = "2022-11-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5b2", "5b"]
+forwardlinks = ["5b2", "5b1a"]
+zettelid = "5b1"
++++
diff --git a/content/zettel/5b1a.md b/content/zettel/5b1a.md
new file mode 100644
index 0000000..1d523c9
--- /dev/null
+++ b/content/zettel/5b1a.md
@@ -0,0 +1,18 @@
++++
+title = "Carry-Propagate Adders"
+date = "2022-11-09"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5b1b", "5b1"]
+forwardlinks = ["5b1b"]
+zettelid = "5b1a"
++++
+
+These adders, like the ripple-carry and carry look-ahead adders perform
+proper additions by propagating the carry throughout the number.
+However, this comes at a cost of parallelism, because the carry has to
+be propagated sequentially. Carry look-ahead adders are interesting
+because they can predict the propagation of a carry within larger
+blocks, and then only have to do the sequential carry propagation inside
+each unit.
diff --git a/content/zettel/5b1b.md b/content/zettel/5b1b.md
new file mode 100644
index 0000000..f4fbbd5
--- /dev/null
+++ b/content/zettel/5b1b.md
@@ -0,0 +1,18 @@
++++
+title = "Carry-Save Adder"
+date = "2022-11-09"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5b3", "5b1a"]
+forwardlinks = ["5b1a"]
+zettelid = "5b1b"
++++
+
+The carry-save adder is interesting because it allows you to add $n > 2$
+numbers together in a very parallel way, where the carries are then
+reconciled in the end using a standard carry-propagate adder ([\#5b1a]).
+This is done by having an array of full-adders which don't feed the
+carry into the next full-adder, but instead save the carry for the end.
+
+ [\#5b1a]: /zettel/5b1a
diff --git a/content/zettel/5b2.md b/content/zettel/5b2.md
new file mode 100644
index 0000000..7142794
--- /dev/null
+++ b/content/zettel/5b2.md
@@ -0,0 +1,22 @@
++++
+title = "Multipliers"
+date = "2022-11-08"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5b1"]
+forwardlinks = ["5b1", "5b3"]
+zettelid = "5b2"
++++
+
+Multipliers are implemented in terms of adders. For example, one way of
+implementing multiplication would just be to perform multiple additions.
+This could be a long-multiplication algorithm, where the digits are
+multiplied together, and then shifted and added together. By default, if
+one uses a naïve addition algorithm, then this will be a very sequential
+algorithm. The first improvement would be to implement a tree of
+additions, which reduces the dependencies and delays between the adders.
+The second improvement could be to use more parallel adder algorithms,
+like described in the adder circuit section ([\#5b1]).
+
+ [\#5b1]: /zettel/5b1
diff --git a/content/zettel/5b3.md b/content/zettel/5b3.md
new file mode 100644
index 0000000..b231241
--- /dev/null
+++ b/content/zettel/5b3.md
@@ -0,0 +1,21 @@
++++
+title = "MAC Optimisations"
+date = "2022-11-09"
+author = "Yann Herklotz"
+tags = []
+categories = []
+backlinks = ["5b2", "1c4b1"]
+forwardlinks = ["5b1b"]
+zettelid = "5b3"
++++
+
+One useful optimisation is performing a fused multiply and add, which
+can be done directly using the DSP units on the FPGA. One example of
+such an implementation is using a carry-save adder ([\#5b1b]) actually,
+as the multiplication will use various additions, which can just be
+chained as a large array of full-adders like in the carry-save
+adder. Then, the accumulator can also be added to this parallel chain of
+full-adders before the carry is reconciled with a propagate adder at the
+end.
+
+ [\#5b1b]: /zettel/5b1b
diff --git a/layouts/_default/list.html b/layouts/_default/list.html
new file mode 100644
index 0000000..744f390
--- /dev/null
+++ b/layouts/_default/list.html
@@ -0,0 +1,17 @@
+{{ partial "header.html" . }}
+
+{{if not .IsHome }}
+<h1>{{ .Title | markdownify }}</h1>
+{{ end }}
+
+{{ .Content }}
+
+<ul>
+ {{ $pages := .Pages }}
+ {{ if .IsHome }}{{ $pages = .Site.RegularPages }}{{ end }}
+ {{ range (where $pages "Section" "!=" "") }}
+ {{ partial "post-element.html" . }}
+ {{ end }}
+</ul>
+
+{{ partial "footer.html" . }}
diff --git a/layouts/_default/single.html b/layouts/_default/single.html
new file mode 100644
index 0000000..f3953af
--- /dev/null
+++ b/layouts/_default/single.html
@@ -0,0 +1,30 @@
+{{ partial "header.html" . }}
+<div class="article-meta">
+<h1><span class="title">{{ .Title | markdownify }}</span></h1>
+<!--{{ with .Params.author }}<h2 class="author">{{ . }}</h2>{{ end }}-->
+{{ if (gt .Params.date 0) }}<h2 class="date">{{ .Date.Format "2006/01/02" }}</h2>{{ end }}
+</div>
+
+<main>
+{{ .Content }}
+</main>
+
+ {{ if (.Page.Param "backlinks") }}
+ <h2>Referenced from</h2>
+ {{ range (.Page.Param "backlinks") }}
+ {{ with ($.Site.GetPage (printf "/zettel/%s" .)) }}
+ {{ partial "post-element.html" . }}
+ {{- end }}
+ {{ end }}
+ {{ end }}
+
+ {{ if (.Page.Param "forwardlinks") }}
+ <h2>Links to</h2>
+ {{ range (.Page.Param "forwardlinks") }}
+ {{ with ($.Site.GetPage (printf "/zettel/%s" .)) }}
+ {{ partial "post-element.html" . }}
+ {{- end }}
+ {{ end }}
+ {{ end }}
+
+{{ partial "footer.html" . }}
diff --git a/layouts/partials/head_custom.html b/layouts/partials/head_custom.html
new file mode 100644
index 0000000..47ba699
--- /dev/null
+++ b/layouts/partials/head_custom.html
@@ -0,0 +1,19 @@
+<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.7/dist/katex.min.css" integrity="sha384-3UiQGuEI4TTMaFmGIZumfRPtfKQ3trwQE2JgosJxCnGmQpL/lJdjpcHkaaFwHlcI" crossorigin="anonymous">
+<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.7/dist/katex.min.js" integrity="sha384-G0zcxDFp5LWZtDuRMnBkk3EphCK1lhEf4UEyEM693ka574TZGwo4IWwS6QLzM/2t" crossorigin="anonymous"></script>
+<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.7/dist/contrib/auto-render.min.js" integrity="sha384-+VBxd3r6XgURycqtZ117nYw44OOcIax56Z4dCRWbxyPt0Koah1uHoK0o4+/RRE05" crossorigin="anonymous"></script>
+<script>
+ document.addEventListener("DOMContentLoaded", function() {
+ renderMathInElement(document.body, {
+ // customised options
+ // • auto-render specific keys, e.g.:
+ delimiters: [
+ {left: '$$', right: '$$', display: true},
+ {left: '$', right: '$', display: false},
+ {left: '\\(', right: '\\)', display: false},
+ {left: '\\[', right: '\\]', display: true}
+ ],
+ // • rendering keys, e.g.:
+ throwOnError : false
+ });
+ });
+</script>
diff --git a/layouts/partials/post-element.html b/layouts/partials/post-element.html
new file mode 100644
index 0000000..b45a1e1
--- /dev/null
+++ b/layouts/partials/post-element.html
@@ -0,0 +1,4 @@
+<li>
+ <span class="date">{{ .Date.Format "2006/01/02" }}</span>
+ <a href="{{ .RelPermalink }}">{{ .Title | markdownify }}</a>
+</li>
diff --git a/layouts/shortcodes/transclude-1.html b/layouts/shortcodes/transclude-1.html
new file mode 100644
index 0000000..80a24da
--- /dev/null
+++ b/layouts/shortcodes/transclude-1.html
@@ -0,0 +1,4 @@
+<div class="transclude-1">
+ <h2><a class="transclude-link" href="/zettel/{{ .Get "zettel" }}">{{ .Get "zettel" }}: {{ with ($.Site.GetPage (printf "/zettel/%s" (.Get "zettel"))) }}{{ .Title | markdownify }}{{ end }}</a></h2>
+ {{ .Inner | markdownify }}
+</div>
diff --git a/layouts/shortcodes/transclude-2.html b/layouts/shortcodes/transclude-2.html
new file mode 100644
index 0000000..a63de87
--- /dev/null
+++ b/layouts/shortcodes/transclude-2.html
@@ -0,0 +1,4 @@
+<div class="transclude-2">
+ <h3><a class="transclude-link" href="/zettel/{{ .Get "zettel" }}">{{ .Get "zettel" }}: {{ with ($.Site.GetPage (printf "/zettel/%s" (.Get "zettel"))) }}{{ .Title | markdownify }}{{ end }}</a></h3>
+ {{ .Inner | markdownify }}
+</div>
diff --git a/layouts/shortcodes/transclude-3.html b/layouts/shortcodes/transclude-3.html
new file mode 100644
index 0000000..1e43c85
--- /dev/null
+++ b/layouts/shortcodes/transclude-3.html
@@ -0,0 +1,4 @@
+<div class="transclude-3">
+ <h4><a class="transclude-link" href="/zettel/{{ .Get "zettel" }}">{{ .Get "zettel" }}: {{ with ($.Site.GetPage (printf "/zettel/%s" (.Get "zettel"))) }}{{ .Title | markdownify }}{{ end }}</a></h4>
+ {{ .Inner | markdownify }}
+</div>
diff --git a/themes/hugo-xmin b/themes/hugo-xmin
new file mode 160000
+Subproject 67f2ed048d0494891a6e9ea8ed2e2bffda43190