Commit 8ba6920ff4 ("parsing layout") - Amélia Liao, 2 years ago

31 changed files with 1617 additions and 574 deletions
 1. css/colors.scss (+0, -31)
 2. css/default.scss (+301, -462)
 3. pages/contact.md (+24, -6)
 4. pages/index.html (+11, -7)
 5. pages/posts/2016-08-17-parsec.md (+1, -0)
 6. pages/posts/2016-08-23-hasochism.lhs (+1, -0)
 7. pages/posts/2016-08-26-parsec2.lhs (+1, -0)
 8. pages/posts/2018-02-18-amulet-tc2.md (+1, -0)
 9. pages/posts/2020-01-31-lazy-eval.lhs (+0, -4)
10. pages/posts/2020-10-30-reflecting-equality.md (+3, -3)
11. pages/posts/2021-06-21-cubical-sets.md (+5, -9)
12. pages/posts/2021-09-03-parsing-layout.md (+884, -0)
13. rubtmpjd3t_2bh.log (+232, -0)
14. site.hs (+75, -17)
15. static/icon/pfp-alt.png (BIN)
16. static/icon/[email protected] (BIN)
17. static/icon/[email protected] (BIN)
18. static/icon/[email protected] (BIN)
19. static/icon/[email protected] (BIN)
20. static/icon/pfp-tired.png (BIN)
21. static/icon/[email protected] (BIN)
22. static/icon/[email protected] (BIN)
23. static/icon/[email protected] (BIN)
24. static/icon/[email protected] (BIN)
25. static/not-doom.svg (+31, -0)
26. templates/archive.html (+6, -2)
27. templates/default.html (+20, -26)
28. templates/page.html (+5, -0)
29. templates/post-list.html (+1, -1)
30. templates/post.html (+12, -6)
31. test.dot (+3, -0)

css/colors.scss (+0, -31)

@ -1,31 +0,0 @@
$black: #282c34;
$white: #abb2bf;
$light_red: #e06c75;
$dark_red: #be5046;
$green: #98c379;
$light_yellow: #e5c07b;
$dark_yellow: #d19a66;
$blue: #61afef;
$magenta: #c678dd;
$cyan: #56b6c2;
$gutter_grey: #4b5263;
$comment_grey: #5c6370;
$orange: #ffac5f;
$blonde: #f5ddbc;
$light-purple: #f2f1f8;
$purple: #7060eb;
$yugo: #ea8472;
$code-background: $black;
$code-language-background: lighten($black, 5%);
$code-language-color: $white;
$code-fg: $white;
$code-kw: $light_red;
$code-dt: $dark_yellow;
$code-ot: $code-fg;
$code-co: $comment_grey;
// comment

css/default.scss (+301, -462)

@ -1,596 +1,435 @@
$purple: #4834d4;
$orange: #ffbe76;
$blonde: #f5ddbc;
$light-purple: #faf0fa;
$yugo: #ea8472;
$pink_glamour: #ff7979;
$carmine_pink: #eb4d4b;
$header: $orange;
$header-height: 50px;
$max-width: 80ch;
.mathpar, .math-paragraph {
$purple-50: #faf5ff;
$purple-100: #f3e8ff;
$purple-200: #e9d5ff;
$purple-300: #d8b4fe;
$purple-400: #c084fc;
$purple-500: #a855f7;
$purple-600: #9333ea;
$purple-700: #7e22ce;
$purple-800: #6b21a8;
$purple-900: #581c87;
$yellow-50: #fefce8;
$yellow-100: #fef9c3;
$yellow-200: #fef08a;
$yellow-300: #fde047;
$yellow-400: #facc15;
$yellow-500: #eab308;
$yellow-600: #ca8a04;
$yellow-700: #a16207;
$yellow-800: #854d0e;
$yellow-900: #713f12;
$bluegray-50: #f8fafc;
$bluegray-100: #f1f5f9;
$bluegray-200: #e2e8f0;
$bluegray-300: #cbd5e1;
$bluegray-400: #94a3b8;
$bluegray-500: #64748b;
$bluegray-600: #475569;
$bluegray-700: #334155;
$bluegray-800: #1e293b;
$bluegray-900: #0f172a;
$red-50: #fef2f2;
$red-100: #fee2e2;
$red-200: #fecaca;
$red-300: #fca5a5;
$red-400: #f87171;
$red-500: #ef4444;
$red-600: #dc2626;
$red-700: #b91c1c;
$red-800: #991b1b;
$red-900: #7f1d1d;
$nav-height: 48px;
$font-size: 14pt;
@mixin center-that-bitch {
display: flex;
flex-direction: row;
flex-wrap: wrap;
justify-content: space-around;
flex-direction: column;
align-items: center;
> figure {
width: auto;
}
}
a#mastodon {
display: none;
}
html, body {
html {
min-height: 100%;
height: 100%;
margin: 0;
background-color: white;
max-width: 100%;
margin: 0;
font-family: -apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Oxygen-Sans,Ubuntu,Cantarell,"Helvetica Neue",sans-serif;
overflow-x: clip;
}
body {
display: flex;
flex-direction: column;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
counter-reset: theorem figure;
}
width: 100%;
div#header {
background-color: $header;
height: $header-height;
@include center-that-bitch;
margin: 0;
color: $bluegray-900;
font-size: $font-size;
overflow-x: clip;
}
header {
background-color: $purple-600;
display: flex;
flex-direction: row;
align-items: stretch;
height: $nav-height;
width: 100%;
z-index: 999;
position: fixed;
justify-content: space-between;
flex-wrap: nowrap;
overflow-x: auto;
align-items: center;
div#logo {
margin-left: .5em;
line-height: $header-height;
padding-left: 1em;
padding-right: 1em;
padding-left: .5em;
padding-right: .5em;
font-size: 18pt;
border-bottom: 3px solid $purple-700;
box-sizing: border-box;
a {
color: black;
text-decoration: none;
}
div, nav > a {
height: $nav-height;
padding-left: 0.3em;
padding-right: 0.3em;
transition: background-color .2s ease-in-out;
display: flex;
align-items: center;
}
div#logo:hover {
background-color: darken($header, 10%);
div:hover, nav > a:hover {
background-color: $purple-500;
transition: background-color 0.3s ease-in-out;
}
div#navigation {
margin-right: .5em;
a {
color: white;
font-size: $font-size * 1.2;
text-decoration: none;
}
nav {
display: flex;
flex-direction: row;
align-items: stretch;
align-self: flex-end;
justify-content: flex-end;
align-items: center;
gap: 0.5em;
}
}
line-height: $header-height;
@mixin left-bordered-block($color) {
padding-left: 1em;
padding-top: 0.2em;
padding-bottom: 0.2em;
a {
color: black;
text-decoration: none;
border-left: 5px dashed $color;
}
margin-left: .5em;
margin-right: .5em;
}
@mixin material {
padding-left: 1em;
padding-top: 0.2em;
padding-bottom: 0.2em;
div.nav-button {
height: 100%;
transition: background-color .2s ease-in-out;
}
div.nav-button:hover {
background-color: darken($header, 10%);
}
}
margin-top: 1em;
margin-bottom: 1em;
padding-right: 1em;
box-shadow: 2px 2px 6px black;
}
div#content, div.post-list-synopsys {
max-width: $max-width + 4ch;
main {
max-width: 100ch;
width: 100%;
margin: 0px auto 0px auto;
flex: 1 0 auto;
padding: 2ch;
padding-top: $nav-height;
span.katex, span.together {
display: inline-block;
}
span.together {
white-space: nowrap;
}
p {
code {
display: inline-block;
}
span.qed {
float: right;
}
box-sizing: border-box;
span.theorem ::after {
counter-increment: theorem;
content: " " counter(theorem);
}
span.theorem, span.paragraph-marker {
font-style: italic;
div#post-toc-container {
aside#toc {
display: none;
}
span.word {
display: inline-block;
article {
grid-column: 2;
width: 100%;
line-height: 1.5;
}
}
p.image {
text-align: center !important;
}
div#post-info {
font-style: italic;
line-height: 1.2;
h1 {
max-width: 120ch;
@include left-bordered-block($bluegray-500);
}
}
> article {
> blockquote {
width: 85%;
margin: auto;
border-left: 12px solid $purple;
border-right: 3px dashed $purple;
border-top: 3px dashed $purple;
border-bottom: 3px dashed $purple;
padding: 2ch;
}
> div.code-container {
// width: 80%;
// margin: auto;
pre {
padding: 0px 1em;
width: 80%;
margin: auto;
}
> span {
display: inline;
}
}
}
div.code-container {
padding-top: 5px;
background-color: $light-purple;
border-radius: 5px;
border: 1px solid darken($light-purple, 10%);
overflow-x: auto;
> div.code-tag {
background-color: darken($light-purple, 10%);
overflow: hidden;
line-height: 80%;
font-size: 12pt;
margin-top: 5px;
width: 100%;
span {
padding: 0.5em 1em 0.25em 1em;
border-top-left-radius: 10px;
div.warning {
@include material;
background-color: $red-200;
}
box-shadow: 1px 1px darken($light-purple, 20%);
// display: none;
figure.wraparound {
float: right;
width: auto;
float: right;
}
}
margin-left: 2em;
}
pre {
overflow-x: auto;
}
figure {
overflow-x: auto;
overflow-y: clip;
width: 100%;
margin: auto;
div.sourceCode {
padding: 0px .5em;
}
@include center-that-bitch;
figcaption {
margin-top: 0.3em;
display: inline-block;
text-align: center;
}
div.code-container.custom-tag {
> span::after {
display: none;
}
p {
margin: 0;
}
}
div.code-container.continues {
> span {
display: none;
}
.katex-display {
> span.katex {
white-space: normal;
}
}
div.warning {
&:before {
content: "Heads up!";
display: block;
font-size: 16pt;
}
margin: 16px auto;
width: 85%;
border-left: 12px solid $carmine_pink;
border-right: 3px dashed $carmine_pink;
border-top: 3px dashed $carmine_pink;
border-bottom: 3px dashed $carmine_pink;
padding: 2ch;
&>:first-child {
margin-top: 0.5em;
}
&>:last-child {
margin-bottom: 0px;
}
}
div.mathpar {
display: flex;
flex-flow: row wrap;
justify-content: center;
align-items: center;
div.text-image {
display: flex;
margin-bottom: -18px;
gap: 1em;
> figure {
min-width: 25px;
padding-left: 2em;
}
> figure {
width: auto;
max-width: 33%;
}
}
* {
max-width: 100%;
}
.sourceCode {
font-size: $font-size;
}
details {
summary {
margin-left: 1em;
font-size: 14pt;
padding: .2em 1em;
}
div.sourceCode {
background-color: $yellow-50;
border-radius: 5px;
border: 1px solid $purple;
@include material;
padding: .2em 1em;
flex-grow: 0;
height: auto;
}
background-color: white;
div.code-container {
padding: 0;
display: flex;
flex-direction: column;
margin-top: 0.5em;
margin-bottom: 0.5em;
box-shadow: 2px 2px 6px black;
transition: font-size 0.3s ease-in-out, background-color 0.3s ease-in-out;
transform: rotate3d(0, 0, 0, 0);
}
> div.sourceCode, > pre {
background-color: $yellow-50;
details[open] {
font-size: 14pt;
}
border: 0;
box-shadow: none;
padding: 2em;
padding-bottom: 0.5em;
padding-top: 0.5em;
.special-thanks {
display: flex;
flex-direction: column;
align-items: center;
justify-content: space-around;
margin: 0;
p {
font-size: 21pt;
margin-top: .5em;
margin-bottom: 0px;
pre {
margin-top: 5px;
margin-bottom: 5px;
}
ul {
list-style: none;
li {
margin: .5em 0px;
border-radius: 10px;
border: 1px solid $purple;
background-color: $light-purple;
padding: 1em;
a {
text-decoration: underline;
}
transition: all 0.2s ease-in-out;
}
li:hover {
border-radius: 5px;
transform: scale(1.01);
background-color: lighten($light-purple, 5%);
}
}
overflow-x: auto;
}
.eqn-list {
list-style: none;
direction: rtl;
counter-reset: equation;
li {
counter-increment: equation;
// this is fucking criminal lmao
div.empty-code-tag {
background-color: $yellow-50;
}
span.katex-display {
margin: 2px;
}
div.code-tag {
display: flex;
flex-direction: row-reverse;
height: 1.4em;
* {
direction: ltr;
}
}
background-color: $yellow-300;
padding: 0.2em;
li::marker {
content: "(" counter(equation) ")";
font-family: KaTeX_Main, Times New Roman, serif;
span {
@include center-that-bitch;
margin-right: 0.4em;
}
}
font-size: 14pt;
line-height: 1.4;
span.katex-display {
overflow-x: auto;
overflow-y: clip;
}
}
.footnote-back {
margin-left: 0.5em;
}
blockquote {
@include left-bordered-block($bluegray-700);
div.info, span#reading-length {
padding-left: 1em;
font-style: italic;
}
background-color: $bluegray-100;
span#reading-length::before {
content: "Word count: "
padding-left: 2.5em;
margin-left: 0;
}
div#footer {
display: flex;
align-items: center;
justify-content: space-between;
padding-left: 1em;
padding-right: 1em;
background-color: $light-purple;
height: 50px;
}
table {
width: 70%;
margin: auto;
border-collapse: collapse;
.definition {
text-decoration: dotted underline;
td, th {
text-align: center;
padding: 0px 1em 0px 1em;
border: 2px solid $purple-400;
}
}
.post-list {
list-style: none;
padding: 0px;
ul#post-list {
list-style-type: none;
display: flex;
flex-direction: column;
gap: 2em;
width: 90%;
margin: auto;
.post-list-item {
@include left-bordered-block($yellow-500);
div.post-list-item {
margin-top: .2em;
margin-bottom: .2em;
background-color: $yellow-50;
padding: 1em;
border-radius: 10px;
background-color: $light-purple;
.post-list-header {
margin-top: 0.2em;
div.post-list-header {
display: flex;
justify-content: space-between;
flex-wrap: wrap;
align-items: flex-end;
line-height: 14pt;
font-style: italic;
font-size: 10pt;
a {
font-size: 14pt;
padding-right: 2em;
font-style: normal;
color: $bluegray-800;
}
}
font-size: 11pt;
}
}
table {
margin: auto;
border-collapse: collapse;
td, th {
border: 1px solid $purple;
text-align: center;
padding: 0px 1em 0px 1em;
> * {
vertical-align: middle;
}
}
td.image {
padding: 0px;
img {
margin-top: auto;
}
}
}
figure {
width: 100%;
margin: auto;
div.contact-list {
display: flex;
flex-direction: column;
align-items: center;
justify-content: space-around;
justify-content: space-evenly;
align-items: stretch;
gap: 3em;
div {
width: 100%;
overflow-x: auto;
}
div.contact-card {
background-color: $purple-200;
@include material;
> p {
margin-bottom: 0px;
}
width: 33%;
max-width: 33%;
flex-grow: 1;
figcaption {
font-size: 14pt;
display: inline-block;
> p {
margin-top: 0px;
margin-bottom: 0px;
p {
margin: 0;
}
}
// figcaption::before {
// counter-increment: figure;
// content: "Figure " counter(figure) ". ";
// display: inline;
// }
}
@media only screen and (max-width: $max-width) {
div#content {
margin-left: 1px;
margin-right: 1px;
padding: 1ch;
div.text-image {
display: block;
> figure {
padding-left: 0px;
}
}
div.contact-header {
// I really hate Pandoc sometimes
display: flex;
align-items: center;
gap: 1em;
figure figcaption {
max-width: 85%;
margin: auto;
margin-bottom: 10px;
p {
font-style: italic;
img {
height: 48px;
clip-path: url(#squircle);
}
}
ul {
padding-left: 20px;
}
p {
hyphens: auto;
span.katex {
display: inline;
span.username {
font-size: 16pt;
}
}
}
}
.mathpar {
overflow-x: auto;
justify-content: space-evenly;
> * {
margin-left: .2em;
margin-right: .2em;
flex-shrink: 0;
flex-grow: 0;
}
}
@media only screen and (max-width: 450px) {
header {
nav div {
&>:nth-child(3) {
display: none;
}
div#logo {
width: 100%;
display: flex;
flex-direction: row;
justify-content: center;
}
nav {
display: none;
}
}
}
// Contact page
.contact-list {
display: flex;
flex-direction: row;
flex-wrap: wrap;
justify-content: space-around;
align-items: center;
@media only screen and (min-width: 1500px) {
main {
max-width: 100%;
> h1 {
font-size: 26pt;
@include center-that-bitch;
}
.contact-card {
margin: 0px .5em;
div#post-toc-container {
display: grid;
grid-template-columns: 0.5fr 2fr 0.5fr;
border: 1px solid $purple;
border-radius: 10px;
background-color: $blonde;
aside#toc {
display: block !important;
max-width: 200px;
padding: 0px 1em;
h3 { @include center-that-bitch; }
.username, .username * {
font-size: 21pt;
color: $purple;
}
ul {
border-left: 2px solid $bluegray-400;
list-style-type: none;
padding-left: 1em;
p {
display: flex;
align-items: center;
justify-content: space-evenly;
a {
text-decoration: none;
}
}
}
max-width: 40ch !important;
article {
max-width: 100ch;
margin-top: -100px;
margin: auto;
}
}
transition: all 0.2s ease-in-out;
}
.contact-card:hover {
margin: 0px .55em;
background-color: darken($blonde, 10%);
border-radius: 5px;
transform: scale(1.01);
}
}
span.math.inline {
display: inline-block;
}
@font-face {
font-family: 'Fantasque Sans Mono';
src: url('fonts/FantasqueSansMono-Regular.woff2') format('woff2');
font-weight: 400;
font-style: normal;
}
details {
margin-top: 1em;
margin-bottom: 1em;
}

pages/contact.md (+24, -6)

@ -9,18 +9,36 @@ span#reading-length { display: none; }
<div class="contact-list"> <div class="contact-list">
<div class="contact-card"> <div class="contact-card">
<span class="username">Abbie#4600</span>
<div class="contact-header">
<img alt="profile picture" title="Abbie's profile picture" src=/static/icon/[email protected] />
<span class="username">Abbie#4600</span>
</div>
My Discord friend requests are always open. Feel free to add me for questions or comments!
My Discord friend requests are always open. Feel free to add me for questions or comments!
</div> </div>
<div class="contact-card"> <div class="contact-card">
<span class="username">{abby}</span>
<div class="contact-header">
<img alt="profile picture" title="Abbie's profile picture" src=/static/icon/[email protected] />
<span class="username">@plt_abbie</span>
</div>
<span>
I am unhealthily active on the bird website, so follow me on Twitter to stay up to date with what I think about.. everything!
</span>
</div>
<span>
Message me directly on <a href="https://libera.chat">Libera</a> IRC, or join `##dependent` to talk about types!
</span>
<div class="contact-card">
<div class="contact-header">
<img alt="profile picture" title="Abbie's profile picture" src=/static/icon/[email protected] />
<span class="username">abbie</span>
</div>
<span>
Message me directly on <a href="https://libera.chat">Libera</a> IRC, or join `##dependent` to talk about types!
</span>
</div> </div>
</div> </div>
If you like what I do, here are some ways you can support this blog: If you like what I do, here are some ways you can support this blog:


pages/index.html (+11, -7)

@ -2,12 +2,16 @@
 title: Home
 ---
+<div id="post-toc-container">
+<article>
 <p>
 You've reached Abigail's blog, a place where I exposit and reflect on programming languages and type theory.
 Here's the posts I've written recently:
 </p>
 <h2>Posts</h2>
 $partial("templates/post-list.html")$
 <p>…or you can find more in the <a href="/archive.html">archives</a>.</p>
+</article>
+</div>

pages/posts/2016-08-17-parsec.md (+1, -0)

@ -1,6 +1,7 @@
 ---
 title: You could have invented Parsec
 date: August 17, 2016 01:29 AM
+synopsys: 2
 ---
 As most of us should know, [Parsec](https://hackage.haskell.org/package/parsec)


pages/posts/2016-08-23-hasochism.lhs (+1, -0)

@ -1,6 +1,7 @@
 ---
 title: Dependent types in Haskell - Sort of
 date: August 23, 2016
+synopsys: 2
 ---
 **Warning**: An intermediate level of type-fu is necessary for understanding


pages/posts/2016-08-26-parsec2.lhs (+1, -0)

@ -1,6 +1,7 @@
 ---
 title: Monadic Parsing with User State
 date: August 26, 2016
+synopsys: 2
 ---
 > {-# LANGUAGE FlexibleInstances, MultiParamTypeClasses #-}


pages/posts/2018-02-18-amulet-tc2.md (+1, -0)

@ -1,6 +1,7 @@
 ---
 title: Amulet's New Type Checker
 date: February 18, 2018
+synopsys: 2
 ---
 In the last post about Amulet I wrote about rewriting the type checking


pages/posts/2020-01-31-lazy-eval.lhs (+0, -4)

@ -840,7 +840,6 @@ comprises, and implement state transitions corresponding to each of the
 instructions above.
 \begin{code}
-
 type Addr = Int
 data GmNode
@ -945,7 +944,6 @@ Now we can define the stepper function that takes one step to its
 successor:
 \begin{code}
-
 step :: GmState -> GmState
 step state@GmState{ code = [] } = error "step final state"
 step state@GmState{ code = i:is } =
@ -958,7 +956,6 @@ The many cases of the `instruction` function represent the various
 transition rules for each instruction we detailed above.
 \begin{code}
-
 instruction (Push val) st@GmState{..} =
   case val of
     Global str -> st { stack = globals Map.! str:stack }
@ -1169,7 +1166,6 @@ reference table", or SRT for short. In our simulator, this would be a
 `Set` of `Addr`s that each supercombinator keeps alive.
 \begin{code}
-
 liveAddrs :: GmState -> Set Addr
 liveAddrs GmState{..} = roots <> foldMap explore roots where
   roots = Set.fromList stack <> foldMap (Set.fromList . fst) dump


pages/posts/2020-10-30-reflecting-equality.md (+3, -3)

@ -193,16 +193,16 @@ $$
 \frac{\Gamma, i : \mathbb{I} \vdash t : A}{\Gamma \vdash \langle i \rangle\ t : \mathrm{Path}\ A\ t[0/i]\ t[1/i]}
 $$
 </div>
-</figure>
 <figcaption>Path formation</figcaption>
+</figure>
 <figure>
 <div>
 $$
 \frac{\Gamma, t : \mathrm{Path}\ A\ a\ b\quad\Gamma \vdash r : \mathbb{I}}{\Gamma \vdash t\ r : A}
 $$
 </div>
-</figure>
 <figcaption>Path elimination</figcaption>
+</figure>
 </div>
 The intuition, the authors say, is that a term with $n$ variables of $\mathbb{I}$-type free corresponds to an $n$-dimensional cube.
@ -222,7 +222,7 @@ The intuition, the authors say, is that a term with $n$ variables of $\mathbb{I}
 </tr>
 </table>
-This is about where anyone's "intuition" for Cubical Type Theory, especially my own, flies out the window. Specifically, using abstraction and the de Morgan algebra on names, we can define operations such as reflexivity ($\langle i \rangle a : \mathrm{Path}\ A\ a\ a$), symmetry ($\lambda p. \langle i \rangle p\ (1 - i) : \mathrm{Path}\ A\ a\ b \to \mathrm{Path}\ A\ b\ a$), congruence, and even function extensionality, which has a delightfully simple proof: $\lambda p. \langle i \rangle\ \lambda x. p\ x\ i$.
+This is about where anyone's "intuition" for Cubical Type Theory, especially my own, flies out the window. Specifically, using abstraction and the de Morgan algebra on names, we can define operations such as reflexivity (introduced with $\langle i \rangle a : \mathrm{Path}\ A\ a\ a$), symmetry ($\lambda p. \langle i \rangle p\ (1 - i) : \mathrm{Path}\ A\ a\ b \to \mathrm{Path}\ A\ b\ a$), congruence, and even function extensionality, which has a delightfully simple proof: $\lambda p. \langle i \rangle\ \lambda x. p\ x\ i$.
 However, to transport along these paths, the paper defines a "face lattice", which consists of constraints on elements of the interval, uses that to define "systems", which are arbitrary restrictions of cubes; From systems, one can define "composition", which compute the lid of an open box (yeah, I don't get it either), "Kan filling", and finally, transport. Since the authors give a semantics of Cubical Type Theory in a previously well-established model of cubical sets, I'll just.. take their word on this.


pages/posts/2021-06-21-cubical-sets.md (+5, -9)

@ -132,19 +132,15 @@ Now we can investigate a particular $n$-cube in $X$ as being a diagram in $X$ wi
 In this diagram too we can understand the lower-dimensional cubes contained in $\sigma$ to be compositions $\sigma \circ \yo(p)$ for some composition of face maps $p : [m] \to [2], m \le 2$. As an example (the same example as in the section on &cube;), the arrow $p$ is the map $\sigma \circ \yo(\delta^0_0)$, and the point $b$ is the map $\sigma \circ \yo(\delta^0_0) \circ \yo(\delta^1)$. By functoriality of $\yo$, that composite is the same thing as $\sigma \circ \yo(\delta^0_0 \circ \delta^1)$.
-<div class="text-image">
-<div class="ti-text">
-A $3$-cube in X is a map $\aleph : \square^3 \to X$, which could be visualized as the proper _cube_ below, and has 6 2-faces (squares), 12 1-faces (edges) and 8 0-faces (vertices). As an exercise, work out which sequence of face maps in the underlying cube category leads leads to each of the possible 24 faces you can project. Honestly, the drawing of the $3$-cube isn't even _that_ enlightening, I just wanted to be fancy.
+A $3$-cube in X is a map $\aleph : \square^3 \to X$, which could be visualized as the proper _cube_ below, and has 6 2-faces (squares), 12 1-faces (edges) and 8 0-faces (vertices). As an exercise, work out which sequence of face maps in the underlying cube category leads leads to each of the possible 24 faces you can project. Honestly, the drawing of the $3$-cube isn't even _that_ enlightening, I just wanted to be fancy.
 Like, check out this absolute _flex_ of a diagram, it's god damn useless. Wow.
 As an a quick aside, can we talk about how god damn confusing this projection is? I can never tell whether I'm looking top-down at a truncated square pyramid ($\kappa$ is the top face) or if I'm looking _through_ a normal solid 3-cube whose front face is transparent ($\kappa$ is the back face).
-</div>
-<figure>
-<img alt="A diagrammatic representation of a particular cube in a cubical set. The diagram is incredibly busy and not very helpful." height=200px src="/diagrams/cubicalsets/acube.svg" />
-<figcaption>A _proper_ cube, finally!</figcaption>
-</figure>
-</div>
+<figure class="wraparound">
+<img alt="A diagrammatic representation of a particular cube in a cubical set. The diagram is incredibly busy and not very helpful." height=200px src="/diagrams/cubicalsets/acube.svg" />
+<figcaption>A _proper_ cube, finally!</figcaption>
+</figure>
 In case it's not clear (it's not clear, I know), the 2-cubes present in the 3-cube $\aleph$ -- yes, $\aleph$, that's how hard I'm running out of letters over here -- are these:


pages/posts/2021-09-03-parsing-layout.md (+884, -0)

@ -0,0 +1,884 @@
---
title: "Parsing Layout, or: Haskell's Syntax is a Mess"
date: September 3rd, 2021
abbreviations:
sparkles: ✨
---
Hello! Today we're going to talk about something I'm actually _good_ at, for a change: writing compilers. Specifically, I'm going to demonstrate how to wrangle [Alex] and [Happy] to implement a parser for a simple language with the same indentation sensitive parsing behaviour as Haskell, the _layout rule_.
[Alex]: https://www.haskell.org/alex/
[Happy]: https://www.haskell.org/happy/
Alex and Happy are incredibly important parts of the Haskell ecosystem. If you're a Haskeller, you use a program using an Alex lexer and a Happy parser _every single day_ - every single working day, at least - GHC! Despite this fundamental importance, Alex and Happy are... _sparsely_ documented, to say the least. Hopefully this post can serve as an example of how to do something non-trivial using them.
However! While I'm going to talk about Alex and Happy here, it would be entirely possible to write a layout parser using Alex and whatever flavour of Parsec is popular this week, as long as your combinators are expressed on top of a monad transformer. It's also entirely possible to write a layout parser without Alex at all, but that's beyond my abilities. I am a mere mortal, after all.
Get ready to read the word "layout" a lot. Layout layout layout. How's your semantic satiation going? Should I say layout a couple more times?
# The Offside Rule
So, how does Haskell layout work? A small subset of tokens (`where`, `of`, `let`, `do`[^1]), called _layout keywords_, are followed by a _laid out block_ (my terminology). The happiest (hah) case is where one of these keywords is followed by a `{` token. In this case, layout parsing doesn't happen at all!
[^1]: GHC extends this set to also contain the "token" `\case`. However, `LambdaCase` isn't a single token! The &sparkles; correct &sparkles; specification is that `case` is a layout keyword if the preceding token is `\`.
```{.haskell .notag}
main = do { putStrLn
"foo"
; putStrLn "bar"
; putStrLn "quux" }
```
This _abomination_ is perfectly valid Haskell code, since layout is disabled in a context that was started with a `{`. Great success though, since this is a very simple thing to support in a parser. The unhappy case is when we actually have to do layout parsing. In that case, the starting column of the token immediately following the layout token becomes the _reference column_ (again my terminology), we emit a (virtual) opening brace, and the **offside rule** applies.
The offside rule says that a player must have at least two opposing players, counting the goalkeep- No no, that's not right. Give me a second. Ah! Yes. The offside rule governs automatic insertion of (virtual) semicolons and closing braces. When we encounter the first token of a new line, we are burdened to compare its starting column with the reference:
- If it's on the same column as the reference column, we emit a semicolon. This is a new statement/declaration/case.
<div class=mathpar>
```haskell
do foo
   bar
-- ^ same column, insert ; before.
```
```haskell
do
   foo
   bar
-- ^ same column, insert ; before.
-- yes, three spaces
```
</div>
The two token streams above have the same prefix as `do { foo; bar }`{.haskell}.
- If it's further indented than the reference column, we.. do nothing! Just go back to normal lexing. Tokens indented to the right of the reference column are interpreted as continuing the statement in the previous line. That's why you can do this:
```haskell
do
 putStrLn $
   wavy
     function
       application
         please
       don't
     though
```
_All_ of those tokens are (in addition to being the first token in a line) indented further than `putStrLn`, which is our reference column. This block has no semicolons at all!
- If it's less indented than the reference column, we emit a virtual closing `}` (to end the block) and _**apply the rule again**_. This last bit is crucial: it says a single token can end all of the layout contexts it's leaving. For instance:
```haskell
foo = do a -- context 1
         do b -- context 2
            do c -- context 3
               do d -- context 4
                  e
bar = 123
```
Assuming there was a layout context at the first column, i.e., we're in a module, then the token `bar` will be responsible for closing 4 whole layout contexts:
- It's to the left of `d`, so it closes context 4;
- It's to the left of `c`, so it closes context 3;
- It's to the left of `b`, so it closes context 2;
- It's to the left of `a`, so it closes context 1.
With all the semicolons we have a right to, the code above is this:
``` haskell
; foo = do { a -- context 1
           ; do { b -- context 2
                ; do { c -- context 3
                     ; do { d -- context 4
                          ; e
                          }
                     }
                }
           }
; bar = 123
```
Why do we have semicolons before `foo` and `bar`? Why, because they're in the same column as the reference token, which was presumably an import or something.
# Laid-out blocks
With that, the parser productions for laid out blocks should be clear - or, at least, easily approximable. Right?
Wrong.
You might think the production for `do` blocks is something like the following, and you'd be forgiven for doing so. It's clean, it's reasonable, it's not _actually_ Happy syntax, but it's a close enough approximation. Except that it's way incorrect!
```
expr
  : ...
  | 'do' '{' statement ';' ... '}'        { ... }
  | 'do' VOpen statement VSemi ... VClose { ... }
```
Well, for `do` you might be able to get away with that. But consider the laid-out code on the left, and what the lexer naïvely produces for us on the right.
<div class=mathpar>
```haskell
foo = let x = 1 in x
```
```haskell
; foo = let { x = 1 in x
```
</div>
You see it, right? Since no token was on a column before that of the token `x` (the reference token for the layout context started by `let`), no close brace was emitted before `in`. Woe is us! However, the Haskell report has a way around this. They write it cryptically, like this:
>
```
...
L (t : ts) (m : ms) = } : (L (t : ts) ms) if m ≠ 0 and parse-error(t)
...
```
> The side condition `parse-error(t)` is to be interpreted as follows: if the tokens generated so far by `L` together with the next token `t` represent an invalid prefix of the Haskell grammar, and the tokens generated so far by `L` followed by the token `}` represent a valid prefix of the Haskell grammar, then `parse-error(t)` is true.
>
> The test `m ≠ 0` checks that an implicitly-added closing brace would match an implicit open brace.
I'll translate, since I'm fluent in standardese: Parse errors are allowed to terminate layout blocks, as long as no explicit `{` was given. This is the entire reason that Happy has an `error` token, which "matches parse errors"! For further reference, `L` is a function `[Token] -> [Int] -> [Token]`{.haskell} which is responsible for inserting virtual `{`, `;` and `}` tokens. The `[Int]`{.haskell} argument is the stack of reference columns.
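If it helps to see that algorithm as something other than standardese, here's a rough Haskell rendering of the report's `L`, with the `parse-error(t)` case and explicit braces left out - exactly the parts a standalone function _can't_ express. The `Marked` type is my own invention for this sketch, standing in for the report's `<n>` and `{n}` annotations:

```haskell
-- A sketch of the report's layout algorithm. 'Angle n' stands for <n>
-- (first token of a line, at column n), 'Brace n' for {n} (first token
-- after a layout keyword, at column n). It also skips the report's check
-- that a new block must be indented further than the enclosing one.
data Marked = Angle Int Token | Brace Int Token | Plain Token

offside :: [Marked] -> [Int] -> [Token]
offside (Angle n t : ts) (m : ms)
  | n == m = TkVSemi  : offside (Plain t : ts) (m : ms) -- same column: new item
  | n <  m = TkVClose : offside (Angle n t : ts) ms     -- offside: close, retry
offside (Angle _ t : ts) ms = offside (Plain t : ts) ms -- indented further: carry on
offside (Brace n t : ts) ms = TkVOpen : offside (Plain t : ts) (n : ms)
offside (Plain t   : ts) ms = t : offside ts ms
offside []               ms = map (const TkVClose) ms   -- close whatever's left
```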
So a better approximation of the grammar is:
```
expr
  : ...
  | 'do' '{' statement ';' ... '}'        { ... }
  | 'do' VOpen statement VSemi ... LClose { ... }

LClose
  : VClose {- lexer inserted '}' -}
  | error  {- parse error generated '}' -}
```
We have unfortunately introduced some dragons, since the parser now needs to finesse the lexer state, meaning they must be interleaved _explicitly_, instead of implicitly (using a lazy list of tokens or similar). They must be in the same Monad.
So. How do we implement this?
# How we implement this
## Preliminaries
To start with, we create a new Haskell project. I'd normally gloss over this, but in this case, there are adjustments to the Cabal file that must be made to inform our build of the dependencies on `alex` and `happy`. I use Stack; you can use whatever.
```bash
% stack new layout simple
```
To our Cabal file, we add a `build-tool-depends` on Alex and Happy. Cabal (the build system) comes with built-in rules to detect `.x` and `.y` files and compile these as Ale**x** and Happ**y** respectively.
```{.haskell tag="layout.cabal"}
build-tool-depends: alex:alex >= 3.2.4 && < 4.0
, happy:happy >= 1.19.12 && < 2.0
build-depends: base >= 4.7 && < 5
, array >= 0.5 && < 0.6
```
This has been the recommended way of depending on build tools since Cabal 2. The syntax of build-tool-depends entries is `package:executable [version bound]`, where the version bound is optional but good style. With this, running `stack build` (and/or `cabal build`) will automatically compile parser and lexer specifications **listed in your `other-modules` field** to Haskell files.
Alex-generated code depends on the `array` package.
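For concreteness, here's roughly the shape the final executable stanza takes once every module from this post exists. The module names are the ones we're about to create; the rest is a sketch, so adapt it to your own project:

```{.haskell tag="layout.cabal"}
executable layout
  hs-source-dirs:     src
  main-is:            Main.hs
  other-modules:      Lexer
                    , Lexer.Support
                    , Parser
                    , Syntax
  build-tool-depends: alex:alex >= 3.2.4 && < 4.0
                    , happy:happy >= 1.19.12 && < 2.0
  build-depends:      base >= 4.7 && < 5
                    , array >= 0.5 && < 0.6
                    , mtl >= 2.2 && < 2.3
  default-language:   Haskell2010
```

The `mtl` dependency isn't mentioned above, but the `Lexer.Support` module we write below needs it for `StateT` and `MonadError`.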
## What are we parsing
For the language we're parsing, I've chosen to go with a representative subset of Haskell's grammar: Variables, lambda expressions, `let` expressions, and application. For the top-level, we'll support function definitions, where the lhs must be a sequence of variables, and the rhs can optionally have a `where` clause.
```{ .haskell tag="src/Syntax.hs" }
module Syntax (Expr(..), Decl(..), Program) where

data Expr
  = Var String
  | App Expr Expr
  | Lam String Expr
  | Let [Decl] Expr
  deriving (Eq, Show)

data Decl
  = Decl { declName  :: String
         , declRhs   :: Expr
         , declWhere :: Maybe [Decl]
         }
  deriving (Eq, Show)

type Program = [Decl]
```
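To get a feel for the representation, here's a small definition written out as a `Syntax` value by hand (this isn't produced by anything we've built yet):

```haskell
-- foo = \x -> f x x, as a value of our AST:
example :: Decl
example = Decl
  { declName  = "foo"
  , declRhs   = Lam "x" (App (App (Var "f") (Var "x")) (Var "x"))
  , declWhere = Nothing
  }
```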
For simplicity, identifiers will be ASCII only. We're also using strings and lists everywhere, instead of more appropriate data structures (`Text` and `Seq`), for clarity. Don't forget to add the `Syntax` module to the `other-modules` field in `layout.cabal`.
## The Lexer
Before we can parse, we must lex. But before we can lex, we must know the type of tokens. We create a separate Haskell module to contain the definition of the token type and `Lexer` monad. This is mostly done because HIE does not support Alex and Happy, and I've become dependent on HIE for writing correct code fast.
We'll call this new module `Lexer.Support`, just because. Our type of tokens must contain our keywords, but also punctuation (`=`, `{`, `;`, `}`, `\\`, `->`) and _virtual_ punctuation (tokens inserted by layout). We declare:
```{.haskell tag="src/Lexer/Support.hs"}
module Lexer.Support where
data Token
  = TkIdent String -- identifiers

  -- Keywords
  | TkLet | TkIn | TkWhere

  -- Punctuation
  | TkEqual | TkOpen | TkSemi | TkClose
  | TkLParen | TkRParen
  | TkBackslash | TkArrow

  -- Layout punctuation
  | TkVOpen | TkVSemi | TkVClose

  -- End of file
  | TkEOF
  deriving (Eq, Show)
```
### An Alex file
Alex modules always start with a Haskell header, between braces. In general, braces in Alex code represent a bit of Haskell we're inserting: The header, lexer actions, and the footer.
```{.alex tag="src/Lexer.x"}
{
module Lexer where
import Lexer.Support
}
%encoding "latin1"
```
After the header, we can also include magical incantations: `%wrapper` will tell Alex to include a support code template with our lexer, and `%encoding` will tell it whether to work with bytes or with Unicode. _Nobody uses the Unicode support_, not even GHC: The community wisdom is to trick Alex into reading Unicode by compressing Unicode classes down into high byte characters. Yeah, **yikes**.
Our file can then have some macro definitions. Macros with the `$` sigil are character classes, and `@` macros are complete regular expressions.
```{.alex tag="src/Lexer.x"}
$lower = [ a-z ]
$upper = [ A-Z ]
@ident = $lower [ $lower $upper _ ' ]*
```
And, finally, comes the actual lexer specification. We include the final magic word `:-` on a line by itself, and then list a bunch of lexing rules. Lexing rules are specified by:
- A _startcode_, which names a _state_. These are written `<ident>` or `<0>`, where `<0>` is taken to be the "default" startcode. Rules are by default enabled in all states, and can be enabled in many;
- A _left context_, which is a regular expression matched against the character immediately preceding the token;
- A _regular expression_, describing the actual token;
- A _right context_, which can be a regular expression to be matched after the token or a fragment of Haskell code, called a _predicate_. If the predicate is present, it must have the following type:
```{.haskell .notag}
{ ... } :: user       -- predicate state
        -> AlexInput  -- input stream before the token
        -> Int        -- length of the token
        -> AlexInput  -- input stream after the token
        -> Bool       -- True <=> accept the token
```
- An _action_, which can be `;`, causing the lexer to skip the token, or some Haskell code, which can be any expression, as long as every action has the same type.
Here's a couple rules so we can get started. Don't worry - `emit` is a secret tool that will help us later.
```{.alex tag="src/Lexer.x"}
:-
[\ \t]+ ;
<0> @ident { emit TkIdent }
```
Alright, let's compile this code and see what we get! Oh, we get some type errors. Okay. Let's see what's up:
```
Not in scope: type constructor or class ‘AlexInput’
|
264 | | AlexLastSkip !AlexInput !Int
| ^^^^^^^^^
```
### Making our own wrapper
Right. That's probably related to that `%wrapper` thing I told you about. You'd be correct: The wrappers solve this problem by including a handful of common patterns pre-made, but we can very well supply our own! The interface to an Alex-generated lexer is documented [here](https://www.haskell.org/alex/doc/html/api.html), but we're interested in §5.1 specifically. We have to provide the following definitions:
```{.haskell .notag}
type AlexInput
alexGetByte :: AlexInput -> Maybe (Word8, AlexInput)
alexInputPrevChar :: AlexInput -> Char
```
And we get in return a lexing function, whose type and interface I'm not going to copy-paste here. The `alexGetByte` function is called by the lexer whenever it wants input, so that's the natural place to do position handling, which, yes, we have to do ourselves. Let's fill in these definitions in the `Lexer.Support` module.
Here's an okay choice for `AlexInput`:
```{.haskell tag="src/Lexer/Support.hs"}
data AlexInput
  = Input { inpLine   :: {-# UNPACK #-} !Int
          , inpColumn :: {-# UNPACK #-} !Int
          , inpLast   :: {-# UNPACK #-} !Char
          , inpStream :: String
          }
  deriving (Eq, Show)
```
We can immediately take `alexInputPrevChar = inpLast` as the definition of that function and be done with it, which is fantastic. `alexGetByte`, on the other hand, is a bit more involved, since it needs to update the position based on what character was read. The column _must_ be set properly, otherwise layout won't work! The line counter is less important, though.
```haskell
alexGetByte :: AlexInput -> Maybe (Word8, AlexInput)
alexGetByte inp@Input{inpStream = str} = advance <$> uncons str where
  advance ('\n', rest) =
    ( fromIntegral (ord '\n')
    , Input { inpLine   = inpLine inp + 1
            , inpColumn = 1
            , inpLast   = '\n'
            , inpStream = rest }
    )

  advance (c, rest) =
    ( fromIntegral (ord c)
    , Input { inpLine   = inpLine inp
            , inpColumn = inpColumn inp + 1
            , inpLast   = c
            , inpStream = rest }
    )
```
Now, our lexer has a lot of state. We have the start codes, which form a stack. We have the stack of reference columns, and we have the input. Let's use a State monad to keep track of this, with an `Either String` base to keep track of errors.
```{.haskell tag="src/Lexer/Support.hs"}
newtype Lexer a = Lexer { _getLexer :: StateT LexerState (Either String) a }
  deriving
    ( Functor
    , Applicative
    , Monad
    , MonadState LexerState
    , MonadError String
    )

data Layout = ExplicitLayout | LayoutColumn Int
  deriving (Eq, Show, Ord)

data LexerState
  = LS { lexerInput      :: {-# UNPACK #-} !AlexInput
       , lexerStartCodes :: {-# UNPACK #-} !(NonEmpty Int)
       , lexerLayout     :: [Layout]
       }
  deriving (Eq, Show)

initState :: String -> LexerState
initState str = LS { lexerInput      = Input 0 1 '\n' str
                   , lexerStartCodes = 0 :| []
                   , lexerLayout     = []
                   }

runLexer :: Lexer a -> String -> Either String a
runLexer act s = fst <$> runStateT (_getLexer act) (initState s)
```
<details>
<summary> I'll spare you the boring stack manipulation stuff by putting it in one of these \<details\> elements you can expand: </summary>
```haskell
startCode :: Lexer Int
startCode = gets (NE.head . lexerStartCodes)

pushStartCode :: Int -> Lexer ()
pushStartCode i = modify' $ \st ->
  st { lexerStartCodes = NE.cons i (lexerStartCodes st) }

-- If there is no start code to go back to, we go back to the 0 start code.
popStartCode :: Lexer ()
popStartCode = modify' $ \st ->
  st { lexerStartCodes =
         case lexerStartCodes st of
           _ :| []     -> 0 :| []
           _ :| (x:xs) -> x :| xs
     }

layout :: Lexer (Maybe Layout)
layout = gets (fmap fst . uncons . lexerLayout)

pushLayout :: Layout -> Lexer ()
pushLayout i = modify' $ \st ->
  st { lexerLayout = i : lexerLayout st }

popLayout :: Lexer ()
popLayout = modify' $ \st ->
  st { lexerLayout =
         case lexerLayout st of
           _:xs -> xs
           []   -> []
     }
```
</details>
### Putting it all together
It's up to us to specify what an action is - remember, the action is the code block following a lexer rule - so we'll go with `String -> Lexer Token`. The `String` argument is the lexed token, and we'll have to take this slice ourselves when we implement the interface between the Alex lexer and our `Lexer` monad. The `emit` action is simple, and we'll throw in `token` for no extra cost:
```haskell
emit :: (String -> Token) -> String -> Lexer Token
emit = (pure .)
token :: Token -> String -> Lexer Token
token = const . pure
```
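If the point-free style reads like line noise, the two definitions expand to:

```haskell
-- emit applies the constructor to the lexed text; token ignores the text.
emit constructor text = pure (constructor text)
token tok _ = pure tok
```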
Back to our `Lexer.x`, we have to write the function to interpret Alex lexer results as `Lexer` monad actions. It goes like this:
```{.haskell tag="src/Lexer.x, add at the bottom" }
{
handleEOF = do
  -- TODO: handle layout
  pure TkEOF

scan :: Lexer Token
scan = do
  input@(Input _ _ _ string) <- gets lexerInput
  startcode <- startCode
  case alexScan input startcode of
    AlexEOF -> handleEOF
    AlexError (Input _ _ _ inp) ->
      throwError $ "Lexical error: " ++ show (head inp)
    AlexSkip input' _ -> do
      modify' $ \s -> s { lexerInput = input' }
      scan
    AlexToken input' tokl action -> do
      modify' $ \s -> s { lexerInput = input' }
      action (take tokl string)
}
```
Now we can do a `stack build` to compile the lexer and `stack repl` to play around with it!
```{.haskell tag="Did you know my Myers-Briggs type is GHCI?"}
λ runLexer scan "abc"
Right (TkIdent "abc")
λ runLexer scan " abc"
Right (TkIdent "abc")
λ runLexer scan " {"
Left "Lexical error: '{'"
```
Okay, yeah, let's fill out our lexer a bit more.
```{.alex tag="src/Lexer.x, lexing rules"}
<0> in { token TkIn }
<0> \\ { token TkBackslash }
<0> "->" { token TkArrow }
<0> \= { token TkEqual }
<0> \( { token TkLParen }
<0> \) { token TkRParen }
<0> \{ { token TkOpen }
<0> \} { token TkClose }
```
That's all of the easy rules we can do - all of the others interact with the layout state, which we'll see how to do in the paragraph immediately following this one. I'm writing a bit of padding here so you can take a breather and prepare yourself for the lexer states that we'll deal with now. But, please believe me when I say we're doing this lexer madness so our parser can be sane.
### Actually Doing Layout (trademark pending)
We'll need two rules for the layout keywords. Alex rules are matched in order, top-to-bottom, so **make sure your keywords are before your identifier rule**.
```{.alex tag="src/Lexer.x"}
<0> let { layoutKw TkLet }
<0> where { layoutKw TkWhere }
```
And the action for layout keywords, which has to go in the lexer since it'll refer to a startcode. Alex automatically generates definitions for all the startcodes we mention.
```haskell
layoutKw t _ = do
  pushStartCode layout
  pure t
```
The interesting rules for handling layout are in the `layout` startcode, which we'll declare as a block to keep things a bit tidier. When in this startcode, we need to handle either an explicitly laid-out block (that is, `{`), or the start of a layout context: The indentation of the next token determines where we start.
```{.alex tag="src/Lexer.x"}
<layout> {
  -- Skip comments and whitespace
  "--" .* \n ;
  \n ;

  \{ { openBrace }
  () { startLayout }
}
```
The `openBrace` and `startLayout` lexer actions are also simple:
```haskell
openBrace _ = do
  popStartCode
  pushLayout ExplicitLayout
  pure TkOpen

startLayout _ = do
  popStartCode
  reference <- Lexer.Support.layout
  col       <- gets (inpColumn . lexerInput)
  if Just (LayoutColumn col) <= reference
     then pushStartCode empty_layout
     else pushLayout (LayoutColumn col)
  pure TkVOpen
```
Here's another rule: suppose we have:
```haskell
foo = bar where
spam = ham
```
If we just apply the rule that the next token after a layout keyword determines the column for the layout context, then we're starting another layout context at column 1! That's definitely not what we want.
The fix: a new layout context only starts if the first token is to the right of the previous layout context. That is: a block only starts if its first token is indented strictly further than the enclosing block's reference column.
But! We still need to emit a closing `}` for the one that `openBrace` generated! This is the sole function of the `empty_layout` startcode:
<div class=mathpar>
```
<empty_layout> () { emptyLayout }
```
```haskell
emptyLayout _ = do
  popStartCode
  pushStartCode newline
  pure TkVClose
```
</div>
We're on the home stretch. I mentioned another startcode - `newline`. It's where we do the offside rule, and our lexer will finally be complete.
### The Offside Rule, again
The `newline` state is entered in two places: After an empty layout block (as a short-circuit), and after, well, a new line character. Comments also count as newline characters, by the way.
```{.alex tag="src/Lexer.x, rule"}
<0> "--" .* \n { \_ -> pushStartCode newline *> scan }
<0> \n { \_ -> pushStartCode newline *> scan }
```
In the `newline` state, we again scan for a token, and call for an action, just like for `layout`. The difference is only in the action: Whenever _any_ token is encountered, we perform the offside rule, _if_ we're in a layout context that mandates it.
```{.alex tag="src/Lexer.x, rule"}
<newline> {
  \n ;
  "--" .* \n ;

  () { offsideRule }
}
```
The code for the offside rule is a bit hairy, but follows from the spec:
```{.haskell tag="src/Lexer.x, epilogue code"}
offsideRule _ = do
  context <- Lexer.Support.layout
  col <- gets (inpColumn . lexerInput)

  let continue = popStartCode *> scan

  case context of
    Just (LayoutColumn col') -> do
      case col `compare` col' of
        EQ -> do
          popStartCode
          pure TkVSemi
        GT -> continue
        LT -> do
          popLayout
          pure TkVClose
    _ -> continue
```
Check out how cleanly those three cases map to the rules I described [way back when](#h0). We `compare`{.haskell} the current column with the reference, and:
- If it's `EQ`, add a semicolon.
- If it's `GT`, continue lexing.
- If it's `LT`, close as many layout contexts as possible.
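To double-check the mental model, here's a tiny input and the token stream I'd expect our lexer to produce for it. Fair warning: I traced this by hand against the rules above rather than pasting it from a GHCi session, so treat it as a worked example, not gospel:

<div class=mathpar>

```haskell
foo = bar where
  spam = ham
eggs = beans
```

```haskell
TkIdent "foo"
TkEqual
TkIdent "bar"
TkWhere
TkVOpen
TkIdent "spam"
TkEqual
TkIdent "ham"
TkVClose
TkIdent "eggs"
TkEqual
TkIdent "beans"
```

</div>

The `where` opens a block whose reference column is `spam`'s, and `eggs`, being to the left of that reference column, closes the block again before ordinary lexing resumes.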
<details>
<summary>
**Exercise**: In the `handleEOF` action, close all the pending layout contexts. As a hint, the easiest way to emit a token that doesn't consume any input is using a startcode and a lexer action. Figuring out when we've run out of layout contexts is part of the challenge :)
</summary>
The rule:
```{.alex tag="src/Lexer.x, rule"}
<eof> () { doEOF }
```
The action:
```{.haskell tag="src/Lexer.x, epilogue code"}
handleEOF = pushStartCode eof *> scan

doEOF _ = do
  t <- Lexer.Support.layout
  case t of
    Nothing -> do
      popStartCode
      pure TkEOF
    _ -> do
      popLayout
      pure TkVClose
```
</details>
We can write a `Lexer` action (not a lexer action!) to lex and `Debug.Trace.trace`{.haskell} - sue me - as many tokens as the lexer wants to give us, until an EOF is reached:
```haskell
lexAll :: Lexer ()
lexAll = do
  tok <- scan
  case tok of
    TkEOF -> pure ()
    x -> do
      traceM (show x)
      lexAll
```
Now we can actually lex some Haskell code! Well, not much of it. Forget numbers, strings, and most keywords, but we _can_ lex this:
<div class="mathpar">
```haskell
foo = let
  x = let
    y = z
    in y
 in x
```
```haskell
TkIdent "foo"
TkEqual
TkLet
TkVOpen
TkIdent "x"
TkEqual
TkLet
TkVOpen
TkIdent "y"
TkEqual
TkIdent "z"
TkVSemi
TkIn
TkIdent "y"
TkVClose
TkVClose
TkIn
TkIdent "x"
```
</div>
That is, that code is lexed as if it had been written:
```{.haskell tag="Hmm..."}
foo = let {
  x = let {
    y = z
    ; in y
  }} in x
```
That's... Yeah. Hmm. That's _not right_. What are we forgetting? Ah, who am I kidding, you've guessed this bit. I even said it myself!
> Parse errors are allowed to terminate layout blocks.
We don't have a parser to get errors from, so our layout blocks are terminating too late. Let's write a parser!
## The Parser
Happy is, fortunately, less picky about how to generate code. Instead of appealing to some magic symbols that it just hopes really hard are in scope, Happy asks us how we want it to interface with the lexer. We'll do it &sparkles; Monadically &sparkles;, of course.
Happy files start the same way as Alex files: A Haskell code block, between braces, and some magic words. You can look up what the magic words do in the documentation, or you can guess - I'm just gonna include all the header here:
```{.happy tag="src/Parser.y"}
{
module Parser where
import Control.Monad.Except
import Lexer.Support
}
%name parseExpr Expr
%tokentype { Token }
%monad { Lexer }
%lexer { lexer } { TkEOF }
%errorhandlertype explist
%error { parseError }
```
After these magic incantations (by the way, if you can't find the docs for errorhandlertype, that's because the docs you're looking at are out of date. See [here](https://monlih.github.io/happy-docs/)), we list our tokens in the `%token` directive. In the braces we write Haskell - not an expression, but a pattern.
```{.happy tag="src/Parser.y, after the directives"}
%token
  VAR     { TkIdent $$ }
  'let'   { TkLet }
  'in'    { TkIn }
  'where' { TkWhere }
  '='     { TkEqual }
  '{'     { TkOpen }
  ';'     { TkSemi }
  '}'     { TkClose }
  '\\'    { TkBackslash }
  '->'    { TkArrow }
  '('     { TkLParen }
  ')'     { TkRParen }
  OPEN    { TkVOpen }
  SEMI    { TkVSemi }
  CLOSE   { TkVClose }
%%
```
The special `$$` pattern says that if we use a `VAR` token in a production, its value should be the string contained in the token, rather than the token itself. We write productions after the `%%`, and they have this general syntax:
```happy
Production :: { Type }
  : rule1 { code1 }
  | rule2 { code2 }
  | ...
```
For starters, we have these productions. You can see that in the code associated with a rule, we can refer to the tokens parsed using `$1, $2, $3, ...`. Note that `FuncExpr` is deliberately _left_-recursive: Happy generates LR parsers, which handle left recursion in constant stack space, and it makes application associate the way it should - to the left.
```{.happy tag="src/Parser.y, after the %%"}
Atom :: { Expr }
  : VAR          { Var $1 }
  | '(' Expr ')' { $2 }

Expr :: { Expr }
  : '\\' VAR '->' Expr { Lam $2 $4 }
  | FuncExpr           { $1 }

FuncExpr :: { Expr }
  : FuncExpr Atom { App $1 $2 }
  | Atom          { $1 }
```
In the epilogue, we need to define two functions, since I mentioned them way up there in the directives. The `lexer` function is a continuation-passing style function that needs to call `cont` with the next token from the lexer. The `parseError` function is how we should deal with parser errors.
```{.happy tag="src/Parser.y, on the very bottom"}
{
lexer cont = scan >>= cont
parseError = throwError . show
}
```
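For reference, given our directives (`%monad { Lexer }`, `%lexer`, `%errorhandlertype explist`), the types Happy expects these two functions to have work out to approximately this - approximately, because Happy generates the concrete types itself:

```haskell
-- The continuation is called with each token as the parser demands it;
-- with explist, a parse error also carries the expected token names.
lexer      :: (Token -> Lexer a) -> Lexer a
parseError :: (Token, [String]) -> Lexer a
```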
By using the `%name` directive we can export a parser production as an action in the `Lexer` monad (since that's what we told Happy to use). Combining that with our `runLexer`, we can parse some expressions, yay!
```haskell
λ runLexer parseExpr "(\\x -> x) (\\y -> y)"
Right (App (Lam "x" (Var "x")) (Lam "y" (Var "y")))
```
### Laid-out productions
Now we'll introduce some productions for parsing laid-out lists of declarations, then we'll circle back and finish with the parser for declarations itself.
```{.happy tag="src/Parser.y, add wherever"}
DeclBlock :: { [Decl] }
  : '{' DeclListSemi '}'    { $2 }
  | OPEN DeclListSEMI Close { $2 }

DeclListSemi :: { [Decl] }
  : Decl ';' DeclListSemi { $1:$3 }
  | Decl                  { [$1] }
  | {- empty -}           { [] }

DeclListSEMI :: { [Decl] }
  : Decl SEMI DeclListSEMI { $1:$3 }
  | Decl                   { [$1] }
  | {- empty -}            { [] }
```
That is, a block of declarations is either surrounded by `{ ... }` or by `OPEN ... Close`. But what's `Close`? That's right, you've guessed this bit too:
```{.happy tag="src/Parser.y, add wherever"}
Close
  : CLOSE { () }
  | error {% popLayout }
```
Say it louder for the folks in the cheap seats - Parse! Errors! Can! End! Layout! Blocks! Isn't that just magical? (The `{% ... }` braces are Happy's syntax for a monadic action: when a parse error stands in for a virtual `}`, we still have to pop the now-closed context off the lexer's layout stack, hence `popLayout`.)
Now we can write a production for `let` (in `Expr`):
```{.happy tag="src/Parser.y, add to Expr"}
| 'let' DeclBlock 'in' Expr { Let $2 $4 }
```
And one for declarations:
```{.happy tag="src/Parser.y, add wherever"}
Decl
  : VAR '=' Expr                   { Decl $1 $3 Nothing }
  | VAR '=' Expr 'where' DeclBlock { Decl $1 $3 (Just $5) }
```
Add a name directive for `Decl` and..
```{.happy tag="src/Parser.y, add to the directives"}
%name parseDecl Decl
```
We're done!
# No, seriously, that's it.
Yeah, 3000 words is all it takes to implement a parser for Haskell layout. Running this on the example where the lexer dropped the ball from earlier, we can see that the parser has correctly inserted all the missing `}`s in the right place because of the `Close` production, and the AST we get is what we expect:
```haskell
λ runLexer parseDecl <$> readFile "that-code-from-before.hs"
Right
  (Decl { declName = "foo"
        , declRhs =
            Let [ Decl { declName = "x"
                       , declRhs =
                           Let
                             [ Decl { declName = "y", declRhs = Var "z"
                                    , declWhere = Nothing } ]
                             (Var "y")
                       , declWhere = Nothing
                       }
                ]
                (Var "x")
        , declWhere = Nothing
        })
```
I've thrown the code from this post up in an organised manner on [my Gitea](https://git.abby.how/abby/layout-parser/). The lexer worked out to be 130 lines, and the parser just 81.
Here's why I favour this approach:
- It's maintainable. Apart from the rendezvous in `Close`, the lexer and the parser are completely independent. They're also entirely declarative: reading the lexer rules tells you exactly what the lexer does, without having to drop down to how the actions are implemented.
- It cleanly extends to supporting ASTs with annotations: you'd change our current `Token`{.haskell} type to a `TokenClass`{.haskell} type, and a `Token` would then bundle a `TokenClass` with the line and column taken from the lexer state. Annotating the AST with these positions can be done by projecting from `$N` in the Happy rules.
- It's performant. Obviously the implementation here, using `String`, is not, but by changing how the `AlexInput` type behaves internally, we can optimise by using e.g. a lazy ByteString, a lazy Text, or some other kind of crazy performant stream type (a sketch follows this list). I don't think anyone's ever complained about parsing being their bottleneck with GHC.
- It's popular! The code implemented here is a simplification (wild simplification) of the approach used in GHC and Agda.
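For that performance point, here's a rough sketch of what swapping the stream type looks like, assuming Alex's hand-written-wrapper convention of supplying your own `AlexInput`, `alexGetByte` and `alexInputPrevChar`. This one reads lazy `Text`, and cheats by projecting characters down to single bytes rather than doing real UTF-8:
```haskell
-- Sketch only: a lazy-Text-backed AlexInput. A real conversion would
-- emit proper UTF-8 bytes instead of this Latin-1-ish projection.
import           Data.Char      (ord)
import           Data.Word      (Word8)
import qualified Data.Text.Lazy as L

data AlexInput = AlexInput
  { inputPrev   :: !Char   -- previous character, for left-context patterns
  , inputStream :: L.Text  -- the rest of the input
  }

alexGetByte :: AlexInput -> Maybe (Word8, AlexInput)
alexGetByte (AlexInput _ text) = case L.uncons text of
  Nothing        -> Nothing
  Just (c, rest) -> Just (fromIntegral (ord c), AlexInput c rest)

alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar = inputPrev
```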
Thank you for reading this post. I have no idea what I'm going to write about next!

+ 232
- 0
rubtmpjd3t_2bh.log View File

@ -0,0 +1,232 @@
This is pdfTeX, Version 3.141592653-2.6-1.40.22 (TeX Live 2021/Arch Linux) (preloaded format=pdflatex 2021.5.25) 26 AUG 2021 19:58
entering extended mode
restricted \write18 enabled.
%&-line parsing enabled.
**\nonstopmode \input{/home/me/Sync/Projects/blag/rubtmpjd3t_2bh.tex}
(/home/me/Sync/Projects/blag/rubtmpjd3t_2bh.tex
(/usr/share/texmf-dist/tex/latex/base/article.cls
Document Class: article 2020/04/10 v1.4m Standard LaTeX document class
(/usr/share/texmf-dist/tex/latex/base/size10.clo
File: size10.clo 2020/04/10 v1.4m Standard LaTeX file (size option)
)
\c@part=\count179
\c@section=\count180
\c@subsection=\count181
\c@subsubsection=\count182
\c@paragraph=\count183
\c@subparagraph=\count184
\c@figure=\count185
\c@table=\count186
\abovecaptionskip=\skip47
\belowcaptionskip=\skip48
\bibindent=\dimen138
)
(/usr/share/texmf-dist/tex/latex/preview/preview.sty
Package: preview 2017/04/24 12.3 (AUCTeX/preview-latex)
(/usr/share/texmf-dist/tex/generic/luatex85/luatex85.sty
Package: luatex85 2016/06/15 v1.4 pdftex aliases for luatex
)
(/usr/share/texmf-dist/tex/latex/preview/prtightpage.def
\PreviewBorder=\dimen139
)
\pr@snippet=\count187
\pr@box=\box47
\pr@output=\toks15
)
(/usr/share/texmf-dist/tex/latex/amsmath/amsmath.sty
Package: amsmath 2020/09/23 v2.17i AMS math features
\@mathmargin=\skip49
For additional information on amsmath, use the `?' option.
(/usr/share/texmf-dist/tex/latex/amsmath/amstext.sty
Package: amstext 2000/06/29 v2.01 AMS text
(/usr/share/texmf-dist/tex/latex/amsmath/amsgen.sty
File: amsgen.sty 1999/11/30 v2.0 generic functions
\@emptytoks=\toks16
\ex@=\dimen140
))
(/usr/share/texmf-dist/tex/latex/amsmath/amsbsy.sty
Package: amsbsy 1999/11/29 v1.2d Bold Symbols
\pmbraise@=\dimen141
)
(/usr/share/texmf-dist/tex/latex/amsmath/amsopn.sty
Package: amsopn 2016/03/08 v2.02 operator names
)
\inf@bad=\count188
LaTeX Info: Redefining \frac on input line 234.
\uproot@=\count189
\leftroot@=\count190
LaTeX Info: Redefining \overline on input line 399.
\classnum@=\count191
\DOTSCASE@=\count192
LaTeX Info: Redefining \ldots on input line 496.
LaTeX Info: Redefining \dots on input line 499.
LaTeX Info: Redefining \cdots on input line 620.
\Mathstrutbox@=\box48
\strutbox@=\box49
\big@size=\dimen142
LaTeX Font Info: Redeclaring font encoding OML on input line 743.
LaTeX Font Info: Redeclaring font encoding OMS on input line 744.
\macc@depth=\count193
\c@MaxMatrixCols=\count194
\dotsspace@=\muskip16
\c@parentequation=\count195
\dspbrk@lvl=\count196
\tag@help=\toks17
\row@=\count197
\column@=\count198
\maxfields@=\count199
\andhelp@=\toks18
\eqnshift@=\dimen143
\alignsep@=\dimen144
\tagshift@=\dimen145
\tagwidth@=\dimen146
\totwidth@=\dimen147
\lineht@=\dimen148
\@envbody=\toks19
\multlinegap=\skip50
\multlinetaggap=\skip51
\mathdisplay@stack=\toks20
LaTeX Info: Redefining \[ on input line 2923.
LaTeX Info: Redefining \] on input line 2924.
)
(/usr/share/texmf-dist/tex/latex/amsfonts/amssymb.sty
Package: amssymb 2013/01/14 v3.01 AMS font symbols
(/usr/share/texmf-dist/tex/latex/amsfonts/amsfonts.sty
Package: amsfonts 2013/01/14 v3.01 Basic AMSFonts support
\symAMSa=\mathgroup4
\symAMSb=\mathgroup5
LaTeX Font Info: Redeclaring math symbol \hbar on input line 98.
LaTeX Font Info: Overwriting math alphabet `\mathfrak' in version `bold'
(Font) U/euf/m/n --> U/euf/b/n on input line 106.
))
(/usr/share/texmf-dist/tex/latex/jknapltx/mathrsfs.sty
Package: mathrsfs 1996/01/01 Math RSFS package v1.0 (jk)
\symrsfs=\mathgroup6
)
(/usr/share/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty
(/usr/share/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty
(/usr/share/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty
(/usr/share/texmf-dist/tex/generic/pgf/utilities/pgfutil-common.tex
\pgfutil@everybye=\toks21
\pgfutil@tempdima=\dimen149
\pgfutil@tempdimb=\dimen150
(/usr/share/texmf-dist/tex/generic/pgf/utilities/pgfutil-common-lists.tex))
(/usr/share/texmf-dist/tex/generic/pgf/utilities/pgfutil-latex.def
\pgfutil@abb=\box50
)
(/usr/share/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex
(/usr/share/texmf-dist/tex/generic/pgf/pgf.revision.tex)
Package: pgfrcs 2020/12/27 v3.1.8b (3.1.8b)
))
Package: pgf 2020/12/27 v3.1.8b (3.1.8b)
(/usr/share/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty
(/usr/share/texmf-dist/tex/latex/graphics/graphicx.sty
Package: graphicx 2020/09/09 v1.2b Enhanced LaTeX Graphics (DPC,SPQR)
(/usr/share/texmf-dist/tex/latex/graphics/keyval.sty
Package: keyval 2014/10/28 v1.15 key=value parser (DPC)
\KV@toks@=\toks22
)
(/usr/share/texmf-dist/tex/latex/graphics/graphics.sty
Package: graphics 2020/08/30 v1.4c Standard LaTeX Graphics (DPC,SPQR)
(/usr/share/texmf-dist/tex/latex/graphics/trig.sty
Package: trig 2016/01/03 v1.10 sin cos tan (DPC)
)
(/usr/share/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
File: graphics.cfg 2016/06/04 v1.11 sample graphics configuration
)
Package graphics Info: Driver file: pdftex.def on input line 105.
(/usr/share/texmf-dist/tex/latex/graphics-def/pdftex.def
File: pdftex.def 2020/10/05 v1.2a Graphics/color driver for pdftex
))
\Gin@req@height=\dimen151
\Gin@req@width=\dimen152
)
(/usr/share/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty
(/usr/share/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex
Package: pgfsys 2020/12/27 v3.1.8b (3.1.8b)
(/usr/share/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex
\pgfkeys@pathtoks=\toks23
\pgfkeys@temptoks=\toks24
(/usr/share/texmf-dist/tex/generic/pgf/utilities/pgfkeysfiltered.code.tex
\pgfkeys@tmptoks=\toks25
))
\pgf@x=\dimen153
\pgf@y=\dimen154
\pgf@xa=\dimen155
\pgf@ya=\dimen156
\pgf@xb=\dimen157
\pgf@yb=\dimen158
\pgf@xc=\dimen159
\pgf@yc=\dimen160
\pgf@xd=\dimen161
\pgf@yd=\dimen162
\w@pgf@writea=\write3
\r@pgf@reada=\read2
\c@pgf@counta=\count266
\c@pgf@countb=\count267
\c@pgf@countc=\count268
\c@pgf@countd=\count269
\t@pgf@toka=\toks26
\t@pgf@tokb=\toks27
\t@pgf@tokc=\toks28
\pgf@sys@id@count=\count270
(/usr/share/texmf-dist/tex/generic/pgf/systemlayer/pgf.cfg
File: pgf.cfg 2020/12/27 v3.1.8b (3.1.8b)
)
Driver file for pgf: pgfsys-pdftex.def
(/usr/share/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def
File: pgfsys-pdftex.def 2020/12/27 v3.1.8b (3.1.8b)
(/usr/share/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-common-pdf.def
File: pgfsys-common-pdf.def 2020/12/27 v3.1.8b (3.1.8b)
)))
(/usr/share/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex
File: pgfsyssoftpath.code.tex 2020/12/27 v3.1.8b (3.1.8b)
\pgfsyssoftpath@smallbuffer@items=\count271
\pgfsyssoftpath@bigbuffer@items=\count272
)
(/usr/share/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex
File: pgfsysprotocol.code.tex 2020/12/27 v3.1.8b (3.1.8b)
))
(/usr/share/texmf-dist/tex/latex/xcolor/xcolor.sty
Package: xcolor 2016/05/11 v2.12 LaTeX color extensions (UK)
(/usr/share/texmf-dist/tex/latex/graphics-cfg/color.cfg
File: color.cfg 2016/01/02 v1.6 sample color configuration
)
! Interruption.
<argument> ...sextension \else \@classoptionslist
,\fi \@curroptions ,
l.216 \ProcessOptions\relax
?
! Emergency stop.
<argument> ...sextension \else \@classoptionslist
,\fi \@curroptions ,
l.216 \ProcessOptions\relax
End of file on the terminal!
Here is how much of TeX's memory you used:
3149 strings out of 478994
50546 string characters out of 5864751
338722 words of memory out of 5000000
20600 multiletter control sequences out of 15000+600000
403430 words of font info for 27 fonts, out of 8000000 for 9000
1141 hyphenation exceptions out of 8191
117i,0n,119p,402b,36s stack positions out of 5000i,500n,10000p,200000b,80000s
! ==> Fatal error occurred, no output PDF file produced!

+ 75
- 17
site.hs View File

@ -2,6 +2,7 @@
{-# LANGUAGE MultiWayIf #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE BlockArguments #-}
{-# LANGUAGE LambdaCase #-}
import Control.DeepSeq (rnf)
import Control.Concurrent
@ -49,6 +50,7 @@ import Data.IORef
import Data.Hashable (Hashable (hashWithSalt))
import GHC.Stack
import Text.Read (readMaybe)
import GHC.Show (showCommaSpace)
readerOpts :: ReaderOptions
readerOpts = def { readerExtensions = pandocExtensions
@ -164,15 +166,6 @@ abbreviationFilter (Pandoc meta doc) =
in pure (name, rest)
| otherwise = Nothing
estimateReadingTime :: Pandoc -> Pandoc
estimateReadingTime (Pandoc meta doc) = Pandoc meta doc' where
wordCount = T.pack (show (getSum (query inlineLen doc)))
inlineLen (Str s) = Sum (length (T.words s))
inlineLen _ = mempty
doc' = RawBlock "html" ("<span id=reading-length>" <> wordCount <> "</span>")
: doc
addLanguageTag :: Pandoc -> Pandoc
addLanguageTag (Pandoc meta blocks) = Pandoc meta (map go blocks) where
@ -183,15 +176,20 @@ addLanguageTag (Pandoc meta blocks) = Pandoc meta (map go blocks) where
, "code-container":if haskv then "custom-tag":classes' else classes' , "code-container":if haskv then "custom-tag":classes' else classes'
, [] , []
) )
[block, Div (mempty, ["code-tag"], []) [Plain [Span (mempty, [], []) [Str tag]]]]
$ [block] ++ maybe [Div (mempty, ["code-tag"], []) [Plain [Span (mempty, [], []) [Str tag]]]]
where
language' = case T.uncons language of
Nothing -> mempty
Just (c, cs) -> T.cons (toUpper c) cs
tag = fromMaybe language' (lookup "tag" kv)
haskv = fromMaybe False (True <$ lookup "tag" kv)
maybe
| "notag" `elem` classes' = const []
| otherwise = id
go block@(CodeBlock (identifier, [], kv) text) = Div (mempty, ["code-container"], []) [block]
go block@(CodeBlock (identifier, [], kv) text) =
Div (mempty, ["code-container"], [])
[block, Div (mempty, ["empty-code-tag"], []) []]
go x = x
saveSynopsys :: Pandoc -> Compiler Pandoc
@ -202,7 +200,7 @@ saveSynopsys (Pandoc meta doc) =
case dropWhile (not . isParagraph) doc of
p:ps -> do
saveSnapshot "synopsys" =<< makeItem (take n (p:ps))
saveSnapshot "synopsys" =<< makeItem (map removeFootnotes (take n (p:ps)))
pure ()
[] -> pure ()
pure $ Pandoc meta doc
@ -210,6 +208,52 @@ saveSynopsys (Pandoc meta doc) =
isParagraph Para{} = True
isParagraph _ = False
removeFootnotes (Para xs) = Para $ filter (\case { Note _ -> False; _ -> True }) xs
removeFootnotes x = x
saveWordCount :: Pandoc -> Compiler Pandoc
saveWordCount (Pandoc meta doc) =
do
saveSnapshot "wc" =<< makeItem wordCount
pure $ Pandoc meta doc
where
wordCount = show (getSum (query inlineLen doc))
inlineLen (Str s) = Sum (length (T.words s))
inlineLen _ = mempty
saveTableOfContents :: Pandoc -> Compiler Pandoc
saveTableOfContents (Pandoc meta input) =
do
saveSnapshot "table-of-contents" =<< makeItem toc
pure $ Pandoc meta (fixHeaders 0 doc)
where
headers = filter (\case { Header _ _ _ -> True; _ -> False }) doc
doc = fixHeaders 0 input
fixHeaders n (Header l (_, ms, mt) x:bs) =
Header l (anchor, ms, mt) (Link (anchor, ms, mt) [] (T.singleton '#' <> anchor, mempty):x):fixHeaders (n + 1) bs where
anchor = T.pack ("h" ++ show n)
fixHeaders k (x:bs) = x:fixHeaders k bs
fixHeaders _ [] = []
into :: [Block] -> [[Block]]
into (Header l m@(anchor, _, _) x:ms) =
let
contained (Header l' _ _) = l' > l
contained _ = undefined
(ours, rest) = span contained ms
in [Para [Link (mempty, mempty, mempty) (tail x) (T.singleton '#' <> anchor, mempty)], list (into ours)]:into rest
into [] = []
into _ = undefined
list = BulletList
toc :: Block
toc = list (into headers)
sassImporter :: SassImporter
sassImporter = SassImporter 0 go where
go "normalize" _ = do
@ -233,7 +277,8 @@ compiler katexCache = do
katexFilter katexCache
>=> abbreviationFilter
>=> saveSynopsys
>=> pure . estimateReadingTime
>=> saveWordCount
>=> saveTableOfContents
>=> pure . addLanguageTag
main :: IO ()
@ -281,7 +326,12 @@ main = setup >>= \katexCache -> hakyllWith conf $ do
>>= loadAndApplyTemplate "templates/default.html" postCtx
>>= relativizeUrls
loadSnapshot id "synopsys" >>= saveSnapshot "synopsys" . writePandocWith wops . fmap (Pandoc mempty)
loadSnapshot id "synopsys"
>>= saveSnapshot "synopsys"
. writePandocWith wops
. fmap (Pandoc mempty)
pure r
match "pages/posts/*.lhs" $ version "raw" $ do
@ -317,6 +367,7 @@ main = setup >>= \katexCache -> hakyllWith conf $ do
match "pages/*.md" $ do match "pages/*.md" $ do
route $ setExtension "html" <> gsubRoute "pages/" (const "") route $ setExtension "html" <> gsubRoute "pages/" (const "")
compile $ compiler katexCache compile $ compiler katexCache
>>= loadAndApplyTemplate "templates/page.html" defaultContext
>>= loadAndApplyTemplate "templates/default.html" defaultContext
>>= relativizeUrls
@ -350,12 +401,19 @@ onlyPublic = filterM isPublic where
postCtx :: Context String
postCtx =
dateField "date" "%B %e, %Y"
<> synopsysField
<> snapshotField "synopsys" "synopsys"
<> snapshotField "words" "wc"
<> snapshotField' render "toc" "table-of-contents"
<> defaultContext
where
synopsysField = field "synopsys" $ \x -> do
snapshotField = snapshotField' pure
snapshotField' f key snap = field key $ \x -> do
let id = itemIdentifier x
itemBody <$> loadSnapshot id "synopsys"
fmap itemBody . f =<< loadSnapshot id snap
render x = do
wops <- writerOptions
pure . writePandocWith wops . fmap (Pandoc mempty . pure) $ x
readProcessBS :: FilePath -> [String] -> BS.ByteString -> IO (BS.ByteString, String)
readProcessBS path args input =


BIN
static/icon/pfp-alt.png View File

Width: 723  |  Height: 723  |  Size: 759 KiB

BIN
static/icon/[email protected] View File

Width: 128  |  Height: 128  |  Size: 44 KiB

BIN
static/icon/[email protected] View File

Width: 256  |  Height: 256  |  Size: 151 KiB

BIN
static/icon/[email protected] View File

Width: 512  |  Height: 512  |  Size: 531 KiB

BIN
static/icon/[email protected] View File

Width: 96  |  Height: 96  |  Size: 27 KiB

BIN
static/icon/pfp-tired.png View File

Width: 1057  |  Height: 1057  |  Size: 1.6 MiB

BIN
static/icon/[email protected] View File

Width: 128  |  Height: 128  |  Size: 53 KiB

BIN
static/icon/[email protected] View File

Width: 256  |  Height: 256  |  Size: 168 KiB

BIN
static/icon/[email protected] View File

Width: 512  |  Height: 512  |  Size: 562 KiB

BIN
static/icon/[email protected] View File

Width: 96  |  Height: 96  |  Size: 33 KiB

+ 31
- 0
static/not-doom.svg View File

@ -0,0 +1,31 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 2.48.0 (0)
-->
<!-- Title: G Pages: 1 -->
<svg width="112pt" height="116pt"
viewBox="0.00 0.00 111.99 116.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 112)">
<title>G</title>
<polygon fill="white" stroke="transparent" points="-4,4 -4,-112 107.99,-112 107.99,4 -4,4"/>
<!-- stack -->
<g id="node1" class="node">
<title>stack</title>
<ellipse fill="none" stroke="black" cx="52" cy="-90" rx="35.19" ry="18"/>
<text text-anchor="middle" x="52" y="-86.3" font-family="Times,serif" font-size="14.00">stack</text>
</g>
<!-- 3628800 -->
<g id="node2" class="node">
<title>3628800</title>
<ellipse fill="none" stroke="black" cx="52" cy="-18" rx="51.99" ry="18"/>
<text text-anchor="middle" x="52" y="-14.3" font-family="Times,serif" font-size="14.00">3628800</text>
</g>
<!-- stack&#45;&gt;3628800 -->
<g id="edge1" class="edge">
<title>stack&#45;&gt;3628800</title>
<path fill="none" stroke="black" d="M52,-71.7C52,-63.98 52,-54.71 52,-46.11"/>
<polygon fill="black" stroke="black" points="55.5,-46.1 52,-36.1 48.5,-46.1 55.5,-46.1"/>
</g>
</g>
</svg>

+ 6
- 2
templates/archive.html View File

@ -1,2 +1,6 @@
Here you can find all my previous posts:
$partial("templates/post-list.html")$
<div id="post-toc-container">
<article>
Here you can find all my previous posts:
$partial("templates/post-list.html")$
</article>
</div>

+ 20
- 26
templates/default.html View File

@ -35,36 +35,30 @@
<body>
<header>
<div id="header">
<div id="logo">
<a href="/">Home</a>
</div>
<nav>
<div id="navigation">
<div class="nav-button"><a href="/pages/contact.html">Contact</a></div>
<div class="nav-button"><a href="/archive.html">Archive</a></div>
<div class="nav-button"><a href="/pages/oss.html">Open-Source</a></div>
<div class="nav-button"><a href="/feed.xml">RSS</a></div>
</div>
</nav>
<div id="logo">
<a href="/">Home</a>
</div>
<nav>
<a href="/pages/contact.html">Contact</a>
<a href="/archive.html">Archive</a>
<a href="/pages/oss.html">OSS</a>
<a href="/feed.xml">RSS</a>
</nav>
</header>
<div id="content">
<main>
<h1>$title$</h1>
$body$
</div>
<footer>
<div id="footer" style="display: flex; justify-content: space-between;">
<div>
</div>
<div>
This wobsite is proudly powered by
<a href="https://haskell.org">Haskell</a>.
Hosted by <a href="https://shamiko.tmpim.pw">a demon</a>.
</div>
</div>
</footer>
</main>
</body>
<svg width="0" height="0">
<clipPath id="squircle" clipPathUnits="objectBoundingBox">
<path
fill="red"
stroke="none"
d="M 0,0.5 C 0,0 0,0 0.5,0 S 1,0 1,0.5 1,1 0.5,1 0,1 0,0.5"
/>
</clipPath>
</svg>
</html>

+ 5
- 0
templates/page.html View File

@ -0,0 +1,5 @@
<div id="post-toc-container">
<article>
$body$
</article>
</div>

+ 1
- 1
templates/post-list.html View File

@ -1,4 +1,4 @@
<ul class="post-list">
<ul id="post-list">
$for(posts)$
<li>
<div class="post-list-item">


+ 12
- 6
templates/post.html View File

@ -1,9 +1,15 @@
<div class="info">
Posted on $date$ <br />
<div id="post-toc-container">
<aside id=toc>
<h3>Contents</h3>
$toc$
</aside>
<article>
<div id="post-info">
Posted on $date$ <br />
Word count: $words$ <br />
</div>
$body$
</article>
</div>
<article>
$body$
</article>
<hr />

+ 3
- 0
test.dot View File

@ -0,0 +1,3 @@
digraph G {
stack -> 3628800
}
