Browse Source

fijsdoksdlafjsadf

Amélia Liao 3 years ago
parent
commit
d4a8d4ddd1
49 changed files with 1717 additions and 0 deletions
  1. +31
    -0
      css/colors.scss
  2. +2
    -0
      diagrams/ctt/axes.tex
  3. +3
    -0
      diagrams/ctt/circle.tex
  4. +11
    -0
      diagrams/ctt/comp_path.tex
  5. +4
    -0
      diagrams/ctt/eq_i0_i1.tex
  6. +11
    -0
      diagrams/ctt/land_connection.tex
  7. +11
    -0
      diagrams/ctt/lor_connection.tex
  8. +4
    -0
      diagrams/ctt/pi_vs_pnoti_1.tex
  9. +4
    -0
      diagrams/ctt/pi_vs_pnoti_2.tex
  10. +4
    -0
      diagrams/ctt/refl_tt.tex
  11. +6
    -0
      diagrams/ctt/span.tex
  12. +9
    -0
      diagrams/ctt/span_colimit.tex
  13. +1
    -0
      diagrams/ctt/transp.tex
  14. +9
    -0
      diagrams/ctt/univalence.tex
  15. +30
    -0
      diagrams/cubicalsets/acube.tex
  16. +6
    -0
      diagrams/cubicalsets/aglobe.tex
  17. +10
    -0
      diagrams/cubicalsets/asquare.tex
  18. +11
    -0
      diagrams/cubicalsets/cubical_2cell.tex
  19. +24
    -0
      diagrams/cubicalsets/degeneracies.tex
  20. +9
    -0
      diagrams/cubicalsets/del_asquare.tex
  21. +17
    -0
      diagrams/cubicalsets/delta10_delta0.tex
  22. +43
    -0
      diagrams/cubicalsets/facemap.tex
  23. +38
    -0
      diagrams/cubicalsets/first_ncubes.tex
  24. +6
    -0
      diagrams/cubicalsets/globular_2cell.tex
  25. +7
    -0
      diagrams/cubicalsets/kan_condition.tex
  26. +9
    -0
      diagrams/cubicalsets/left_inv.tex
  27. +9
    -0
      diagrams/cubicalsets/naturality.tex
  28. +9
    -0
      diagrams/cubicalsets/open_box.tex
  29. +39
    -0
      diagrams/cubicalsets/open_boxes.tex
  30. +9
    -0
      diagrams/cubicalsets/right_inv.tex
  31. +19
    -0
      diagrams/cubicalsets/thin_squares.tex
  32. +12
    -0
      diagrams/tt/prodfx.tex
  33. +164
    -0
      pages/posts/.2020-09-09-typing-proof.md
  34. +607
    -0
      pages/posts/2021-03-07-cubical.md
  35. +118
    -0
      pages/posts/2021-06-07-ax-j.md
  36. +411
    -0
      pages/posts/2021-06-21-cubical-sets.md
  37. BIN
      static/icon/android-chrome-192x192.png
  38. BIN
      static/icon/android-chrome-512x512.png
  39. BIN
      static/icon/apple-touch-icon.png
  40. BIN
      static/icon/favicon-16x16.png
  41. BIN
      static/icon/favicon-32x32.png
  42. BIN
      static/icon/favicon.ico
  43. BIN
      static/icon/pfp.jpg
  44. BIN
      static/icon/pfp.png
  45. BIN
      static/icon/[email protected]
  46. BIN
      static/icon/[email protected]
  47. BIN
      static/icon/[email protected]
  48. BIN
      static/icon/[email protected]
  49. BIN
      static/icon/valid-html20.png

+ 31
- 0
css/colors.scss View File

@ -0,0 +1,31 @@
$black: #282c34;
$white: #abb2bf;
$light_red: #e06c75;
$dark_red: #be5046;
$green: #98c379;
$light_yellow: #e5c07b;
$dark_yellow: #d19a66;
$blue: #61afef;
$magenta: #c678dd;
$cyan: #56b6c2;
$gutter_grey: #4b5263;
$comment_grey: #5c6370;
$orange: #ffac5f;
$blonde: #f5ddbc;
$light-purple: #f2f1f8;
$purple: #7060eb;
$yugo: #ea8472;
$code-background: $black;
$code-language-background: lighten($black, 5%);
$code-language-color: $white;
$code-fg: $white;
$code-kw: $light_red;
$code-dt: $dark_yellow;
$code-ot: $code-fg;
$code-co: $comment_grey;
// comment

+ 2
- 0
diagrams/ctt/axes.tex View File

@ -0,0 +1,2 @@
\draw[->,thick] (0,0)--(1,0) node[midway,below]{\large{i}};
\draw[->,thick] (0,0)--(0,1) node[midway,left]{\large{j}};

+ 3
- 0
diagrams/ctt/circle.tex View File

@ -0,0 +1,3 @@
\node[draw,circle,label=below:{$\mathrm{base}$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (a0) at (0, -1) {};
\draw[->] (0, 0) circle (1cm);
\node[] (loop) at (0, 0) {$\mathrm{loop}\ i$};

+ 11
- 0
diagrams/ctt/comp_path.tex View File

@ -0,0 +1,11 @@
\node[] (i0j0) at (-1, -1) {x};
\node[] (i1j0) at (1, -1) {y};
\node[] (i0j1) at (-1, 1) {x};
\node[] (i1j1) at (1, 1) {z};
\node (in) at (0, 0) {};
\draw[->] (i0j0) -- (i0j1) node [midway] {$a$};
\draw[->] (i0j0) -- (i1j0) node [midway, below] {$p(i)$};
\draw[->,dashed] (i0j1) -- (i1j1) node [midway] {};
\draw[->] (i1j0) -- (i1j1) node [midway, right] {$q(j)$};

+ 4
- 0
diagrams/ctt/eq_i0_i1.tex View File

@ -0,0 +1,4 @@
\node[draw,circle,label=left:{$i0$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (i0) at (-1, 0) {};
\node[draw,circle,label=right:{$i1$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (i1) at (1, 0) {};
\draw[->] (i0) -- (i1) node [midway] {$\lambda i. i$};

+ 11
- 0
diagrams/ctt/land_connection.tex View File

@ -0,0 +1,11 @@
\node[] (i0j0) at (-1, -1) {a};
\node[] (i1j0) at (1, -1) {a};
\node[] (i0j1) at (-1, 1) {a};
\node[] (i1j1) at (1, 1) {b};
\node (in) at (0, 0) {$\lambda i j. p (i \land j)$};
\draw[->] (i0j0) -- (i0j1) node [midway] {$\lambda j. p\ i0$};
\draw[->] (i0j0) -- (i1j0) node [midway, below] {$\lambda i. p\ i0$};
\draw[->] (i0j1) -- (i1j1) node [midway] {$p$};
\draw[->] (i1j0) -- (i1j1) node [midway, right] {$p$};

+ 11
- 0
diagrams/ctt/lor_connection.tex View File

@ -0,0 +1,11 @@
\node[] (i0j0) at (-1, -1) {a};
\node[] (i1j0) at (1, -1) {b};
\node[] (i0j1) at (-1, 1) {b};
\node[] (i1j1) at (1, 1) {b};
\node (in) at (0, 0) {$\lambda i j. p (i \lor j)$};
\draw[->] (i0j0) -- (i0j1) node [midway] {$p$};
\draw[->] (i0j0) -- (i1j0) node [midway, below] {$p$};
\draw[->] (i0j1) -- (i1j1) node [midway] {$\lambda i. p\ i1$};
\draw[->] (i1j0) -- (i1j1) node [midway, right] {$\lambda j. p\ i1$};

+ 4
- 0
diagrams/ctt/pi_vs_pnoti_1.tex View File

@ -0,0 +1,4 @@
% Path p from a (at i=0) to b (at i=1), drawn left to right.
\node[draw,circle,label=left:{$a$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (i0) at (-1, 0) {};
\node[draw,circle,label=right:{$b$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (i1) at (1, 0) {};
% Fixed: "->" is an arrow-tip option, not a path operation; the connector
% between two coordinates must be "--" (or "to"), otherwise TikZ errors out.
\draw[->] (i0) -- (i1) node [midway] {$\lambda i. p(i)$};

+ 4
- 0
diagrams/ctt/pi_vs_pnoti_2.tex View File

@ -0,0 +1,4 @@
% The reversed path λi. p(¬i): endpoints swapped, arrow tip on the left ("<-").
\node[draw,circle,label=left:{$b$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (i0) at (-1, 0) {};
\node[draw,circle,label=right:{$a$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (i1) at (1, 0) {};
% Fixed: replaced invalid "->" path connector with "--"; arrow direction is
% already expressed by the "<-" option.
\draw[<-] (i0) -- (i1) node [midway] {$\lambda i. p(\neg i)$};

+ 4
- 0
diagrams/ctt/refl_tt.tex View File

@ -0,0 +1,4 @@
% Reflexivity as the constant path λi. a between two copies of the point a.
\node[draw,circle,label=left:{$a$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (i0) at (-1, 0) {};
\node[draw,circle,label=right:{$a$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (i1) at (1, 0) {};
% Fixed: "--" is the path operation; "->" is only valid inside the option list.
\draw[->] (i0) -- (i1) node [midway] {$\lambda i. a$};

+ 6
- 0
diagrams/ctt/span.tex View File

@ -0,0 +1,6 @@
\node[] (i0j1) at (-1, 1) {A};
\node[] (i1j1) at (1, 1) {C};
\node[] (i0j0) at (-1, -1) {B};
\draw[<-] (i0j0) -- (i0j1) node [midway] {$f$};
\draw[->] (i0j1) -- (i1j1) node [midway] {$g$};

+ 9
- 0
diagrams/ctt/span_colimit.tex View File

@ -0,0 +1,9 @@
\node[] (i1j0) at (1, -1) {P};
\node[] (i1j1) at (1, 1) {C};
\node[] (i0j0) at (-1, -1) {B};
\node[] (i0j1) at (-1, 1) {A};
\draw[<-] (i0j0) -- (i0j1) node [midway] {$f$};
\draw[->] (i0j0) -- (i1j0) node [midway, below] {$i_1$};
\draw[->] (i0j1) -- (i1j1) node [midway] {$g$};
\draw[<-] (i1j0) -- (i1j1) node [midway, right] {$i_2$};

+ 1
- 0
diagrams/ctt/transp.tex View File

@ -0,0 +1 @@
\node[draw,circle,label=below:{$a0 : A(i0)$},fill,outer sep=0.1cm, inner sep=0pt, minimum size=0.1cm] (a0) at (-1, 0) {};

+ 9
- 0
diagrams/ctt/univalence.tex View File

@ -0,0 +1,9 @@
\node[] (i0j1) at (-1, 1) {A};
\node[] (i1j1) at (1, 1) {B};
\node[] (i0j0) at (-1, -1) {B};
\node[] (i1j0) at (1, -1) {B};
\draw[<-] (i0j0) -- (i0j1) node [midway] {$\mathrm{equiv}$} node [midway, above, rotate=-90] {$\sim$};
\draw[->] (i0j0) -- (i1j0) node [midway, below] {$B$};
\draw[->,dashed] (i0j1) -- (i1j1) node [midway] {};
\draw[<-] (i1j0) -- (i1j1) node [midway, right] {$\mathrm{id}_B$} node [midway, above, rotate=90] {$\sim$};

+ 30
- 0
diagrams/cubicalsets/acube.tex View File

@ -0,0 +1,30 @@
\node (a) at (-2.5, 2.5) {a};
\node (b) at (2.5, 2.5) {b};
\node (c) at (-2.5, -2.5) {c};
\node (d) at (2.5, -2.5) {d};
\node (w) at (-1, 1) {w};
\node (x) at (1, 1) {x};
\node (y) at (-1, -1) {y};
\node (z) at (1, -1) {z};
\draw[->] (a) -- node[midway] {f} (b);
\draw[->] (b) -- node[midway,right] {q} (d);
\draw[->] (a) -- node[midway,left] {p} (c);
\draw[->] (c) -- node[midway,below] {g} (d);
\draw[->] (w) -- node[midway,below] {h} (x);
\draw[->] (x) -- node[midway,left] {j} (z);
\draw[->] (y) -- node[midway,above] {k} (z);
\draw[->] (w) -- node[midway,right] {l} (y);
\draw[->] (a) -- node[midway] {$\upsilon$} (w);
\draw[->] (b) -- node[midway] {$\phi$} (x);
\draw[->] (c) -- node[midway] {$\chi$} (y);
\draw[->] (d) -- node[midway] {$\psi$} (z);
\node (wxyz) at (0, 0) {$\kappa$};
\node (awyc) at (-1.8, 0) {$\lambda$};
\node (awxb) at (0, 1.8) {$\mu$};
\node (bxzd) at (1.8, 0) {$\nu$};
\node (cyzd) at (0, -1.8) {$\xi$};

+ 6
- 0
diagrams/cubicalsets/aglobe.tex View File

@ -0,0 +1,6 @@
\node (a) at (-1, 0) {a};
\node (b) at (1, 0) {d};
\draw[->] (a) to[out=45,in=135] node[midway] (f) {$q \circ f$} (b);
\draw[->] (a) to[out=-45,in=-135] node[midway,below] (g) {$g \circ p$} (b);
\draw[double,->] ([yshift=-2pt]f.south) -- node[midway,right] {$\sigma$} ([yshift=2pt]g.north);

+ 10
- 0
diagrams/cubicalsets/asquare.tex View File

@ -0,0 +1,10 @@
\node (fi0j1) at (-0.75,0.75) {$a$};
\node (fi0j0) at (-0.75,-0.75) {$b$};
\node (fi1j1) at (0.75,0.75) {$c$};
\node (fi1j0) at (0.75,-0.75) {$d$};
\node (f) at (0, 0) {$\sigma$};
\draw[->] (fi0j1) -- (fi1j1) node[midway] (f) {f};
\draw[->] (fi0j0) -- (fi1j0) node[midway,below] (g) {g};
\draw[<-] (fi0j0) -- (fi0j1) node[midway,left] (p) {p};
\draw[<-] (fi1j0) -- (fi1j1) node[midway,right] (q) {q};

+ 11
- 0
diagrams/cubicalsets/cubical_2cell.tex View File

@ -0,0 +1,11 @@
\node (atop) at (-1, 0.5) {a};
\node (abot) at (-1, -0.5) {a};
\node (btop) at (1, 0.5) {b};
\node (bbot) at (1, -0.5) {b};
\draw[->] (atop) to[out=30,in=150] node[midway] (f) {f} (btop);
\draw[->] (atop) -- (abot);
\draw[->] (abot) to[out=-30,in=-150] node[midway,below] (g) {g} (bbot);
\draw[->] (btop) -- (bbot);
\node at (0, 0) {$\alpha$};

+ 24
- 0
diagrams/cubicalsets/degeneracies.tex View File

@ -0,0 +1,24 @@
\node (a) at (-1, 0) {$a$};
\node (a0) at (0, 0.75) {$a$};
\node (a1) at (0, -0.75) {$a$};
\draw[->] (a0) -- node[midway] (al) {} (a1);
\draw[dashed,->] (a) to[] node[midway,above] {$\sigma$} ([xshift=-0.5em]al);
\node (fi0) at (1, 0.75) {$a$};
\node (fi1) at (1, -0.75) {$b$};
\draw[->] (fi0) -- node[midway,right] (f) {f} (fi1);
\node (fi0j1) at (4 + -0.75, 0.75) {$a$};
\node (fi0j0) at (4 + -0.75, -0.75) {$a$};
\node (fi1j1) at (4 + 0.75, 0.75) {$b$};
\node (fi1j0) at (4 + 0.75, -0.75) {$b$};
\draw[->] (fi0j1) -- (fi1j1) node[midway] (fs) {f};
\draw[->] (fi0j0) -- (fi1j0) node[midway,below] (gs) {f};
\draw[<-] (fi0j0) -- (fi0j1) node[midway,left] (p) {};
\draw[<-] (fi1j0) -- (fi1j1) node[midway,right] (q) {};
\node (sq) at (4, 0) {$\bullet$};
\draw[dashed,->] (f) to[out=20,in=160] node[midway,below] {$\sigma_0 \circ \sigma$} (sq);

+ 9
- 0
diagrams/cubicalsets/del_asquare.tex View File

@ -0,0 +1,9 @@
\node (fi0j1) at (-0.75,0.75) {$a$};
\node (fi0j0) at (-0.75,-0.75) {$b$};
\node (fi1j1) at (0.75,0.75) {$c$};
\node (fi1j0) at (0.75,-0.75) {$d$};
\draw[->] (fi0j1) -- (fi1j1) node[midway] (f) {f};
\draw[->] (fi0j0) -- (fi1j0) node[midway,below] (g) {g};
\draw[<-] (fi0j0) -- (fi0j1) node[midway,left] (p) {p};
\draw[<-] (fi1j0) -- (fi1j1) node[midway,right] (q) {q};

+ 17
- 0
diagrams/cubicalsets/delta10_delta0.tex View File

@ -0,0 +1,17 @@
% Left: the coface δ¹ picks out endpoint 1 of the interval.
% Right: δ⁰₀ embeds the interval as the left edge of the square.
\node (point) at (0, 0) {$\color{red}{\bullet}$};
\node (line0) at (2, 1) {$\color{blue}{\bullet}_0$};
\node (line1) at (2, -1) {$\color{red}{\bullet}_1$};
% Invisible midway node so the δ⁰₀ arrow can start from the line's middle.
\draw (line0) -- (line1) node[midway] (linemid) {};
% (Removed a stray lone ";" that followed this path; bare text between path
% commands is a TikZ error inside a tikzpicture.)
\draw[->] (point) to[out=-70,in=180] node[midway] (a) {$\delta^1$} (line1);
\node (sq00) at (4, -1) {$\color{red}\bullet_{01}$};
\node (sq01) at (4, 1) {$\color{blue}{\bullet}_{00}$};
\node (sq10) at (6, -1) {$\bullet_{11}$};
\node (sq11) at (6, 1) {$\bullet_{10}$};
\draw (sq00) -- node[midway] (linemid_img) {} (sq01) -- (sq11) -- (sq10) -- (sq00);
\draw[->] (linemid) -- node[midway] (b) {$\delta^0_0$} (linemid_img);

+ 43
- 0
diagrams/cubicalsets/facemap.tex View File

@ -0,0 +1,43 @@
% A face map: the red interval includes into the square as its left edge,
% and applying the (contravariant) functor F sends the square's image back
% to an interval. Red marks the sub-cube being included.
\node (j1) at (0,1) {$\color{red}\bullet$};
\node (j0) at (0,0) {$\color{red}\bullet$};
\node (mid) at (0,0.5) {};
\draw[color=red] (j1) -- (j0);
\node (i0j1) at (1,1) {$\color{red}\bullet$};
\node (i0j0) at (1,0) {$\color{red}\bullet$};
\node (i1j1) at (2,1) {$\bullet$};
\node (i1j0) at (2,0) {$\bullet$};
\draw[color=red] (i0j1) -- (i0j0);
\draw (i0j1) -- (i1j1) -- (i1j0) -- (i0j0);
\node (mid2) at (1,0.5) {};
% Fixed: "--" is the path connector; "->" here was an invalid path operation
% ("->" only belongs in the option list, as on this command already).
\draw[->] (mid) -- (mid2);
\node (F) at (-0.6, 0.5) {$\mathcal{F}$};
% fit + delimiter options wrap the first two figures in large parentheses.
\node[fit=(j0)(i1j1), left delimiter=(, inner sep=-0.7ex, right delimiter=)] (openF) {};
\node (colon) at (2.6, 0.5) {$:$};
\node (F2) at (2.9, 0.5) {$\mathcal{F}$};
\node (fi0j1) at (3.5,1) {$\color{red}\bullet$};
\node (fi0j0) at (3.5,0) {$\color{red}\bullet$};
\node (fi1j1) at (4.5,1) {$\bullet$};
\node (fi1j0) at (4.5,0) {$\bullet$};
\draw[color=red] (fi0j1) -- (fi0j0);
\draw (fi0j1) -- (fi1j1) -- (fi1j0) -- (fi0j0);
\node[fit=(fi0j0)(fi1j1), left delimiter=(, inner sep=-0.7ex, right delimiter=)] (openF2) {};
\node (F3) at (6.1, 0.5) {$\mathcal{F}$};
\draw[->] ([xshift=2.1ex]openF2.east) -- (F3);
\node (fj1) at (6.7,1) {$\color{red}\bullet$};
\node (fj0) at (6.7,0) {$\color{red}\bullet$};
\node[fit=(fj1)(fj0), left delimiter=(, inner sep=-0.7ex, right delimiter=)] (openF3) {};
\draw[color=red] (fj1) -- (fj0);

+ 38
- 0
diagrams/cubicalsets/first_ncubes.tex View File

@ -0,0 +1,38 @@
% The first n-cubes: a point, an interval, a square, and a cube, with
% vertices labelled by their bit-strings.
% Fixed throughout: the first five \draw commands used "->" as the path
% connector, which TikZ rejects; the connector must be "--" (the arrow tip
% comes from the [->] option), matching the cube's edges below.
\node at (-5, 0) {$\bullet_{()}$};
\node (line0) at (-3, 1) {$\bullet_0$};
\node (line1) at (-3, -1) {$\bullet_1$};
\draw[->] (line0) -- (line1);
\node (sq00) at (-1, 1) {$\bullet_{00}$};
\node (sq01) at (-1, -1) {$\bullet_{01}$};
\node (sq10) at (1, 1) {$\bullet_{10}$};
\node (sq11) at (1, -1) {$\bullet_{11}$};
\draw[->] (sq00) -- (sq01);
\draw[->] (sq00) -- (sq10);
\draw[->] (sq10) -- (sq11);
\draw[->] (sq01) -- (sq11);
% Cube: back face offset up-right from the front face.
\node (sq010) at (3, -1) {$\bullet_{000}$};
\node (sq011) at (4, 0) {$\bullet_{001}$};
\node (sq110) at (5, -1) {$\bullet_{100}$};
\node (sq111) at (6, 0) {$\bullet_{101}$};
\node (sq000) at (3, 1) {$\bullet_{010}$};
\node (sq001) at (4, 2) {$\bullet_{011}$};
\node (sq100) at (5, 1) {$\bullet_{110}$};
\node (sq101) at (6, 2) {$\bullet_{111}$};
\draw[->] (sq000) -- (sq001);
\draw[->] (sq000) -- (sq100);
\draw[->] (sq000) -- (sq010);
\draw[->] (sq001) -- (sq011);
\draw[->] (sq001) -- (sq101);
\draw[->] (sq010) -- (sq110);
\draw[->] (sq010) -- (sq011);
\draw[->] (sq100) -- (sq101);
\draw[->] (sq100) -- (sq110);
\draw[->] (sq101) -- (sq111);
\draw[->] (sq110) -- (sq111);
\draw[->] (sq011) -- (sq111);

+ 6
- 0
diagrams/cubicalsets/globular_2cell.tex View File

@ -0,0 +1,6 @@
\node (a) at (-1, 0) {a};
\node (b) at (1, 0) {b};
\draw[->] (a) to[out=30,in=150] node[midway] (f) {f} (b);
\draw[->] (a) to[out=-30,in=-150] node[midway,below] (g) {g} (b);
\draw[double,->] ([yshift=-2pt]f.south) -- node[midway,right] {$\alpha$} ([yshift=2pt]g.north);

+ 7
- 0
diagrams/cubicalsets/kan_condition.tex View File

@ -0,0 +1,7 @@
\node (open) at (0, 2) {$\sqcap^{n,i,\varepsilon}$};
\node (box) at (0, 0) {$\square^n$};
\node (set) at (2, 0) {$X$};
\draw[right hook->] (open) -- (box);
\draw[->] (open) -- node[midway] {f} (set);
\draw[dotted, ->] (box) -- node[midway, below] {g} (set);

+ 9
- 0
diagrams/cubicalsets/left_inv.tex View File

@ -0,0 +1,9 @@
% Left-inverse square: g (dashed, to be filled) followed by f agrees with
% the identity on B along the other two sides.
\node (sq1_b00) at (-3, 1) {B};
\node (sq1_a10) at (-1, 1) {A};
\node (sq1_b01) at (-3, -1) {B};
\node (sq1_b11) at (-1, -1) {B};
\draw[dashed,->] (sq1_b00) -- node[midway] {g} (sq1_a10);
% Fixed: "--" replaces the invalid "->" path connector on the three edges
% below (arrow tips are already given by the [->] option).
\draw[->] (sq1_a10) -- node[midway] {f} (sq1_b11);
\draw[->] (sq1_b00) -- node[midway,left] {1} (sq1_b01);
\draw[->] (sq1_b01) -- node[midway,below] {1} (sq1_b11);

+ 9
- 0
diagrams/cubicalsets/naturality.tex View File

@ -0,0 +1,9 @@
% Naturality square for α : F ⇒ G at a morphism f : c' → c.
% Fixed: primes written as "c\prime" typeset a full-size prime at the
% baseline; in math mode an apostrophe (c') gives the superscript prime.
\node (sq1_b00) at (-3, 1) {$F(c')$};
\node (sq1_a10) at (-1, 1) {$F(c)$};
\node (sq1_b01) at (-3, -1) {$G(c')$};
\node (sq1_b11) at (-1, -1) {$G(c)$};
\draw[->] (sq1_b00) -- node[midway] {$F(f)$} (sq1_a10);
% Fixed: "--" replaces the invalid "->" path connector on the three edges
% below (arrow tips come from the [->] option).
\draw[->] (sq1_a10) -- node[midway] {$\alpha_{c}$} (sq1_b11);
\draw[->] (sq1_b00) -- node[midway,left] {$\alpha_{c'}$} (sq1_b01);
\draw[->] (sq1_b01) -- node[midway,below] {$G(f)$} (sq1_b11);

+ 9
- 0
diagrams/cubicalsets/open_box.tex View File

@ -0,0 +1,9 @@
\node (fi0j1) at (-0.75, 0.75) {$a$};
\node (fi0j0) at (-0.75, -0.75) {$b$};
\node (fi1j1) at (0.75, 0.75) {$c$};
\node (fi1j0) at (0.75, -0.75) {$d$};
\draw[->,dotted] (fi0j1) -- (fi1j1) node[midway] (f) {f};
\draw[->] (fi0j0) -- (fi1j0) node[midway,below] (g) {g};
\draw[<-] (fi0j0) -- (fi0j1) node[midway,left] (p) {p};
\draw[<-] (fi1j0) -- (fi1j1) node[midway,right] (q) {q};

+ 39
- 0
diagrams/cubicalsets/open_boxes.tex View File

@ -0,0 +1,39 @@
\node (fi0j1) at (-0.75, 0.75) {$a$};
\node (fi0j0) at (-0.75, -0.75) {$b$};
\node (fi1j1) at (0.75, 0.75) {$c$};
\node (fi1j0) at (0.75, -0.75) {$d$};
\draw[->,dotted] (fi0j1) -- (fi1j1) node[midway] (f) {f};
\draw[->] (fi0j0) -- (fi1j0) node[midway,below] (g) {g};
\draw[<-] (fi0j0) -- (fi0j1) node[midway,left] (p) {p};
\draw[<-] (fi1j0) -- (fi1j1) node[midway,right] (q) {q};
\node (fi0j1) at (2.5 + -0.75, 0.75) {$a$};
\node (fi0j0) at (2.5 + -0.75, -0.75) {$b$};
\node (fi1j1) at (2.5 + 0.75, 0.75) {$c$};
\node (fi1j0) at (2.5 + 0.75, -0.75) {$d$};
\draw[->] (fi0j1) -- (fi1j1) node[midway] (f) {f};
\draw[->,dotted] (fi0j0) -- (fi1j0) node[midway,below] (g) {g};
\draw[<-] (fi0j0) -- (fi0j1) node[midway,left] (p) {p};
\draw[<-] (fi1j0) -- (fi1j1) node[midway,right] (q) {q};
\node (fi0j1) at (5 + -0.75, 0.75) {$a$};
\node (fi0j0) at (5 + -0.75, -0.75) {$b$};
\node (fi1j1) at (5 + 0.75, 0.75) {$c$};
\node (fi1j0) at (5 + 0.75, -0.75) {$d$};
\draw[->] (fi0j1) -- (fi1j1) node[midway] (f) {f};
\draw[->] (fi0j0) -- (fi1j0) node[midway,below] (g) {g};
\draw[<-,dotted] (fi0j0) -- (fi0j1) node[midway,left] (p) {p};
\draw[<-] (fi1j0) -- (fi1j1) node[midway,right] (q) {q};
\node (fi0j1) at (7.5 + -0.75, 0.75) {$a$};
\node (fi0j0) at (7.5 + -0.75, -0.75) {$b$};
\node (fi1j1) at (7.5 + 0.75, 0.75) {$c$};
\node (fi1j0) at (7.5 + 0.75, -0.75) {$d$};
\draw[->] (fi0j1) -- (fi1j1) node[midway] (f) {f};
\draw[->] (fi0j0) -- (fi1j0) node[midway,below] (g) {g};
\draw[<-] (fi0j0) -- (fi0j1) node[midway,left] (p) {p};
\draw[<-,dotted] (fi1j0) -- (fi1j1) node[midway,right] (q) {q};

+ 9
- 0
diagrams/cubicalsets/right_inv.tex View File

@ -0,0 +1,9 @@
\node (sq1_a00) at (-3, 1) {A};
\node (sq1_b10) at (-1, 1) {B};
\node (sq1_a01) at (-3, -1) {A};
\node (sq1_a11) at (-1, -1) {A};
\draw[dashed,->] (sq1_b10) -- node[midway] {h} (sq1_a11);
\draw[->] (sq1_a00) -- node[midway] {f} (sq1_b10);
\draw[->] (sq1_a00) -- node[midway,left] {1} (sq1_a01);
\draw[->] (sq1_a01) -- node[midway,below] {1} (sq1_a11);

+ 19
- 0
diagrams/cubicalsets/thin_squares.tex View File

@ -0,0 +1,19 @@
\node (fi0j1) at (-0.75, 0.75) {$a$};
\node (fi0j0) at (-0.75, -0.75) {$b$};
\node (fi1j1) at (0.75, 0.75) {$a$};
\node (fi1j0) at (0.75, -0.75) {$b$};
\draw[->] (fi0j1) -- (fi1j1) node[midway] (f) {1};
\draw[->] (fi0j0) -- (fi1j0) node[midway,below] (g) {1};
\draw[<-] (fi0j0) -- (fi0j1) node[midway,left] (p) {f};
\draw[<-] (fi1j0) -- (fi1j1) node[midway,right] (q) {f};
\node (fi0j1) at (2.5 + -0.75, 0.75) {$a$};
\node (fi0j0) at (2.5 + -0.75, -0.75) {$a$};
\node (fi1j1) at (2.5 + 0.75, 0.75) {$b$};
\node (fi1j0) at (2.5 + 0.75, -0.75) {$b$};
\draw[->] (fi0j1) -- (fi1j1) node[midway] (f) {f};
\draw[->] (fi0j0) -- (fi1j0) node[midway,below] (g) {f};
\draw[<-] (fi0j0) -- (fi0j1) node[midway,left] (p) {1};
\draw[<-] (fi1j0) -- (fi1j1) node[midway,right] (q) {1};

+ 12
- 0
diagrams/tt/prodfx.tex View File

@ -0,0 +1,12 @@
\node (gamma) at (0, 2) {$\Gamma$};
\node (prod) at (0, 0) {$\beta^\alpha \times \alpha$};
\node (arg) at (2.5, 0) {$\alpha$};
\node (func) at (-2.5, 0) {$\beta^\alpha$};
\draw[->] (gamma) -- node[midway] {$\left<f,x\right>$} (prod);
\draw[->] (prod) -- node[midway] {$\pi_1$} (func);
\draw[->] (prod) -- node[midway,below] {$\pi_2$} (arg);
\draw[->] (gamma) -- node[midway] {$f$} (func);
\draw[->] (gamma) -- node[midway] {$x$} (arg);

+ 164
- 0
pages/posts/.2020-09-09-typing-proof.md View File

@ -0,0 +1,164 @@
---
title: "This Sentence is False, or: On Natural Language, Typing and Proof"
date: September 9th, 2020
---
The Liar's paradox is often the first paradox someone dealing with logic, even in an informal setting, encounters. It is _intuitively_ paradoxical: how can a sentence be both true, and false? This contradicts (ahem) the law of non-contradiction, that states that "no proposition is both true and false", or, symbolically, $\neg (A \land \neg A)$. Appealing to symbols like that gives us warm fuzzy feelings, because, _of course, the algebra doesn't lie!_
There's a problem with that appeal to symbols, though. And it's nothing to do with non-contradiction: It's to do with well-formedness. How do you accurately translate the "this sentence is false" sentence into a logical formula? We can try by giving it a name, say $L$ (for liar), and state that $L$ must represent some logical formula. Note that the equality symbol $=$ here is _not_ a member of the logic we're using to express $L$, it's a symbol of this discourse. It's _meta_​logical.
$$ L = \dots $$
But what should fill in the dots? $L$ is the sentence we're symbolising, so "this sentence" must mean $L$. Saying "X is false" can be notated in a couple of equivalent ways, such as $\neg X$ or $X \to \bot$. We'll go with the latter: it's a surprise tool that will help us later. Now we know how to fill in the dots: It's $L \to \bot$.
<details>
<summary>Truth tables demonstrating the equivalence between $\neg A$ and $A \to \bot$, if you are classically inclined.</summary>
<div class="mathpar">
<table>
<tr>
<th> $A$ </th>
<th> $\neg A$ </th>
</tr>
<tr><td>$\top$</td><td>$\bot$</td></tr>
<tr><td>$\bot$</td><td>$\top$</td></tr>
</table>
<table>
<tr>
<th> $A$ </th>
<th> $A\to\bot$ </th>
</tr>
<tr><td>$\top$</td><td>$\bot$</td></tr>
<tr><td>$\bot$</td><td>$\top$</td></tr>
</table>
</div>
</details>
But wait. If $L = L \to \bot$, then $L = (L \to \bot) \to \bot$, and also $L = ((L \to \bot) \to \bot) \to \bot$, and so... forever. There is no finite, well-formed formula of first-order logic that represents the sentence "This sentence is false", thus, assigning a truth value to it is meaningless: Saying "This sentence is false" is true is just as valid as saying that it's false, both of those are as valid as saying "$\neg$ is true".
Wait some more, though: we're not done. It's known, by the [Curry-Howard isomorphism], that logical systems correspond to type systems. Therefore, if we can find a type-system that assigns a meaning to our sentence $L$, then there _must_ exist a logical system that can express $L$, and so, we can decide its truth!
Even better, we don't need to analyse the truth of $L$ logically, we can do it type-theoretically: if we can build an inhabitant of $L$, then it is true; If we can build an inhabitant of $\neg L$, then it's false; And otherwise, I'm just not smart enough to do it.
So what is the smallest type system that lets us assign a meaning to $L$?
# A system of equirecursive types: $\lambda_{\text{oh no}}$[^1]
[^1]: The reason for the name will become obvious soon enough.
We do not need a complex type system to express $L$: a simple extension over the basic simply-typed lambda calculus $\lambda_{\to}$ will suffice. No fancy higher-ranked or dependent types here, sorry!
As a refresher, the simply-typed lambda calculus has _only_:
* A set of base types $\mathbb{B}$,
* Function types $\tau \to \sigma$,
* For each base type $b \in \mathbb{B}$, a set of base terms $\mathbb{T}_b$,
* Variables $v$,
* Lambda abstractions $\lambda v. e$, and
* Application $e\ e'$.
<details>
<summary>Type assignment rules for the basic $\lambda_{\to}$ calculus.</summary>
<div class="math-paragraph">
<div>
$$\frac{x : \tau \in \Gamma}{\Gamma \vdash x : \tau}$$
</div>
<div>
$$\frac{b \in \mathbb{B} \quad x \in \mathbb{T}_{b}}{\Gamma \vdash x : b}$$
</div>
<div>
$$\frac{\Gamma, x : \sigma \vdash e : \tau}{\Gamma \vdash \lambda x. e : \sigma \to \tau}$$
</div>
<div>
$$\frac{\Gamma, e : \sigma \to \tau \quad \Gamma \vdash e' : \sigma}{\Gamma \vdash e\ e' : \tau}$$
</div>
</div>
</details>
First of all, we'll need a type to represent the logical proposition $\bot$. This type is empty: It has no type formers. Its elimination rule corresponds to the principle of explosion, and we write it $\mathtt{absurd}$. The inference rule:
<div class="math-paragraph">
$$\frac{\Gamma \vdash e : \bot}{\mathtt{absurd}\ e : A}$$
</div>
We're almost there. What we need now is a type former that serves as a solution for equations of the form $v = ... v ...$. That's right: we're just _inventing_ a solution to this class of equations---maths!
These are the _equirecursive_ types, $\mu a. \tau$. The important part here is _equi_: these types are entirely indistinguishable from their unrollings. Formally, we extend the set of type formers with type variables $a$ and $\mu$-types $\mu a. \tau$, where $\mu a$ acts as a binder for $a$.
Since we invented $\mu$ types as a solution for equations of the form $a = \tau$, we have that $\mu a. \tau = \tau[\mu a.\tau/a]$, where $\tau[\sigma{}/a]$ means "substitute $\sigma{}$ everywhere $a$ occurs in $\tau$". The typing rules express this identity, saying that anywhere a term might have one as a type, the other works too:
<div class="math-paragraph">
<div>
$$\frac{\Gamma \vdash e : \tau[\mu a.\tau / a]}{\Gamma \vdash e : \mu a. \tau}$$
</div>
<div>
$$\frac{\Gamma \vdash e : \mu a.\tau}{\Gamma \vdash e : \tau[\mu a. \tau / a]}$$
</div>
</div>
Adding these rules, along with the one for eliminating $\bot$, to the $\lambda_{\to}$ calculus nets us the system $\lambda_{\text{oh no}}$. With it, one can finally formulate a representation for our $L$-sentence: it's $\mu a. a \to \bot$.
There exists a closed term of this type, namely $\lambda k. k\ k$, which means: The "this sentence is false"-sentence is true. We can check this fact ourselves, or, more likely, use a type checker that supports equirecursive types. For example, OCaml with the `-rectypes` compiler option does.
We'll first define the empty type `void` and the type corresponding to $L$:
<div class="math-paragraph">
~~~~{.ocaml}
type void = | ;;
type l = ('a -> void) as 'a ;;
~~~~
</div>
Now we can define our proof of $L$, called `yesl`, and check that it has the expected type:
<div class="math-paragraph">
~~~~{.ocaml}
let yesl: l = fun k -> k k ;;
~~~~
</div>
However. This same function is also a proof that... $\neg L$. Check it out:
<div class="math-paragraph">
~~~~{.ocaml}
let notl (x : l) : void = x x ;;
~~~~
</div>
# I am Bertrand Russell
Bertrand Russell (anecdotally) once proved, starting from $1 = 0$, that he was the Pope. I am also the Pope, as it turns out, since I have on hand a proof that $L$ and $\neg L$, in violation of non-contradiction; By transitivity, I am Bertrand Russell. <span style="float: right; display: inline-block;"> $\blacksquare$ </span>
Alright, maybe I'm not Russell (drat). But I am, however, a trickster. I tricked you! You thought that this post was going to be about a self-referential sentence, but it was actually about typed programming language design (not very shocking, I know). It's a demonstration of how recursive types (in any form) are logically inconsistent, and of how equirecursive types _are wrong_.
The logical inconsistency, we all deal with, on a daily basis. It comes with Turing completeness, and it annoys me to no end every single time I accidentally do `let x = ... x ...`{.haskell}. I _really_ wish I had a practical, total functional programming language to use for my day-to-day programming, and this non-termination _everywhere_ is a great big blotch on Haskell's claim of purity.
The kind of recursive types you get in Haskell is _fine_. They're not _great_ if you like the propositions-as-types interpretation, since it's trivial to derive a contradiction from them, but they're good enough for programming that implementing a positivity checker to ensure your definitions are strictly inductive isn't generally worth the effort.
Unless your language claims to have "zero runtime errors", in which case, if you implement isorecursive types instead of inductive types, you are _wrong_. See: Elm. God damn it.
<details>
<summary>So much for "no runtime errors"... I guess spinning forever on the client side is acceptable.</summary>
<div class="flex-list">
```elm
-- Elm
type Void = Void Void
type Omega = Omega (Omega -> Void)
yesl : Omega
yesl = Omega (\(Omega x) -> x (Omega x))
notl : Omega -> Void
notl (Omega x) = x (Omega x)
```
</div>
</details>
Equirecursive types, however, are a totally different beast. They are _basically_ useless. Sure, you might not have to write a couple of constructors, here and there... at the cost of _dramatically_ increasing the set of incorrect programs that your type system accepts. Suddenly, typos will compile fine, and your program will just explode at runtime (more likely: fail to terminate). Isn't this what type systems are meant to prevent?
Thankfully, very few languages implement equirecursive types. OCaml is the only one I know of, and it's gated behind a compiler flag. However, that's a footgun that should _not_ be there.
**EDIT** (April 14th, 2021) It's been pointed out to me that you can get equirecursive types in OCaml even without passing `-rectypes` to the compiler. I am not an OCaml expert, so I encourage you to see [here](https://gist.github.com/drvink/a0094680aaae2569951ea4601752944d) for more details.
[Curry-Howard isomorphism]: https://en.wikipedia.org/wiki/Curry%E2%80%93Howard_correspondence

+ 607
- 0
pages/posts/2021-03-07-cubical.md View File

@ -0,0 +1,607 @@
---
title: Cubical Type Theory
date: March 7th, 2021
---
Hello, everyone! It's been a while, hasn't it? Somehow, after every post, I manage to convince myself that I'm gonna be better and not let a whole season go by between posts, but it never happens. For the last two posts I've been going on at length about fancy type theories, and this post, as the title implies, is no exception. In fact, two posts ago I mentioned, offhand, cubical type theory as a possibility for realising HoTT in a constructive way, but 128 days ago I did not understand cubical type theory in the slightest.
Now, however, I do! I still don't know what the hell the word "fibration" is supposed to mean, or indeed "fibrant", but we can gloss over that and present cubical type theory with as little category-theoretical jargon as possible. In fact, I have a mostly[^1]-complete implementation of cubical type theory for us to use as a discussion framework.
[^1]: At the time of writing, in the very early AM between Saturday and Sunday, the only thing missing is the implementation of composition for higher inductive types. However, this is mostly because I'm hella bored of writing code and wanted to write words instead. This way I can have more fun!
As mentioned in [Reflections on Equality], the main idea of Cubical Type Theory is the type of paths, so let's talk about that at length.
[Reflections on Equality]: /posts/reflections-on-equality.html
Paths
-----
Even in boring old Martin-Löf type theory, as soon as we have equalities and induction, we can prove a very interesting theorem: Every function preserves paths. This is actually a simplification of a more fundamental fact in MLTT, its groupoid structure, in which funct*ions* are interpreted as funct*ors*. Like a category-theoretical functor has an action on objects and an action on morphisms, a type-theoretical function has an action on values and an **a**ction on **p**aths.
Using path induction, we can prove it (roughly) like this. Suppose (given a $f : A \to B$), there is a path $p : x \equiv_A y$. By induction, we may assume $y$ is $x$ and $p$ is $\mathrm{refl}$, in which case what we need to prove is $f(x) \equiv_{B} f(x)$. But this is what $\mathrm{refl}$ states. This isn't a complicated proof, because it's not a complicated theorem: the images of equal elements are equal, big deal.
This is where things get a little mind-bending. What would happen if we had a type with "two" values, with a path between them? The values of the function at either end could be different, but they would still be... equal. This is the main idea of cubical type theory: We add an interval type, $\mathbb{I}$, which denotes the interval _object_ $[0,1]$ in our model. Then we can drop the inductive definition of equalities as generated by $\mathrm{refl}$ and simply _define_ equalities in $A$ as functions $\mathbb{I} \to A$.
Let's not get ahead of ourselves, though, and talk a bit more about the interval type. It has two elements, $i0$ and $i1$, but it's not isomorphic to the type of booleans: Internally to the type theory, we have no way of distinguishing $i0$ and $i1$, since every function must be continuous.
Since it denotes $[0,1]$, we can define lattice operations on elements of the interval, enough to equip it with the structure of a _De Morgan algebra_, but not a boolean algebra. We have meets, $a \land b$, the logical operation of "and", interpreted as $\min(a,b)$; Joins, $a \lor b$, the logical operation of "or", interpreted as $\max(a,b)$; And an involution, $\neg a$, which denotes the algebraic operation $1 - a$.[^2]
[^2]: If you, like me, are always confused by why $a \land b$ is min and $a \lor b$ is max, check out these Desmos links: [min] and [max]. Keep these in mind the next time you're confused :)
[min]: https://www.desmos.com/calculator/8qkr6deosy
[max]: https://www.desmos.com/calculator/gohdyaehna
These operations follow the usual laws of Boolean logic save for two: In general, $\min(x, 1 - x)$ is not $0$ and $\max(x, 1 - x)$ is not $1$; these equations hold only at the endpoints. While internally to the type theory we have no element representing "half", the object which $\mathbb{I}$ denotes _does_ have these filler points, so we can't in general expect those equations to hold. Hence, De Morgan algebra, not Boolean.
Another thing to keep in mind is that, while the interval is an expression which other expressions have as a type (namely, $\Gamma \vdash i0 : \mathbb{I}$ and $\Gamma \vdash i1 : \mathbb{I}$), we do not call it a type. We reserve the word type for objects with more structure (which we will discuss later). For now, it's enough to think of the interval as a "pre"type, something which is almost, but not quite, a type. Cubical type theory has plenty of these pretypes so we include a separate universe $\mathscr{U}_{\omega}$ to classify them.
Now that we're familiar with the interval, we can discuss the actual title of this section, paths. We define the type of paths in $A$ as a refinement of the function space $f : \mathbb{I} \to A$, where the values of $f(i0)$ and $f(i1)$ are indicated in the type. Hence the formation rule, on the left:
<div class="mathpar">
$$\frac{\Gamma, i : \mathbb{I} \vdash e : A}{\Gamma \vdash \lambda i. e : \mathrm{Path}\ A\ e[i0/i]\ e[i1/i]}$$
$$\frac{\Gamma \vdash p : \mathrm{Path}\ A\ x\ y\quad \Gamma \vdash i : \mathbb{I}}{\Gamma \vdash p(i) : A}$$
</div>
On the right is the elimination rule, which says that if we have an element of the interval we can project the value the path takes at that point. Alternatively we could represent paths by the type with an inclusion $\mathrm{inP} : \prod_{(f : \mathbb{I} \to A)} \mathrm{Path}\ A\ f(i0)\ f(i1)$ and projection $\mathrm{outP} : \mathrm{Path}\ A\ x\ y \to \mathbb{I} \to A$. Furthermore, we impose a pair of "regularity" equations, which state that $p(i0) = x$ and $p(i1) = y$ for paths $p : \mathrm{Path}\ A\ x\ y$.
One important difference between functions out of the interval and paths is that, while the former would be put in the universe $\mathscr{U}_{\omega}$ by virtue of its domain being a pretype, paths *do* have the required additional structure to be in the universe $\mathscr{U}$ of "proper types", as long as the type $A$ of the endpoints does.
Using the algebraic structure of the interval we can define some operations on paths, which we may represent diagrammatically. For simplicity, paths will be drawn as direct lines between their endpoints, and the type will be left to be inferred from the context; A path whose bound variable is $i$ will be drawn in the left-to-right direction, and a path whose bound variable is $j$ will be drawn in the upwards direction.
Since bound interval variables _are_ variables, they have all the same structural rules as normal variables! In particular, weakening lets us drop an interval variable to have a constant path. This is a proof of reflexivity, which we diagram as follows:
<figure>
<img src="/diagrams/ctt/refl_tt.svg" alt="reflexivity" style="min-width: 250px;" />
<figcaption>The reflexivity path for $a$ is represented by a constant path.</figcaption>
</figure>
Given a path $p$ with endpoints $x$ and $y$ (concisely written as $p : x \equiv y$) we compute its inversion, $\mathrm{sym}(p) : y \equiv x$, by "precomposition" with the interval involution:
<figure>
<img src="/diagrams/ctt/pi_vs_pnoti_1.svg" alt="p from a to b" style="min-width: 250px;" />
<img src="/diagrams/ctt/pi_vs_pnoti_2.svg" alt="the inverse of p from b to a" style="min-width: 250px;" />
<figcaption>By inverting the interval argument, we can invert paths.</figcaption>
</figure>
The meet and join operations on the interval let us define two kinds of squares called _connections_, which let us concisely turn a one-dimensional _path_ into a two-dimensional _square_, which gives us paths between paths (paths in the second dimension). The connection generated by $i \land j$ is going to be especially helpful in a bit, when we prove that singletons are contractible, and hence that paths are a suitable definition of equality.
<div class="mathpar">
<figure style="width: 50%;">
<img src="/diagrams/ctt/land_connection.svg" alt="and connection" style="min-width: 250px;" />
<figcaption>The square generated by $\lambda i\ j. p(i \land j)$</figcaption>
</figure>
<figure style="width: 50%;">
<img src="/diagrams/ctt/lor_connection.svg" alt="or connection" style="min-width: 250px;" />
<figcaption>The square generated by $\lambda i\ j. p(i \lor j)$</figcaption>
</figure>
</div>
Let's walk through the construction of the left square, keeping in mind that $i$ goes right and $j$ goes up. Since the top and bottom faces vary in the $i$ direction but not the $j$ direction, they'll all have a prefixed $\lambda i$; The left and right faces just correspond to applying the outermost lambda inside the square. For the faces, we have:
- Left: $(\lambda i\ j. p(i \land j))\ i0$, which reduces to $\lambda j. p(i0)$, is the constant path at $a$;
- Top: $\lambda i. (\lambda i\ j. p(i \land j))\ i\ i1$, which reduces to $\lambda i. p(i)$, is the path $p$;
- Bottom: $\lambda i. (\lambda i\ j. p(i \land j))\ i\ i0$, which reduces to $\lambda i. p(i0)$. Again, $\mathrm{refl}_a$.
- Right: $(\lambda i\ j. p(i \land j))\ i1$, which reduces to $\lambda j. p(j)$--- you guessed it, it's $p$;
You can see that in either the $i$ or $j$ direction the inside of this square connects the path $p$ with the constant path at its left endpoint. This is exactly what we need for the following proof that singletons are contractible:
```
singContr : {A : Type} {a : A} -> isContr (Singl A a)
singContr {A} {a} = ((a, \i -> a), \y i -> (y.2 i, \j -> y.2 (iand i j)))
```
This proof is written syntactically, in the language of [cubical](https://git.abby.how/abby/cubical). This proof appears on [line 114] of the massive source file which has everything I've tried to prove with this so far. What's a module system? The actual proof file has some preliminaries which would be interesting if you care about how cubical type theory is actually implemented.
Another operation on equalities which is very hard in MLTT, but trivial with cubes, is function extensionality. You can see why this would be simple if you consider that a pointwise equality between functions would be an element of $A \to \mathbb{I} \to B$, while an equality between functions themselves is an element of $\mathbb{I} \to A \to B$. By simply swapping the binders, we get the naive function extensionality.
The proof of full function extensionality as per the HoTT book is also very simple, but it requires quite a bit more infrastructure to talk about; For now, rather than saying `happly` (line 667) is an equivalence, we can simply say that `happly` has `funext` as right and left inverses, and the proof is trivial in both directions ([line 675]).
With the infrastructure so far we can't prove a whole lot, though. For instance, we have proved that singletons are contractible, but this doesn't freely get us axiom J; Neither can we prove that every property respects equalities, or anything like that. For that sort of proof, we need to introduce a transport operation, which, given a path of types and an element of its left endpoint, returns an element of the right endpoint. However, cubical type theory refuses to be simple.
Quick sidenote, path application corresponds to the eliminator for $\mathbb{I}$, since it conceptually has the type in the box below. We use here the type of _dependent paths_, PathP.
```
iElim : {A : I -> Type} {x : A i0} {y : A i1} -> PathP A x y
-> (i : I) -> A i
iElim p i = p i
```
Simplicity is disallowed
------------------------
While providing a primitive $\mathrm{transp} : \prod_{(A : \mathbb{I} \to \mathscr{U})} A(i0) \to A(i1)$ might seem like all we need to make paths a sensible notion of equality, reality is not that simple. In particular, transport on paths is hard to define with such an operation, so, as is tradition in type theory, we make things simpler by making them more general. Rather than providing a primitive transport, we provide a primitive _composition_ operation, which generalises transport and composition of paths.
Composition expresses the funny-sounding principle that "every open box has a lid". No, that is not a joke; That's actually what we're talking about. A description in (almost!) English would be to say that composition, given a shape, a partial cube of that shape, and a face (which must agree with the partial cube), returns the opposite face. If you think that description is nonsensical, strap in, because interpreting it type-theoretically requires another 67 lines of definitions in the code! For reference, the almost 2000 words which precede this paragraph covered roughly 20 lines of actual code.
Crying over what I still have to write won't help me get this blog post out any sooner though, so let's get to it.
### Cofibrations
Again that god damn word. In addition to the interval object, to define a cubical model of type theory, we need a notion of _cofibration_, which is a fancy way of saying "shape of a partial cube". In the papers which introduced cubical type theory, they use a "face lattice", $\mathbb{F}$. However, this turns out to be needlessly complicated, as we can get this structure from the interval object.
To each element $\phi : \mathbb{I}$ (referred to as a _formula_) we assign a _cofibrant proposition_ $[\phi] : \mathscr{U}_{\omega}$[^3] which is inhabited when $\phi = i1$. In the code, we write `IsOne phi` for $[\phi]$ and it is inhabited by a distinguished element `itIs1 : IsOne i1`. This family of types is *definitionally proof-irrelevant*, which means that any two inhabitants of `IsOne phi` are equal.
<details>
<summary>A note on terminology</summary>
Throughout the rest of this post I'll refer to elements of the interval as either "endpoints" or "formulas" depending on how they're used. These aren't technical terms, and are meant to be just indicative. The convention is roughly that, if $i : \mathbb{I}$ is used as the argument to a path, or to a filler, or it's the bound variable in a composition (or etc), it's called an _endpoint_; If it's used to denote a restriction (i.e., there might reasonably be an element of $[\phi]$ in the context), it's called a _formula_.
Also I apologise for the garbled terminology (or even ambiguity) when talking about $[\phi]$ vs $\phi$, since both can reasonably be called formulas.
</details>
[^3]: As a funny sidenote, the object in a category (if it exists) which corresponds to the type-theoretical universe of propositions is called the _subobject classifier_, written $\Omega$. So $[]$ is a family of maps $\mathbb{I} \to \Omega_{\omega}$. If only we could fit another $\Omega$ in there...
We can interpret these propositions as being _shapes of partial cubes_. For instance, the proposition $[i \lor \neg i]$ (for $i : \mathbb{I}$) represents a "line" which is defined when $i = i0$ or $i = i1$, but not in the middle; This isn't a line as much as it is a pair of points.
Thinking back to the "human-readable" description of the composition operation, the proposition $\phi$ specifies the _shape_ of the open box, but not the box itself.
### Partial Elements
We call a function $f : [\phi] \to A$ a _partial element_ of $A$, that is, an element of $A$ which is only defined when $[\phi]$ is inhabited. For these we have a special pattern-matching notation, termed a _system_, which is written between brackets.
```
partialBool : (i : I) -> Partial (ior i (inot i)) Bool
partialBool = \i [ (i = i0) -> false, (i = i1) -> true ]
```
The element `partialBool` above is a boolean with different values when `i = i0` or `i = i1`. However, this does not lead to a contradiction, because to extract the underlying bool we need to apply `partialBool` not only to an element of the interval, but also to an inhabitant of `IsOne (ior i (inot i))`. This is why it's critical that the type checker distinguishes between $i \lor \neg i$ and $i1$!
As another implementation note, the type `Partial phi A` is a version of `IsOne phi -> A` with a more extensional equality. Two elements of `Partial phi A` are equal when they represent the same subcube, i.e., they take equal values for every assignment of variables which makes `phi = i1`.
Furthermore, there is a _dependent_ version of `Partial`{.kw}, `PartialP`{.kw}[^4], which allows the type `A` itself to be a partial element of $\mathscr{U}$. This will be used later when we introduce the glueing operation.
In the composition operation, the partial element with shape $\phi$ specifies the open box itself.
[^4]: By convention we call the dependent versions of cubical primitives their name suffixed with a big P. `PathP`, `PartialP`, etc. Don't ask me why.
### Extensibility
Given a type $A$ and a partial element $u : [\phi] \to A$, we can define the type of elements $a : A$ which _extend_ $u$. These are _total_ elements, in that their existence does not depend on the inhabitation of $[\phi]$ (for any $\phi$). To say they extend $u$ is to say that, given $[\phi]$, we have that $u(\mathtt{itIs1}) = a$. In the theory, where we have all the fancy symbols, we write $A[\phi \to u]$ for the type of extensions of $u$, but in the code, where we're limited to boring ASCII, we just write `Sub A phi u`.
We can make any total element `u : A` into a partial element, with any formula that we want, by ignoring the proof. The constructor `inS` for the `Sub`-types expresses that this partial element agrees with `u` on any `phi` that we choose.
```
inS : {A : Type} {phi : I} (u : A) -> Sub A phi (\x -> u)
```
We also have a projection operation for `Sub` types, which undoes `inS`. Furthermore, `outS {A} {i1} {u} x` computes to `u i1 itIs1`, since `x` agrees with `u` on `phi`.
```
outS : {A : Type} {phi : I} {u : Partial phi A} -> Sub A phi u -> A
```
With the idea of a cubical `Sub`{.kw}type we can express the type of the fourth argument of the composition operation, the "bottom" face of an open box which _agrees with_ (extends!) the partial element specifying the sides.
## Composition
As stated before, the composition operation takes as input the description of an open cube with a face removed and computes that missing face. However this is not a helpful definition if we do not yet have intuition for what "cubes with missing faces" look like! So before explaining the computational behaviour of the composition operation (which is... quite something), let me show you some examples.
Before we get to the examples, for reference, this is the type of the composition operation, written out in syntax:
```
comp : (A : I -> Type) {phi : I} (u : (i : I) -> Partial phi (A i))
-> (a0 : Sub (A i0) phi (u i0))
-> A i1
```
A trivial use of composition is one where we take the formula $\phi = i0$, that is, the partial cube specifying the sides is defined _nowhere_. In this case we may illustrate the input face of the composition operation as agreeing with... nothing.
<figure>
<img src="/diagrams/ctt/transp.svg" alt="transport, illustrated" style="min-width: 150px;" />
<figcaption>The arguments to `comp A {i0} (\k [])`, illustrated.</figcaption>
</figure>
That's right, in the case where the formula is always false and the partial cube is empty, the input of the composition operation is just a point `a0 : A i0`, the left endpoint of a path. And by looking at the type of the composition operation, or thinking about its description, you can see where this is going! We give it `a0 : A i0`, and it gives us an element `comp A {i0} (\k []) a0 : A i1`!
That's right, by ignoring the extra power which the composition operation gives us over boring transport, we get back boring transport. Not too surprising, let's keep going.
For an example which illustrates composition with a cube, suppose we have three points, $x$, $y$, and $z$, all in some type $A$. Furthermore suppose that we have paths $p : x \equiv y$ and $q : y \equiv z$. By the transitive property of equality, we know there should be a path between $x$ and $z$. Furthermore, we know that transporting along this composite should be equivalent to transporting along $p$ then along $q$. But how can we, using cubical methods, build the composite of $p$ and $q$?
If you guessed the answer was "using composition", you... don't get a lot of extra points. It was heavily implied. But you can still have a cookie, since I suppose it can't be helped. To create this composite we need to draw a square with 3 lines, such that the missing line connects $x$ and $z$. Furthermore, the requirement that transporting along the composite transports along both constituent paths will guide us in creating this drawing. We only have two paths, though!
<figure>
<img src="/diagrams/ctt/comp_path.svg" alt="composition of paths" style="min-width: 230px;">
<figcaption>The lid of this square gives us the composite $q \circ p$ of $p$ and $q$.</figcaption>
</figure>
Turns out that only having two paths is not an issue, since we can always take the reflexivity path to get the side we didn't have. To make it clearer, the partial element $u : (j : \mathbb{I}) \to \mathrm{Partial}\ (\neg i \lor i)\ A$ is the tube with sides $x$ and $q(j)$, and the input $p(i) : A$ is the bottom side. These agree because when $j$ (the direction of composition) is $i0$ (the base), $u$ has left endpoint $x$ and right endpoint $y$; a path between these is exactly what $p(i)$ ($i$ is the direction of the path) is.
```
trans : {A : Type} {x : A} {y : A} {z : A}
-> Path x y
-> Path y z
-> Path x z
trans {A} {x} p q i =
comp (\i -> A)
{ior i (inot i)}
(\j [ (i = i0) -> x, (i = i1) -> q j ])
(inS (p i))
```
This expression is a syntactic representation of the composition drawn above; The dotted line in that diagram is the result of the composition operation.
## Cubical Complication 2: Computing Compositions
It doesn't suffice to describe the composition operation in types, we also need to describe how it computes when applied to enough arguments. The composition operation reduces to a canonical element of the type $A(i1)$ based on the structure of the function $A : \mathbb{I} \to \mathscr{U}$, by cases. For example, when $A$ computes to a function type, the composition will evaluate to a lambda expression; When $A$ is a $\sum$-type, it computes to a pair, etc.
Before we get started, one thing to note is that, since we have the $i \land j$ operation on elements of the interval, the composition operation can compute not only missing faces, but the missing _inside_ of a cube, which we call its filler. For instance, the filler `fill A {i0} (\k []) a0 i` connects `a0` and `comp A {i0} (\k []) a0` in the `i` direction, since it is the 1-dimensional cube (path) between the given and missing faces.
```
fill : (A : I -> Type) {phi : I}
(u : (i : I) -> Partial phi (A i))
(a0 : Sub (A i0) phi (u i0))
-> (i : I) -> A i
fill A {phi} u a0 i =
comp (\j -> A (iand i j))
{ior phi (inot i)}
(\j [ (phi = i1) as p -> u (iand i j) p, (i = i0) -> outS a0 ])
(inS (outS a0))
```
Fillers will be fundamental in reducing compositions in dependent types, including pairs, functions, and general inductive types.
### Simple types
A good place to start is composition for inductive types without parameters, since that is trivial. For instance, any composition in the booleans just evaluates to its argument. This is also the case for many other types: the natural numbers, the integers, the rational numbers, etc.
$$\mathrm{comp}\ (\lambda i. \mathrm{Bool})\ [\phi \to u]\ a0 = a0$$
For parametrised types like lists, we need to explain composition by recursion. In the `nil` case it's trivial, we can just return `nil`. In the `cons` case, though, we need to recursively apply composition in the head and the tail, to end up with a list of the right type, agreeing with the right faces.
$$
\mathrm{comp}\ (\lambda i. \mathrm{List}(A))\ [\phi \to \mathtt{cons}\ x\ xs]\ (\mathtt{cons}\ a\ as) =\\
\mathtt{cons}\ (\mathrm{comp}\ (\lambda i. A) [\phi \to x]\ a) (\mathrm{comp}\ (\lambda i. \mathrm{List}(A)) [\phi \to xs]\ as)$$
### Dependent functions
Starting with the full reduction rule for composition in functions would be a lot, so I'll build it up incrementally. First, I'll explain transport in simple functions. Then, transport in dependent functions. After I've explained those two we can add back the sides to get the full composition for functions.
So, consider for starters transport in a line of $A \to B$, where both are functions $\mathbb{I} \to \mathscr{U}$. We're given a function $f : A(i0) \to B(i0)$ and want to compute a function $f : A(i1) \to B(i1)$. Start by introducing a $\lambda$ abstraction binding a single variable $x : A(i1)$, under which we'll work.
Since to get _any_ sort of element of $B$ we need to apply $f$, we must first transport $x$ to get an element of $A(i0)$, to be the argument of $f$. The line $\lambda i. A(\neg i)$ connects $A(i1)$ and $A(i0)$, so that's what we transport over. Take $x' = \mathrm{comp}\ (\lambda i. A (\neg i))\ (\lambda k [])\ x$.
The application $f\ x'$ has type $B(i0)$, and we need to transport that to an element of $B(i1)$. Again we invoke the trivial composition to get $y = \mathrm{comp}\ B\ (\lambda k [])\ (f\ x')$. Since we have computed an element of $B(i1)$, we're done; thus, we can define the composition as $\mathrm{comp}\ (\lambda i. A \to B)\ (\lambda k [])\ f = \lambda x. y$.
To see the details of how composition generalises to dependent functions, consult the [appendix](#appendix), since it's a bit verbose to be here.
### Dependent pairs
The composition for pairs is what you'd expect. We have to transport the first element of the pair, and use a filler when transporting the second element to make sure the endpoints line up. Again, the details are in the [appendix](#appendix) if knowing more about composition strikes your fancy, but it's not too necessary to follow the proofs.
To be concise here, a simple equation that should clarify the behaviour of transport on pairs is the simply-typed definition of transport:
$$
\mathrm{transp}\ (\lambda i. A \times B)\ (x, y) =\\
(\mathrm{transp}\ (\lambda i. A)\ x, \mathrm{transp}\ (\lambda i. B)\ y)
$$
### Paths
In the case of paths, composition is composition. We're given a path $p0 : Path\ A\ u\ v$, where all of $A$, $u$ and $v$ can depend on a variable $i : \mathbb{I}$, which is the direction of composition. Furthermore we have a family of partial paths $p$ with which $p0$ agrees, and with which the result must also agree.
We start by assuming the existence of a dimension $j : \mathbb{I}$, which will be bound later. When $j = i0$, the resulting composition has to have value $u(i)$, and when $j = i1$, the result must be $v(i)$. Furthermore, when $\phi = i1$, the result must have the same value as $p(j)$. We can package these constraints straightforwardly in the partial element $[ \phi \to p(j), (j = i0) \to u, (j = i1) \to v ]$, again abusing notation for the applications of $u(i)$ and $v(i)$.
$$\mathrm{comp}\ (\lambda i. \mathrm{Path}\ A(i)\ u(i)\ v(i))\ [\phi \to p]\ p0 =\\
\lambda j. \mathrm{comp}\ A\ [ \phi \to p(j), (j = i0) \to u, (j = i1) \to v ]\ (p0(j))$$
### A note on naming: Pretypes
All of the types we explained composition for above are, well, types. In cubical type theory, or at least in this presentation, we reserve the word _type_ for those objects which have a composition structure. The ones which _don't_ have a composition structure are called pretypes.
Alternatively we could call the types for which we have composition the _fibrant_ types, since they have a fibrancy structure, as in the [CHM paper](https://arxiv.org/abs/1802.01170): They have a transport structure and a homogeneous composition structure, with which we can assemble a composition structure as above.
All of the type formers inherited from MLTT ($\prod$ and $\sum$), the path types, and every inductive and higher inductive type made out of types are fibrant, leaving only the cubical primitives (the interval, partial elements, and cubical subtypes) as pretypes. However, we could consider an extension of type theory where both sorts are given equal importance: This would be a two-level type theory, a realisation of Voevodsky's Homotopy Type System.
## Auxiliary Definitions
In this section we're going to talk about a handful of operations, which can be defined in terms of what we have so far, which will be used in discussing the $\mathrm{Glue}$ types, which are used in interpreting the univalence axiom. In contrast to [the CCHM paper](https://arxiv.org/abs/1611.02108), I'll only talk about the notions which are mandatory for defining the glueing operation. Composition for glue is very complex, and needlessly detailed for the purposes of this post.
### Contractible Types
We define a type $A$ to be _contractible_ if, and only if, there exists an element $x : A$ (called the centre of contraction) to which all other elements $y : A$ are Path-equal. Cubically, we can give an alternative formulation of contractibility: $A$ is contractible iff. every partial element $u : \mathrm{Partial}\ \phi\ A$ is extensible.
Let $p$ be the proof that $A$ is contractible, a pair containing the centre of contraction and the proof that any element of the type is equal to the centre. We define $\mathrm{contr}\ [\phi \to u] = \mathrm{comp}\ (\lambda i. A)\ [\phi \to (p.2\ u)(i)]\ p.1$.
Conversely, if we have an extension for any partial element, we can prove that type is contractible in the typical sense: Take the centre of contraction to be $\mathrm{contr}\ []$ and the proof that any $y$ is equal to that is given by extending the partial element $[ (i = i0) \to \mathrm{contr}\ [], (i = i1) \to y]$.
As an example of contractible types, we have already seen `Singl A a`, the type of "elements of A equal to a". This has a centre at `(a, refl)`, which can be proven by a connection. The unit (or top) type is also contractible, having `tt` as a centre, which can be proven by induction. It can be proven that any contractible type is equivalent to the unit type, making all of them maximally uninteresting.
### Equivalences
Since we have the univalence axiom, it is important for soundness that we define a notion of equivalence for which "being an equivalence" is a mere proposition: Either a function is an equivalence, or it isn't. We choose one which is cubically convenient, namely that of "contractible fibers".
The fiber of a function $f : A \to B$ at a point $y : B$ is a pair of an input $x : A$ together with a proof that $f(x) \equiv y$. We define $f$ to be an equivalence if for every element $y : B$, the fiber $\mathrm{fiber}\ f\ y$ is contractible. That means that, for every element in the range, there is a corresponding element in the domain, and this element is unique.
Using this notion of equivalence we can prove that every equivalence has an inverse, by taking the first element of the centre of contraction for every fiber:
```
inverse : {A : Type} {B : Type} {f : A -> B} -> isEquiv f -> B -> A
inverse eqv y = (eqv y) .1 .1
```
Furthermore, this function is an actual inverse:
```
section : {A : Type} {B : Type} (f : A -> B) (eqv : isEquiv f)
-> Path (\x -> f (inverse eqv x)) id
section f eqv i y = (eqv y) .1 .2 i
```
We can also formulate the requirement that a function has contractible fibers cubically: A function is an equivalence iff every one of its partial fibers is extensible.
## Glueing & Univalence
Since I like quoting the impenetrable definitions of the paper, glueing expresses that "extensibility is invariant under equivalence". Concretely, though, it's better to think that the $\mathrm{Glue}$ operation "glues" together a partial type $T$ onto a total type $A$ (which we call the base) to get a total type which extends $T$. We can't do this freely, though, so we require an extra datum: A (partial) equivalence between $T$ and $A$.
```
Glue : (A : Type) {phi : I} -> Partial phi ((T : Type) * Equiv T A) -> Type
```
The type $\mathrm{Glue}\ A\ [\phi \to (T, f)]$ extends $T$ in the sense that, when $\phi = i1$, $\mathrm{Glue}\ A\ [\phi \to (T, f)] = T$.
The "user-friendly" typing rule for Glue is as presented above. Internally we separate the type $T$ from the equivalences $f$ to make defining composition in Glue simpler. These types come with a constructor, $\mathrm{glue}$, which says that, given an inhabitant $t : \mathrm{PartialP}\ \phi\ T$, and a total element $a : A$ which extends the image of $f\ t$ (the equivalence), we can make an inhabitant of $\mathrm{Glue}\ A\ [\phi \to (T, f)]$.
Conversely there is a projection, $\mathrm{unglue}$, which extracts a value of $A$ from a value of $\mathrm{Glue}\ A\ [\phi \to (T, f)]$. When applied to an element constructed with $\mathrm{glue}$, unglueing simply extracts it; When applied to a neutral value, as long as $\phi = i1$, the value of the glued type will be a value of $T$, and the equivalence is defined; We can then apply the equivalence to get a value of $A$.
Using the boundary conditions for $\mathrm{Glue}$ we can define, from any equivalence $A \simeq B$, a path $A \equiv B$.
```
univalence : {A : Type} {B : Type} -> Equiv A B -> Path A B
univalence {A} {B} equiv i =
Glue B (\[ (i = i0) -> (A, equiv),
(i = i1) -> (B, the B, idEquiv {B}) ])
```
For the proof that transporting along this path has the effect of applying the equivalence, I'll need to handwave some stuff about the behaviour of transport in $\mathrm{Glue}$. First, we can illustrate the Glue done above as the dotted line in the square below:
<figure>
<img src="/diagrams/ctt/univalence.svg" alt="univalence" style="min-width: 250px;" />
<figcaption>This square represents the glueing used for univalence. The left and right sides are equivalences.</figcaption>
</figure>
How would one go about transporting an element across the dotted line there? Well, I have a three-step program, which, since we're talking about squares, has to be rounded up to a neat 4. Suppose we have an element $x : A$ which we want to turn into an inhabitant of $B$.
- First, we can apply the equivalence $\mathrm{equiv}$ to $x$, getting us an element $\mathrm{equiv}.1\ x : B$. In the ideal world we'd be done here, but, in a more general case, we still have to do the other three filled-in lines.
- We transport $\mathrm{equiv}.1\ x$ along the path $\lambda i. B$ to get an element $\mathrm{comp}\ (\lambda i. B)\ (\lambda i [])\ (\mathrm{equiv}.1\ x) : B$
- Finally we can apply the inverse of the identity equivalence (which is, again, the identity) which does not alter what we've done so far.
We'd be done here, but since transport is a special case of composition, we need to compose along the line $\lambda i. B$ with the faces of the overall composition to get a _proper_ element of the type $B$. Of course, in this case, the faces are trivial and the system is empty, but we still have to do it.
To construct a $\mathrm{Path}\ (\mathrm{transp}\ (\lambda i. \mathrm{univalence}\ f\ i))\ f.1$, there is a bit of cubical trickery which needs to be done. This proof is commented in the repository [here], so I recommend you read it there for the details. The short of it is that $\mathrm{univalence}$ plus this path, which we call $\mathrm{univalence}\beta$, implies the full univalence axiom, namely that $(A \simeq B) \simeq (A \equiv B)$.
### Proofs using univalence
With univalence, and a proof that isomorphisms give rise to equivalences, we can get to proving some stuff about types! That's exciting, right? I'm excited. The proof that isomorphisms give rise to equivalences is, uh, very complicated, so I won't explain it here. Full disclosure, it seems like this proof is a bit of folklore: I got it from the [cubicaltt repo], and I think the version in [Cubical Agda]'s base library is the same!
[cubicaltt repo]: https://github.com/mortberg/cubicaltt/blob/a331f1d355c5d2fc608a59c1cbbf016ea09d6deb/experiments/isoToEquiv.ctt#L7-L63
[Cubical Agda]: https://github.com/agda/cubical/blob/3fbd0eb908474181606977f2a5f58363fceba1db/Cubical/Foundations/Isomorphism.agda#L55-L101
One very simple use of univalence, which doesn't require more fancy types, is proving that the universe $\mathscr{U}$ is not a set, in the sense of HoTT. Recall that a set (or h-set, to be more precise), is a type where any parallel equalities are themselves equal. In a type:
```
isHSet : Type -> Type
isHSet A = {x : A} {y : A} (p : Path x y) (q : Path x y) -> Path p q
```
We are going to prove that any inhabitant of $\mathrm{isHSet}\ \mathscr{U}$ is baloney. For this, we must define the type of booleans, the discrete space with two points:
```
data Bool : Type where
true : Bool
false : Bool
```
First, we can prove that $\mathrm{true} \not\equiv \mathrm{false}$. For this, suppose it were: Given a proof $p : \mathrm{true} \equiv \mathrm{false}$, we can build the path $\lambda i. \mathrm{if}\ p(i)\ \mathrm{then}\ Bool\ \mathrm{else}\ \bot$, which connects $\mathrm{Bool}$ (an arbitrary choice) and $\bot$. Transporting $\mathrm{true}$ (another arbitrary choice) along this path gives us an inhabitant $\mathrm{transp}\ (\lambda i. \dots)\ true : \bot$, which is what we wanted.^[No, there is no reason to use the QED symbol here. It's my blog, though!] <span class="qed">$\blacksquare$</span>
Define the function $\mathrm{not}\ x = \mathrm{if}\ x\ \mathrm{then}\ \mathrm{false}\ \mathrm{else}\ \mathrm{true}$. By induction, one can prove that $\mathrm{not}\ (\mathrm{not}\ x) \equiv x$ for any boolean, and thus $\mathrm{not}$ is its own inverse. Appealing to the fact that isomorphisms are equivalences, and then to univalence, we get a path $\mathrm{notp} : \mathrm{Bool} \equiv \mathrm{Bool}$ such that $\mathrm{transp}\ \mathrm{notp}\ x = \mathrm{not}\ x$.
Now we assume an inhabitant $\mathrm{sure}$ (to be read in a very sarcastic voice) of $\mathrm{isHSet}\ \mathscr{U}$ and derive a contradiction, that is, an inhabitant of $\bot$. The path $\mathrm{sure}\ \mathrm{notp}\ \mathrm{refl}$ connects $\mathrm{notp}$ and $\mathrm{refl}$ in the direction $i$. From this we build the path $\lambda i. \mathrm{transp}\ (\mathrm{sure}\ \mathrm{notp}\ \mathrm{refl})(i)\ \mathrm{false}$, which has as endpoints $true$ and $false$. To see this, compute:
- For $i = i0$, we have $\mathrm{transp}\ \mathrm{notp}\ \mathrm{false} = \mathrm{not}\ \mathrm{false} = \mathrm{true}$.
- For $i = i1$, we have $\mathrm{transp}\ \mathrm{refl}\ \mathrm{false} = \mathrm{false}$.
Applying the proof that $\mathrm{true} \not\equiv \mathrm{false}$ we have a contradiction, which is exactly what we wanted.^[I know, I know, I have to stop. Did you know I had to add the word "exactly" there so the paragraph overflew onto the next line and the QED symbol would show up right? It's terrible!]<span class="qed">$\blacksquare$</span>
"Big deal," I hear you say. "So what, the universe isn't a set?" Well, you're right. This isn't an exciting fact, or an exciting proof. To read. Getting this to go through was incredibly satisfying. But if we want to prove non-trivial facts using univalence, we're going to need a bigger ~~boat~~ universe. Ours doesn't have enough types.
## Higher Induction
To say that our universe $\mathscr{U}$ with its infinitely many types is lacking some is... weird, I'll admit. However, it's missing a lot of them! A countably infinite amount, in fact. While we have all inductive types, we only have the zero-dimensional inductive types, and not the higher inductive types!
I've written about these before a bit in the previous post, about induction. In short, while inductive types allow us to define types with points, higher inductive types let us define types with points and paths. Full disclosure: as of the time of writing, the implementation of HITs in [cubical](https://git.abby.how/abby/cubical) is partial, in that their fibrancy structure is a big `error`. However we can still write some simple proofs involving them.
### The Interval
Wait, didn't we talk about this before? No, no, this is the right interval. We're still on track.
The $\mathrm{Interval}$ is the inductive type freely generated by two constructors, $\mathrm{ii0}$ and $\mathrm{ii1}$, and a path $\mathrm{seg}$ connecting them. Well, that's the theory, but the reality is a bit different. In order to support eliminating (read: pattern matching on) inductive types, we can't simply assume paths exist, even in cubical type theory. What we end up with instead is a constructor parametrised by some interval (that's $\mathbb{I}$) variables, and an attached _boundary_ condition.
In the case of the Interval, we have this definition:
```
data Interval : Type where
ii0 : Interval
ii1 : Interval
seg i : Interval [ (i = i0) -> ii0
, (i = i1) -> ii1
]
```
This says that `seg i0` is definitionally equal to `ii0`, and `seg i1` is definitionally equal to `ii1`. We can get a path connecting them by abstracting over the $i$ variable: $\lambda i. \mathrm{seg}\ i : \mathrm{Path}\ \mathrm{ii0}\ \mathrm{ii1}$. To pattern match on an element of the interval we need three (really, four, but one is details---and automated) things:
- A case for `c0 : P ii0`
- A case for `c1 : P ii1`
- A proof `cseg` which says the cases for `c0` and `c1` agree.
To express the type of `cseg`, we need to power up our path types a bit. Conceptually, just like a $\mathrm{Path}$ is a specialised version of $\mathbb{I} \to A$, we need a _dependent_ path, called $\mathrm{PathP}$, which specialises $\prod{(i : \mathbb{I})} A\ i$, that is, the type of the endpoints is allowed to depend on the interval variable. With that, the type of `cseg` becomes `PathP (\i -> P (seg i)) c0 c1`, since `c0 : P (seg i0)` and `c1 : P (seg i1)`.
As for that fourth thing I mentioned? In addition to preserving each of the constructor data, a map between Interval-algebras needs to be _fibrancy preserving_: Compositions in the domain are mapped to the "appropriate" compositions in the range. In implementations of cubical type theory, this is automatic, since the range has a fibrancy structure (since it is in $\mathscr{U}$), and preserving compositions can be done automatically and uniformly.
Since we already have an interval pretype $\mathbb{I}$, having an interval _type_ isn't too interesting. One thing we can do is prove function extensionality... again... reproducing an argument from the HoTT book.
```
iFunext : {A : Type} {B : A -> Type}
(f : (x : A) -> B x)
(g : (x : A) -> B x)
-> ((x : A) -> Path (f x) (g x)) -> Path f g
iFunext f g p i = h' (seg i) where
h : (x : A) -> Interval -> B x
h x = \case
ii0 -> f x
ii1 -> g x
seg i -> p x i
h' : Interval -> (x : A) -> B x
h' i x = h x i
```
I'm pretty sure that I had reproduced this proof in the previous blog post as well, so you can check there for a more thorough explanation. Let's move on to some more exciting higher inductive types.
### Synthetic Homotopy Theory: $\mathbb{S}^1$
I am not a _homotopy type_ theorist, but I am a homotopy _type theorist_, which means I am qualified to prove some facts about spaces. A particularly simple space, which is nonetheless non trivial, is the circle, $\mathbb{S}^1$, the type freely generated by a point and a loop.
```
data S1 : Type where
base : S1
loop i : S1 [ (i = i1) -> base, (i = i0) -> base ]
```
We can illustrate this type like this:
<figure>
<img src="/diagrams/ctt/circle.svg" alt="The circle" style="min-width: 150px;" />
<figcaption>The circle.</figcaption>
</figure>
The elimination principle for this is just like for the interval. We need a point `b : P base` and a dependent path `l : PathP (\i -> P (loop i)) b b` (since `loop i0 = loop i1 = base` the dependent path is not strictly necessary). For example, to define a function $\mathbb{S}^1 \to \mathscr{U}$, we need to pick a type $X : \mathscr{U}$ and a path $X \equiv X$. All non-trivial paths in types are going to be generated by univalence on some interesting equivalence.
Allow me one paragraph's worth of digression before we get to the point. The type of integers is defined as the coproduct of $\mathbb{N} + \mathbb{N}$^[In the implementation, this definition is unfolded], where $\mathrm{inl}\ x$ is interpreted as $+x$ and $\mathrm{inr}\ x$ is $-(x + 1)$. With this representation, one can define the functions $\mathrm{sucZ} = x + 1$ and $\mathrm{predZ} = x - 1$, and prove that they are inverses, such that $\mathrm{sucZ}$ is an autoequivalence of $\mathbb{Z}$.
Consider the function $\mathrm{helix} : \mathbb{S}^1 \to \mathscr{U}$ which maps $\mathrm{base}$ to $\mathbb{Z}$ and $\mathrm{loop}(i)$ to $(\mathrm{univalence}\ sucZ)(i)$. It's easy to check that this definition is type-correct (and boundary-correct), so we can apply it to elements of the circle and get back types and equivalences. Now we can define the function $winding : \mathrm{base} \equiv \mathrm{base} \to \mathbb{Z}$ by
```
winding : Path base base -> Int
winding p = transp (\i -> helix (p i)) (pos zero)
```
This map counts, for any loop $x : \mathrm{base} \equiv \mathrm{base}$, the number of times x "goes around" the $\mathrm{loop}$. For example, going around it once:
```
windingLoop : Path (winding (\i -> loop i)) (pos (succ zero))
windingLoop = refl
```
or once in the other direction:
```
windingSymLoop : Path (winding (\i -> loop (inot i))) (neg zero)
windingSymLoop = refl
```
or no times at all:
```
windingBase : Path (winding (\i -> base)) (pos zero)
windingBase = refl
```
If we also write a function $wind : \mathbb{Z} \to \mathrm{base} \equiv \mathrm{base}$ and prove that they are inverses, what we end up with is a fully synthetic, machine-checked proof that $\Omega(\mathbb{S}^1) \equiv \mathbb{Z}$. Of course, we could also _define_ $\mathbb{Z}$ as $\Omega(\mathbb{S}^1)$, but in that case the proof is a lot less interesting!
### Category Theory: The Homotopy Pushout
Category theory has the notion of limits and colimits of diagrams, which give rise to lots of important concepts. A full explanation of colimits is not due here, but it should suffice to say that if we want to do mathematics internally to cubical type theory, a complete and co-complete category is a fine setting to do it. Given a diagram like the one on the left, a _cocone_ under it is a diagram like the one on the right, which commutes. The _pushout_ of a span is its colimit, that is, the "smallest" such cocone.
<div class="mathpar">
<figure style="width: 50%;">
<img src="/diagrams/ctt/span.svg" alt="Span" style="min-width: 250px;" />
<figcaption>A **span** is a triple of types $A$, $B$, $C$ with maps $f : A \to B$ and $g : A \to C$</figcaption>
</figure>
<figure style="width: 50%;">
<img src="/diagrams/ctt/span_colimit.svg" alt="Colimit of a span" style="min-width: 250px;" />
<figcaption>A **cocone under a span** is a type $P$ and inclusions $i_1 : B \to P$ and $i_2 : C \to P$ such that $i_1 \circ f = i_2 \circ g$</figcaption>
</figure>
</div>
Normal Martin-Löf type theory does not give us the tools to define pushouts, but, as you will have guessed, cubical type theory does. We can define pushouts as a higher inductive type, like this:
```
data Pushout {A B C : Type} (f : A -> B) (g : A -> C) : Type where
inl : (x : B) -> Pushout f g
inr : (y : C) -> Pushout f g
push i : (a : A) -> Pushout f g [ (i = i0) -> inl (f a)
, (i = i1) -> inr (g a) ]
```
The `push` path constructor is parametrised by an element $a : A$ and an endpoint $i : \mathbb{I}$. Applying function extensionality, one can turn this into a path between $\mathrm{inl} \circ f$ and $\mathrm{inr} \circ g$, which is what we need for the diagram to commute. Homotopy pushouts are very general and can be used to define a number of homotopy-theoretic constructions. Quoting the HoTT book, section 6.8, we have:
> - The pushout of $1 \leftarrow A \to 1$ is the **suspension** $\Sigma A$
> - The pushout of $A \leftarrow A \times B \to B$ is the **join** of $A$ and $B$, written $A * B$
> - The pushout of $1 \leftarrow A \xrightarrow{f} B$ is the **cone** or **cofiber** of $f$
The big file with all the proofs in [cubical](https://git.abby.how/abby/cubical) features a proof that the suspension $\Sigma A$ defined directly as a HIT is the same as the one defined by the pushout of $1 \leftarrow A \to 1$.
## But Why?
The motivation for cubical type theory was made explicit two posts ago, when I was talking about equality for the first time, but it's worth mentioning it again, especially after all^[Actually, the most complex part of Cubical Type Theory is the definition of composition for $\mathrm{Glue}$, which is far too hardcore for a blog post, even for its appendix.] of its complexity has been exposed like this. And let me be clear, it is _very_ complex. No amount of handwaving away details can make cubical type theory seem like a "natural" extension: It's not something we found, like the groupoid interpretation of type theory. It's something we built.
And what _did_ we find? A type system with great computational behaviour for all of Homotopy Type Theory. In particular, an argument based on the cubical set _model_ of type theory, rather than on the syntax, proves that cubical type theory enjoys _canonicity_: Every boolean in the empty context is _strictly_ equal to either $\mathrm{true}$ or $\mathrm{false}$, and other types enjoy similar properties for their canonical elements.
The big failing of Homotopy Type Theory before the cubes came to save us was that there were closed inhabitants of types not equal to any of their constructors. In particular, any construction with path induction would get stuck on the terms $\mathrm{ua}(e)$ of the univalence axiom. Cubical type theory solves this twofold: It gives us ways of working with paths _directly_, using operations on the interval and composition, _and_ it explains what the computational behaviour of $\mathrm{univalence}$ is.
So, if you ask me, the complexity is justified. It's one of those things that took me a while to get my head around, but where the learning process _and_ the result (knowing about cubes) were beneficial. And god, did it take a while. The first time I encountered the cubical type theory paper was in mid 2019, almost two years ago! It took me _that long_ to go from "what the hell is this" to "this is neat but it confuses me" to "I understand this" to "I can implement this" (we are here).
Writing about it has been my white whale for that long---I'll need a new one, suggestions welcome! Maybe I should write a monad tutorial? Heard those are complicated, too.
If you made it this far, I thank you deeply. This post is a behemoth! In fact the next **word** is the 7000th, which almost makes this post longer than my two previous longest posts _combined_! If you haven't abandoned me yet, I swear: I will never make you read this much again. However, if you made it this far and understood everything, I only have one thing to say: Go forth, dear reader, and fill those cubes.
Well, that sounded weird. I won't say it again.
----
## Appendix: Blog/EXTRA CCC {#appendix}
<sup><sup>_CCC stands for "computing cubical compositions", I'm so sorry_</sup></sup>
### Functions
Now we add one step of generalisation, and consider transport in a line of $\prod{(x : A)} B\ x$, where $A : \mathbb{I} \to \mathscr{U}$ as before but $B : \prod{(i : \mathbb{I})} A(i) \to \mathscr{U}$. A given $f : (x : A(i0)) \to B(i0)\ x$ will become, through a trick of magic, a function $(x : A(i1)) \to B(i1)\ x$.
The first step is to define $x\prime : A(i0)$ as before, apply $f$ to get an element $B(i0)\ x\prime$, then cast the result of the application along $\lambda i. B(i)$... Wait. The function is dependent. Can we cast along $\lambda i. B(i)\ x$? No, not quite. $x : A(i1)$, but we need an element of $A(i)$. $\lambda i. B(i)\ x\prime$ won't do either, since $x\prime$ has type $A(i0)$.
What we need is a line, dependent on $i$, which connects $x\prime$ and $x$, call it $p$; Then we can transport along $\lambda i. B(i)\ p(i)$ to get the element of $B(i1)\ x$ which we want. The filler of the composition which generated $x\prime$ is _exactly_ what we need. Define $v(i) = \mathrm{fill}\ (\lambda i. A (\neg i))\ (\lambda j [])\ x$, so that we may define $y = \mathrm{comp}\ (\lambda i. B(i)\ v(i)) \ (\lambda k [])\ (f\ x\prime)$, and the composition is $\lambda x. y$ as before.
To generalise this to non-empty compositions only requires a very small change. If you think of functions as extensional black boxes, like we do, one thing to realise is that it doesn't really matter _how_ we turn $x$ into an argument to the function, as long as we do; The only thing which needs to respect the constraints of the composition is the overall function, that is, its result. So we can simply take $x\prime$, $v(i)$ as in the case for dependent compositions and define the full composition to be:
$$
\mathrm{comp}\ (\lambda i. \prod{(x : A(i))} B(i)\ x)\ [\phi \to u]\ a0 =\\
\lambda x. \mathrm{comp}\ (\lambda i. B(i)\ v(i))\ [\phi \to u\ v]\ (a0\ x\prime)
$$
Note the light abuse of notation we use in the mathematics; More properly, the system of sides in the resulting composition would be written
$\lambda i\ x. u(i)(x)\ v(i)$.
### Pairs
Assume we're given an element $p : \sum{(x : A)} B\ x$, and take $x = p.1$ and $y = p.2$. Just like in the case for dependent functions, $A$ is a line and $B$ is a dependent line; What we want is an element $p\prime : \sum{(x : A(i1))} B(i1)\ x$, whose first component we will call $x\prime$.
To define $\mathrm{comp}\ (\lambda i. \sum{(x : A(i))} B(i)\ x)\ [\phi \to u]\ p$, first define $v(i) = \mathrm{fill}\ A\ [\phi \to u.1]\ x$, which is a line connecting $x$ and $\mathrm{comp}\ A\ [\phi \to u.1]\ x$. For the second element we'll do the same thing as we did for dependent functions, and define $y\prime = \mathrm{comp}\ (\lambda i. B(i)\ v(i))\ [\phi \to u.2]\ y$. Then we can define composition as follows:
$$\mathrm{comp}\ (\lambda i. \textstyle\sum{(x : A(i))} B(i)\ x)\ [\phi \to u]\ p = (v(i1), y\prime)$$
[here]: https://git.abby.how/abby/cubical/src/branch/master/intro.tt#L436-L460
[line 114]: https://git.abby.how/abby/cubical/src/commit/fb87b16429fdd54f7e71b653ffaed115015066cc/intro.tt#L110-L114
[line 667]: https://git.abby.how/abby/cubical/src/commit/fb87b16429fdd54f7e71b653ffaed115015066cc/intro.tt#L667
[line 675]: https://git.abby.how/abby/cubical/src/commit/fb87b16429fdd54f7e71b653ffaed115015066cc/intro.tt#L675

+ 118
- 0
pages/posts/2021-06-07-ax-j.md View File

@ -0,0 +1,118 @@
---
title: "A quickie: Axiom J"
date: June 7th, 2021
synopsys: 2
---
Hey y'all, it's been three months since my last blog post! You know what that means.. or should mean, at least. Yes, I'd quite like to have another long blog post done, but... Life is kinda trash right now, no motivation for writing, whatever. So over the coming week(s) or so, as a coping mechanism for the chaos that is the end of the semester, I'm gonna write a couple of really short posts (like this one) that might not even be coherent at all---this sentence sure isn't.
Today's note is about what is perhaps the most confusing rule of Martin-Löf's dependent type theory, the _J_ eliminator. For starters, its name means basically nothing: as far as I can tell its name comes from the fact that **I**dentity is another word for equality and J is the letter that comes after I.
First, let's recall how the identity type is defined, or rather, the two ways in which it can be defined. The former has two _parameters_, `A` and `x`, and a single _index_ (of type `A`), while the latter has a single `A` _parameter_ and two _indices_ of type `A`. Using Agda syntax:
<div class="mathpar">
```agda
data _=_ {A : Type} (x : A) : A -> Type where
refl : x = x
```
```agda
data _=_ {A : Type} : A -> A -> Type where
refl : {x : A} -> x = x
```
</div>
These definitions give rise to subtly different (but equivalent — see section §1.12.2 of Homotopy Type Theory if you're curious about the details) elimination rules. We'll consider the one on the left (or above, if your screen is narrow), since that one is _based_[^1].
[^1]: Which implies the other is cringe.
One decomposition which is (sometimes) helpful when an induction principle is confusing is to break it down into a simply typed _recursion_ principle and a propositional _uniqueness_ principle. Let's visit the recursion principle first.
It's actually something you're already familiar with, even if you don't have a background in type theory: Indiscernibility of identicals. We're going to assume a rather big type theory, with arrows and universes, so we can consider a family of propositions indexed by `A` to be a type family `P : A -> Type`. I ambiguously use Type to refer to some universe and leave it to the reader to find a consistent assignment of levels. Best of luck.
Where does `A` come from? It's an argument to the recursor since it's a _parameter_ to the inductive family. Similarly, `x` is also a parameter, but we make it implicit for convenience (in a theory without implicit arguments this, of course, doesn't happen). Let's write down what we have so far.
```agda
=-rec : {A : Type} {x : A} -> (P : A -> Type) -> ...
```
I'm using "Agda" as a language marker but I'm adding extra arrows for clarity. After the proposition we're proving, comes one hypothesis for each constructor. Above I wrote it in infix form, `refl : x = x`{.agda}, but you can alternatively consider this as `refl : (_=_ x) x`{.agda} — i.e., the family `(_=_ x)`{.agda} applied to its index `x`.
For each constructor, the hypothesis returns a term in `P` applied to each of the indices of the constructor---so in this case, `P x`---and is a function of any arguments to our constructor. `refl` doesn't have any arguments, so the hypothesis is _just_ `P x`.
```agda
=-rec : {A : Type} {x : A} -> (P : A -> Type) -> P x -> ...
```
And now, the conclusion! Literally. We introduce new variables with the same types as our indices---let's call this one `y : A`---and one argument which has the type "our inductive type applied to those new indices". Our inductive type is `(_=_ x)`, so that applied to our new indices is `(_=_ x) y`: `x = y`. And the conclusion? `P` applied to those indices!
```agda
=-rec : {A : Type} {x : A} -> (P : A -> Type) -> P x
-> {y : A} -> x = y -> P y
```
We can shuffle the parameters around a bit to make it more familiar, and, indeed, give it a better name, too:
```agda
subst : {A : Type} {x y : A} (P : A -> Type) -> x = y -> P x -> P y
```
The recursion principle for `(_=_ x)` says that, if `x = y`, then any property that's true of `x`---that is, an inhabitant `P x`---is also true of `y`!
Now let's consider the uniqueness principle. I think this is the hardest one to wrap your head around, since it's _really_ counterintuitive. The first guess anyone would make is that the uniqueness principle says that the only term of `x = x` is `refl`, since, well, just look at the type definition! However..
What we've defined is not a type. It's a _family_ of types, indexed by a `y : A`. So we can't state a uniqueness principle for some specific `x = y`, we need to consider the "whole family". The, uh, _total space_ of the family, if you'll forgive my HoTT accent. That's a sigma type, a dependent sum, of all the indices and only _then_ our inductive family.
The uniqueness principle for `(_=_ x)` says something about `Σ A \y -> x = y`, or `(y : A) * x = y`, or $\sum_{y : A} x = y$, depending on how much of my terrible syntax decisions you can tolerate. It says this type is _contractible_, i.e., only has one inhabitant up to equality, and the centre of contraction is `(x, refl)`.
The name for this principle is _contractibility of singletons_, since it speaks about singleton types: The, for a fixed A and x, "subset of A equal to x". If `x = y` were a proposition, this would indeed be a subset, but we can't in general expect `x = y` to be a proposition.
I claim: J = `subst` + contractibility of singletons. Let's see how. Here's the full type of the J axiom, just for reference:
```agda
J : {A : Type} {x : A}
-> (P : (y : A) -> x = y -> Type)
-> P x refl
-> {y : A} (p : x = y)
-> P y p
```
Let's, uh, look at the type of `P` there. It's a function of two arguments... mmm.. What happens if we curry it?
```agda
J : {A : Type} {x : A}
-> (P : (Σ A λ y -> x = y) -> Type)
-> P (x, refl)
-> {z : Σ A λ y -> x = y}
-> P z
```
Now we're getting somewhere interesting. J says something about the type `(y : A) * x = y` (or `Σ A λ y -> x = y` in the _cursed_ "Agda" notation) — The total space of the family `(_=_ x)`. In particular, it says that, if we want to prove `P` about any inhabitant `z` of that space, it's sufficient to prove `P (x, refl)`. This looks suspiciously like the principle of contractibility of singletons I was talking about before! In fact, let's see how we can derive J from contractibility of singletons and substitution.
To recap, we assume:
```agda
contract : {A : Type} {x : A} (z : (Σ A λ y -> x = y)) -> z = (x, refl)
subst : {P : A -> Type} {x y : A} -> x = y -> P x -> P y
```
Suppose our proof of `P (x, refl)` is called `pr`, for simplicity, and the other inhabitant is called, well, `z`. By `contract z` we have `z = (x, refl)`, so the inverse of that is a path `(x, refl) = z`. By `subst {P} {(x, refl)} {z} (sym (contract z))` we have a function `P (x, refl) -> P z`, which we can apply to `pr` to get a `P z`, like we wanted.
This decomposition might sound a bit useless, since, well, we can get both substitution and contractibility of singletons from J, but it's actually super handy! It's how I _prove_ J in cubical type theory. Here, substitution is a derived operation from a primitive called _composition_ (read my last blog post!), and contractibility of singletons can be proven using a _connection_ (also in the last post!). So `J` looks like:
```cubical
J : {A : Type} {x : A}
(P : (y : A) -> Path x y -> Type)
(d : P x (\i -> x))
{y : A} (p : Path x y)
-> P y p
J P d p = transp (\i -> P (p i) (\j -> p (iand i j))) d
```
---
I think that's it for what I can write for today. I didn't really have a conclusion in mind, I just see a lot of talk about Martin-Löf's equality and wanted to throw my own two cents out into the internet. I guess writing about J is like the monad tutorial of dependent type theory? Though I'd like to think of myself as a bit more advanced than "writing a monad tutorial", since, you know, I wrote my own cubical type theory, but whatever..
I'm still thinking of writing up a complete introduction to type theory, like, the whole thing: What it is, how to read inference rules, the empty and unit types, products, functions, dependent products, dependent sums, coproducts, naturals, inductive types, equality, and possibly the axioms HoTT makes you postulate on top of that. Of course, it's a _lot_ of work, and the sheer scale of what I want to write is.. kinda paralysing. Let's see whether or not it'll happen.

+ 411
- 0
pages/posts/2021-06-21-cubical-sets.md View File

@ -0,0 +1,411 @@
---
title: Cubical Sets
date: June 21st, 2021
abbreviations:
cube: 🧊
globe: 🌎
yo: よ
---
<div class="warning">
Throughout this post I'll use the ice cube emoji, &cube;, to stand for the _category of cubes_, which is more traditionally written □ (a blank square). The reason for this is that I have a really convenient emoji picker, so when I write about cubes on Twitter, it's a lot easier to call the category &cube; (maybe 4 keystrokes to select) rather than looking up the proper symbol on Google.
If you can't see this symbol - &cube; - then you should probably download an emoji font.
</div>
In which I try to write about semantics. This is not gonna go well, but I'm gonna try my best. I've heard it on good authority that the best way to learn something is to explain it to someone else, so in this post I'm going to use you, dear reader, as my rubber duck while I try to understand _cubical sets_. These are important (to me) because they provide a semantic model of cubical type theory (which I have written about previously), and since we have a semantic model, that theory is (semantically) consistent.
Personally, I like to think of a cubical set as.. a set that has opinions about cubes. You can ask some cubical set $F$ (the letter $F$ is meant to be indicative of its nature, as we will see) about its opinion on _the_ 0-cube, and it'll give us a set of points. We could ask it about the line, and it'll give us a set of lines, and so on and so forth. There are also, as we shall see, _maps_ between cubes, and asking $F$ about these will give us maps between what it thinks those cubes are.
A disclaimer (which I won't put in one of the big red warning boxes like above) is that _**I am not a category theorist**_! Most of this stuff I absorbed from reading various papers about the semantics of cubical type theory. This post is not really what I would call "a good reference".
The Category of Cubes &cube;
----------------------------
The category of cubes has a concise description in category-theoretic terms but I would rather describe it like this: It's the category in which the objects are all powers of the set of booleans, $\{0,1\}^n$, which we abbreviate to $[n]$. To describe the maps in the category of cubes, I'm going to upset category theorists and describe them concretely, as functions of sets, written in a "pattern matching" notation similar to Haskell. However, we will only "pattern match" on products.
- The _faces_, which exhibit a cube as a _face_ of a larger cube. Concretely, a face map inserts either a 0 or a 1 somewhere along the tuple, taking an $n$-cube to an $(n+1)$-cube. The two most basic face maps take the 0-cube (a point) to either endpoint of the 1-cube (a line), defined by $\delta^0(()) = 0$ and $\delta^1(()) = 1$.
As further examples, we have functions $\delta^i_j$ (for $0 \le i, j \le 1$) which map the 1-cube (a line) into the 2-cube (a square), as any of its 4 faces. These are, explicitly, $\delta^0_0(j) = (0, j)$, $\delta^0_1(i) = (i, 0)$, $\delta^1_0(j) = (1, j)$, and $\delta^1_1(i) = (i, 1)$.
These also compose. For instance, the map $\delta^0_0 \circ \delta^1 : [0] \to [2]$ exhibits the point `()` as the `(0, 1)` corner of a square. If we take the first coordinate to be the left-right direction (0 = left) and the second coordinate to be the up-down (0 = up) direction, then this composite map can be pictured as follows:
<figure>
<img height=200px alt="A diagram meant to indicate the inclusion of a point as a 0-face in the 2-cube. Colours are used to indicate how the point is mapped to the endpoint of a line, and then the corner of a square." src="/diagrams/cubicalsets/delta10_delta0.svg" />
<figcaption>Including the $\color{red}{\bullet}$ point as a 0-face in the 2-cube </figcaption>
</figure>
The actual, _concrete_ effect of $\delta^0_0 \circ \delta^1 : [0] \to [2]$ can be seen by evaluating the composite at the unique inhabitant of $[0]$, which is $()$ (the empty tuple). We have $(\delta^0_0 \circ \delta^1)(()) = \delta^0_0(1) = (0, 1)$.
- The _degeneracies_, which "collapse" an $(n+1)$-cube to an $n$-cube by deleting a dimension. The most basic degeneracy is given by the unique map $\sigma : [1] \to [0]$. There are two degeneracies $[2] \to [1]$, mapping a square to a line, by deleting either coordinate. These... don't have very nice pictorial representations, at least in the category of _cubes_. We'll see that when it comes to a cubical _set_, though, they are quite easy to diagram.
We also have the trivial map $1_n : [n] \to [n]$ which returns its argument $n$-tuple (cube) unchanged. It's easy to see that this is the identity for composition, which is as in $\mathbf{Set}$. Since &cube; is a category, we can consider standard category-theoretical operations _on_ &cube;, like taking its opposite category, $\cube^{op}$. The category $\cube^{op}$ has as its objects the same cubes $[n]$ as before, but all of the maps are backwards, so that the face maps in $\cube^{op}$ _project_ a face and the degeneracies _expand_ a cube, by inserting trivial faces.
We can also consider functors which map out of $\cube$ --- and its opposite category! --- and that's what we're interested in today. The functors we will talk about, those $X : \cube^{op} \to \mathbf{Set}$, are called _cubical sets_, and we'll talk about them shortly, but first, a note:
_Crucially_, the category of cubes does not have any maps other than the faces and degeneracies (and identities), and importantly, any map $p : [m] \to [n]$ factors as a series of degeneracies followed by a series of faces. This means we can specify a cubical set _entirely_ by how it acts on the faces and the degeneracies.
Cubical Sets
------------
A cubical set is a functor $X : \cube^{op} \to \mathbf{Set}$. Concretely, such a functor assigns:
- To each object $c$ in &cube;, a _set_ $X(c)$. Since the objects of &cube; are all $[n]$, these are alternatively notated $X_n$.
- To each map $f : [n] \to [m]$ in &cube;, an arrow $X(f) : X_m \to X_n$. Specifically, these are all composites of the faces $X(\delta^i_j) : X_{n + 1} \to X_n$ and the degeneracies $X(\sigma_j) : X_n \to X_{n + 1}$.
Hold up - aren't those backwards? Yes, they are! Remember, a cubical set is not a functor out of the category &cube;, it's a functor out of $\cube{}^{op}$, so all of the arrows are backwards. To work more conveniently with cubical sets (and, in fact, more concretely) we need to take a _very_ abstract detour through even more category theory.
### The Unit Interval
Being functors $[\cube^{op}, \mathbf{Set}]$, we can also form maps _between_ cubical sets, which are _natural transformations_ $\alpha : X \to Y$. Specifically, a map between cubical sets assigns to each cube $c \in \cube$ a map $\alpha_c : X(c) \to Y(c)$, such that for any morphism $f : c \to c' \in \cube$, the equation $\alpha_{c} \circ X(f) = Y(f) \circ \alpha_{c'}$ holds. This condition is captured by the following diagram:
<figure>
<img height=200px alt="A naturality square" src="/diagrams/cubicalsets/naturality.svg" />
<figcaption>Is it even possible to talk about category theory without drawing a naturality square?</figcaption>
</figure>
A standard construction in category theory is the _Yoneda embedding_ functor, written $\yo$ (the hiragana character for "yo"), which maps an object of (in our case) $\cube$ into the category of cubical sets. It maps objects $c \in \cube$ to the _hom-set_ functor $\mathrm{Hom}_{\cube}(-, c)$, which takes each object $d \in \cube^{op}$ to the set of morphisms $d \to c$ in &cube;.
It takes the morphism $f : c \to d$ to the natural transformation $\yo(f) : \mathrm{Hom}_{\cube}(-, c) \to \mathrm{Hom}_{\cube}(-, d)$.
Let's look at $\yo(f)$ some more. It's a natural transformation between $\mathrm{Hom}(-, c)$ and $\mathrm{Hom}(-, d)$, so we can read that as a set of maps indexed by some $e \in \cube^{op}$. Since $\yo(f)_e : \mathrm{Hom}(e, c) \to \mathrm{Hom}(e, d)$, we can understand that the value $\yo(f)_e(g)$ takes for $g \in \mathrm{Hom}(e, c)$ is $f \circ g$.
The $\yo$ functor gives us, for an object $[n]$ in the category of cubes, an object.. well, $\yo([n])$ in the category of cubical sets. We'll abbreviate $\yo([n])$ as $\square^n$ for time, since, as we'll see, this object is very special indeed. Let's consider, for simplicity, the _unit interval_ cubical set, $\square^1$. We know it's a functor from $\cube^{op}$ to $\mathbf{Set}$ --- and more, we know exactly what it maps each cube to. The _set of all maps from other cubes to $[1]$_. Turns out, above $[1]$ this set only contains trivial cubes, so let's look at what $\square^1_0$ and $\square^1_1$ are:
- For $\square^1_0$ we have to consider all ways of mapping the $0$-cube to the $1$-cube. These are the two "base" face maps $\delta^0$ and $\delta^1$.
- For $\square^1_1$, we have to consider all ways of mapping the $1$-cube to itself. You might think that this set is trivial, but think again (if you do): Yes, we do have the identity map $1_{[1]} : [1] \to [1]$, but we also have the compositions $\delta^0 \circ \sigma$ and $\delta^1 \circ \sigma$. Since we know what the objects in the category of cubes look like, you can think of these as the constant function `f(x) = 0` and `g(x) = 1` respectively, since that's what they work out to:
$$
(\delta^0 \circ \sigma)(x) = (\delta^0)(\sigma(x)) = \delta^0(()) = (0)
$$
- For $\square^1_j, j > 1$ we only have degeneracies (and compositions of degeneracies) mapping $[j] \to [1]$.
Now, the standard cubes $\square^n$ don't look very interesting. But you see, this is where I pulled a sneaky on you! Because of a result about $\yo$ --- the _Yoneda lemma_. Specialised to our case, it says that for _any n_ and _any X_, the sets $X_n$ and $\mathrm{Hom}(\square^n, X)$ correspond exactly: we can probe the structure of a cubical set $X$ by examining the classes of maps from $\square^n$ to $X$.
<details>
<summary>The Yoneda lemma</summary>
The Yoneda lemma is a result about an arbitrary category $C$, its category of _presheaves_ $\mathbf{PSh}(C) = [C^{op}, \mathbf{Set}]$, and the functor $\yo(c)$ we just defined. Its statement is as follows:
$$
\mathrm{Hom}(\yo(c), F) \simeq F(c)
$$
In our case, it's interesting particularly because it says that we can explore the structure of a cubical set --- a presheaf on $\cube$ --- by analysing the maps from the standard $n$-cube $\square^n$ into $X$. Furthermore, it implies the Yoneda embedding $\yo$ is _fully faithful_, by the following calculation:
$$
\mathrm{Hom}_{\mathbf{PSh}(C)}(\yo(c), \yo(d)) \simeq \yo(d)(c) \simeq \mathrm{Hom}_{C}(c, d)
$$
It thus realises $C$ as a full subcategory of $\mathbf{PSh}(C)$ - in our case, the category $\cube$ as a subcategory of the category of cubical sets. This is useful because $\mathbf{PSh}(C)$ is a category with a _lot_ of structure (as we shall see), even when $C$ doesn't have any structure.
This also means that we can study maps between cubes by studying maps of standard cubical sets, which is good, because degeneracies in the category of cubes confuse me to death!
</details>
### Cubes in Sets
Let's look at what the maps $\square^n \to X$ impart on $X$, shall we? But first, let's reason a bit to identify how we can represent diagramatically the cubical set $\square^n$, by extrapolating our knowledge about the unit interval cubical set. For that case, $\square^1_0$ was the set containing both "endpoints" of the unit interval, and the set $\square^1_1$ contained two degenerate lines (for either endpoint --- we'll see how to think about these in the next section) and one non-degenerate line, which we think of as "the" unit interval.
So, in general, we think of $\square^n$ as consisting of the set $\square^n_0$ of _vertices_, the set $\square^n_1$ of _lines_, the set $\square^n_2$ of _squares_, the set $\square^n_3$ of _cubes_ (cube in the sense of high school geometry), etc, all the way up to the set $\square^n_n$ of $n$-cubes, and all $\square^n_m, m > n$ are degenerate. We can represent these using.. diagrams! Diagrams of points, lines, squares, cubes, etc. Let's look at the first few:
<figure>
<img height=200px alt="The image of the first 3 objects of the category of cubes under the Yoneda embedding are cubical sets representing familiar shapes: a point, a line, a square, and a (solid) cube." src="/diagrams/cubicalsets/first_ncubes.svg" />
<figcaption>The cubical sets $\square^0$, $\square^1$, $\square^2$, $\square^3$.</figcaption>
</figure>
Now we can investigate a particular $n$-cube in $X$ as being a diagram in $X$ with the same shape as one of the diagrams above!
- A $0$-cube in X is just a point in X.
- A $1$-cube in X can be parsed to mean an arrow $f : x_0 \to x_1$. The points $x_0$ and $x_1$ are understood to be the cubes $f \circ \yo(\delta^0)$ and $f \circ \yo(\delta^1)$, which we call the _endpoints_ of $f$. By composing with the image of a face map under $\yo$, we can project a lower-dimensional cube from a higher dimensional cube, by the action of $\yo$ on morphisms.
- A $2$-cube in X is a _square_ $\sigma$ like
<figure>
<img alt="A diagrammatic representation of a particular square in a cubical set." height=200px src="/diagrams/cubicalsets/asquare.svg" />
<figcaption>A square $\sigma$.</figcaption>
</figure>
In this diagram too we can understand the lower-dimensional cubes contained in $\sigma$ to be compositions $\sigma \circ \yo(p)$ for some composition of face maps $p : [m] \to [2], m \le 2$. As an example (the same example as in the section on &cube;), the arrow $p$ is the map $\sigma \circ \yo(\delta^0_0)$, and the point $b$ is the map $\sigma \circ \yo(\delta^0_0) \circ \yo(\delta^1)$. By functoriality of $\yo$, that composite is the same thing as $\sigma \circ \yo(\delta^0_0 \circ \delta^1)$.
- <div class="text-image">
<div class="ti-text">
A $3$-cube in X is a map $\aleph : \square^3 \to X$, which could be visualized as the proper _cube_ below, and has 6 2-faces (squares), 12 1-faces (edges) and 8 0-faces (vertices). As an exercise, work out which sequence of face maps in the underlying cube category leads to each of the possible 26 faces you can project. Honestly, the drawing of the $3$-cube isn't even _that_ enlightening, I just wanted to be fancy.
Like, check out this absolute _flex_ of a diagram, it's god damn useless. Wow.
As a quick aside, can we talk about how god damn confusing this projection is? I can never tell whether I'm looking top-down at a truncated square pyramid ($\kappa$ is the top face) or if I'm looking _through_ a normal solid 3-cube whose front face is transparent ($\kappa$ is the back face).
</div>
<figure>
<img alt="A diagrammatic representation of a particular cube in a cubical set. The diagram is incredibly busy and not very helpful." height=200px src="/diagrams/cubicalsets/acube.svg" />
<figcaption>A _proper_ cube, finally!</figcaption>
</figure>
</div>
In case it's not clear (it's not clear, I know), the 2-cubes present in the 3-cube $\aleph$ -- yes, $\aleph$, that's how hard I'm running out of letters over here -- are these:
- $\kappa$ is the square spanned by $w \to x \to z \leftarrow y \leftarrow w$.
- $\lambda$ is the square spanned by $a \to w \to y \leftarrow c \leftarrow a$.
- $\mu$ is the square spanned by $a \to b \to x \leftarrow w \leftarrow a$.
- $\nu$ is the square spanned by $b \to x \to z \leftarrow d \leftarrow b$.
- $\epsilon$ is the square spanned by $c \to y \to z \leftarrow d \leftarrow c$.
- There is one more square, obscured by $\kappa$, which is spanned by $a \to b \to d \leftarrow c \leftarrow a$.
Yeah, this item is padding. Fight me.
Now that we know we can represent particular cubes in a cubical set X by diagrams, I can also finally show you what a degeneracy actually looks like! For instance, we know $X(\sigma)$ maps from the set of points of $X$ to the set of lines of $X$ (since $X$ is contravariant, it inverts the direction of $\sigma$ -- remember that).
If $x$ is a particular point in $X$, its image under $X(\sigma)$ is a degenerate line connecting $x \to x$. Degeneracies on lines $l$ turn them into degenerate squares where two opposing faces are $l$ and the other two faces are degenerate, and so on.
<figure>
<img height=200px alt="Diagrammatic representations of the degeneracy which expresses a point as a degenerate line, and one of the ways of expressing a line as a degenerate square." src="/diagrams/cubicalsets/degeneracies.svg" />
<figcaption>Some degeneracies in cubical sets, diagrammed.</figcaption>
</figure>
In both diagrams above, the dashed arrow from the $n$-cube to the inside of $(n+1)$-cube is meant to be understood as $a \circ \yo(\sigma)$, where $a$ is a map $\square^n \to X$. $\sigma_0 \circ \sigma$ is the map which collapses a square to a point by first removing the _first_ coordinate, which is understood to be left-right; Thus, the cells in the up-down direction in $f \circ \yo(\sigma_0) \circ \yo(\sigma)$ are thin, and the left-right cells are full.
### More examples of Cubical Sets
The simplest way of making a cubical set is by taking a normal set, say $A$, and ignoring the cubes, thus making the _discrete cubical set_ $K(A)$, which has $K(A)_n = A$ for every $n$; $K(A)(\delta^i_j) = 1$ and $K(A)(\sigma_i) = 1$.
It's easy to see that $K(A)$ is a functor, since:
* $K(1) = 1$
* $K(g \circ f) = 1$, and $K(g) \circ K(f) = 1 \circ 1 = 1$.
And thus $K(A)$ is a cubical set. It doesn't have a lot of interesting _structure_, but some discrete cubical sets will have important roles to play when discussing the _category_ of cubical sets. For instance, $K(\mathbb{N})$ plays the same role in $\mathbf{cSet}$ as $\mathbb{N}$ does in $\mathbf{Set}$!
If $A$ and $B$ are cubical sets, we can form their product $A \times B$, which is _also_ a cubical set. Every $(A \times B)_n$ is $A_n \times B_n$, and maps $(A \times B)(f) : A_n \times B_n \to A_m \times B_m$ are taken to products of morphisms $A(f) \times B(f)$.[^1]
[^1]: Where $(f \times g)(x, y) = (f(x), g(y))$ in $\mathbf{Set}$.
Describing individual constructions on cubical sets (like their product) isn't very enlightening, though, and it's a lot more fruitful to describe most of them in one go. So, with that goal, I'll describe..
### The Category of Cubical Sets, $\mathbf{PSh}(\cube)$
Cubical sets are, of course, objects of a category, like all good things. We call a functor $X^{op} \to \mathbf{Set}$ a _presheaf on $X$_, and we denote the category of presheaves on $X$ by $\mathbf{PSh}(X)$. Thus, since a cubical set is a functor $\cube^{op} \to \mathbf{Set}$, we can also call it a _presheaf on &cube;_, and thus, an object of $\mathbf{PSh}(\cube)$. To reduce the number of ice cube emoji on the screen, we'll denote this category by $\mathbf{cSet}$.
The word "presheaf", rigorously, only means "contravariant functor into $\mathbf{Set}$." However, it's what the nLab calls a "concept with an attitude": If you call something a "presheaf category" instead of a "functor category", it's likely that you're interested in the properties of $\mathbf{PSh}(C)$ as a presheaf _topos_, and, indeed, that's what we're interested in.
A topos is a "particularly nice category to do mathematics", in which "nice" means "has a lot of structure". Let's look at some of the structure $\mathbf{cSet}$ (and, indeed, _any_ $\mathbf{PSh}(C)$) has for "free":
- **Completeness** Every _small limit_ exists in $\mathbf{cSet}$, and is computed pointwise as a limit in $\mathbf{Set}$. This is an extension of the product of cubical sets mentioned above: a product is just a small, _discrete_ limit. In particular, this also includes a _terminal object_ in cubical sets, which is the discrete cubical set $K(1)$.
- **Cocompleteness** Every _small colimit_ exists in $\mathbf{cSet}$. In particular, if $C$ is a category, $\mathbf{PSh}(C)$ is often referred to as the "free cocompletion" of $C$ --- $C$ plus all small colimits thrown in. These are also computed pointwise as colimits in $\mathbf{Set}$. Don't know what a colimit is? One particularly important example is the _coproduct_ $A + B$. In $\mathbf{Set}$, this is the disjoint union.
Another important colimit is the _initial object_ in cubical sets, which is the discrete cubical set $K(0)$.
- **Cartesian closure** This one merits a little more explanation than a paragraph. Fix a cubical set $X$. To say $\mathbf{cSet}$ is Cartesian closed is to say the functor $- \times X$ ("product with X", called "tensor") has a _right adjoint_ functor $[X, -]$, called "hom" (also read "function from X", at least by me) - That is, $\mathrm{Hom}(A \times X, B) \simeq \mathrm{Hom}(A, [X, B])$.
We can try to imagine what a would-be $[X, Y]$ would be like by fixing a third cubical set $Z$ and seeing that if $[X, Y]$ exists, then it must satisfy the equation
$$
\mathrm{Hom}_{\mathbf{cSet}}(Z, [X, Y]) \simeq \mathrm{Hom}_{\mathbf{cSet}}(Z \times X, Y).
$$
This equation holds when $c \in \cube$ and $Z = \yo(c)$, so by the Yoneda lemma we have
$$\mathrm{Hom}_{\mathbf{cSet}}(\yo(c) \times X, Y) \simeq \mathrm{Hom}_{\mathbf{cSet}}(\yo(c), [X, Y]) \simeq [X, Y](c)$$
By defining an "evaluation" map, $\mathrm{ev} : X \times [X, Y] \to Y$, and showing that for every $f : X \times A \to Y$ there is a $\lambda{}(f) : A \to [X, Y]$, we can prove that $\mathrm{ev}$ is the counit of the tensor-hom adjunction we want in $\mathbf{PSh}(\cube)$, and thus that the definition posed above is indeed the correct definition of $[X, Y]$ for cubical sets. For the details of this construction, check out [the nLab](https://ncatlab.org/nlab/show/closed+monoidal+structure+on+presheaves).
- And a wealth of other properties, like **local cartesian closure** ("has dependent products"), having a **subobject classifier** (a "type of propositions"), having **power objects** (a generalisation of power sets), among _many_ others.
Kan Cubical Sets
----------------
The category of cubical sets is pretty neat by itself, but.. it's kinda useless. I'm sure there exist applications of cubical sets by themselves, but I can't think of any. The cubical sets, just like the simplicial sets, come into their own when we consider the subcategory of $\mathbf{cSet}$ (resp. $\mathbf{sSet}$) consisting of the _Kan complexes_. Since the term Kan complex is generally used to mean "Kan simplicial set", we're generally left to use either "Kan cubical set" or "Cubical complex" for the objects of our subcategory. Let's go with the former.
Fix a cubical set $X$ throughout. We define the boundary of an $n$-cube $x$, $\partial x$, to be the union of all of its faces. This can be pictured diagramatically as below: The faces of $\sigma$ are all of the points and arrows spanning it, and the union of these is $\partial \sigma$.
<div class=mathpar>
<figure>
<img height=200px alt="The same square in a cubical set as before." src="/diagrams/cubicalsets/asquare.svg" />
<figcaption>The same ol' square $\sigma$.</figcaption>
</figure>
<figure>
<img height=200px alt="The square, but with its inside (σ) removed." src="/diagrams/cubicalsets/del_asquare.svg" />
<figcaption>The boundary of the square $\sigma$.</figcaption>
</figure>
</div>
We still have the same 0-cubes and 1-cubes spanning $\sigma$, but the 2-cube $\sigma$ _itself_ is no longer under consideration. We are principally interested in the boundaries of the standard $n$-cubes, which will be denoted $\partial \square^n$. Considering that boundary, we can define an _open box_ in $\square^n$ as the subset of $\partial \square^n$ with one of its $(n-1)$-dimensional faces (the face in the image of $\delta^\varepsilon_i$) removed. This we denote by $\sqcap^{n,i,\varepsilon}$.
Just like in the case of an $n$-cube in $X$, we understand the phrase "$(n,i,\varepsilon)$-open box in $X$" to mean a map $\sqcap^{n,i,\varepsilon} \to X$. Here are diagrams of all the open boxes in the same $\sigma$ as before.
<figure>
<img height=200px alt="All possible open boxes of the square σ, which you get by removing one of the faces. In the diagram, the missing face was replaced with a dotted line." src="/diagrams/cubicalsets/open_boxes.svg" />
<figcaption>All of the open boxes in $\sigma$.</figcaption>
</figure>
A cubical set satisfies the Kan condition if every open box in $X$ can be extended to a cube, or, more formally, if there exists a dotted arrow $g$ which factors the map $f$ through the inclusion from $\sqcap^{n,i,\varepsilon}$ into $\square^n$.
<figure>
<img height=200px alt="A commutative triangle representing the Kan condition." src="/diagrams/cubicalsets/kan_condition.svg" />
<figcaption>The Kan condition on the cubical sets $X$.</figcaption>
</figure>
### The Cubical Nerve
First, recall the definition of a groupoid. A groupoid $\mathcal{G}$ is a category in which for every arrow $f : A \to B$ there exists an arrow $f^{-1}$, such that ${f \circ f^{-1}} = {f^{-1} \circ f} = 1$. That is: a groupoid is a category in which every arrow is invertible. There is a (2-)category of groupoids, $\mathbf{Grpd}$, in which the objects are groupoids and the morphisms are functors (and the 2-morphisms are natural isos).
We specify a functor $N^{\le 2} : \mathbf{Cat} \to \mathbf{cSet}$, the _truncated nerve_ functor, which assigns to every category a cubical set in which every $n\ge{}3$-cube is degenerate, as follows:
- The points in $N^{\le 2}(A)$ are the objects in $A$,
- The lines $f : a_0 \to a_1$ in $N^{\le 2}(A)$ are the arrows $f : a_0 \to a_1$ in $A$; The lines induced by degeneracy maps are the identity arrows.
- The squares in $N^{\le 2}(A)$ are the squares with corners $a, b, c, d$ spanned by $f : a \to c$, $p : a \to b$, $q : c \to d$, $g : b \to d$, such that $g \circ p = q \circ f$ - that is, the commutative squares with that boundary.
The degenerate squares in $N^{\le 2}(A)$ are the squares as below, and they exist for every $a, b$, and $f : a \to b$ in $A$:
<figure>
<img height=200px alt="Degenerate squares in the truncated cubical nerve of a category." src="/diagrams/cubicalsets/thin_squares.svg" />
<figcaption>Thin squares in $N^{\le 2}(A)$</figcaption>
</figure>
I claim: If $A$ is a groupoid, then its nerve $N^{\le 2}(A)$ is always Kan. I will not show this with a lot of rigour, but to convince yourself of this fact, deliberate on what it means to fill boundaries of our non-degenerate cubes: the lines and squares.
* In the case of lines, an open box $\sqcap^{1,0,\varepsilon}$ is just a point $x_\varepsilon$; We can extend this to a line $1_{x_\varepsilon} : x_\varepsilon \to x_\varepsilon$, as desired.
* In the case of squares, an open box $\sqcap^{2,i,\varepsilon}$ is a diagram like the one below, in which all of the corners are objects of $A$ and the lines are maps in $A$. The maps in $A$ are invertible, so if we have $q$, we also have $q^{-1}$ (for instance).
<figure>
<img height=200px alt="A particular open box in the truncated cubical nerve of a groupoid." src="/diagrams/cubicalsets/open_box.svg" />
<figcaption>A representative example of open boxes in $N^{\le 2}(A)$.</figcaption>
</figure>
We're looking for the map $f : a \to c$. The strategy to use here is to try to "follow" the source of the missing arrow "around" the edges of the square, and, if you get stuck, invert the arrow you got stuck on. We take $a$ to $b$ through $p$, then to $d$ through $g$, and now we're stuck. A priori, there's no arrow $d \to c$ we can follow, but since $A$ is a groupoid, we can invert $q$ to get $q^{-1} : d \to c$. Thus the composite $q^{-1} \circ g \circ p$ connects $a$ and $c$, like we wanted.
Moreover, this diagram must commute, i.e., we must check that $g \circ p = q \circ (q^{-1} \circ g \circ p)$. But this is automatic from the axioms of a category (which say we can ignore the parentheses), and the axioms for a groupoid, which imply that $q \circ q^{-1} \circ f = f$ (for any f).
We have established that the truncated nerve of a groupoid is Kan. Why _truncated_? Because we only consider **1-**categories in the construction of $N^{\le 2}$, and, as the superscript implies, only have non-degenerate cubes for levels 2 and below. We could consider an _untruncated_ $N$ functor from $\infty$-categories to cubical sets; In that case, the nerve of an $\infty$-groupoid is Kan, just like in the 1-categorical case.
More surprising, the converse implication is also true! If the nerve $N^{\le 2}(A)$ of a category is Kan, then $A$ is a groupoid. Adapting the analogous argument from [Kerodon](https://kerodon.net/tag/0037) about Kan complexes to our Kan cubical sets, we're given an $f : a \to b \in A$, and we build left and right inverses $g, h : b \to a$ to $f$.
This can be done by defining a pair of partial squares in $N^{\le 2}(A)$, in which the missing faces represent left and right inverses to our map $f \in A$. Here they are:
<div class=mathpar>
<figure style="width:48%;">
<img height=200px alt="The open box which computes the left inverse of a map f." src="/diagrams/cubicalsets/left_inv.svg" />
<figcaption>If this open box had a filler, it would witness in $A$ the equation $g \circ f = 1$.</figcaption>
</figure>
<figure style="width:48%;">
<img height=200px alt="The open box which computes the right inverse of a map f." src="/diagrams/cubicalsets/right_inv.svg" />
<figcaption>If this open box had a filler, it would witness in $A$ the equation $f \circ h = 1$.</figcaption>
</figure>
</div>
By assumption, $N^{\le 2}(A)$ is Kan, which means these open boxes _do_ have fillers, and thus the equations $g \circ f = 1$ and $f \circ h = 1$ hold in $A$. We calculate: $g = g \circ 1 = g \circ (f \circ h) = (g \circ f) \circ h = 1 \circ h = h$, leaving implicit the applications of associativity of $\circ$, thus concluding that $g = h$ is a two-sided inverse to $f$.