repo
stringlengths 27
90
| file
stringlengths 57
176
| language
stringclasses 2
values | license
stringclasses 13
values | content
stringlengths 25
180k
|
---|---|---|---|---|
https://github.com/lebinyu/typst-thesis-template | https://raw.githubusercontent.com/lebinyu/typst-thesis-template/main/template/cover_style.typ | typst | Apache License 2.0 | #let cover(
title:"",
supervisor: (),
group_name: "",
institute: "",
author: "",
ID: "",
address: (),
email: "",
closing_date: ""
) = {
set document(title: title, author: author)
set page(
margin: (x: 3cm),
numbering: none,
number-align: center,
)
// --- Cover ---
linebreak() * 4
align(center)[
#image(width: 9cm ,"figure/uzh_logo.png")
]
linebreak() * 3
align(center)[
#block(text(weight: 100, 1.75em, "Master Thesis"))
]
align(center)[
#block(text(weight: 900, 1.75em, title))
]
linebreak() * 3
align(center)[
#block(text(weight: 100, 1.3em, "Supervisors:"))
]
align(center)[
#supervisor.join(", ", last: " and ")
]
linebreak() * 3
align(center)[
#block(text(weight: 400, 1.75em, group_name))
]
linebreak() * 3
align(center)[
#block(text(weight: 400, 1.75em, institute))
]
linebreak() * 3
grid(
columns: (1fr, 1fr),
gutter: 5pt,
row-gutter: 0.65em,//0.65em is default spacing betwen line
align(right)[Author:],
align(left)[#author],
align(right)[Student ID:],
align(left)[#ID],
align(right)[Address],
align(left)[
#for adressline in address [
#adressline \
]
],
align(right)[E-mail:],
align(left)[#email],
align(right)[Closing date:],
align(left)[#closing_date],
)
pagebreak(weak:false)
pagebreak(weak:false)
} |
https://github.com/QuadnucYard/crossregex-typ | https://raw.githubusercontent.com/QuadnucYard/crossregex-typ/main/src/lib.typ | typst | MIT License | #import "hex.typ": crossregex-hex
#import "square.typ": crossregex-square
/// Make a wonderful cross-regex puzzle. This is a dispatcher function.
/// All of the arguments apply to `crossregex-hex` or `crossregex-square`.
///
/// - size (int): The size of the grids, namely the number of cells on the edge.
/// - alphabet (regex): The set of acceptable characters, used for highlight.
/// - constraints (array): All constraint regular expressions, given in clockwise order.
/// - answer (none, array, content): Your answers, either a multi-line raw block or an array of strings. The character in one cell is represented as a char in the string.
/// - show-whole (bool): Whether to show all constraints in one page.
/// - show-views (bool): Whether to show three views separately.
/// - cell (content): The shape of grid cells.
/// - cell-config (dict): Controls the appearance of cells. Defaults: (size: 1em, text-style: (:), valid-color: blue, invalid-color: purple). The text-style applies to the cell texts.
/// - deco-config (dict): Controls the appearance of decorations (hint marker + regex). Defaults: (hint-offset: 0.5em, hint-marker: auto, regex-offset: 1.0em, regex-style: auto).
/// - progress-creator (function, none): The creator function of progress: (total, filled) => content. If set to none, the progress is not shown.
/// - page-margin (length, dict): The margin of each page.
#let crossregex(size, shape: "hex", ..args) = {
if shape == "hex" {
crossregex-hex(size, ..args)
} else if shape == "square" {
crossregex-square(size, ..args)
} else {
panic("unsupported shape: " + repr(shape))
}
}
|
https://github.com/phinixplus/docs | https://raw.githubusercontent.com/phinixplus/docs/master/source/utilities.typ | typst | Other | #import "/source/config.typ": font-sans, font-mono
#import "/source/config.typ": interj-fg-color, text-fg-color, code-bg-color, code-fg-color
#import "/source/config.typ": text-weight-normal, text-weight-bold, code-weight
#import "/source/config.typ": text-size-2, text-size-5
#let code(file-path, language, tab-size: 4) = {
let file-content = read(file-path).replace("\t", " "*tab-size)
set block(breakable: true, radius: 0.6em)
show block: set align(center)
block(fill: code-fg-color,inset: 0.3em)[#{
show raw: set align(start)
show raw: set text(font: font-mono, size: text-size-2, weight: code-weight)
block(fill: code-bg-color, inset: 1em, raw(file-content, lang: language))
show text: x => move(dy: -0.5em, x)
text(font: font-sans, weight: text-weight-bold, file-path.split("/").last())
}]
}
#let interjection(body, icon, color: black) = {
grid(columns: (10mm, 1fr), stroke: 0mm, fill: white, align: horizon,
text(font: font-mono, size: text-size-5, fill: color, icon),
grid.vline(stroke: (paint: color, thickness: 0.3mm, cap: "round")),
block(inset: (x: 5mm, y: 1mm), text(fill: interj-fg-color, body))
)
}
#let note(body) = interjection(body, [], color: blue)
#let comment(body) = interjection(body, [], color: green)
#let separator(length: 98%) = {
show line: set align(center)
line(length: length, stroke: 0.1mm)
}
#let date-now = {
let today = datetime.today()
let (day-num, year-num) = (today.day(), today.year())
let day-suffix = "th"
if day-num == 1 or day-num == 21 or day-num == 31 { day-suffix = "st" }
else if day-num == 2 or day-num == 22 { day-suffix = "nd" }
else if day-num == 3 or day-num == 23 { day-suffix = "rd" }
[#day-num#day-suffix of #today.display("[month repr:long]") #year-num]
}
|
https://github.com/dyc3/good-typst-template | https://raw.githubusercontent.com/dyc3/good-typst-template/main/main.typ | typst | The Unlicense | #let authors = (
(
name: "<NAME>",
email: "<EMAIL>",
department: [School of Systems and Enterprises],
organization: [Stevens Institute of Technology],
location: [Hoboken, NJ],
),
)
// import and use whatever document format you want here
#import "lib/glossary.typ": glossary, glossaryWords, glossaryShow
#show glossaryWords("glossary.yml"): word => glossaryShow("glossary.yml", word)
// Index-Entry hiding : this rule makes the index entries in the document invisible.
#show figure.where(kind: "jkrb_index"): it => {}
#metadata("!glossary:disable")
// put outlines, title pages, etc here
#metadata("!glossary:enable")
// include chapters here
#metadata("!glossary:disable")
#glossary("glossary.yml")
#pagebreak()
#include "index.typ"
#pagebreak()
#bibliography("bibfile.bib")
|
https://github.com/Enter-tainer/typstyle | https://raw.githubusercontent.com/Enter-tainer/typstyle/master/tests/assets/unit/grid/dense.typ | typst | Apache License 2.0 | #align(
center,
table(
columns: 4,
align: (right, left, right, left),
column-gutter: (1em, 1.5em, 1em),
[$alpha$], [`alpha`], [$xi$, $Xi$], [`xi`, `Xi`],
[$beta$], [`beta`], [$omicron$], [`omicron`],
[$gamma$, $Gamma$], [`gamma`, `Gamma`], [$pi$, $Pi$], [`pi`, `Pi`],
[$delta$, $Delta$], [`delta`, `Delta`], [$pi.alt$], [`pi.alt`],
[$epsilon.alt$], [`epsilon.alt`], [$rho$], [`rho`],
[$epsilon$], [`epsilon`], [$rho.alt$], [`rho.alt`],
[$zeta$], [`zeta`], [$sigma$, $Sigma$], [`sigma`, `Sigma`],
[$eta$], [`eta`], [$sigma.alt$], [`sigma.alt`],
[$theta$, $Theta$], [`theta`, `Theta`], [$tau$], [`tau`],
[$theta.alt$], [`theta.alt`], [$upsilon$, $Upsilon$], [`upsilon`, `Upsilon`],
[$iota$], [`iota`], [$phi.alt$, $Phi$], [`phi.alt`, `Phi`],
[$kappa$], [`kappa`], [$phi$], [`phi`],
[$lambda$, $Lambda$], [`lambda`, `Lambda`], [$chi$], [`chi`],
[$mu$], [`mu`], [$psi$, $Psi$], [`psi`, `Psi`],
[$nu$], [`nu`], [$omega$, $Omega$], [`omega`, `Omega`],
),
)
|
https://github.com/darioglasl/Arbeiten-Vorlage-Typst | https://raw.githubusercontent.com/darioglasl/Arbeiten-Vorlage-Typst/main/05_Qualitätssicherung/02_unit_tests.typ | typst | #import "../Helpers/code-snippets.typ": code
== Unit Tests <headingUnitTests>
TODO: text
#figure(
code(
stepnumber:1,
numberfirstline: true,
numbers: true,
firstnumber: 138,
caption: "DateUtilsTest.java"
)[```java
@Test
void monthsBetweenDates_whenDifferentYear_thenTwoMonths() {
// given
var fromDate = LocalDate.parse("2021-12-01");
var toDate = LocalDate.parse("2022-01-28");
var expected = List.of(12, 1);
// when
var result = DateUtils.getMonthsBetweenDates(fromDate, toDate);
// then
assertEquals(expected, result);
}
```],
caption: [Unit Test für `DateUtils.getMonthsBetweenDates`],
kind: "Code-Fragment",
supplement: [Code-Fragment]
) <codeUnitTestExample> |
|
https://github.com/TheBotlyNoob/ECE1551 | https://raw.githubusercontent.com/TheBotlyNoob/ECE1551/main/notes/template.typ | typst | #show link: underline
#show link: set text(blue)
#outline(indent: 2em, title: [ECE 1551 Chapter REPLACEME_CHAPTER: ])
|
|
https://github.com/seanharmer/rails_typst | https://raw.githubusercontent.com/seanharmer/rails_typst/main/README.md | markdown | MIT License | # RailsTypst
TODO: Delete this and the text below, and describe your gem
Welcome to your new gem! In this directory, you'll find the files you need to be able to package up your Ruby library into a gem. Put your Ruby code in the file `lib/rails_typst`. To experiment with that code, run `bin/console` for an interactive prompt.
## Installation
TODO: Replace `UPDATE_WITH_YOUR_GEM_NAME_IMMEDIATELY_AFTER_RELEASE_TO_RUBYGEMS_ORG` with your gem name right after releasing it to RubyGems.org. Please do not do it earlier due to security reasons. Alternatively, replace this section with instructions to install your gem from git if you don't plan to release to RubyGems.org.
Install the gem and add to the application's Gemfile by executing:
$ bundle add UPDATE_WITH_YOUR_GEM_NAME_IMMEDIATELY_AFTER_RELEASE_TO_RUBYGEMS_ORG
If bundler is not being used to manage dependencies, install the gem by executing:
$ gem install UPDATE_WITH_YOUR_GEM_NAME_IMMEDIATELY_AFTER_RELEASE_TO_RUBYGEMS_ORG
## Usage
TODO: Write usage instructions here
## Development
After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and the created tag, and push the `.gem` file to [rubygems.org](https://rubygems.org).
## Contributing
Bug reports and pull requests are welcome on GitHub at https://github.com/[USERNAME]/rails_typst.
## License
The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
|
https://github.com/typst/templates | https://raw.githubusercontent.com/typst/templates/main/wonderous-book/README.md | markdown | MIT No Attribution | # wonderous-book
A book template for fiction. The template contains a title page, a table of contents, and a chapter template.
Dynamic running headers contain the title of the chapter and the book.
## Usage
You can use this template in the Typst web app by clicking "Start from template"
on the dashboard and searching for `wonderous-book`.
Alternatively, you can use the CLI to kick this project off using the command
```
typst init @preview/wonderous-book
```
Typst will create a new directory with all the files needed to get you started.
## Configuration
This template exports the `book` function with the following named arguments:
- `title`: The book's title as content.
- `author`: Content or an array of content to specify the author.
- `paper-size`: Defaults to `iso-b5`. Specify a [paper size
string](https://typst.app/docs/reference/layout/page/#parameters-paper) to
change the page format.
- `dedication`: Who or what this book is dedicated to as content or `none`. Will
appear on its own page.
- `publishing-info`: Details for the front matter of this book as content or
`none`.
The function also accepts a single, positional argument for the body of the
book.
The template will initialize your package with a sample call to the `book`
function in a show rule. If you, however, want to change an existing project to
use this template, you can add a show rule like this at the top of your file:
```typ
#import "@preview/wonderous-book:0.1.0": book
#show: book.with(
title: [Liam's Playlist],
author: "<NAME>",
dedication: [for Rachel],
publishing-info: [
UK Publishing, Inc. \
6 Abbey Road \
Vaughnham, 1PX 8A3
#link("https://example.co.uk/")
971-1-XXXXXX-XX-X
],
)
// Your content goes below.
```
|
https://github.com/Skimmeroni/Appunti | https://raw.githubusercontent.com/Skimmeroni/Appunti/main/Metodi%20Algebrici/Interi/Fermat.typ | typst | Creative Commons Zero v1.0 Universal | #import "../Metodi_defs.typ": *
#theorem("Piccolo Teorema di Fermat")[
Sia $p in NN$ numero primo. Per qualsiasi $a in NN$ vale:
$ a^(p) equiv a mod p $
Inoltre, se $p$ non é divisore di $a$, vale anche:
$ a^(p − 1) equiv 1 mod p $
] <Fermat-little-theorem>
#proof[
/*
Si consideri innanzitutto il caso in cui $p$ non sia divisore di $a$.
Si studino allora le classi di resto cosí definite:
$ {[0]_(p), [a]_(p), [2a]_(p), ..., [(p - 1)a]_(p)} $
É possibile provare che tali classi sono tutte distinte fra loro. Si
supponga infatti per assurdo che questo non sia vero, e che quindi
esistano (almeno) due classi di resto dell'insieme sopra definito che
coincidono. Siano queste $[r a]_(p) = [s a]_(p)$, con $r, s in ZZ$
tali per cui $0 lt.eq r < p$ e $0 lt.eq s < p$. Supponendo, senza
perdita di generalitá, $r gt.eq s$, si ha allora:
$ [r a]_(p) = [s a]_(p) => [r a]_(p) - [s a]_(p) = [0]_(p) =>
[r a - s a]_(p) = [0]_(p) => [(r - s)a]_(p) = [0]_(p) $
Ovvero, $p | (r - s) a$. Per il @Euclid-lemma, deve essere vera almeno
una proposizione fra $p | r - s$ e $p | a$; dato che quest'ultima non
puó essere vera per ipotesi, deve aversi $p | r - s$. I due numeri
interi $r$ e $s$ sono stati peró definiti come positivi ed inferiori
a $p$, pertanto $p | r - s$ puó essere vera solamente nel caso in cui
$r - s = 0$, ovvero $r = s$. Ma allora:
$ {[0]_(p), [a]_(p), [2a]_(p), ..., [(p - 1)a]_(p)} =
{[0]_(p), [1]_(p), [2]_(p), ..., [(p - 1)]_(p)} $
Questo perché entrambi hanno esattamente $p$ classi di resto modulo $p$,
e diventa allora possibile ridurre in modulo $p$ il primo insieme ottenendo
il secondo.
Poiché la classe $[0]_(p)$ compare in entrambi gli insiemi, puó essere
eliminata matenendo valida l'uguaglianza:
$ {[a]_(p), [2a]_(p), ..., [(p - 1)a]_(p)} =
{[1]_(p), [2]_(p), ..., [(p - 1)]_(p)} $
Se i due insiemi sono uguali membro a membro, allora il prodotto degli
elementi del primo insieme deve essere uguale al prodotto degli elementi
del secondo insieme:
$ [a]_(p) dot [2 a]_(p) dot ... dot [(p - 1)a]_(p) & =
[1]_(p) dot [2]_(p) dot ... dot [(p - 1)]_(p) => \
[a dot 2 a dot ... dot (p - 1)a]_(p) & =
[1 dot 2 dot ... dot (p - 1)]_(p) => \
[a^(p - 1) (p - 1)!]_(p) & =
[(p - 1)!]_(p) $
Da cui si ricava, per la definizione di classe di resto,
$a^(p - 1) (p - 1)! equiv (p - 1)! mod p$. Essendo $p$
un numero primo, certamente non puó essere un divisore
di $(p - 1)!$, pertanto é valido il @Cancellation-law-congruences
e quindi é possibile semplificare come $a^(p - 1) equiv 1 mod p$.
Si supponga ora che $p$ sia un numero primo qualsiasi. Se $p$ non
é un divisore di $a$, é possibile applicare il @Cancellation-law-congruences
"nell'altro verso" al risultato appena trovato. Ovvero, é possibile moltiplicare
ambo i membri di $a^(p - 1) equiv 1 mod p$ per $p$, ottenendo $a^(p) equiv p mod p$.
Se invece $p$ é un divisore di $a$, questo equivale a dire $a equiv 0 mod p$.
Tuttavia, deve valere anche $a^(p) equiv 0 mod p$; per proprietá transitiva,
$a^(p) equiv a mod p$.
*/
]
#theorem("Teorema di Fermat-Eulero")[
Sia $n in NN - {0}$ e sia $a$ un qualsiasi intero tale che $a$ ed $n$
siano primi fra di loro. Allora vale:
$ a^(phi(n)) equiv 1 mod n $
] <Euler-theorem>
#proof[
Si consideri innanzitutto il caso in cui $n$ sia una potenza di un
numero primo, ovvero $n = p^(m)$ con $p$ numero primo e $m$ numero
naturale. Si proceda per induzione su $m$; il caso base si ha con
$m = 1$:
$ a^(phi(p^(1))) equiv 1 mod p^(1) =>
a^(p^(1 - 1) (p - 1)) equiv 1 mod p =>
a^(p - 1) equiv 1 mod p $
Che equivale all'enunciato del @Fermat-little-theorem, e pertanto é
verificato.
Si consideri ora l'ipotesi induttiva: si dimostri che sia
valido $a^(phi(p^(m))) equiv 1 mod p^(m)$ assumendo che sia
valido $a^(phi(p^(m - 1))) equiv 1 mod p^(m - 1)$. Tale
espressione equivale a:
$ a^(phi(p^(m - 1))) equiv 1 mod p^(m - 1) =>
p^(m - 1) | a^(phi(p^(m - 1))) - 1 =>
a^(phi(p^(m - 1))) - 1 = p^(m - 1) b $
Per un certo $b in ZZ$. Per il @Euler-function-primes, é
possibile esplicitare l'esponente di $a$:
$ a^(phi(p^(m - 1))) - 1 = p^(m - 1) b =>
a^(p^(m - 2) (p - 1)) - 1 = p^(m - 1) b =>
a^(p^(m - 2) (p - 1)) = 1 + p^(m - 1) b $
Elevando ambo i membri alla potenza $p$, si ha:
$ (a^(p^(m - 2) (p - 1)))^(p) = (1 + p^(m - 1) b)^(p) =>
a^(p^(m - 1) (p - 1)) = (1 + p^(m - 1) b)^(p) =>
a^(phi(p^(m))) = (1 + p^(m - 1) b)^(p) $
Il termine $(1 + p^(m − 1) b)^(p)$ puó essere espanso usando la
formula del binomio di Newton:
$ (1 + p^(m - 1) b)^(p) =>
1 + (p^(m - 1) b)^(p) +
sum_(k = 1)^(p - 1) mat(p; k) (p^(m - 1) b)^(p - k) $
Ogni addendo della sommatoria, cioè ogni termine $mat(p; k)
(p^(m - 1) b)^(p - k)$, é un multiplo di $p^(m)$ perché
$mat(p; k)$ é multiplo di $p$ e $(p^(m - 1) b)^(p - k)$
é multiplo di $p^(m - 1)$, per $k = 1, ..., p - 1$.
Inoltre, $(p^(m − 1) b)^(p)$ è un multiplo di $p^(m)$, dunque si ha:
$ (1 + p^(m − 1) b)^(p) equiv 1 mod p^(m) $
Da cui, per proprietá transitiva:
$ a^(phi(p^(m))) equiv 1 mod p^(m) $
Nel caso in cui $n$ sia un numero qualsiasi, questo puó essere
certamente fattorizzato come $n = p_(1)^(m_(1)) p_(2)^(m_(2)) ...
p_(r)^(m_(r))$, dove ciascun $p_(i)$ con $1 lt.eq i lt.eq r$ é un
numero primo distinto e ciascun $m_(i)$ é un numero naturale. Per
ciascuno di questi fattori elevati al rispettivo esponente, dovrá
valere:
$ a^(phi(p_(i)^(m_(i)))) equiv 1 mod p_(i)^(m_(i)) $
Per il @Euler-function-multiplicative, si ha che ciascun
$phi(p_(i)^(m_(i)))$ é divisore di $phi(n)$, ovvero che
per un certo $t in ZZ$ vale $phi(n) = phi(p_(i)^(m_(i))) t$.
Allora:
$ a^(phi(n)) = a^(phi(p_(i)^(m_(i))) t) = (a^(phi(p_(i)^(m_(i)))))^(t)
equiv 1^(t) = 1 mod p_(i)^(m_(i)) $
In altre parole, ogni $p_(i)^(m_(i))$ é divisore di $a^(phi(n)) - 1$.
Dato che ogni $p_(i)^(m_(i))$ é potenza di un numero primo, é evidente
come, presi due $p_(i)^(m_(i))$ e $p_(j)^(m_(j))$ qualsiasi con $i != j$,
questi saranno coprimi. Ma allora:
$ p_(1)^(m_(1)) p_(2)^(m_(2)) ... p_(r)^(m_(r)) | a^(phi(n)) - 1 =>
a^(phi(n)) equiv 1 mod p_(1)^(m_(1)) p_(2)^(m_(2)) ... p_(r)^(m_(r)) $
Avendo peró definito $n$ come $n = p_(1)^(m_(1)) p_(2)^(m_(2)) ...
p_(r)^(m_(r))$:
$ a^(phi(n)) equiv 1 mod n $
]
|
https://github.com/Maso03/Bachelor | https://raw.githubusercontent.com/Maso03/Bachelor/main/Bachelorarbeit/chapters/conclusion.typ | typst | MIT License | = Conclusion
#lorem(100) @lorem-ipsum-generator |
https://github.com/Dherse/masterproef | https://raw.githubusercontent.com/Dherse/masterproef/main/masterproef/parts/2_ecosystem.typ | typst | #import "../ugent-template.typ": *
= Programming of photonic processors <sec_programming_photonic_processors>
The primary objective of this chapter is to explore the different aspects of
programming photonic processors. This chapter will start by looking at different
traditional programming ecosystems and how different languages approach common
problems when programming. Then, the existing software solutions and their
limitations will be analysed. Finally, an analysis of relevant programming
paradigms will be done. This chapter's secondary objective is to familiarise the
reader with the concepts and terminology used in the rest of the thesis. This
chapter will also introduce the reader to different programming paradigms
relevant to the research at hand, as well as programming language concepts and
components. As this chapter also serves as an introduction to programming
language concepts, it is written in a more general way, exploring components of
programming ecosystems -- in @sec_components -- before looking at specificities
relevant to the programming of photonic processors.
== Programming languages as a tool <sec_language_tool>
#udefinition(
footer: [ Adapted from @noauthor_imperative_2020 ],
)[
*Imperativeness* refers to whether the program specifies the expected results of
the computation (declarative) or the steps needed to perform this computation
(imperative). These differences may be understood as the difference between _what_ the
program should do and _how_ it should do it.
]
Programming languages, in the most traditional sense, are tools used to express _what_ and,
depending on its imperativeness and paradigm, _how_ a device should perform a
task. A device, in this context, means any device that is capable of performing
sequential operations, such as a processor, a microcontroller or another device.
However, programming languages are not limited to programming computers but are
increasingly used for other tasks. So-called #gloss("dsl", long: true, suffix: [s]) are
languages designed for specific purposes that may intersect with traditional
computing or describe traditional computing tasks but can also extend beyond
traditional computing. @dsl[s] can be used to program digital devices such as
@fpga[s] but also to program and simulate analog systems such as @verilog-ams or
@spice.
Additionally, programming languages can be used to build strong abstractions
over traditional computing tasks. For example, @sql is a language designed to
describe database queries by describing the _what_ and not the _how_, making it
easier to reason about the queries being executed. Other examples include _Typst_,
the language used to create this document.
Furthermore, some languages are designed to describe digital hardware, so-called
@rtl #gloss("hdl", long: true, suffix: [s]). These languages are used to
describe the hardware in a way closer to the actual hardware; therefore, they
are not used to describe the _what_ but the _how_. These languages are not the
focus of this thesis, but they are essential to understand the context of the
research at hand, and they will be further examined in
@sec_ecosystem_components_summary, where their applicability to the research at
hand will be discussed.
As such, programming languages can be seen, in a more generic way, as tools that
can be used to build abstractions over complex systems, whether software systems
or hardware systems, and therefore, the ecosystem surrounding a language can be
seen as a toolbox providing many amenities to the user of the language. Is it,
therefore, important to understand these components and reason about their
importance, relevance and how they can best be used for a photonic processor.
== Typing in programming languages <sec_typing>
#udefinition(
footer: [ Adapted from @cardelli_understanding_1985 ],
)[
A *type system* is a system made of rules that assign a property called a type
to values in a program. It dictates how to create them, what kind of operations
can be done on those values, and how they can be combined.
]
#udefinition(
footer: [ Adapted from @cardelli_understanding_1985 ],
)[
*Static or dynamic typing* refers to whether the type of arguments, variables
and fields is known at compile time or at runtime. In statically typed
languages, the type of values must be known at compile time, while in
dynamically typed languages, the type of values is computed at runtime.
]
All languages have a type system; it provides the basis for the language to
reason about values. It can be of two types: static or dynamic. Static typing
allows the compiler to know ahead of executing the code what each value is and
means. This allows the compiler to provide features such as type verification
that a value has the correct type for an operation and to optimise the code to
improve performance. On the contrary, dynamic typing does not determine the type
of values ahead of time, instead forcing the burden of type verification on the
user. This practice makes development easier at the cost of increased overhead
during execution and the loss of some optimisations @dot_analysis_2015.
Additionally, dynamic typing is a common source of runtime errors for programs
written in a dynamically typed language, something that is caught during the
compilation process in statically typed languages.
Therefore, static typing is generally preferred for applications where speed is
a concern, as is the case in _C_ and _Rust_. However, dynamic typing is
preferred for applications where iteration speed is more important, such as in _Python_.
However, some languages exist at the intersection of these two paradigms, such
as _Rust_, which can infer parts of the type system at compile time, allowing
the user to write their code with fewer type annotations while still providing
the benefits of static typing. This is achieved through a process called type
inference, where the compiler generally uses the de facto standard algorithm
called _Hindley-Milner_ algorithm @milner_theory_1978 @rust_compiler, which will
be discussed further in @sec_phos.
#udefinition(
footer: [ Adapted from @cardelli_understanding_1985 ],
)[
*Polymorphism* refers to the ability of a language to allow the same code to be
used with different types.
]
Polymorphism allows code to be re-used with different types; a typical example
is a list. For a list to work, it does not matter what type of value is
contained within the list. Therefore one can make the list polymorphic over the
item type such that the list is defined at the type `List<V>` where `V` is a
type argument defining the contents of the list. Additionally, polymorphic
languages often offer a way to define types that meet specific criteria, such as
a type that is comparable or a type that is copyable. This is called a _trait_ in _Rust_ and
an _interface_ in _Java_ and _C\#_. On the other hand, _C_ does not have
polymorphism nor interfaces or traits. Then, polymorphic types and functions can
request that their type argument meet these requirements. This is called _bounded polymorphism_ and
is a common feature in modern programming languages
@cardelli_understanding_1985.
== Explicitness in programming languages <sec_explicitness>
In language design, one of the most important aspects to consider is the
explicitness of the language, that is, how many details the user must manually
specify and how much can be inferred. This is a trade-off between the
expressiveness of the language and the complexity of the language. A language
that is too explicit is both difficult to write and to read, while a language
that is too implicit is difficult to understand and reason about, while also
generally being more complex to implement. Therefore, it is essential to find a
balance between these two extremes. Another factor to take into account is that
too much "magic", that is, operations being done implicitly, can lead to
difficult-to-understand code, unexpected results and bugs that are difficult to
track down.
Therefore, it is in the interest of the language designer and users to find a
balance where the language is sufficiently expressive while also being
sufficiently explicit. This is, generally, a difficult balance to find and can
take several iterations to achieve. This balance is not the same for every
programming language either. The target audience of the language tends to
govern, at least to some extent, which priorities are put in place. For example,
performance-focused systems, such as @hpc solutions, tend to be very explicit,
with fine-grained control to eke out the most performance, while on the
contrary, systems designed for beginners might want to be more implicit,
sacrificing complexity and fine-grained control for ease of use.
== Components of a programming ecosystem <sec_components>
An important part of programming any kind of programmable device is the
ecosystem that surrounds that device. The most basic ecosystem components that
are necessary for the use of the device are the following:
- a language reference or specification: the syntax and semantics of the language;
- a compiler or interpreter: to translate the code into a form that can be
executed by the device;
- a hardware programmer or runtime: to physically program and execute the code on
the device.
These components are the core elements of any programming ecosystem since they
allow the user to translate their code into a form the device can execute. And
then to use the device. Therefore, without these components, the device is
useless. However, these components are not sufficient to create a user-friendly
ecosystem. Indeed, the following component list can also be desirable:
- a debugger: to aid in the development and debugging of the code;
- a code editor: to write the code in, it can be an existing editor with support
for the language;
- a formatter: to format the code consistently;
- a linter: a tool used to check the code for common mistakes and to enforce a
coding style;
- a testing framework: to test and verify the code;
- a simulator: to simulate the execution of the code;
- a package manager: to manage dependencies between different parts of the code;
- a documentation generator: to generate documentation for the code;
- a build system: to easily build the code into a form that the device can
execute.
With the number of components desired, one can conclude that any endeavour to
create such an ecosystem is a large undertaking. Such a large undertaking needs
to be carefully planned and executed. And to do so, it is important to look at
existing ecosystems and analyse them. This section will analyse the ecosystems
of the following languages, when relevant:
- #emph[C]: a low-level language that is mainly used for embedded systems and
operating systems;
- #emph[Rust]: a modern systems language primarily used for embedded systems and
high-performance applications;
- #emph[Python]: a high-level language that is used chiefly for scripting and data
science;
- #emph[@vhdl]: an #gloss("hdl", long: true) that is used to describe digital
hardware;
- #emph[@verilog-ams]: an analog simulation language that has been used to
describe photonic circuits @ansys_lumerical;
Each of these ecosystems comes with a particular set of tools in addition to the
aforementioned core components. Some of these languages come with tooling
directly built by the maintainers of the languages, while others leave the
development of these tools to the community. However, it should be noted that,
generally, tools maintained by the language maintainers tend to have a higher
quality and broader usage than community-maintained tools.
Additionally, the analysis done in this section will give pointers towards the
language choice used in developing the language that will be presented in
@sec_phos, a custom @dsl language for photonic processors. As this language will
not be self-hosted -- its compiler will not be written in itself -- it will need
to use an existing language to create its ecosystem.
=== Language specification & reference
#udefinition(
footer: [ Adapted from @jones_forms_2007 ],
)[
A *programming language specification* is a document that formally defines a
programming language, such that there is an understanding of what programs in
that language mean. This document can be used to ensure that all implementations
of the language are compatible with one another.
]
#udefinition(
footer: [ Adapted from @jones_forms_2007 ],
)[
A *programming language reference* is a document that outlines the syntax,
features and usage of a programming language. It serves as a simplified version
of the specification and is usually written during the development of the
language.
]
A programming specification is useful for languages that are expected to have
more than one implementation, as it outlines what a program in that language is
expected to do. Indeed, code that is written following this specification should
therefore be able to be executed by any language implementation and produce the
same output. However, this is not always the case. Several languages with
proprietary implementations, such as #emph[VHDL] and #emph[SystemC] -- two
languages used for hardware description of digital electronics -- have issues
with vendored versions of the language @Chacko2019CaseSO.
This previous point is particularly interesting for the application at hand:
assuming that the goal is to reuse an existing specification for the creation of
a new photonic @hdl, then it is crucial to select a language that has a
specification. However, if the design calls for an @api implemented in a given
language instead, then it does not matter. Indeed, in the latter case, the
specification is the implementation itself.
Additionally, when reusing an existing specification for a different purpose
than the intended one, it is essential to check that the specification is not
too restrictive. Indeed, as previously shown in @photonic_processor, the
programming of photonic processors is different from that of electronic
processors. Therefore, special care has to be taken that the specification
allows for the expression of the necessary concepts. This is particularly
important for languages that are not designed for hardware description, such as #emph[C] and #emph[Python].
Given that photonics has a different target application and different semantics,
most notably the fact that photonic processors are continuous analog systems --
rather than digital processors -- these languages may lack the constructs needed
to express the necessary concepts and may therefore not be suitable for the
development of a photonic @hdl. Given the effort required to modify the
specification of an existing language, it may be better to create a new language
dedicated to photonic programming.
Furthermore, the language specification is only an important part of the
ecosystem being designed when reusing an existing language. However, if creating
a new language or an @api, then the specification is irrelevant. It is desirable
to create a specification when creating a new language, as it can be used as a
thread guiding development -- with the special consideration that a
specification is only useful once the language is mature: immature languages
change often and may break their own specification, and maintaining a changing
specification as the language evolves may lower the speed at which work is done.
For example, #emph[Rust] is widely used despite lacking a formal specification @rust-reference.
=== Compiler
#udefinition(
footer: [ Adapted from @aho2006compilers ],
)[
A *compiler* is a program that translates code written in a higher-level
programming language into a lower-level programming language or format so that
it can be executed by a computer or programmed onto a device.
]
The compiler has an important task; they translate the user's code from a
higher-level language, which can still remain quite low-level, as in the case of #emph[C],
into a low-level representation that can be executed. The type of language used
determines the complexity of the compiler. In general, the higher the level of
abstraction, the more work the compiler must perform to create executable
artefacts.
An alternative to compilers are interpreters, which perform this translation on
the fly; such is the case for #emph[Python]. However, since @hdl[s] tend to
produce programming artefacts for the device, a compiler is more appropriate
for the task at hand. This, therefore, means that #emph[Python] is not a
suitable language for the development of a photonic @hdl. Or, at least, it would
require the development of a dedicated compiler for the language.
One of the key aspects of the compiler, excluding the translation itself, is the
quality of errors it produces. The easier the errors are to understand and
reason about, the easier the user can fix them. Therefore, when designing a
compiler, extreme care must be taken to ensure that the errors are as clear as
possible. Languages like #emph[C++] are notorious for having frustrating errors
@becker_compiler_2019, while languages like #emph[Rust] are praised for the
quality of their errors. This is important to consider when designing a
language, as it can make or break the user experience. Following guidelines such
as the ones in @becker_compiler_2019 can help in the design of a compiler and
greatly improve user experience.
==== Components
Compilers vary widely in their implementation. However, they all perform the
same basic actions that may be separated into three distinct components:
- the frontend: which parses the code and performs semantic analysis;
- the middle-end: which performs optimisations on the code;
- the backend: which generates the executable artefacts.
The frontend checks whether the program is correct in terms of its usage of
syntax and semantics. It produces errors that should be helpful for the user
@becker_compiler_2019. Additionally, in statically typed languages, it performs
type checking to ensure that types are correct and operations are valid. In
general, the frontend produces a simplified, more descriptive version of the
code to be used in further stages @rust_compiler. The middle-end performs
multiple functions but generally performs optimisations on the code. These
optimisations can be of various types, and are generally used to improve the
performance of the final executable. As will be discussed in @sec_phos, while
performance is important, it is not the main focus of the proposed language.
Therefore, the middle-end can be simplified. Finally, the backend has the task
of producing the final executable. This is a complex topic in and of itself, as
it requires the generation of code for the target architecture. In the case of #emph[C] using #emph[Clang] -- a
common compiler for _C_ -- this is done by the LLVM compiler framework
@clang_internals. However, as with the middle-end, the final solution suggested
in this work will not require the generation of traditional executable
artefacts. Instead, some of the tasks that one may group under the backend, such
as place-and-route, will still be required and are complex enough to warrant
their own research.
=== Hardware-programmer & runtime
#udefinition(
footer: [ Adapted from @czerwinski2013finite ],
)[
The *hardware-programmer* is a tool that allows the user to write their
compilation artefacts to the device. It is generally a piece of software that
communicates with the device through a dedicated interface, such as a USB port.
Most often, it is provided by the manufacturer of the device.
]
The hardware-programmer is an important part of the ecosystem, as it is required
to program the physical hardware. Usually it is also involved in debugging the
device, such as with interfaces like @jtag. However, as this may be considered
part of the hardware itself, it will not be further discussed in this section.
However, it must be considered as the software must be able to communicate with
the device.
#udefinition(
footer: [ Adapted from @czerwinski2013finite ],
)[
The *runtime* is a program that runs on the device to provide the base functions
of the device, such as initialization, memory management, and other low-level
functions @aho2006compilers. It is generally provided by the manufacturer of the
device.
]
In the case of a photonic processor, it is as of yet unclear what tasks and
functions it will perform for the rest of the ecosystem, and warrants its own
research and work. The runtime is a device-specific component, and as such, it
is not possible to design it as a generic, reusable, component. Therefore, it is
mentioned as a necessary component, and will be discussed in further details in
@sec_phos but will not be further considered in this section.
In general, the hardware-programmer and the runtime work hand-in-hand to provide
the full programmability of the device: the hardware-programmer is the
interface between the user and the device, while the runtime is the interface
between the device and the user's compiled code artefacts. Therefore, these two
components are what allow the user's code not only to be executed on the
device, but also to have access to the device's resources.
=== Debugger
#udefinition(
footer: [Adapted from @aho2006compilers],
)[
A *debugger* is a program that allows the user to inspect the state of the
program as it is being executed. In the case of a hardware debugger, it
generally works in conjunction with the hardware-programmer to allow the user to
inspect the state of the device, pause execution and step through the code.
]
The typical features of debuggers include the ability to place break-points --
point in the code where the execution is automatically paused upon reaching it
-- step through the code, inspect the state of the program, then resume the
execution of the program. Another common feature is the ability to pause on
exception, essentially, when an error occurs, the debugger will pause the
execution of the program and let the user inspect what caused this error and
observe the list of function calls that lead to the error.
Some of the functions of a debugging interface are hard to apply to analog
circuitry such as in the case of photonic processors. And it is evident that
traditional step-by-step debugging is not possible due to the real-time,
continuous nature of analog circuitry. However, it may be possible to provide
mechanisms for inspecting the state of the processor by sampling the analog
signals present within the device.
Due to the aforementioned limitations of existing digital debuggers, no existing
tool can work for photonic processors. Instead, traditional analog electronic
debugging techniques, such as the use of an oscilloscope, are preferable.
However, traditional tools only allow the user to inspect the state at the edge
of the device; therefore, inspecting issues inside of the device requires
routing signals to the outside of the chip, which may not always be possible. However,
it is interesting to note that this is an active area of research
@szczesny_hdl_based_2017, @Felgueiras2007ABD @Motel2014SimulationAD, for analog
electronics at least, and it would be interesting to see what future research
yields and how much introspection will be possible with "analog debuggers".
=== Code formatter
#udefinition(
footer: [Adapted from @nonoma_formatter],
)[
A *code formatter* is a program that takes code as input and outputs the same
code, but formatted according to a set of rules. It is generally used to enforce
a consistent style across a codebase such as in the case of the _BSD project_ @bsd_style and _GNU style_ @gnu_style.
]
Most languages have code formatters such as _rustfmt_ for _Rust_ and _ClangFormat_ for
the _C_ family of languages. These tools are used to enforce rules on styling of
code, they play an important role in keeping code bases readable and consistent.
Although not being strictly necessary, they can enhance the programmer's
experience. Additionally, some of these tools have the ability to fix certain
issues they detect, such as _rustfmt_.
Most commonly, these tools rely on _Wadler-style_ formatting @wadler_style. Due
to the prominence of this formatting architecture, it is likely that, when
developing a language, a library for formatting code will be available. This
makes the development of a code formatter much easier, as it is only necessary
to implement the rules of the language.
=== Linting
#udefinition(
footer: [Adapted from @nonoma_formatter],
)[
A *linter* is a program that looks for common errors, good practices, and
stylistic issues in code. It is used with a formatter to enforce a consistent
style across a codebase. They also help mitigate the risk of common errors and
bugs that might occur in code.
]
As with formatting, most languages have linters made available through
officially maintained tools or community maintained initiatives. As these tools
provide means to mitigate common errors and bugs, they are an important part of
the ecosystem. They can be built as part of the compiler, or as a separate tool
that can be run on the codebase. Additionally, linters often lack support for
finding common errors in the usage of external libraries. Therefore, when
developing an @api, linters are limited in checking for proper usage of the @api
itself. Care must be taken to ensure that the @api is used correctly, such as by
making the library less error-prone through strong typing.
Nonetheless, linters are limited in their ability to detect only common errors
and stylistic issues, as they can only check errors and issues for which they
have pre-made rules. They cannot check for more complex issues such as logic
errors. However, the value of catching common errors and issues cannot be
understated. Therefore, whether selecting a language to build an @api or
creating a custom language, it is important to consider the availability and
quality of linters.
As for the implementation of linters, they generally rely on a similar
architecture to formatters, using existing compiler components to read code.
However, they differ by matching a set of rules on the code to find common
errors. Creating a good linter is, therefore, more challenging than creating a
good formatter as the number of rules required to catch common errors may be
quite high. For example, _Clippy_, _Rust_'s linter, has 627 rules @clippy_rules.
Interestingly, as in the case of _Clippy_, some rules can also be used to
suggest better, more readable ways of writing code, colloquially called good
practices. For example, _Clippy_ has a rule that suggests lowering cognitive
load using the rule `clippy::cognitive_complexity` @clippy_rules. This rule
suggests that functions that are too complex, as defined in the literature
@cognitive_load, should be either reworked or split into smaller, more readable
code units.
=== Code editor
#udefinition(
footer: [ Adapted from @source-code-editor ],
)[
A *code editor* is a program that allows the editing of text files. It generally
provides features aimed at software development, such as syntax highlighting,
code completion, and code navigation.
]
As previously mentioned, most code editors also provide features aimed at
software development. Features such as syntax highlighting: which provides the
user with visual cues about the structure of the code, code completion: which
suggest possible completions for the code the user is currently writing. And
code navigation: allows the user to jump to the definition or user of a
function, variable, or type. These features help the user be more productive and
navigate codebases more easily.
In general, it is not the responsibility of the programming language to make a
code editor available. Fully featured programming editors are generally called
@ide[s]. Indeed, most users have a preferred choice of editor, with the most
popular being _Visual Studio Code_, _Visual Studio_ -- both from _Microsoft_ -- and _IntelliJ_ -- a _Java_-centric
@ide from _JetBrains_ @stackoverflow_survey. Additionally, most editors have
support for more than one language, either officially or through
community-maintained plugins -- additional software that extends the editor's
functionality.
When creating a new language, effort should not go towards creating a new editor
as much as supporting existing ones. This is usually done by creating plugins
for common editors. However, this approach leads to repetition, as editors use
different languages for plugin development. Over the past few years, a new
standard, @lsp, has established itself as a de-facto standard for editor support
@kjaer_rask_specification_2021. It allows language creators to provide an @lsp
implementation and small wrapper plugins for multiple editors, greatly reducing
the effort required to support multiple editors. @lsp was originally introduced
by _Microsoft_ for _Visual Studio Code_, but has since been adopted by most
editors @kjaer_rask_specification_2021.
=== Testing & simulation
#udefinition(
footer: [ Adapted from @unit-test and @simulation ],
)[
*Testing* is the process of checking that a program produces the correct output
for a given input. It is generally done by writing a separate program that runs
parts -- or the entirety -- of the tested program and checks that it produces an
output and that the produced output is correct.
]
Testing can generally be seen as checking that a program works as intended.
Tests check for logical errors rather than syntactic errors, which the compiler would catch.
Tests can be written ahead of the writing of the program. This is then called
@tdd @McDonough2021TestDrivenD. Additionally, external software can provide
metrics such as _code coverage_ that inform the user of the proportion of their
code being tested @ivankovic_code_2019.
Testing also comes in several forms; one may write _unit tests_ that test a
single function, _integration tests_ that test the interaction between functions
or modules, _regression tests_ that test that a bug was fixed and does not
reappear in newer versions, _performance tests_ -- also called _benchmarks_ -- which
test the performance of the programs or parts of the program, and _end-to-end tests_ which
test the program as a whole.
Additionally, there also exists an entirely different kind of test called _constrained random_ which
produces random but correct input to a program and checks that, under no
conditions, the program crashes. This is generally utilised to find edge cases
that are not correctly handled and test the program's robustness, especially in
areas concerning security and memory management.
Most modern programming languages, such as _Rust_ provide a testing framework as
part of the language ecosystem. However, these testing frameworks may need to be
expanded to provide library-specific features to test more advanced usage. As an
example, one may look at libraries like _Mockito_, which provides features for
@http testing in _Rust_ @mockito_github.
Therefore, when developing an @api, it is important to consider how the @api
itself will be tested and how the user is expected to test their usage of the
@api. Additionally, when creating a language, it is important to consider how
the language will be tested and what facilities will be provided to the user to
test their code.
#udefinition(
footer: [ Adapted from @unit-test and @simulation ],
)[
*Simulation* is the process of running a program that simulates the behaviour of
a physical device. It is used to test that @hdl[s] produce the correct state for
a given input and starting state while also checking that the program does so in
the correct timing or power consumption limits.
]
Simulation is more specific to @hdl[s] and embedded development than traditional
computer development, where the user might want to programmatically test their
code on the target platform without needing the physical device to be attached
to a computer. For this reason, the hardware providers make simulators available
to their users. These simulators run the user's code as if it was running on
real hardware, providing the user with tools for introspection of the device and
checking that the program behaves as expected. As an example, _Xilinx_ provides
a simulator for their @fpga[s] called _Vivado Simulator_. This simulator allows
the user to run their code on a simulated @fpga and check that the output is
correct. This is an essential tool for the users of @hdl[s] as it allows them to
test their code without needing access to physical devices. Furthermore, it
allows programmers working on @asic[s] to simulate their code and design before
manufacturing a prototype.
There are many simulation tools, such as _Vivado Simulator_, which allows users
to test their FPGA code, and other tools, such as _QEMU_, which allow users to
test embedded platforms. Additionally, many analog simulation tools exist, most
notably the @spice family of tools, which allow the simulation of analog
electronics. There is also work being done to simulate photonic circuits using
@spice @ye_spice-compatible_2022.
Finally, there also exist tools for physical simulation, such as _Ansys Lumerical_ which
are physical simulation tools that simulate the physical interactions of light
with matter. These tools are used during the creation of photonic components
used when creating @pic[s]. However, they are generally slow and require large
amounts of computation power @bogaerts_silicon_2018 @alerstam_parallel_2008.
Therefore, when creating an @api or a language for photonic processor
development, it is desirable to consider how simulations will be performed and
the level of details that this simulator will provide. The higher the amount of
details, the higher the computational needs.
==== Verification
As previously mentioned, when writing @hdl code, it is desirable to simulate the
code to check that it behaves correctly. Therefore, it may even be desirable to
automatically simulate code in a similar way that unit tests are performed. This
action of automatically testing through simulation is called _verification_. As
verification is an integral part of the @hdl workflow and ecosystem, any
photonic programming solution must provide a way to perform verification. This
would be done by providing both a simulator and a tester and then providing a
way of interfacing both together to perform verification.
#pagebreak(weak: true)
=== Package manager
#udefinition(
footer: [ Adapted from @package-manager ],
)[
A *package manager* or *dependency manager* is a tool that allows users to
install and manage dependencies of their projects. These dependencies are
generally libraries but can also be tools such as testing frameworks, etc.
]
Package management is an integral part of modern language ecosystems. It allows
users to easily install dependencies from the community and share new ones with
the community. This is done through the use of a global repository of packages.
Additionally, some package managers provide a way to create private repositories
to protect intellectual property.
This last point is of particular interest for hardware description. It is common
in the hardware industry to license the use of components -- generally called
@ip[s]. Therefore, any package manager designed to be used with an @hdl must
provide a way of protecting the intellectual property of package providers and
users alike.
Additionally, package managers often offer version management, allowing the user
to specify which version of a package they wish to use. As well as allowing
package providers to update their packages as they get refined and improved. The
same can be applied to hardware description as additional features may be added
to a component, or hardware bugs may be fixed.
Finally, package managers usually handle nested dependencies, that is, they are
able to resolve the dependencies of the dependencies, making the experience of a
user wishing to use a specific package easier. This lets creators of
dependencies build on top of existing community solutions, providing a more
cohesive ecosystem. It is also important to point out that nested dependencies
can cause conflicts, so package managers must provide a way to resolve these
conflicts. This is usually done using _semantic versioning_, which is a way of
specifying version numbers that allows, to some degree, automatic conflict
resolution @lam_putting_2020.
=== Documentation generator
#udefinition(
footer: [ Adapted from @sai_zhang_automated_2011 ],
)[
A *documentation generator* is a tool that allows users to generate
documentation for their code using their code. This is usually done using
special comments in the code that are extracted and interpreted as
documentation.
]
The most common document generators are _Doxygen_ used by the _C_ and _C++_ communities
and _Javadoc_ used by the _Java_ community. Generally, documentation generators
produce documentation in the form of a website, where all the documentation and
components are linked together automatically. This makes navigating the
documentation easier for the user. Additionally, some documentation generators,
such as _Rustdoc_ for the _Rust_ ecosystem, provide a way to include and test
examples directly in the documentation. This makes it easier for users to
understand and use new libraries they might be unfamiliar with. For this reason,
when developing an @api, having a documentation generator built into the
language is highly desirable as the documentation can serve as a way for users
to learn the @api but also for maintainers to understand the implementation of
the @api itself. Additionally, when creating a new language, care might be given
to documentation generators, as they can provide a way for users to document
their code and maintainers to document the language and its standard library.
Finally, as technical documentation is the primary source of information for
developers @stackoverflow_survey, it is essential to consider this need from
users.
=== Build system
#udefinition(footer: [ Adapted from @aho2006compilers ])[
A *build system* is a tool that allows users to build their projects.
]
Build systems play an essential role in building complex software. Modern
software is generally composed of many files that are compiled together, along
with dependencies, configuration and many other resources, so it is challenging
to compile modern software projects by hand. For these reasons, build systems
are available. They provide a way to specify how a project should be built, this
can be done in an explicit way: where the user specifies the steps that should
be taken, the dependencies and how to build them. This approach would be similar
to the popular _CMake_ build system for the _C_ family of languages. Other build
systems like _Cargo_ for _Rust_ provide a mostly implicit way of building
projects, where the user only specifies the dependencies and, by using a
standardised file structure, the build system is able to infer how to build the
project. This approach is easier to use and leads to a more uniform project
structure. This means that, in combination with other tools such as formatters
and linters, projects built using tools like _Cargo_ all _look_ alike, making
them easy to navigate for beginners and experienced users alike. Additionally,
not having to create _CMake_ files for every new project follows the @dry
principle, which is a common mantra in programming.
Additionally, build systems can provide advanced features that are of particular
interest to hardware description languages. Features such as _feature flags_ are
particularly useful. A feature flag is a property that can be enabled during
building and that is additive: it adds additional features to the program. As a
simple example, consider the program in @prog_hello: it will print `"Hello, world!"`
when it is called. A feature flag called `custom_hello` may be used to
add the function in @prog_hello_custom, which allows the user to specify a name
to greet. It is purely additive: adding functionality to the previous library
and using the `custom_hello` feature flag to enable the additional feature
conditionally. This example is trivial, but this idea can be expanded.
Another example might be a feature flag that enables an additional type of
modulator in a library of reusable photonic components. Some libraries even take
a step further, where almost all of their features are gated, which allows them
to be very lean and fast to compile. However, this is not a common occurrence.
#figure(
caption: [ Simple function that prints `"Hello, world!"`, in _Rust_. ],
raw(read("../assets/code/hello_world/0.rs"), lang: "rust", block: true),
) <prog_hello>
#figure(
caption: [ Function that prints `"Hello, {name}!"` with a custom name, in _Rust_. ],
raw(read("../assets/code/hello_world/1.rs"), lang: "rust", block: true),
) <prog_hello_custom>
Whether providing the user with an @api or creating a new language, it is
essential to consider how the user's program must be built, as this task can
quickly become quite complex. Enforcing a fixed folder structure and providing a
ready-made build system that handles all common building tasks can significantly
improve the user experience. And especially the experience of newcomers as it
might prevent them from having to do obscure tasks such as writing their own _CMake_ files.
=== Summary <sec_ecosystem_components_summary>
As has been shown, many components are necessary or desirable to build a
complete, user-friendly ecosystem. Official support for these components might
be preferred as they lead to lower fracturing of their respective ecosystems. In
@tab_ecosystem_components, an overview of required components, desirable or not
needed, along with a short description and their applicability for different
scenarios are mentioned. Some components are more critical than others and are
required to build the ecosystem. Most notably, the compiler,
hardware-programmer, and testing and simulation tools are critical to be able to
utilise the hardware platform. Without these components, the ecosystem is not
usable for hardware development. However, while the other components are not
strictly needed, several of them are desirable: having proper debugging
facilities makes the ecosystem easier to use. Similarly, having a build system
can help the users get started with their projects faster.
In @tab_ecosystem_components, there is a distinction made on the type of design
that is pursued, as will be discussed in @sec_phos, this thesis will create a
new hardware description language, but the possibility of creating an @api was
also discussed. And while an @api is not the retained solution, one can use this
information for the choice of the language in which this new language, called
@phos, will be implemented. Indeed, the same components that make @api designing
easy also make language implementation easier. As will be discussed in
@sec_language_summary, @phos will be implemented in _Rust_. The language meets
all requirements by having first-party support for all of the required and
desired components for an @api design. Its high performance and safety features
make it a good candidate for a reliable implementation of the @phos ecosystem.
#ufigure(
caption: [
This table shows the different components that are needed (#required_sml),
desired (#desired_sml) or not needed (#not_needed_sml) for an ecosystem. It
compares their importance for different scenarios, namely whether developing an
API that is used to program photonic processors or whether creating a new
language for photonic processor development.
+ Interpreted languages are languages that are not compiled to machine code, but
rather interpreted at runtime. This means that they do not require a compiler
per se, but rather an interpreter.
+ A code editor is provided as an external tool, however, support for the language
must be provided by the ecosystem. That being said, it is not a requirement and
is desired rather than required.
],
outline: [
Comparison of programming ecosystem components and their importance.
],
kind: table,
table(
columns: (auto, 1fr, 0.25fr, 0.25fr),
align: center + horizon,
stroke: (x: none),
table.header(
table.cell(rowspan: 2, smallcaps[ *Component* ]),
table.cell(rowspan: 2, smallcaps[ *Description* ]),
table.cell(colspan: 2, smallcaps[ *Importance* ]),
smallcaps[ *@api design* ],
smallcaps[ *language #linebreak() design* ],
),
// Language specification
smallcaps[ *Language specification* ],
align(left)[Defines the syntax and semantics of the language.],
desired,
desired,
// Compiler
smallcaps[ *Compiler* ],
align(
left,
)[Converts code written in a high-level language to a low-level language.],
required,
[ #desired #linebreak() (interpreted#super[1]) ],
// Hardware programmer & runtime
smallcaps[ *Hardware-programmer#linebreak()& runtime* ],
align(left)[ Allows the execution of code on the hardware. ],
required,
required,
// Debugger
smallcaps[ *Debugger* ],
align(left)[Allows the user to inspect the state of the program at runtime.],
desired,
desired,
// Code formatter
smallcaps[ *Code formatter* ],
align(left)[Allows the user to format their code in a consistent way.],
desired,
desired,
// Linter
smallcaps[ *Linter* ],
align(left)[Allows the user to check their code for common mistakes.],
not_needed,
desired,
// Code editor
table.cell(rowspan: 2, smallcaps[ *Code editor* ]),
table.cell(rowspan: 2, align(left)[Allows the user to write code in a user-friendly way.]),
not_needed,
not_needed,
table.hline(stroke: 0pt),
table.cell(colspan: 2)[(provided by the#linebreak()ecosystem#super[2])],
// Testing & simulation
smallcaps[ *Testing#linebreak()& simulation* ],
align(left)[Allows the user to test their code.],
required,
required,
// Package management
smallcaps[ *Package management* ],
align(left)[Allows the user to install and manage dependencies.],
desired,
desired,
// Documentation generator
smallcaps[ *Documentation generator* ],
align(left)[Allows the user to generate documentation for their code.],
required,
desired,
// Build system
smallcaps[ *Build system* ],
align(left)[Allows the user to more easily build their codebase.],
desired,
desired,
),
) <tab_ecosystem_components>
Finally, @tab_ecosystem_compare compares the ecosystem of existing programming
and hardware description languages and their components. It shows that some
ecosystems, like _Python_'s, have many components but that not all of them are
first-party, nor is there always an agreement within the community on the best
tool. However, _Rust_ is a particularly interesting candidate in this regard, as
it has first-party support for all of the required components except
hardware-programming and debugging tools. However, as noted in
@tab_ecosystem_compare, most other languages do not come with first-party
support for these tools either. However, as will be discussed in
@sec_overview_of_syntax, it is not easy to learn, has not seen use in hardware
synthesis and is therefore not a good fit for regular users. But its robust
ecosystem makes it a good candidate for a language implementation, something for
which it has a thriving ecosystem of many libraries, colloquially called _crates_,
fit for this purpose.
One can also see from @tab_ecosystem_compare that simulation and hardware
description ecosystems tend to be highly proprietary and incomplete. This
problem can be solved by providing a common baseline for all tasks relating to
photonic hardware description, where only the lowest level of the technology
stack: the platform-support is vendored. Forcing platforms, through an open
source license such as @gpl-3-0, to provide a standard interface for their
hardware will allow a standardised ecosystem to be built on top of it. This is
the approach that @phos will hopefully take.
#pagebreak(weak: true)
#ufigure(
caption: [
This table compares the ecosystems of different programming and hardware
description languages. It shows whether the components are first-party (#required_sml),
third-party but well-supported (#desired_sml) or third-party but not
well-supported or non-existent (#not_needed_sml). Each component also lists the
name of the tool that is most commonly used for that purpose.
+ _C_ has multiple, very popular, compilers, such as _GCC_ and _Clang_. However,
these are third-party, and for embedded and @hls development, there is no de
facto standard.
+ Traditional programming languages usually rely on programmers and runtime
provided by the hardware vendor of the targeted embedded hardware.
+ #emph[@verilog-ams] is a language used for simulation, not hardware description.
+ _C_ and _Rust_ generally share debuggers due to being native languages.
+ There do seem to exist some formatters, linters, code editor support and
documentation generators for #emph[@verilog-ams] and #emph[@vhdl], but they are
not widely used and are sparsely maintained.
+ Due to the difficulty in handling intellectual property in hardware, there is no
ubiquitous package manager for hardware description languages.
+ Python being interpreted, it does not need a build system, but some dependency
and environment automation tools such as _Poetry_ are widely used.
],
outline: [
This table compares the ecosystems of different programming and hardware
description languages.
],
kind: table,
table(
columns: (auto, 0.1fr, 0.1fr, 0.1fr, 0.1fr, 0.1fr),
align: center + horizon,
stroke: (x: none),
table.header(
table.cell(rowspan: 2, smallcaps[ *Components* ]),
table.cell(colspan: 3, smallcaps[ *Traditional languages* ]),
table.hline(start: 1, end: 4, stroke: 1pt),
table.hline(start: 4, end: 6, stroke: 1pt),
table.cell(colspan: 2,smallcaps[
*Hardware description#linebreak()& simulation languages*
]),
smallcaps[ *C* ],
smallcaps[ *Rust* ],
smallcaps[ *Python* ],
smallcaps(strong(gloss("verilog-ams", short: true))),
smallcaps(strong(gloss("vhdl", short: true))),
),
// Language specification
smallcaps[ *Language specification* ],
[ #required @ISO13586 ],
[ #not_needed @rust-reference ],
[ #not_needed @python_reference ],
[ #required @verilog-ams-ref ],
[ #required @vhdl-standard ],
// Compiler
smallcaps[ *Compiler* ],
[#desired #super[1] #linebreak() (_Clang_ & _GCC_)],
[#required #linebreak() (_rustc_)],
[#desired #linebreak() (_PyPy_ & _Numba_)],
[#not_needed #linebreak() (simulated)],
[#desired #linebreak() (synthesised)],
// Hardware programmer & runtime
smallcaps[ *Hardware-programmer#linebreak()& runtime* ],
[#desired #super[2] #linebreak() (vendored)],
[#desired #super[2] #linebreak() (vendored)],
[#desired #super[2] #linebreak() (vendored)],
[#desired #super[3] #linebreak() (vendored)],
[#desired #linebreak() (vendored)],
// Debugger
smallcaps[ *Debugger* ],
[#desired #super[4] #linebreak() (_GDB_ & _LLDB_)],
[#desired #super[4] #linebreak() (_GDB_ & _LLDB_)],
[#required #linebreak() (_PDB_)],
[#desired #linebreak() (vendored)],
[#desired #linebreak() (vendored)],
// Code formatter
smallcaps[ *Code formatter* ],
[#desired #linebreak() (_clang-format_#linebreak()& _uncrustify_)],
[#required #linebreak() (_rustfmt_)],
[#desired #linebreak() (_Black_)],
[#not_needed #super[5]],
[#not_needed #super[5]],
// Linter
smallcaps[ *Linter* ],
[#desired #linebreak() (_clang-tidy_#linebreak()& _uncrustify_)],
[#required #linebreak() (_Clippy_)],
[#desired #linebreak() (_Black_)],
[#not_needed #super[5]],
[#not_needed #super[5]],
// Code editor
smallcaps[ *Code editor support* ],
[#desired #linebreak() (_clangd_ & _ccls_)],
[#required #linebreak() (_rust-analyzer_)],
[#desired #linebreak() (_Pyright_)],
[#not_needed #super[5]],
[#not_needed #super[5]],
// Testing & simulation
smallcaps[ *Testing* ],
[#desired #linebreak() (_CUnit_)],
[#required #linebreak() (_rustc_)],
[#desired #linebreak() (_Pytest_)],
[#desired #linebreak() (_SVUnit_)],
[#desired #linebreak() (_VUnit_)],
// Testing & simulation
smallcaps[ *Simulation* ],
[#desired #super[2] #linebreak() (vendored)],
[#desired #super[2] #linebreak() (vendored)],
[#desired #super[2] #linebreak() (vendored)],
[#desired #linebreak() (vendored)],
[#desired #linebreak() (vendored)],
// Package management
smallcaps[ *Package management* ],
not_needed,
[#required #linebreak() (_Cargo_)],
[#required #linebreak() (_PyPI_)],
[#not_needed #super[6]],
[#not_needed #super[6]],
// Documentation generator
smallcaps[ *Documentation generator* ],
[#desired #linebreak() (_Doxygen_)],
[#desired #linebreak() (_Rustdoc_)],
[#desired #linebreak() (_Sphinx_)],
[#not_needed #super[5]],
[#not_needed #super[5]],
// Build system
smallcaps[ *Build system* ],
[#desired #linebreak() (_CMake_)],
[#required #linebreak() (_Cargo_)],
[#desired #super[7] #linebreak() (_Poetry_)],
[#desired #linebreak() (vendored)],
[#desired #linebreak() (vendored)],
),
) <tab_ecosystem_compare>
#uconclusion[
With the previous sections, it can be seen that creating a user-friendly
ecosystem revolves around creating tools to aid development. The compiler and
language cannot be created in isolation, and the entire ecosystem has to be
considered to achieve the broadest possible adoption.
Depending on the implementation choice, the ecosystem's components will change.
However, whether the language already exists or is created to program photonic
processors, special care needs to be taken to ensure high usability and
productivity through the availability or creation of tools to aid in
development.
As will be discussed in @sec_phos, the chosen solution will be the creation of a
custom @dsl for photonic processors. This will be done due to the unique needs
of photonic processors and the lack of existing languages that can be used for
development targeting such devices. Moreover, this ecosystem will need to be
created from scratch. However, the analysis done in this section will be used to
guide the development of this ecosystem.
]
== Overview of syntaxes <sec_overview_of_syntax>
Following the analysis of programming ecosystem components, this section will
analyse the syntaxes employed by various common programming languages. This
section aims at building intuition on what these syntaxes look like, what they
mean and how they can be applied to photonics. Additionally, this section will
also analyse the syntaxes of existing @hdl[s] and other @dsl[s] that are used to
program digital electronics -- most notably @fpga[s] -- and analog electronics.
This analysis will also provide insight into whether these languages are
suitable for programmable photonics. As programmable photonics works using
different paradigms than digital and analog electronics, it is crucial to
understand these differences and why they make these existing solutions
unsuitable.
The first analysis, which looks at traditional programming languages, will look
at the syntaxes of the following languages: _C_, _Rust_, and _Python_. These
languages have been chosen as they are some of the most popular languages in the
world, but also because they each bring different strengths and weaknesses with
regards to the following aspects:
- _C_ is a low-level language that is used as the building block for other
non-traditional computation types such as @fpga[s] by being used for @hls
@schafer_high_level_2020, but is also being used for novel use cases such as
quantum programming @mccaskey_extending_2021.
- _Rust_ is another low-level language, it has not seen wide use in @hls or other
non-traditional computation types, but it has modern features that make it a
good candidate for @api development. However, _Rust_ has a very steep learning
curve, making it unsuitable for non-programmers @rust_learning_curve.
- _Python_ is a common language that is used by a vast proportion of researchers
and engineers @stackoverflow_survey @python_research, which makes it a great
candidate as the starting point of any language development. It is also used for
some @hdl development @villar_python_2011 and is used for the development of the
existing photonic processor @api[s], as well as for other non-traditional
computation types such as quantum computing. However, it is a high-level,
generally slow language with a syntax generally unsuitable for hardware
description, as will be further discussed later.
The second analysis will focus on different forms of #gloss("hdl", long: true, suffix: [s]) and
simulation languages. Most notably, the following languages will be analysed:
- _SystemC_ is a language that has seen increased use in @hls for @fpga[s].
- _MyHDL_ is a library for _Python_ that gives it hardware description
capabilities.
- _VHDL_: a common @hdl used for @fpga development and other digital electronics
@my_hdl.
- #emph[@verilog-ams]: a superset of _Verilog_ that allows for the description of
analog electronics. It has seen use in the development of photonic simulation,
most notably in _Ansys Lumerical_ @ansys_lumerical.
- #emph[@spice]: a language that is used for the simulation of analog electronics. #emph[@spice] has
  seen use in the development of photonic simulation @ye_spice-compatible_2022.
The goal of the second analysis will be to see whether any of these languages
can be reused or easily adapted for photonic simulation. In the end, none of
these languages fit the needs of photonic development, most notably with regard
to ease of use. Nonetheless, the analysis provides insight that can be useful
when designing a new language. It is also important to note that two distinct
families of languages are in the aforementioned list: digital @hdl[s] and analog
simulation-centric languages. Therefore this comparison will be made in two
parts, one for each family of languages.
=== Traditional programming languages
To compare traditional programming languages, a simple yet classical example
will be used: _FizzBuzz_, which is a simple program that prints the number from
one to one hundred, printing _Fizz_ when the number is divisible by three, _Buzz_ when
the number is divisible by five and _FizzBuzz_ when the number is divisible by
both three and five. The _C_ implementation of _FizzBuzz_ is shown in
@lst_c_fizz. The _Rust_ implementation of _FizzBuzz_ is shown in @lst_rust_fizz.
The _Python_ implementation of _FizzBuzz_ is shown in @lst_python_fizz. For each
of those languages, many different implementations are possible. However, a
simple and representative version was used. As performance is not the focus of
this comparison, choosing the most optimised implementation is not necessary.
Programming languages often take inspiration from one another. As such, most
modern languages are inspired by _C_, which is itself inspired by _B_, _ALGOL 68_ and _FORTRAN_ @ritchie_development_1993. _C_ has
had a large influence on languages such as _Python_ @rossum1993python and _Rust_ @rust-reference -- through _C++_ and _Cyclone_ -- but
also on @hdl[s] such as _Verilog_ (and therefore _Verilog-AMS_). As such, this
section will start with an outlook on the syntax of _C_ and discuss some of its
shortcomings regarding more modern languages. Additionally, the more difficult
aspects of the language will be discussed, most notably manual memory management
and pointer semantics, as these two aspects are error-prone and even considered
to be the root cause for most security vulnerabilities @ms_security.
A simple _C_ implementation of _FizzBuzz_ can be found in @lst_c_fizz, it shows
several important aspects of _C_:
- blocks of code are surrounded by curly braces (`{` and `}`);
- statements are terminated by a semicolon (`;`); however, curly braces can be
omitted for single-line statements;
- variables are declared with a type and a name and optionally initialised with a
value;
- functions are declared with a return type, a name, a list of arguments and a
body;
- ternary operators are available for shorter but less readable conditional
statements;
- _C_ lacks a lot of high-level constructs such as string, relying instead on
arrays of characters;
- _C_ has a lot of low-level constructs, such as pointers, which are used to pass
arguments by reference;
- _C_ is not whitespace or line-space sensitive, and statements can span multiple
lines;
- _C_ uses a preprocessor to perform text substitution, such as importing other
files;
- _C_ needs a `main` function to be defined, which is the program's entry point.
#figure(
caption: [ _FizzBuzz_ implemented in _C_, based on the _Rosetta Code_ project
@rosetta_code_sieve_2021. ],
raw(read("../assets/code/fizzbuzz/c.c"), lang: "c", block: true),
) <lst_c_fizz>
The _Rust_ implementation of _FizzBuzz_ can be found in @lst_rust_fizz, it shows
several important aspects of _Rust_:
- blocks of code are surrounded by curly braces (`{` and `}`);
- statements are terminated by a semicolon (`;`);
- loops use the range syntax (`..`) instead of manual iteration;
- printing is done using the `print` and `println` macros, which are similar to _C_'s
`printf`;
- variables do not need to be declared with a type, as the compiler can infer it;
- _Rust_ is not whitespace or line-space sensitive, and statements can span
multiple lines;
- _Rust_ needs a `main` function to be defined, which is the program's entry
point.
#figure(
caption: [
_FizzBuzz_ implemented in _Rust_, based on the _Rosetta Code_ project
@rosetta_code_sieve_2021
],
raw(read("../assets/code/fizzbuzz/rust.rs"), lang: "rust", block: true),
) <lst_rust_fizz>
The _Python_ implementation of _FizzBuzz_ can be found in @lst_python_fizz, it
shows several important aspects of _Python_:
- blocks of code are delimited by indentation;
- a newline terminates statements;
- loops use the `range` function instead of manual iteration;
- printing is done using the `print` function;
- variables do not need to be declared with a type, as the language is dynamically
typed;
- _Python_ is whitespace and line-space sensitive;
- _Python_ does not need a `main` function to be defined, as the file is the
program's entry point.
#figure(
caption: [
_FizzBuzz_ implemented in _Python_, based on the _Rosetta Code_ project
@rosetta_code_sieve_2021.
],
raw(read("../assets/code/fizzbuzz/python.py"), lang: "python", block: true),
) <lst_python_fizz>
This simple example shows some fundamental design decisions for _C_, _Rust_, and _Python_,
most notably that _Python_ is whitespace and line-space sensitive, while _C_ and _Rust_ are
not. This is a design feature of _Python_ that aids in making the code more
readable and consistently formatted regardless of whether the user uses a
formatter or not. Then, focusing on typing, _Python_ is dynamically typed,
making the work of any compiler more difficult. Dynamic typing is a feature that
generally makes languages easier to use at the cost of runtime performance, as
type-checking has to be done as the code is running. Per contra, _Rust_ takes an
intermediate approach between _Python_'s dynamic typing and _C_'s manual type
annotation: _Rust_ uses type inference to infer the type of variables, which
means that users still need to annotate some types. However, overall most
variables do not need type annotations. This makes _Rust_ easier to use than _C_,
but also more challenging to use than _Python_ from a typing point of view.
Additional features that the languages offer:
- _Python_ and _Rust_ both offer iterators, which are a high-level abstraction
over loops;
- _C_ and _Rust_ both offer more control over data movement through references and
pointers;
- _Python_ and _Rust_ both have an official package manager, while _C_ does not;
- _Python_ and _Rust_ are both memory safe, meaning that memory management is
automatic and not prone to errors;
- _Rust_ is a thread-safe language, meaning that multithreaded programs are easier
to write and less prone to errors;
- _C_ and _Rust_ are both well suited for embedded development. While _Python_ has
seen use in embedded development, it is not as well suited as the other two
languages due to performance constraints;
- _Rust_ does not have truthiness: only `true` and `false` are considered boolean
values, while _Python_ and _C_ have truthiness, meaning several types of values
can be used as boolean values.
#uconclusion[
It was shown that traditional programming languages generally lack the features
required to be used as a photonic @hdl. However, _Python_ is a strong candidate
for creating an @api, and _Rust_ is a strong candidate for implementing a
compiler.
]
=== Digital hardware description languages
Unlike traditional programming languages, digital @hdl[s] try and represent
digital circuitry using code. This means that the code is not executed but
rather synthesised into hardware that can be built. This hardware generally has
one of two forms: logic gates that can be built discretely or @lut[s] programmed
on an FPGA. Both processes involve "running" the code through a synthesiser that
produces a netlist and a list of operations that are needed to implement the
circuit. As previously discussed, in @sec_language_tool, languages can serve as
the foundation to build abstractions over complex systems. However, most @hdl[s]
tend to only have an abstraction over the #gloss("rtl", long: true) level, which
is the level that describes the movement, and processing of data between
registers. Registers are memory cells commonly used in digital logic that store
the result of operations between clock cycles. This means that the abstraction
level of most @hdl[s] is shallow.
This low-level of abstraction can be better understood by understanding three
factors regarding digital logic programming. The first is the economic aspect:
custom @ic[s] are very expensive to design and produce. As such, the larger the
design, the larger the dies needed, which increases cost; and @fpga[s] are
costly devices, the larger the design, the more space it physically occupies
inside of the @fpga, increasing the size needed and therefore the cost. The
second factor is the design complexity: the more complex the design, the more
difficult it is to verify and the slower it is to simulate, which decreases
productivity. The third factor is with regard to performance. Three criteria
characterise the performance of a design: the speed of the algorithm being
implemented, the power consumed for a given operation, and the area that the
circuit occupies. These performance definitions are often referred to by the
acronym @ppa. As such, the design is generally done at a lower-level of
abstraction to try and meet performance targets.
#pagebreak(weak: true)
==== High-level synthesis
#udefinition(
footer: [ Adapted from @schafer_high_level_2020, @meeus_overview_2012 ],
)[
*High-level Synthesis (HLS)* is the process of translating high-level
abstractions in a programming language into #gloss("rtl", long: true) level
descriptions. This process is generally done by a compiler that takes as input
the high-level language and translates the code into a lower-level form.
]
In recent years, there has been a push towards higher-level abstraction for
digital @hdl[s]. It takes the form of so-called #gloss("hls", long: true) languages.
These languages allow the user to build their design at a higher-level of
abstraction, which is generally more straightforward and more productive
@ye_scalehls_2022. Allowing the user to focus on the feature they are trying to
build and not the low-level implementation of those designs. As discussed in
@sec_language_tool, this can be seen as a move towards declarative programming
or a less imperative programming model. Coupled with the rise of hardware
accelerators in the data center and cloud markets, which are generally either
@gpu[s] or @fpga[s], there has been an increased need for software developers to
be able to use these #gloss("fpga")-based accelerators. Because these software
developers are generally not electrical engineers, and due to the high
complexity of @fpga[s], developing for such devices is not an easy skill to
acquire. This has provided an industry drive towards economically viable @hls
languages and tools that software developers can use to program #gloss("fpga")-based
accelerators.
Another advantage of @hls is the ability to test the hardware using traditional
testing frameworks, as discussed in @sec_ecosystem_components_summary, testing
systems for @hdl[s] tend to be vendored and therefore difficult to port.
Additionally, they are based on simulation of the behaviour, which is generally
slower than running the equivalent @cpu instructions. Therefore, testing the
hardware using traditional frameworks is a significant advantage of @hls
languages. In the same way that it allows the use of regular testing frameworks,
it also enables the reuse of well-tested algorithms that may already be
implemented in a given ecosystem which can drastically lower the development
time of a given design and reduce the risk of errors. In addition to being able
to use existing testing frameworks, the code can be verified using provers and
formal verification tools, which can prove the correctness of an implementation,
something that does not exist for traditional @rtl level development.
Given that @hls development is generally easier, more productive and allows for
the reuse of existing well-tested resources, it is a sensible alternative to
traditional @rtl level development. However, it does come at the cost of
generally higher resource usage and lower performance. This is due to the fact
that the @hls abstractions are still not mature enough to meet the performance
of hand-written @hdl code. However, there has been a push towards a greater
level of optimisation, such as using the breadth of optimisation available in
the @llvm compiler. This has allowed @hls to reach a level of performance
acceptable for large swath of applications, especially when designed by
non-specialists @lahti_are_2019. Other techniques, such as machine learning
based optimisation techniques have been used to increase performance even
further @shahzad_reinforcement_2022.
==== Modern RTL languages
In parallel to @hls development, a lot of higher-level @rtl languages and
libraries have been created, such as _MyHDL_, _Chisel_, and _SpinalHDL_. These
alternatives are positioned as replacements to traditional @hdl[s] such as _SystemVerilog_.
They are often libraries for existing languages such as _Python_, and therefore
inherit their broad ecosystems. As discussed in
@sec_ecosystem_components_summary, @hdl[s], tend to be lackluster -- or highly
vendor-locked -- with regard to development tools. And just as in the case of
@hls, this can be an argument in favour of using alternatives, such as these
@hdl[s] implemented inside of existing languages.
These @hdl[s] are generally implemented as translators, where, instead of doing
synthesis down to the netlist level, they translate the user's code into a
traditional @hdl. As such, they are not a replacement for traditional @hdl[s]
but offer a higher-level of abstraction and better tooling through the use of
more generic languages. This places these tools in an interesting place, where
users can use them for their nicer ecosystems and easier development but still
have the low-level control that traditional @hdl[s] offer. This is in contrast
to @hls, where this control is often lost due to the higher-level of abstraction
over the circuit's behaviour. Additionally, these tools often integrate well
with existing package-managers which are available for the language of choice,
allowing for easy reuse and sharing of existing libraries.
=== Comparison
For the comparison, three @hdl[s] of varying reach and abstraction levels will
be used: #emph[@vhdl], #emph[MyHDL], and #emph[SystemC]. They each represent one
of the aforementioned categories: traditional @hdl[s], modern @rtl\-level
languages, and @hls languages. For this comparison, a simple example of an $n$-bit
adder will be used, where $n$ is a design parameter. This will allow the
demonstration of procedural generation of hardware and the use of modules and
submodules to structure code.
#uinfo[
Most @hdl languages come with pre-built implementations of adders. Usually, the
compiler or synthesis tool chooses the best adder implementation based on the
user's constraints. These constraints can relate to the area, power consumption
or timing requirements.
]
In the first example, in @lst_adder_vhdl, it can be seen that the @vhdl
implementation is verbose, giving details for all parameters and having to
import all of the basic packages (line $#2-3$). In @vhdl, the ports and other
properties are defined in the `entity`, and the logic is implemented in an
`architecture` block. This leads to functionality being spread over multiple
locations, generally reducing readability. Assignments are done using the `<=`
operator. Unlike most modern counterparts, the language does not use indentation
or braces to denote code blocks but rather the `begin` and `end` keywords, which
is a dated practice. However, @vhdl does support parameterisation of the design,
as can be seen on line $#6$ with the declaration of the generic `n`. This allows
for the generation of hardware based on parameters, which is a useful feature
for hardware design.
#ufigure(
caption: [ Example of a $n$-bit adder in @vhdl, based on @vhdl-adder. ],
raw(lang: "vhdl", read("../assets/code/adder/vhdl.vhdl"), block: true),
) <lst_adder_vhdl>
The second example based on _MyHDL_, in @lst_adder_my_hdl, shows a combinatorial
implementation of an adder. It shows that _MyHDL_ relies on decorators to
perform code transformations, something that may be useful when designing custom
languages based on _Python_ @ikarashi_exocompilation_2022. Despite using
decorators, the code for the _Python_ example is very short, relying on the
`@always_comb` annotation to denote the combinatorial logic. The `@block`
annotation is used to denote a block of code that will be translated to a
module. Overall, code in _MyHDL_ is generally easy to read and has a low barrier
to entry for _Python_ developers.
#ufigure(
caption: [ Example of a $n$-bit adder in _MyHDL_. ],
raw(lang: "python", read("../assets/code/adder/myhdl.py"), block: true),
) <lst_adder_my_hdl>
The third and final sample is in _SystemC_, in @lst_adder_systemc. It is verbose,
using lots of macros, it does not directly support generics due to its _C_ heritage,
and requires the use of defined macros to configure the number of bits. Overall,
it does not provide a pleasant user experience even for a simple example.
Despite being a @hls language, it is seemingly less readable and user-friendly
than _MyHDL_.
#ufigure(
caption: [ Example of a $n$-bit adder in _SystemC_. ],
raw(lang: "c", read("../assets/code/adder/systemc.c"), block: true),
) <lst_adder_systemc>
Three languages were shown, starting with @vhdl, which is widely used in the
industry and has a long history of support and use in hardware synthesis
toolchains. A newer, very modern @rtl language based on _Python_ with a
compelling feature set, _MyHDL_, was also shown. Finally, a @hls language, _SystemC_,
was shown. It was shown that _MyHDL_ is a very user-friendly language, with a
low barrier to entry and a very modern feature set. In contrast, _SystemC_ is a
very verbose language that does not provide a good user experience. It was also
shown that _SystemC_ does not support generics and requires the use of macros to
achieve the same functionality, whereas _MyHDL_ implicitly supports generics and
parameterisation of designs. However, this implicitness can be error-prone,
which in the case of @asic design would be very expensive.
Finally, none of the aforementioned @hdl[s] provide any facilities for analog
hardware description. Some, like @vhdl, can provide analog modelling, but not
analog hardware description. This is a significant limitation of all digital
electronic @hdl[s]. Additionally, the _driven-once_, _driven-many_ signal semantics they all use could
lead to issues with signal splitting, as will be discussed in @sec_signal_types.
#uconclusion[
It was shown that traditional @rtl @hdl[s] are not suitable for photonic
development. They are not easily approachable for non-expert and lack the
correct semantic for analog processing. However, _MyHDL_ shows a promising
approach to @hdl creation based on _Python_.
]
=== Analog simulation languages
There are several analog simulation languages. However, there are very few
analog hardware description languages, and they mostly seem to be research
languages @murayama_top-down_1996 @mitra_study_2010. Due to this overall
unavailability of analog @hdl[s], this comparison will instead rely on analog
simulation languages, namely @spice and @verilog-ams. These two languages are
very different, designed for different purposes and at different times. However,
they are both actively used. Their uses differ significantly as @spice aims to
provide a netlist description of analog electrical circuitry to be simulated,
whereas @verilog-ams aims to provide models of analog systems compatible with
mixed-signal simulations of digital and analog electronics.
==== SPICE
@spice is not a programming language but a configuration language: the user
declares a list of nets and the components that connect these nets. As such,
@spice is very explicit, and little in the way of programmatic features are
offered. Additionally, @spice depends on models and is not meant to describe
hardware. This means it is a very low-level representation of a circuit, which
goes against the goal of using a high-level language, as discussed in
@initial_requirements.
==== Verilog-AMS
@verilog-ams is a modern mixed-signal simulation. It suffers from the same issues as @spice, namely that it cannot be used for hardware description but rather hardware modelling. While @verilog-ams has been used for photonic modelling, it is not a suitable candidate for use as a photonic @hdl.
#uconclusion[
Existing analog modelling languages are unsuitable for photonic hardware
description, as they are not hardware description languages but hardware
modelling languages.
]
== Analysis of programming paradigms <sec_paradigms>
#udefinition(footer: [ Adapted from @noauthor_programming_2020. ])[
A *programming paradigm* is a style of programming, a way of thinking,
structuring, and solving problems in a language.
]
After an overview of existing programming languages, one must now consider the
available programming paradigms. When selecting or creating a language,
particular care must be taken when selecting one or more paradigms. This is
because the choice of paradigms will affect the language's expressiveness and
ease of use. Generally, most languages, like _Python_ are imperative languages
with elements from functional programming.
There are two broad categories of programming paradigms, imperative and
declarative programming. As mentioned in @sec_language_tool, imperative
languages are concerned with the "how" of programming, whereas declarative
languages are concerned with the "what". A complete overview of all programming
paradigms is available in @anx_paradigms @van_roy_classification_nodate. In this
comparison, the number of paradigms will be reduced as many of them exist.
Instead, focusing on the most relevant ones, namely object-oriented, functional,
logic, and dataflow programming. It is important to note that object-oriented
programming is a subset of imperative programming and that functional
programming is a subset of declarative programming, with dataflow programming
being a subset of functional programming. This means that the aforementioned
paradigms are not mutually exclusive and can, for example, be combined to create
an object-oriented language with functional elements @van_roy_programming_2012.
=== Object-oriented programming
Object-oriented programming is one of the most common paradigms, being part of _Java_, _Python_, _C\#_,
and many others. It follows the idea that data is the most important part of an
application and that it should be contained together in an object along with the
methods acting upon it. For each piece of data, an instance of an object is
created. In theory, this allows for the creation of complex data structures
easily in a tree-like structure. Object-oriented also allows for inheritance,
where one class of object inherits from another. The most typical example is
shown in @lst_oop_example, it shows a super class `Student` being inherited by a
subclass `Sebastien`. This allows the subclass to override methods on the super
class and share its initialisation function and state.
#pagebreak(weak: true)
#ufigure(
outline: [ Example of object-oriented programming in _Python_. ],
caption: [ Example of object-oriented programming in _Python_, showing inheritance and
method overriding. ],
)[
```python
class Student:
def __init__(self, name):
self.name = name
def print_thesis_grade(self, grade):
print("Thesis grade of " + self.name + " is " + grade)
class Sebastien(Student):
def __init__(self):
super().__init__("<NAME>")
def print_thesis_grade(self, grade):
print("Thesis grade of Sébastien is A+")
```
]<lst_oop_example>
==== Criticism
Object-oriented programming has been criticised for its tendency to create
overly complex and sometimes confusing data structures. This is because it is
very easy to create complex trees of classes, all interconnected, and all
inheriting from one another in ways that are not always obvious. Additionally, one of the
stated goals of object-oriented programming is to make code more modularised,
therefore reusable. However, in practice, this is not always the case, as it is
easy to create overly specialised classes @cardelli_bad_1996.
=== Functional programming
Functional programming views programs in the opposite way of object-oriented
programming, instead emphasising procedures being done on data. In purely
functional programming, all data is immutable, meaning that a value cannot be
changed, only ever created or destroyed. This has advantages regarding limiting
the number of side-effects, making the code easier to reason about. However, it
can also make implementing some programs that require side-effects very
difficult, such as communicating with other programs.
=== Logic programming
Logic programming is a subset of functional programming, instead focused on
logical relations. The most common example of a logic programming language is _Prolog_.
In logic programming, the programmer defines a set of rules, and the program
will try to find a solution to the problem. This is done by defining a set of
rules and a set of facts. The program will then try to find a solution to the
problem by applying the rules to the facts. Logic programming does not find its
use in common programming, but rather in proving mathematical theorems and
solving mathematical problems. As such, it is not suitable for hardware
description.
=== Dataflow programming
Dataflow programming is another subset of functional programming, where the
program is represented as a graph of nodes, where each node performs a function,
and the graph's edges are the data flowing between the nodes. Its data model is
particularly interesting for hardware description, as it can represent the
operations being done on a signal, with the "flow" of light being the edges of
the graph. Indeed, this is the approach taken by _DFiant_, a _Scala_ based @rtl
@hdl that uses dataflow programming as its paradigm @port_dfiant_2017. And as
will be seen in @sec_phos, it is part of the paradigm used by @phos, the
language created in this thesis.
== Existing framework
There currently exists a framework developed at the @prg for the programming of
photonic processors. However, its level of abstraction is low. It consists of
manually setting the parameters of each photonic gate and then manually
connecting them together. This is a very low-level approach, and as such, it is
not suitable for the programming of complex photonic processors. However, it is
still useful for the programming of simpler photonic circuits, and as such has
been used for demonstrations of routing, switching, and circuit designing.
== Hardware-software codesign
#udefinition(
footer: [ Adapted from @darwish_trends_2005. ],
)[
*Hardware-software codesign* is the process of designing a system where both the
hardware and software components are designed together, with the goal of
interoperating hardware components and software systems more easily. And
optimise the system as a whole rather than the hardware and software components
separately.
]
Hardware-software codesign is difficult, requiring good communication and
planning between the different parties. As such, some tools have been created to
make this process easier. In @fpga development, this is usually done by having
the synthesiser produce lists of registers which the software can configure.
However, this is still error-prone and limited in usefulness.
== Summary <sec_language_summary>
From the aforementioned criteria, one may give a score for each of the discussed
languages based on its suitability for a given application. This is done in
@tbl_language_comparison. The score is given on a scale of one to five, with one
being the lowest and five being the highest. The score is given based on the
following criteria: the maturity of the ecosystem and the suitability for
different scenarios that were previously explored, notably: @api design, root
language -- i.e. as the basis for reusing the existing ecosystem and syntax --
and the implementation of a new language -- i.e. using the language to build the
ecosystem components of a new language. @rtl languages implemented on top of _Python_ are
not included in the table. Neither is @spice due to its restrictive scope.
From @tbl_language_comparison, one can see that for creating a new language, the
best languages to implement it are _Rust_ and _C_. And the best languages to
inspire the syntax and semantics are _Python_ and #emph[@verilog-ams].
Additionally, _C_ is also a good inspiration due to its widespread use and the
familiarity of its syntax. Finally, for the implementation of an @api, the best
choice is _Python_ due to its maturity, simplicity and popularity in academic
and engineering circles.
#pagebreak(weak: true)
#ufigure(
caption: [
Comparison of the different languages based on the criteria discussed in
@sec_language_summary.
],
kind: table,
table(
columns: (0.75fr, 1fr, 1fr, 1fr, 1fr),
align: center + horizon,
stroke: (x: none),
table.header(
smallcaps[ *Language* ],
table.cell(colspan: 4, smallcaps[ *Applications* ]),
smallcaps[ *Ecosystem* ],
smallcaps[ *@api design* ],
smallcaps[ *Root language* ],
smallcaps[ *New language* ],
),
smallcaps[ *C* ],
score(3),
score(2),
score(2),
score(4),
table.hline(start: 0, end: 6, stroke: (thickness: 0.5pt, dash: "dashed")),
[],
table.cell(
colspan: 4,
align(
left,
)[
C is a fully featured low-level language; it is performant and has a simple
syntax. However, it lacks some more modern ecosystem components and is
error-prone. Because of this, it is unsuitable for @api design since it would
require the user to be familiar with memory management. It lacks many of the
semantics of hardware description, making it unsuitable as a root language.
However, its extensive array of language-implementation libraries makes it a
good candidate for implementing a new language.
]
),
smallcaps[ *Rust* ],
score(5),
score(2),
score(2),
score(5),
table.hline(start: 0, end: 6, stroke: (thickness: 0.5pt, dash: "dashed")),
[],
table.cell(
colspan: 4,
align(
left,
)[
Rust is a modern low-level language; it is very performant, has excellent
first-party tooling, is quickly growing in popularity, and is memory safe.
However, it has complicated syntax and semantics that is unwelcoming for
non-developers, which makes it unsuitable for either @api design or as a root
language. However, its extensive array of language-implementation libraries and
its memory and thread safety make it an excellent candidate for implementing a
new language.
]
),
smallcaps[ *Python* ],
score(4),
score(5),
score(4),
score(2),
table.hline(start: 0, end: 6, stroke: (thickness: 0.5pt, dash: "dashed")),
[],
table.cell(
colspan: 4,
align(
left,
)[
Python is a mature high-level language that sees wide use within the academic
community; it has great third-party tooling and is easy to learn. These factors
make it an excellent candidate for @api design and as a root language. However,
its slowness and error-prone dynamic typing make it an unsuitable candidate for
implementing a new language.
]
),
smallcaps[ *Verilog-AMS* ],
score(1),
score(0),
score(3),
score(0),
table.hline(start: 0, end: 6, stroke: (thickness: 0.5pt, dash: "dashed")),
[],
table.cell(
colspan: 4,
align(
left,
)[
@verilog-ams is a mixed signal simulation software; its ecosystem is lackluster,
with many proprietary tools which incur expensive licenses. It is not a generic
language and is therefore not designed for an @api to be implemented in the
language, nor is it suitable for implementing a new language. However, it is a
mature language with a familiar syntax to electrical engineers, which may make
it suitable as the root language.
]
),
smallcaps[ *VHDL* ],
score(1),
score(0),
score(1),
score(0),
table.hline(start: 0, end: 6, stroke: (thickness: 0.5pt, dash: "dashed")),
[],
table.cell(
colspan: 4,
align(
left,
)[
VHDL is a mature language with a large ecosystem but suffers from the same
issues as @verilog-ams, most notably that most tools are proprietary and
licensed. Similarly, its nature as a hardware description language makes it
unsuitable for @api design or the creation of a new language. Its verbose syntax
and semantics are challenging to learn and make the language difficult to read,
which makes it unsuitable as a root language.
]
),
),
) <tbl_language_comparison> |
|
https://github.com/LDemetrios/Svart | https://raw.githubusercontent.com/LDemetrios/Svart/main/The%20road%20to%20Svartalfheim.typ | typst | #import "coloring.typ": show-rule
// Highlight Svart (`svr`) listings with the project's custom show rule.
#show raw.where(lang: "svr"): show-rule
// Dark theme: black pages with white text throughout.
#set page(fill: black)
#set text(fill: white)
// Logo background colour; the commented lines are alternative candidates.
#let logo-blue = rgb("#08053f")
// #let logo-blue = rgb("#0e117c")
// #let logo-blue = rgb("#0b0a5e") // avg
// Poor man's small caps: every lowercase letter is rendered as a
// scaled-down capital. Applied via show rules so nested markup survives.
#let smallcaps(body) = {
  // Latin lowercase → 0.65em capitals.
  show regex("[a-z]"): ch => text(size: 0.65em, upper(ch))
  // Cyrillic lowercase is scaled slightly less (0.7em) to look balanced.
  show regex("[а-яё]"): ch => text(size: 0.7em, upper(ch))
  body
}
// Headings: slightly enlarged and rendered in the fake small caps above.
#show heading: it => {
  set text(1.2em)
  smallcaps(it)
}
// Centre the first three heading levels.
#show heading.where(level: 1): align.with(center)
#show heading.where(level: 2): align.with(center)
#show heading.where(level: 3): align.with(center)
// Justify body text, but not code listings; keep each listing on one page.
#set par(justify: true)
#show raw.where(block: true): set par(justify: false)
#show raw.where(block: true): block.with(breakable: false)
// Standalone title page: title, logo, author and year, all centred.
#page(
  align(center + horizon)[
    *#text(size: 3.3em, smallcaps[The road to Svartalfheim])*
    #v(5em)
    // Project logo from logo.typ, drawn over the logo-blue background.
    #import "logo.typ": logo
    #scale(90%, logo(back: logo-blue))
    #v(15em)
    *#text(size: 1.8em, [#smallcaps[Ляпин Д.Р.,] #text(size:.9em)[a.k.a]. #smallcaps[LDemetrios]])*
    #v(2em)
    2024
  ],
)
== Что это такое?
Это статья/книга о том, как я создаю свой язык программирования, который
называется Svart (шв. Тёмный). Это не столько пошаговый гайд, сколько дневник
разработки. Я публикую его в самом начале написания и буду обновлять по мере
продвижения.
#outline(title: none, indent: 1em)
#pagebreak()
== Язык Svart
Этот язык совмещает в себе преимущества разных известных мне языков. Основной
вклад внесли Kotlin и Rust, но также немного влияния имели C++, JavaScript,
Prolog и другие языки. Конкретнее говоря:
- Kotlin:
- Общий вид синтаксиса.
- Компилируемость под JVM, и, соответственно, сборка мусора из коробки.
- Оболочка над Java reflection, представляющая каноничные типы.
- Rust:
- Интерфейсы скорее похожи на трейты. В частности, есть трейты для операторов.
- Кортежи как полноценные типы.
- Ассоциированные типы.
- Полноценные алгебраические типы.
- Дженерики (не как в Java или Kotlin, без стирания).
- JavaScript:
- toString для замыканий выдаёт осмысленную информацию.
- Prolog:
- Общий подход к паттерн-матчингу.
- C++:
- Типы могут параметризоваться числами, с некоторыми возможностями compile-time вычислений. К сожалению, это потенциально означает, что компиляция может никогда не завершиться...
#pagebreak()
= Конкретнее о концепции
В первую очередь, хочется написать как можно больше различного, наполненного разнообразными фичами, кода, чтобы понять, что должно компилироваться и как работать, а что --- не должно компилироваться или падать с ошибкой.
== Основной синтаксис
... позаимствуем из Котлина. Придётся подождать пару страниц, пока я объясняю его для непосвящённых. Входной точкой является функция `main()`. Вывод осуществляется встроенной функцией `println`, строковые литералы ограничиваются двойными кавычками. Точка с запятой не требуется.
```svr
fun main() {
println("Hello, world!")
}
```
А вот прежде чем говорить про ввод, сначала придётся поговорить про слишком много всего.
А пока --- переменные и постоянные, циклы и условия, комментарии и аннотации типов --- всё без изменений.
```svr
fun main() {
val delta : Int = 2
var n = 9 // Auto inferred type Int
var fact = 1L // Auto inferred type Long
while (n > 0) {
fact *= n
n -= delta
}
println(fact) // Prints 945
}
```
Мы сосчитали $9!! = 945$. Отлично.
Ещё у нас есть...
== Классы, их наследование и интерфейсы.
```svr
interface Animal {
fun speech() : String
}
class Dog : Animal {
override fun speech() = "Bark!"
}
class Cat : Animal {
override fun speech() = "Meow!"
}
```
Это всё бывает generic:
```svr
interface List<+T> {
val size : Int
fun get(index: Long) : T
}
```
Они, конечно же, поддерживают declaration-site variance. Ну и всё в таком же духе. В отличие от Котлина, мы можем объявить _абстрактные_ `static` методы. Это поможет нам потом, с generics. При этом у каждого `static` метода автоматически появится ещё и не статическая перегрузка:
```svr
abstract class Base {
static fun name() : String = "Base"
}
class A : Base {
override static fun name() = "A"
}
class B : Base {
override static fun name() = "B"
}
```
Зачем это? Затем, что мы можем вызывать методы, не имея инстанса. В частности, на параметрах типа метода, в том числе и выведенных автоматически:
```svr
fun <C : Base, X : C, Y : C> commonName(x : X, y : Y) = C.name()
```
Позже мы научимся это делать и без таких махинаций с дженериками, но сработает это так:
```svr
val a = A()
val b = B()
println(commonName(a, a)) // X = A, Y = A, C = A, prints "A"
println(commonName(a, b)) // X = A, Y = B, C = Base, prints "Base"
```
С другой стороны, имея инстанс, мы сможем вызвать его собственный метод.
== Алгебраические типы данных
Собственно, что написано на упаковке:
```svr
enum Result<T, E> {
Success(T), Error(E)
}
```
== Типы-функции, типы-массивы и типы-кортежи.
В том числе, именованные кортежи. Вот тут-то и начнутся нововведения, а потом и расхождения. Давайте пока условимся, что у нас есть функции `listOf()` и `mutableListOf()`, как в Котлине, ибо мне пока лень придумывать названия для стандартной библиотеки. Может быть, впоследствии это поменяется.
```svr
fun <T> asList(arr: T[]) : List<T> {
val result = mutableListOf<T>()
for (el in arr) {
result.add(el)
}
return result
}
```
Хоть мне это и не очень нравится, но определить параметры типов надо до имени функции, так как у нас есть extension functions.
```svr
fun <T, R> List<T>.map(transform: T => R) : List<R> {
val result = mutableListOf<R>()
for (el in this) {
result.add(transform(el))
}
return result
}
```
Хотелось бы целиком разделить фазы парсинга и вывода типов, и это налагает ограничения на код. Например, мы здесь на уровне парсинга увидим, что `transform` --- это параметр, а не функция, а значит, будем пытаться вызывать на ней операторный метод `invoke`. И, если вдруг у нас есть такая ситуация:
```svr
fun method(x: Int) : Int = x
fun main() {
val method = "abc"
println(method(1))
}
```
В принципе, оно могло бы и скомпилироваться: вызвать метод, объявленный выше. Но нет, в данном скоупе `method` --- это строка.
Да, и именованные кортежи:
```svr
fun <T> List<T>.findIndexed(condition: T => Boolean) : (index: Int, value: T)? {
for i in 0 .. this.size() {
if (condition(this[i])) {
return (index: i, value: this[i])
}
}
return null
}
```
Во-первых, есть nullable типы, которые нужно явно маркировать. Во-вторых, при конструировании результата нужно явно прописать имена аргументов. Зачем? Затем, что тип `(index: Int, value: T)` --- подтип `(Int, T)`, и если мы напишем `(i, this[i])` --- мы сконструировали второй тип.
Также замечу, что скобки в объявлении `for` не обязательны, оператор `..` предполагает, что конец --- исключительно, и в обычном форматировании его стоит выделять пробелами.
== Ассоциированные типы.
Их удобство не так уж и очевидно в простых программах, поэтому я постараюсь это описать гораздо позже. Когда мы попытаемся компилятор Svart написать на Svart же. Пока же скажем так --- это своего рода постоянная, которую можно запросить на наследнике типа так же, как обычно запрашивают просто переменную на инстансе типа, но в compile-time.
```svr
interface Common {
type Associated
fun paramInstance() : Self::Associated
}
class A : Common {
override type Associated = Int
override fun paramInstance() = 1
}
class B : Common {
override type Associated = String
override fun paramInstance() = "1"
}
fun <G : Common> genericMethod(something: G) {
val param = something.paramInstance()
// Auto inferred G::Associated
}
```
== Self и final типы-параметры.
Да, у нас есть тип `Self`, который означает "тип, которому принадлежит `this`". Очевидно, если его использовать в качестве возвращаемого значения, проблем не возникнет, а вот в качестве параметра...
```svr
interface Negatable {
fun negate() : Self
}
interface Monoid {
static fun one() : Self
fun mul(another: Self) : Self
}
```
Теперь, допустим, у нас `Int` и `Double` оба реализуют эти два интерфейса.
```svr
fun checkReversability(x: Negatable) : Boolean {
val negX = x.negate() // Type is Negatable
return x == negX.negate()
}
```
```svr
fun checkNeutrality(x: Monoid) : Boolean {
val one = x.one() // Type is Monoid, despite the fact `one` is static
val x1 = x.mul(one) // Oops, Compilation Error!
return x == x1
}
```
Почему же ошибка компиляции? Потому что интерфейс требует реализовать метод `mul(Self)`, а не `mul(Monoid)`. Соответственно, `Int` реализует `mul(Int)`, `Double` реализует `mul(Double)`. Но мы хотим, чтобы так работало?
```svr
fun <T : Monoid> checkNeutrality(x: T) : Boolean {
val one = x.one() // Type is T
val x1 = x.mul(one) // Still Compilation Error!
return x == x1
}
```
Снова проблема. Потому что никто не запрещает подставить `T = Monoid`, а это приводит к уже известным проблемам... Мы как-то хотим разрешить подставлять только те типы, у которых нет наследников.
```svr
fun <final T : Monoid> checkNeutrality(x: T) : Boolean {
val one = x.one() // Type is T
val x1 = x.mul(one) // OK now
return x == x1
}
```
== Типы-функции, массивы и кортежи... снова.
... да, они у нас есть, мы это уже выяснили. Но во имя операций над типами, у них есть "длинный" синтаксис, консистентный с остальными:
- `T[]` это `Array<T>`. Ничего интересного, на самом деле.
- `(T, U) => R` это `Function<(T, U)>`. Аргумент типа --- это кортеж типов аргументов функции. А где тип результата? Он нигде не появляется в сигнатуре функций, поэтому реализовывать две версии интерфейса с разными аргументами для одного класса не очень осмысленно. Поэтому тип результата --- это ассоциированный тип.
```svr
class Something : (Int, Double) => String {
override operator fun invoke(arg0: Int, arg1: Double) : String = "abc"
}
```
--- это то же самое, что...
```svr
class Something : Function<(Int, Double)> {
override type Result = String
override operator fun invoke(arg0: Int, arg1: Double) : String = "abc"
}
```
- Кортежи... А тут сложно. Это своего рода список типов, используемый в компайл-тайме. Поэтому, во-первых, у нас есть синтетические типы, обозначающие кортежи длины $n$: `(A, B, C, D, E, F)` это `Hexad<A, B, C, D, E, F>`. Во-вторых, у нас есть специальный тип `Cons`: `(T, U, R)` --- это `Cons<T, Cons<U, Cons<R, Nullad>>>`. И наконец, у нас есть общий тип `Tuple`, от которого они все наследуются. А у `Tuple` компиляторно определён ассоциированный тип `Reduce`, позволяющий совершать операции над типами.
#import "@preview/diagraph:0.2.5": *
// Convert a colour to its "#rrggbb" hex string by stripping the
// `rgb("…")` wrapper from the value's repr.
#let hex-color(clr) = {
  let dump = repr(rgb(clr))
  // `dump` looks like `rgb("#08053f")`; keep only the quoted hex part.
  dump.slice(5, dump.len() - 2)
}
// Render a Graphviz digraph whose colours follow the current page/text
// colours, so diagrams stay legible on the dark theme.
#let contextual-graph(graph) = {
  // Resolve the effective background/foreground colours to hex strings.
  let back = hex-color(page.fill)
  let fore = hex-color(text.fill)
  // Wrap the caller's DOT body in a preamble that themes every edge and
  // node via the $foreground/$background placeholders.
  let graph = ```
  digraph {
  edge[color=$foreground, fontcolor=$foreground, labelfontcolor=$foreground];
  node[color=$foreground, fontcolor=$foreground, fillcolor=$background];
  ```.text + "\n" + graph + "\n}"
  // NOTE(review): `type(x) == "string"` relies on the legacy string-based
  // type comparison; newer Typst compares against `str` — confirm the
  // targeted Typst version before upgrading.
  assert(type(graph) == "string", message: repr(type(graph)))
  // Substitute the placeholders with quoted hex colours and hand the DOT
  // source to diagraph's `render`.
  return render(graph.replace("$foreground", "\"" + fore + "\"").replace("$background", "\"" + back + "\""))
}
#show raw.where(lang: "dot-render"): it => context align(center, contextual-graph(it.text))
== Операции над типами
Для понимания этой главы рекомендуется сначала преисполниться лямбда-исчислением.
Первым делом надо заметить, что ассоциированы с типом могут быть не только единичные типы, но и семейства типов:
```svr
class Sample {
type <T> Associated = Comparable<(T, T)>
}
```
Тогда `Sample::Associated<Int>` это то же самое, что `Comparable<(Int, Int)>`. Этот же синтаксис мы можем использовать для задания top-level псевдонимов:
```svr
type <T> Predicate = T => Boolean
```
Теперь давайте посмотрим, что же мы хотим иметь. Давайте научимся добавлять элемент в конец списка. Для тех, кто не знаком с тем, что такое `reduce`:
```
Reduce([], Func, Acc) = Acc
Reduce(Cons(Head, Tail), Func, Acc) = Func(Head, Reduce(Tail, Func, Acc))
```
Так, например, сумма списка --- это `reduce(list, +, 0)`. Хорошо, у нас есть `(A, B, C)`. Нам нужна какая-то функция и какой-то аккумулятор, которые удовлетворяет следующим "функциональным уравнениям":
```
Func(C, Acc) = X
Func(B, X) = Y
Func(A, Y) = (A, B, C, D)
```
Хочется сразу сказать, что пусть `Func = Cons`. Тогда сразу:
```
Cons(C, Acc) = (C, D)
Cons(B, (C, D)) = (B, C, D)
Cons(A, (B, C, D)) = (A, B, C, D)
```
Отсюда вывод: `Acc = (D,)`.
Тогда
```svr
type <List : Tuple, Last> Append = List::Reduce<Cons<*, *>, (Last,)>
```
Звёздочки здесь --- указание на то, что мы передаём `Cons` как функцию над типами, а не тип. Аналогичным образом давайте развернём список.
```
Func(C, Acc) = X
Func(B, X) = Y
Func(A, Y) = (C, B, A)
```
Понятно, что здесь должно быть `Func`, равное только что написанному `Append`:
```
Append(C, ()) = (C,)
Append(B, (C,)) = (C, B)
Append(A, (C, B)) = (C, B, A)
```
```svr
type <List : Tuple> Reverse = List::Reduce<Append<*, *>, ()>
```
А теперь хотим написать функцию высшего порядка. Как бы это сделать? Как принять семейство типов в качестве аргумента? Сделаем это так: пусть все функции над типами --- синтетические типы, наследники
```svr
interface TypeFunction<Bounds : Tuple> {
type Result = +Any?
abstract type <T: Bounds> Invoke : Self::Result
}
```
Соответственно, например, `Append<*, *>` --- это синтетический тип
```svr
class `Append<*, *>` : TypeFunction<(Tuple, Any?)> {
override type <T: (Tuple, Any?)> Invoke = Append<T::First, T::Second>
}
```
Итак, мы хотим написать фильтр. Поступим в лучших традициях лямбда-исчисления:
```svr
sealed interface TypeBoolean : TypeFunction<(Any?, Any?)>
class TypeTrue : TypeBoolean {
type <T: (Any?, Any?)> Invoke = T::First
}
class TypeFalse : TypeBoolean {
type <T: (Any?, Any?)> Invoke = T::Second
}
type <T> TypePredicate = TypeFunction<(T,), Result = +TypeBoolean>
type <Value, List : Tuple, Pred : TypePredicate<(Value,)>> CondAppend =
Pred::Invoke<(Value,)>::Invoke<(Cons<Value, List>, List)>
type <List : Tuple, Pred : TypePredicate<Common<List>>> Filter =
List::Reduce<CondAppend<*, *, Pred>, ()>
```
Как вы могли заметить, здесь при передаче `CondAppend` мы не все параметры пометили `*`. Это `CondAppend`, в который заранее подставили третий аргумент, равный `Pred`. Также есть #box(`type <T : Tuple> Common`) --- "наиболее узкий общий тип", встроенная в компилятор функция.
И да, конечно же, как у нас поддерживаются extension functions, так поддержим и extension types!
```svr
type <A, T : Tuple> Cons<A, T>::First = A
type <A, B, T : Tuple> Cons<A, Cons<B, T>>::Second = B
```
== Ограничения в generic параметрах
Давайте придумаем generic класс.
```svr
class OrderedEntry<N : Number, T : Comparable<T>>(val num : N, val value : T)
```
Теперь мы хотим написать какой-нибудь метод, который его принимает.
```svr
fun doSomething(param: OrderedEntry<...>, ...)
```
Ага, нам придётся ввести соответствующие переменные.
```svr
fun <N : Number, T : Comparable<T>> doSomething(param: OrderedEntry<N, T>, other: T)
```
... я бы хотел ввести немного сахара для этого дела. Здесь обе переменные имеют _ровно_ такие ограничения, которые требуются для того, чтобы использовать их как параметры `OrderedEntry`. Введём обозначение с вопросительным знаком для того, чтобы вводить такие переменные:
```svr
fun <T : Comparable<T>> doSomething(param: OrderedEntry<?N, T>, other: T)
```
И даже, если у нас эта переменная используется в объявлении в другом месте, разрешим использовать `?` не более одного раза.
```svr
fun doSomething(param: OrderedEntry<?N, ?T>, other: T)
```
Так, например, теперь можем написать `First` и `Second` по-другому, короче:
```svr
type Cons<?A, ?T>::First = A
type Cons<?A, Cons<?B, ?T>>::Second = B
```
Заодно сделаем так: постановка `?` без последующего имени означает то же самое, что и переменная с уникальным именем. В общем, как в Прологе:
```svr
type Cons<?A, ?>::First = A
type Cons<?, Cons<?B, ?>>::Second = B
```
== Перегрузка операторов
Конечно, куда же без неё?
С одной стороны, в Котлине это делается лаконично, ключевым словом `operator`, а в Расте --- длинным (не менее, чем в шесть строчек) `impl Trait`. С другой, в Расте знание о том, что класс реализует оператор, получаемо через информацию о реализации соответствующего трейта, и это можно использовать для написания красивого обобщённого кода.
```rs
impl <U, T : Add<U>> Add<Vector<U>> for Vector<T> {
type Output = Vector::<<T as Add<U>>::Output>;
fn add(self, rhs: Vector<U>) -> Self::Output {
...
}
}
```
Так вот. Совместим это. Написание `operator fun` для класса автоматически добавляет соответствующий интерфейс к предкам этого класса. В частности, это означает, что _возвращаемый тип операторной функции нужно специфицировать явно_. Потому что я хочу разделить этапы вывода типов. Ну да об этом позже, когда начнём его писать...
Назовём возвращаемый тип `Result` для всех операторов ниже.
// Tables are centred by default.
#show table: set align(center)
// Full-width block that is never split across pages.
#let nobreak = block.with(breakable: false, width: 100%)
#nobreak[
Унарные операторы:
#context table(
stroke: text.fill,
columns: 2,
align: center + horizon,
align(center)[Оператор], align(center)[Сахар для],
`-a`, `a.negate()`,
`!a`, `a.not()`,
`~a`, `a.inv()`,
)
Здесь нет унарного плюса... Может быть, добавлю позже.
]
#nobreak[
Бинарные операторы:
#context table(
stroke: text.fill,
columns: 2,
align: left + horizon,
align(center)[Оператор], align(center)[Сахар для],
align(center)[`a + b`], `a.add(b)`,
align(center)[`a - b`], `a.sub(b)`,
align(center)[`a * b`], `a.mul(b)`,
align(center)[`a / b`], `a.div(b)`,
align(center)[`a % b`], `a.rem(b)`,
align(center)[`a & b`], `a.bitAnd(b)`,
align(center)[`a | b`], `a.bitOr(b)`,
align(center)[`a ^ b`], `a.xor(b)`,
align(center)[`a && b`], `a.bitAnd { b }`,
align(center)[`a || b`], `a.bitOr { b }`,
)
Заметим, что `&&` и `||` принимают правым аргументом функцию, возвращающую нужное значение. Это нужно для возможности ленивых вычислений, как это происходит с настоящими булевыми значениями.
]
Также у нас есть интересные операторы `?.`, `?:` и `!!` для обеспечения null-safety. А в Rust был интересный `enum Result`, у которого есть методы `map`, `unwrap_or_else`, `unwrap`. В общем... Это ровно то, что нам нужно.
#nobreak[
#context table(
stroke: text.fill,
columns: 2,
align: center + horizon,
align(center)[Оператор], align(center)[Сахар для],
align(center)[`a?.b()`], `a.safeCall { it.b() }`,
align(center)[`a ?: b`], `a.orElse { b }`,
align(center)[`a!!`], `a.orElseThrow()`,
)
]
Например, для `Result` можем сделать так:
```svr
operator fun <T, R, E> Result<T, E>.safeCall(func : T => R) : Result<R, E> =
match (this) {
Success(?x) => Success(func(x))
Error(?e) => Error(e)
}
```
М-м-м... мы не поговорили про паттерн-матчинг пока? Ну, вы поймёте. Похоже на Rust, но не очень...
```svr
operator fun <T, R> Result<T, ?>.orElse(another : () => R) : Common<(T, R)> = match(this) {
Success(?x) => x
Error(?) => another()
}
```
```svr
operator fun <T> Result<T, ?>.orElseThrow() : T = match(this) {
Success(?x) => x
    Error(?e) => throw AssertionError(e)
}
```
Таким же образом, кстати, можно обрабатывать умные ссылки!
#nobreak[
Так, теперь... инкремент и декремент:
#context table(
stroke: text.fill,
columns: 2,
align: left + horizon,
align(center)[Оператор], align(center)[Сахар для],
    align(center)[`a++`], `a.postInc()`,
    align(center)[`a--`], `a.postDec()`,
    align(center)[`++a`], `a.preInc()`,
    align(center)[`--a`], `a.preDec()`,
)
В отличие от Котлина, здесь это разные методы. Иначе это слишком неудобно для мутабельных классов...
]
Операторы с присваиванием (`+=`, `-=` и так далее) делаем так: сначала ищем метод с соответствующим именем с суффиксом (`addAssign`, `subAssign`, и так далее), а, если не находим, преобразуем в присваивание с применением (`a = a + b`). Если есть и то, и другое --- warning (не ошибка).
#nobreak[
Операторы индексирования и вызова:
#context table(
stroke: text.fill,
columns: 2,
align: left + horizon,
[Оператор], align(center)[Сахар для],
[`a()`], `a.invoke()`,
[`a(b)`], `a.invoke(b)`,
[`a(b, c)`], `a.invoke(b, c)`,
[`a[b]`], `a.get(b)`,
[`a[b, c]`], `a.get(b, c)`,
[`a[b] = x`], `a.set(b, x)`,
[`a[b, c] = x`], `a.set(b, c, x)`,
)
Стоит лишь заметить, что `invoke` как раз отвечает за интерфейс `Function`, который является отражением функциональных типов (`(T, U) => R` и так далее).
]
Ещё есть `range`, отвечающий за `..`. Операторы `==` и `!=`, остаются за методом `equals`, `===` и `!==` --- встроенная в компилятор проверка на идентичность ссылок. И из интересного остались только операторы сравнения.
Здесь у нас есть один интерфейс `Ordered`, с одним же методом `compareTo`. В отличие от привычного `Comparable`, он будет возвращать один из `enum Order { Less, Equal, Greater }`. И есть ещё интерфейс `PartialOrder`, метод которого может также вернуть `Unknown`. Всё это преобразуется понятным образом, я тут пока не документацию пишу, в самом-то деле...
== Внешние перегрузки интерфейсов
Во-первых, заметим, что у нас нет стирания, а значит, нам никто не мешает перегружать интерфейс с разными параметрами. И разный набор интерфейсов для по-разному параметризованного типа. Например, `Vector<String>` реализует `Add<Vector<String>>`, а `Vector<Int>` реализует и `Add<Vector<Int>>`, и `Sub<Vector<Int>>`. С другой стороны, мы не хотим, чтобы реализации конфликтовали. Поэтому позаимствуем _orphan rule_: мы можем определить реализацию #strike[трейта] интерфейса для #strike[структуры] класса, только если мы определили одно или другое. Соответственно, синтаксис пусть будет такой же:
```svr
impl <T> Add<T> for List<T> {
...
}
```
Хм... ладно, я думал, у меня есть что ещё сказать по этому поводу...
== Объекты
Это Singleton классы... ничего особо интересного. Разве что, вместо аннотации `@JvmStatic` сделаем то же самое ключевым словом `static`.
```svr
object Sample {
fun a() = 1
static fun b() = "abc"
}
```
#nobreak[
Компилируется в (обойдёмся без байт-кода, просто аналогичным Java кодом):
```java
public final class Sample {
private Sample() {}
public static final Sample INSTANCE = new Sample();
public int a() {
return 1;
}
public static String b() {
return "abc";
}
}
```
]
а тем временем
```svr
static object Sample {
fun a() = 1
    static fun b() = "abc" // Warning: unnecessary `static`
}
```
#nobreak[
Компилируется в:
```java
public final class Sample {
private Sample() {}
public static int a() {
return 1;
}
public static String b() {
return "abc";
}
}
```
]
== Наследование ассоциированных типов
Сделаем следующим образом: если тип объявлен явно, перегрузить его нельзя. Но можно поставить соответствующий знак вариантности, обещая использовать его только с нужной вариантностью.
```svr
abstract class Base {
type In = Number
type +Co = Number
type -Contra = Number
abstract fun usingCo() : Self::Co
abstract fun usingContra(x: Self::Contra)
abstract fun incorrectUsage() : Self::Contra // Error: can't use in covariant position
}
```
```svr
class Derive : Base() {
// can't override type In
override type Co = Int
override type Contra = Any
override fun usingCo() : Int
override fun usingContra(x: Any)
}
```
== Varargs
По определённым причинам хотелось бы, чтобы можно было параметризовать функцию кортежем. И чтобы он (этот кортеж) был "выпрямлен". Например, так:
```svr
fun <T : Tuple> sample(x: Int, y: String, zs: ...T)
```
Тогда:
```svr
sample(1, "abc", 3) // T is (Int,)
sample(2, "def", 5, "ghi") // T is (Int, String)
```
Но также должна быть возможность и массив потребовать. Поэтому, для консистентности, нужно будет указать _и_ что это массив, _и_ что это vararg.
```svr
fun sample(xs: ...Any[])
``` |
|
https://github.com/SnowManKeepsOnForgeting/NoteofEquationsofMathematicalPhysics | https://raw.githubusercontent.com/SnowManKeepsOnForgeting/NoteofEquationsofMathematicalPhysics/main/main.typ | typst | #import "@preview/bubble:0.2.1": *
// Alternative code-listing package, currently unused.
//#import "@preview/codelst:2.0.1": sourcecode
// Apply the "bubble" template to the rest of the document,
// filling in the course-note metadata shown on the title page.
#show: bubble.with(
  title: "Note of Mathematical Physics Equations",
  subtitle: "Course Note",
  author: "Lawrence",
  //affiliation: "University",
  date: datetime.today().display(),
  year: "2024 Year",
  class: "Fall Season Class",
  main-color: "4DA6FF", //set the main color
)
// Override the template's colored headings, enum numbers and list markers
// with plain black text.
#show heading: set text(fill: black)
#set enum(indent: 1em, numbering: n => [#text(fill: black, numbering("1.", n))])
#set list(indent: 1em, marker: n => [#text(fill: black, "•")])
// One chapter per file, each starting on a fresh page.
#include "Chapter_1/Chapter_1.typ"
#pagebreak()
#include "Chapter_2/Chapter_2.typ"
#pagebreak()
#include "Chapter_3/Chapter_3.typ"
|
|
https://github.com/pepega007xd/slplugin | https://raw.githubusercontent.com/pepega007xd/slplugin/master/ip2_report/main.typ | typst | #import "template.typ": *
// Math shorthands used throughout the report.
// ls(x, y): the separation-logic list-segment predicate, typeset as "ls(x, y)".
#let ls(from, to) = {$ "ls"(from, to) $}
// Septraction operator: a minus sign joined tightly with a circled asterisk.
#let sep = $- #h(-0.12em) ast.circle$
// The special nil variable, typeset upright.
#let nil = $"nil"$
// Apply the report template, providing the title-page metadata and abstract.
#show: project.with(
  title: "Shape Analysis Using Separation Logic",
  authors: (
    "<NAME>",
  ),
  abstract: [
    This work describes the implementation of a static analysis of C programs, that tries to find bugs related to dynamically allocated memory. The analysis is implemented using the Frama-C framework, and it is done by dataflow analysis using a solver for separation logic. This work also explores possible improvements to this prototype.
  ],
)
= Introduction
The way dynamic memory allocation is handled is one of the key aspects that determines the runtime performance of a language. The usual choice for high-level languages is garbage collection, which tracks allocations at runtime, and frees memory when an algorithm determines that it is no longer reachable. This has a performance impact that can be unacceptable in certain applications, such as embedded systems, operating system kernels, or real-time applications. Other languages, notably C and C++, let the user's code manage memory manually. While being the most performant option, this allows for a range of errors resulting from incorrect handling of allocated memory. For example, null pointer dereferences are a common mistake, where a fallible function indicates an error by returning a `NULL` pointer, and the programmer forgets to check for it. Use-after-free is a situation where a pointer to a freed allocation is used to read or write data to memory, resulting in undefined behavior. The opposite situation, where the user does not free memory when it is no longer needed, is called a memory leak. Another common error is double free, where a pointer to already freed allocation is passed to the `free` function again, resulting again in undefined behavior.
To mitigate these problems, a number of methods have been developed. One option is the instrumentation of program binaries with extra code that is able to detect invalid memory accesses at runtime, and safely stop the program. An example of this is Valgrind @valgrind, or LLVM's AddressSanitizer @asan. Even though these methods are efficient at mitigating the security risks associated with memory errors, these tools come with a performance penalty, and obviously cannot be used to check programs ahead of time. Some languages are built to prove a program's memory safety statically during compilation; this is achieved by placing additional restrictions on the code, as, for example, Rust does with its ownership and borrowing rules. While being a reasonably safe option, this makes implementing some programs difficult, or requires writing unsafe code. Static analysis tools exist for other languages, including C, which will be the topic of this work. For example, the static analyzer Infer can perform analyses of memory allocations.
= Frama-C framework
Frama-C @frama_c is a framework for building analysis tools of C99 source code. Frama-C itself is written mainly in the OCaml programming language. Unlike other tools, which focus on finding common bugs using heuristics, Frama-C specializes in verification tools, which guarantee that after successful completion, the program is correct. The framework itself is composed of a kernel, multiple plugins, and a GUI to present the results of analyses. The kernel provides common functionality for multiple plugins. The main component is an adapted form of the _C Intermediate Language_ (CIL) @cil, constructed by the Frama-C kernel for use within plugins, as well as an API for its manipulation. CIL has the form of an abstract syntax tree of the input source code, with extra semantic information added to it. This includes types of variables, whether a variable is initialized at a certain node, and other information. Frama-C also transforms the input code, making operations like type casts explicit, and otherwise making the code more suitable for static analysis. For example, all `return` statements in a function are replaced with `goto` statements leading to a single `return` at the end of the function. A complete description of CIL can be found in the module `Frama_c_kernel.Cil_types` of the Frama-C API documentation @api_documentation. The modules `Cil`, `Cil_datatype`, and `Ast_info` inside `Frama_c_kernel` contain other useful functions for adding content to the AST.
One of the functions of Frama-C is to help with the development of custom analyses. This is done using its plugin system, where a separate plugin binary is built and linked dynamically to the Frama-C runtime. Frama-C then handles command-line argument parsing, reporting results, storing intermediate states of analyses, and exposing APIs to the plugin for access to the input's CIL.
== Dataflow analysis
Besides a general framework for crafting analyses, Frama-C provides generic implementations of common algorithms used for static analysis. One of these is dataflow analysis, implemented in module `Frama_c_kernel.Dataflow2`. Dataflow analysis works by assigning an initial value to each node of a Control Flow Graph (CFG), and then systematically updating the values of nodes using a transfer function, following the edges between them. When a node is reached for the second time, the data for this node is computed again based on the data from the previous node, and then joined with the previous data stored for the node. Typically, a CFG is a structure where a node represents a basic block -- a sequence of instructions that will be executed in order, with no loops or conditionals inside. Edges then represent the control flow of a program -- branches and loops.
However, Frama-C implements dataflow analysis in a slightly different way. In dataflow analysis as it is implemented in Frama-C, instructions (`Cil_types.instr`) have a separate transfer function from statements (`Cil_types.stmt`). Instruction is a kind of statement that does not change the control flow of the current function, such as variable definition, assignment, or function call. To implement a dataflow analysis, the user needs to provide the type of data that will be stored and updated for each node of the CFG, and the implementation of the following functions. Before running the analysis, the data for the first statement must be set manually.
- `computeFirstPredecessor` -- this function is called when the analysis reaches a statement that has no data associated with it. The input for this instruction is the result of the previous statement's transfer function, and the statement itself. The result of this function is the initial state for the given statement, which will be stored. Note that because you set the initial state for the first statement manually, this function is not called for the first statement.
- `doStmt` -- this function is used by the transfer function for statements -- it gets the statement itself, and data provided by the previous statement's transfer function, and it decides what to do. One option is not to continue the analysis of this statement, another option (default) is to continue the analysis of the inside of this statement, and the third is to also continue, but with modified data. Note that the full transfer function for compound statements (if, loop, etc.) is implemented by the dataflow module itself, the user is not supposed to call `doInstr` inside `doStmt`. Also note that the result of the full transfer function -- the new computed data for each statement, is not stored. It is only sent to the next statement's transfer function.
- `doInstr` -- this is the transfer function for instructions, it receives data from the englobing statement, and generates new data that will be used by the transfer function of the englobing statement. Note that the result of `doInstr` is also not stored, only sent to the transfer function that called `doInstr` internally.
- `combinePredecessors` -- this is the join operation. It is called when the analysis computes a new state for a node that already has some data associated with it. This includes situations, where the analysis goes through two branches of an `if` statement, and then joins data from both branches on the statement immediately following the conditional block. Input for this function is the statement itself, an old state -- the data currently stored for the statement, and a new state -- the data that was just computed by the transfer function. The returned value is then stored as the data for this statement.
- `doGuard` -- this function is called when an `if` statement is reached. It receives the result of the transfer function for the `if` statement, and the condition expression, and generates two states, each to be used in one of the branches.
- `doEdge` -- called between analyzing two statements. The function receives both statements, and the result of the first statement's transfer function. The function can modify this data, and the modified version will be passed into the second statement's transfer function.
Note that this is not the full API required for implementing dataflow analysis in the `Dataflow2` module, but the other functions, such as pretty-printers for stored data, are not important to the actual analysis, and are therefore omitted.
== Visitor mechanism <visitors>
Frama-C provides a convenient way to modify the AST of the analyzed program using a user-provided visitor object, as described in @developer_manual. The plugin constructs an object inheriting from a class inside Frama-C, and overrides some of the methods corresponding to the AST node that it visits. For example, when the plugin overrides the `vstmt` method, the method will be called at each statement of the AST, and the statement will be passed into the method as input. The method returns a value of type `Cil_types.visitAction`, which allows the visitor to either leave the node as is, change it, or continue with the visits of its children.
There are two kinds of visitors, `Cil.CilVisitor` and visitors derived from `Visitor.frama_c_visitor`. The former does not update the internal state of Frama-C when the AST is modified, and it is therefore unsuitable for making larger changes, such as adding or removing statements. The latter visitor is able to update the internal state of Frama-C after a pass over the AST, but this update must be done explicitly by the plugin. The way of doing this update was not obvious from the available documentation, so I mention it here explicitly:
```ocaml
Ast.mark_as_changed (); (* tell Frama-C that the AST was changed *)
Cfg.clearFileCFG file;
Cfg.computeFileCFG file; (* recompute the CFG, which is used for dataflow analysis *)
```
These lines must run after adding or removing a statement (`Cil_types.stmt`), or after changing the type of a statement, for example after changing an instruction to a block.
Frama-C also provides a different kind of visitor called a _copy_ visitor, as opposed to the _inplace_ visitor described above. The copy visitor does not have the same state synchronization problem as the inplace visitor, but it creates a new _project_ inside Frama-C, instead of modifying the AST of the default, existing one. A project in Frama-C is a collection of all configuration and input data provided by the user. This includes the values of command-line arguments, the AST of the input program, the CFG that constructed internally by Frama-C, and other data. Switching into a new project resets all these values, making it impossible to access the command-line plugin configuration.
== Ivette <ivette_chapter>
Frama-C currently supports two GUI frameworks. There is a legacy GTK application called `frama-c-gui`, and a new Electron-based application called Ivette @ivette. Both applications can be extended to show custom data, but in the case of `frama-c-gui`, this requires manually implementing new GTK widgets, which is inconvenient. Also, the codebase is considered unmaintainable by the authors and is intended to be replaced by Ivette.
Ivette is a desktop application written in TypeScript using a client-server architecture. Ivette, the client, asynchronously polls the server (Frama-C plugin) for data and displays it. The server has to first register, what data has to be shown. The protocol is composed of data in the JSON format, with a predefined structure. The server can notify the client about changes using signals, which also have to be registered ahead of time. The API for registering and using Ivette can be found in module `Server` inside the library `frama-c-server.core`.
Ivette is currently in development, and the API is subject to change in future versions of Frama-C. There is an undocumented requirement for plugins to be used with Ivette: the plugin must have a command-line argument, which enables the analysis. For example, the Eva plugin has the `-eva` command line argument, without which the plugin does not do anything. This requirement stems from the way Ivette processes command-line arguments and executes `frama-c` multiple times, first without user-provided arguments. In this first pass, the analysis must not run.
= Separation logic
A common technique in program analysis is to generate logical formulae describing the possible states of the program at each point, and then use a solver to prove the correctness of the program. However, earlier analyses of programs with dynamic memory have faced a problem with globality -- an abstract state of the program would contain the description of the whole heap in such a way, that an update of a single value would require updating the whole program state. Separation logic (SL) aims to solve this inefficiency by describing the heap in a way that allows for local updates of the heap. In this work, I will describe the syntax and semantics of SL @separation_logic, for which <NAME> implemented a solver called Astral @astral_github. Testing the solver's performance on formulae generated during the analysis is a secondary goal of this work.
== Syntax
#let Var = $bold("Var")$
Let $x,y in Var$, where #Var is an infinite set of variables, with a special variable $nil in Var$. The syntax of SL is defined by the following grammar.
$ phi ::=& x = y | x != y & "(pure atoms)" \
&| x |-> y | ls(x,y) & "(spatial atoms)" \
phi ::=& phi and phi | phi or phi | phi and_not phi | not phi #h(1cm) & ("boolean connectives") \
phi ::=& phi * phi | phi sep phi & "(spatial connectives)" $
The atomic formulae $x = y$ and $x != y$ simply describe equality and inequality of two variables, $x |-> y$ corresponds to a pointer from $x$ to $y$. The atom $ls(x,y)$ describes a sequence of pointers from $x$ to $y$, or in other words, an acyclic linked list. The boolean connectives have their usual meaning, the spatial connective $phi * phi$, called separating conjunction, says basically that the heap can be split into two disjoint parts, each satisfying one of the formulae. The septraction operator #sep has roughly the following meaning: formula $phi_1 sep phi_2$ is satisfied for all heaps, for which exists another heap satisfying $phi_1$, which can be merged with our heap, together satisfying $phi_2$.
== Semantics
#let Loc = $bold("Loc")$
The models of SL formulae are so-called stack-heap models. Let #Loc be an ordered set of memory locations, then stack $s$ is a partial function from #Var to #Loc, and heap $h$ is a partial function from #Loc to #Loc, where $h(s(nil)) = bot$. The symbol $bot$ says that the function $h$ is not defined for this input, which corresponds to an invalid memory access in the analyzed program. Stack-heap model is then simply a pair $(s,h)$. The semantics of equality and inequality atoms are satisfied on all models, where $s(x) = s(y)$, or $s(x) != s(y)$ respectively, and the heap is empty. Points-to atom $x |-> y$ is satisfied on a stack-heap model, where the heap contains only a single allocated location, $h = {s(x) |-> s(y)}$. The list segment predicate $ls(x,y)$ is satisfied on every model where either $s(x) = s(y)$ and the heap is empty, or where the heap is a series of distinct allocated locations $h = {l_0 |-> l_1, ..., l_(n-1) |-> l_n}$ of length at least $n=1$, where $s(x) = l_0$ and $s(y) = l_n$.
Semantics of boolean connectives are defined as usual. Separating conjunction $phi_1 * phi_2$ is satisfied on models, for which exist two heaps $h_1, h_2$, such that
$ (s, h_1) tack.double phi_1 and (s, h_2) tack.double phi_2 and h_1 union.plus h_2 != bot and h = h_1 union.plus h_2 . $
Septraction is defined similarly,
$ (s,h) tack.double phi_1 sep phi_2 <=> exists h_1: (s, h_1) tack.double phi_1 and h_1 union.plus h != bot and (s, h_1 union.plus h) tack.double phi_2 . $
The operation $union.plus$ is defined as a union of two heaps, but it is defined only for heaps, whose domains are disjoint, and also share only named locations, i.e. locations, which are in the image of $s$.
== Fragment used for analysis
Although any formula generated by the mentioned grammar is a well formed SL formula, I will use just a subset of all these formulae. This fragment has the form $phi = phi_1 * phi_2 * ... * phi_n$, where $phi_1, ..., phi_n$ are atomic formulae for equality, inequality, points-to relation, and list segments. Note that pure atoms are satisfied only on the empty heap, which is why separating conjunction is used for both spatial and pure atoms.
= Shape analysis
Shape analysis is a kind of static analysis that aims to detect the shapes of data structures in the heap, such as linked lists, trees, and variations of those, and to use this knowledge to describe the state of a program's memory in more detail than would be otherwise possible. In @shape_analysis, the authors propose a method to analyze programs with singly linked lists using separation logic for better scalability. However, they work with SL of slightly different semantics (pure atoms are satisfied on any heap), and they also implement shape analysis of a minimal language. They also do not use a dedicated solver for separation logic. My objective is to adapt their method to support at least a part of the C programming language, and to work with SL as implemented by Astral.
== Implementation
In its current state, the plugin can analyze programs without function calls except allocation functions, multiple dereferences on any side of assignments, and global variables. Most of these missing aspects can be implemented without any new code in the analysis itself, merely by preprocessing the CIL AST and adding some heuristics. A more interesting extension might be to cover doubly linked lists and nested lists, since predicates for these structures are also available in recent versions of Astral. More on this in @future_work.
Let us start with the type of data that will be stored for each CFG node during the dataflow analysis. Currently, this is a list of SL formulae (`SSL.t list`). Each formula represents a possible state the program could be in. For example, after a statement `x = malloc(sizeof(void *));`, the program's memory could be described by any of the two formulae $phi_1 = (x |-> y')$, or $phi_2 = (x = nil)$. This expresses the two possible outcomes of calling `malloc` -- either the allocation succeeded, and x is now an allocated memory location, or it failed, and `x` is equal to `NULL`. The $y'$ in the formula simply represents an arbitrary memory location that x is now pointing to, and it is a _fresh_ variable.
Variables in the formulae are of two kinds, named and fresh variables. Named variables correspond to variables in the analyzed code, simply by having an identical name. Fresh variables, in this text marked with a tick $x'$, are variables corresponding to memory locations unnamed in the analyzed code, for example the inner nodes inside a linked list, or the target of a dangling pointer. Note that Astral does not differentiate between named and fresh variables in any way, the only distinction is how this analysis treats each kind. Internally, fresh variables are differentiated by their names containing an exclamation mark.
The implementation of the analysis itself is done through the `Dataflow2` module API.
- `computeFirstPredecessor` -- This function is implemented as identity.
- `doInstr` -- As mentioned, this is the transfer function for instructions, and therefore much of the logic of the analysis is implemented here. Note that an instruction in the CIL naming convention is any C statement that doesn't affect the control flow of the current function, which means that a function call is also an instruction. As input, the function gets the instruction itself, and the state from the previously analyzed instruction, and it must return a new state for this instruction based on the inputs. For this analysis, I am interested in the following instructions: `LocalInit`, `Set`, and `Call`.
- `LocalInit` is the initialization of a local variable. Initializations of non-pointer variables are currently ignored, and the previous state is returned. For pointer variables, the action depends on the initializing value. If the variable is initialized with a call to an allocation function (currently, this is detected simply by its name `malloc`), for each previous state, two new states are generated. One of them represents successful allocation, and therefore $("<name>" |-> "<fresh>")$ is appended to it using separating conjunction. `name` is simply the name of the newly initialized variable, and `fresh` is a globally unique (fresh) variable name generated by Astral. Any other initialization is handled with the same logic as an assignment.
- `Set` is an assignment instruction. Again, this analysis is only interested in assignments to pointer variables, or assignments where the right-hand side contains a dereference. For other assignments, the previous state is returned.
Simple assignment `a = b;`, where the type of the variables is a pointer type, results in the following change to the previous state. First, all occurrences of the variable `a` in the formula are substituted with a new, fresh variable, and then the atom $(a = b)$ is added to the formula.
The assignment of a dereferenced variable, `a = *b;`, is more complicated. We must first find the variable, to which `b` is pointing, and then do essentially the same as with simple assignment. To find the variable, to which `b` is pointing, the formula is first transformed into a shape, in which `b` or its alias is not a part of a list segment predicate, but a simple points-to predicate. Alias here means another variable equal to `b`. Then, the transitive closure of equality $C(b)$ is found for `b`, and then the set of all variables that are pointed to by this closure is found simply by iterating through all the atoms of the formula: $T = {t; exists c in C(b) : c |-> t}$. This set $T$ ("target") can be empty -- this corresponds to the possibility of dereferencing an invalid pointer in the program. In this case, the analysis is stopped. $T$ can contain exactly one element $T = {t}$, then `a` is substituted with a new variable name as in the first case, and atom $a |-> t$ is appended to the substituted formula. This is the computed state. $T$ cannot contain more than a single element, because such a formula would be unsatisfiable. Unsatisfiable formulae are filtered out in `doEdge`.
For write to a dereferenced variable `*a = b;`, the single-element set $T = {t}$ with the target of `a` is again found, and then the spatial atom $s |-> t$ is changed to $b |-> t$. `s` is a member of the equivalence class $C(a)$. Notice that in this case, no substitutions are made, because no additional equality is being added.
- `Call` is a function call instruction. Currently, only allocation functions are supported. The implementation is the same as in allocation during initialization, with the extra step that before creating two states for two outcomes of the allocation, the name of the variable `x` in `x = malloc(sizeof(void *));` would be first substituted in the whole formula with a fresh variable.
The function `free` is also handled here. As in the previous cases, the set of aliases of the freed variable is again found, and the points-to atom leading from one of these variables is removed.
// Shorthands for the formula lists used by the join algorithm described below.
#let old = $phi_"old"$
#let new = $phi_"new"$
#let all = $phi_"all"$
#let unique = $phi_"unique"$
#let next = $phi_"next"$
- `combinePredecessors` -- This function is called when the analysis reaches a CFG node for the second time, and computes new data for this node. `combinePredecessors` gets the old state and the new state as input parameters, and returns these states joined together, as well as information on whether the old state was updated in any way. This is then used by the dataflow analysis to decide whether to continue updating nodes following this one. Note that the state of a CFG node is internally a list of formulae, but the meaning is a logical disjunction of these (the models of a state for a single CFG node are the union of models of all the formulae in the list).
In theory, we could simply take the old state #old and new state #new, and check the entailment $old tack.double new$. If this were true, all models of the new state would have been contained in the old state, and we could have returned that the state had not changed. Otherwise, we would have returned the disjunction of new and old states (internally, a list concatenation). However, this would be imprecise: a single formula in #new with even one model not contained in #old would cause the entirety of #new to be added.
Instead, the formulae of both old and new states are all placed in a single list #all, and a new list #unique is constructed by the following algorithm. In each iteration, a single formula #next is removed from #all, and the following entailment is computed:
$ #next tack.double or.big_(phi in phi_"all" union phi_"unique") phi $
If the entailment is true, #next is simply thrown out, since its models are fully contained in either #all or #unique. If the entailment is false, #next is added to #unique. This is done until #all does not contain any formulae. The joined state for this CFG node is then the list #unique. This method proved to be efficient in eliminating duplicate formulae, but it is also probably the most expensive part of the analysis, because when computing the entailment, all fresh variables in the formulae are existentially quantified.
- `doStmt` -- This function decides, whether to continue with analysis upon reaching a statement. This is independent of the actual algorithm for dataflow analysis, which decides when the analysis is complete -- that is, when the state of all statements cannot be updated. Currently, this is set to always continue analysis.
- `doGuard` -- called when the analysis reaches an `if` statement. The function gets the state from the previous node and the condition expression, and returns the states to use in each of the branches. Currently, all conditions that are not in the form `a == b` or `a != b` are considered nondeterministic. When reaching a nondeterministic condition, the analysis simply uses the input state as the state for both branches, because it gained no additional information about what is true in the branches. If the condition is `a == b`, then all formulae of the input state $phi_("in", i)$ are tested separately using the following method. For the "then" branch, each formula is appended with $(a = b)$ using separating conjunction, and checked for satisfiability. Unsatisfiable formulae are filtered out, the rest is used in the "then" branch. For the "else" branch, the same method is used, only the formula is appended with $(a != b)$. This way, only satisfiable formulae are passed into their respective branches, simplifying further analysis. For the case of condition `a != b`, the algorithm is the same, only the states for the two branches are swapped. If there is no formula left for a certain branch after filtering out unsatisfiable formulae, the branch is marked as unreachable. Note that this method still allows for the analysis of conditions such as `(list != NULL)`, because the `NULL` constant will be swapped for the special `_nil` variable, see @preprocessing for more details.
- `doEdge` -- called between the updates of nodes, it can modify the state that is sent from the previous node to the next one. In this function, the satisfiability of all formulae in the state is checked, and unsatisfiable formulae are removed from the state. All formulae are also simplified using Astral's `Simplifier.simplify` function, which performs many simple syntactic optimizations of the formula without changing its semantics, such as flattening of nested operations. Most importantly, this is where the abstraction into list segments is performed, and where the formulae are further simplified, this time taking advantage of the different semantics of named and fresh variables. More on this below.
== Abstraction and simplifications <simplification>
The following simplifications are done on each formula separately in this order. The main purpose of most of these is to reduce the number of fresh variables, since these must lie under a quantifier when checking an entailment. The other purpose is to reduce the size of the formula itself, mainly by eliminating useless information, such as irrelevant equalities and inequalities.
- The formula is syntactically simplified using Astral's `Simplifier`.
- The formula is checked for satisfiability, and if unsatisfiable, it is removed.
- Variables going out of scope on this transition between statements are substituted with fresh variables. A list of all variables in scope at each statement is computed before starting the analysis using a preprocessor pass, more in @preprocessing. Variables going out of scope are the set difference between the sets of variables in scope at the previous and next statement.
- _Junk_ atoms are removed from the formula. Junk atoms are atoms $x' |-> y$, $ls(x',y)$, or $x' = y$, in which $x'$ is fresh (as indicated by the tick), and it appears only once in the formula. Removing the atom $x' = y$ does not have any effect on the analysis, since the variable $x'$ is mentioned only in this equality. A points-to or list segment atom starting at $x'$ corresponds to a memory leak, since $x'$ is an unnamed variable, and it is therefore no longer reachable from the analyzed code. Removing this information will not affect the result of the analysis, but it will simplify the formula.
- Fresh variables equal to nil are substituted with nil. This is done by finding the equivalence class for the special variable nil, and substituting all fresh variables in this class with nil. This is done to reduce the number of fresh variables in the formula.
- Equivalence classes of fresh variables are reduced to a minimal size. The equivalence class $E_x'$ for a fresh variable $x'$ is computed, and if it contains a named variable $y$, all fresh variables in $E_x'$ are substituted with $y$. If not, all fresh variables in $E_x'$ are substituted with $x'$. After this, all equalities with the same variable on both sides are removed. This simplification, like the previous, is done to reduce the number of variables in the formula.
- Consecutive points-to atoms are abstracted into a list segment atom. This is done by joining two spatial atoms (points-to atom, or a list segment) into a single list segment, while removing the fresh variable in their middle:
$ P(x,y') * P(y', z) ~> ls(x,z) * x != z $
where
$ P(x,y) ::=& x |-> y | ls(x,y) $
Since Astral allows list segments to have the length zero, the inequality $x != z$ is added. This is done for all created list segments, and all other steps in this analysis assume that this inequality is present for any list segment.
However, several conditions need to be met for this abstraction to be valid. Firstly, $x$ and $z$ must be distinct before the transformation. This is checked by checking the satisfiability of the original formula with the $x = z$ atom added. If the resulting formula is satisfiable, the abstraction cannot be done.
Secondly, $y'$ cannot be a part of any other equality or pointer predicate anywhere else in the formula because this would not be compatible with the semantics of the list segment predicate. This property is checked syntactically in the formula itself.
- All fresh variables occurring solely in inequalities are removed along with the inequality atoms themselves. These inequalities are left in the formula after joining existing list segments and can be removed, since they do not contain any meaningful information.
- Formulae are checked for redundancy using the same algorithm that is used when computing a join of two states in `combinePredecessors`. The set of initial formulae #all at the start of the algorithm is simply the whole set of input formulae.
== Preprocessing <preprocessing>
Preprocessing is done on the AST of the analyzed program using visitors (described in @visitors). Writing the analysis rules themselves to support all syntactical constructs in the C language would be tedious, a much better option is to support basic constructs in the analysis itself, and to transform any program into this basic form before the analysis. Currently, the following transformations are done in this order:
- All variable names in the analyzed program are made globally unique. This is done as a precaution for the future, when the analysis will work with local as well as global variables, and to make the output of the analysis easier to read and interpret.
- All pointer or integer constants in the file are replaced with a special variable `_nil`, which is set to `NULL` at the start of the program. Thanks to this, the analysis can omit the support for constants everywhere besides initialization, notably in the evaluation of conditions, e.g. `x == NULL`.
- Field accesses on structs are either converted to dereferences, or removed from the AST altogether. Consider the following structure:
```c
struct List {
List *next;
uint32_t data;
}
```
When visiting a statement, in which a field of this structure is accessed, the structure field is analyzed. If the structure itself has more than one self-referential pointer (`List *`), the statement is discarded (removed from the AST), and a warning about a skipped structure is printed. This structure likely represents an ADT which is not yet supported, such as a doubly linked list, a binary tree, a graph, etc. If the structure has a single self-referential pointer, and it is the currently accessed field, the field access is converted to a simple dereference. For example, `list->next` would be converted into `*list`.
If the accessed field is not self-referential, no matter whether the structure itself contains such a field or not, the whole statement (e.g. `list->data = 42;`) containing the field is removed.
- For each statement, its local variables are stored in an associative array for use in the simplification @simplification. This is done simply by storing the local variables of each block scope when traversing the AST.
- Type casts are removed from the AST. The analysis does not currently take types of variables into account, apart from a broad distinction between pointer and non-pointer types, and it is therefore simpler to remove all type casts from the AST, eliminating the need to create rules for these in the analysis. This is the only visitor using the low-level `Cil.CilVisitor` instead of the `Visitor` module itself, because the high-level visitor introduces pointer casts (e.g. `void *` into `List *`) back into the AST. This is also why this step is done as the last one.
== GUI
As mentioned in @ivette_chapter, the API for interfacing with Ivette is not yet stable, or documented, so it would not make sense to create complex visualizations of the analysis with this version of Frama-C. However, mostly for debugging purposes, I added a simple text field for each statement, which shows the final state (a set of formulae) reached in each statement. The plan is to expand this GUI to show the whole progress of the analysis in real time, leveraging the asynchronous nature of Ivette, and to provide a user-friendly result summary including errors and warnings in the relevant parts of the source code.
#figure(caption: [Ivette showing formulae computed for two statements. The right panel shows the original code, the left panel shows the code after preprocessing.], image("ivette.png"))
== Example <example>
The following example shows the analysis of a simple program, which constructs a singly linked list of a nondeterministic, unbounded length. The program then iterates through the list, modifying the data stored in each node. Finally, the list is deallocated. The full source code is available in @example_code.
After the allocation of the list, the state is the following formula:
$ \_nil = nil * ls("start", \_nil) * "start" != \_nil $
This demonstrates many things, firstly that the abstraction to a list segment was successful, otherwise the analysis would not have terminated at all, because the state formulae would grow in length with each iteration of the loop, indefinitely. Secondly, the small size of the formula shows that the elimination of variables going out of scope works, otherwise we would see other variables from within the block inside the formula. Lastly, the fact that only one formula remained after analyzing the block shows that the simplifications done in `doEdge` are effective in simplifying and deduplication of the formulae.
After the second block, which traverses the list, the state is not changed. This should be expected, as the code does not modify the structure of the list itself, only the data contained within it.
After the third block, which frees the list, we get the following state:
$ \_nil = nil * "start" != \_nil $
This formula describes the empty heap, indicating that the analysis correctly followed the actions of the code. The atom $("start" != \_nil)$ contains the information that `start` is now a dangling pointer, since it is not allocated, but it is not a null pointer. If we add a statement to the end of the program dereferencing the pointer, for example `start->next = NULL;`, the analysis will report an error:
```
[SLplugin] examples/linked_list.c:54: Failure:
detected a dereference of an unallocated variable
```
= Future work <future_work>
A small shortcoming of the current implementation is that statements with multiple composite l-values are not supported by the analysis. However, these composite statements can be broken down into a sequence of the following four basic statements:
```c
a = b;
a = *b;
*a = b;
a = malloc(...);
```
For example, the statement `*a = **b;` can be broken down into this sequence, using temporary variables:
```c
tmp_1 = *b;
tmp_2 = *tmp_1;
*a = tmp_2;
```
This will be simple to implement using a new AST visitor. The main reason why this has not been implemented already is that I encountered some difficulties with adding statements to the AST, which were resolved only recently. Moreover, this missing feature was not essential when testing the analysis, so the priority for implementing this preprocessing pass was low.
Another missing feature is the analysis of function calls other than allocation functions. This can be done in multiple ways, for example programs without recursion can be inlined into a single function. This is not a big limitation, since recursion is generally avoided in low-level code. Another option is to create function summaries -- pairs of input formulae and changes done by the function, stored for each function. Ideally, only the relevant part of the formula should be passed into a function for analysis, which would reduce the time needed for analysis and help create more general summaries, allowing for the simple reuse of the computed result of a function call.
Global variables can be converted to local variables in the entrypoint function, and passed into every other function as extra input parameters. This can be further optimized by not passing variables irrelevant to the content of the called functions.
Most importantly, Astral now supports more predicates, which can be used for an abstraction of doubly linked lists, and nested lists. Therefore, the implementation of more abstractions is possible. This would mean going beyond the abstraction rules laid out in @shape_analysis, but it would allow for the analysis of a much wider range of programs, as doubly linked lists are used more commonly than singly linked lists.
= Conclusion
In the current state, the analysis is able to process simple programs operating on singly linked lists. The main limitations include the lack of support for analysis across function boundaries, and the inability to analyze more complex data structures. However, Astral itself supports other, currently unused abstractions, making it possible to implement the analysis of more complex structures in the future.
#bibliography("references.bib")
#let appendix(body) = {
  // Number appendix headings as "Appendix A", "Appendix B", … and clear the
  // default supplement so references read as plain "Appendix X".
  set heading(supplement: [], numbering: num => "Appendix " + numbering("A", num))
  body
}
#counter(heading).update(0)
#pagebreak()
#appendix[= <example_code>]
This is the full source code for the example in @example.
```c
#include "stdlib.h"
typedef struct List {
struct List *next;
int data;
} List;
int main() {
List *start = malloc(sizeof(List));
if (start == NULL)
return 1;
{ // construct a linked list of unknown size
List *list = start;
list->next = NULL;
int nondeterministic;
while (nondeterministic) {
List *next = malloc(sizeof(List));
if (next == NULL)
return 1;
list->next = next;
list = list->next;
}
list->next = NULL;
}
{ // walk to the end of the list
List *list = start;
while (list != NULL) {
List *next = NULL;
next = list->next;
list->data = 42;
list = next;
}
}
{ // free the list
List *list = start;
while (list != NULL) {
List *next = NULL;
next = list->next;
free(list);
list = next;
}
}
// this would be detected as an error
// start->next = NULL;
}
``` |
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-16D40.typ | typst | Apache License 2.0 | #let data = (
("KIRAT RAI SIGN ANUSVARA", "Lm", 0),
("KIRAT RAI SIGN TONPI", "Lm", 0),
("KIRAT RAI SIGN VISARGA", "Lm", 0),
("KIRAT RAI LETTER A", "Lo", 0),
("KIRAT RAI LETTER KA", "Lo", 0),
("KIRAT RAI LETTER KHA", "Lo", 0),
("KIRAT RAI LETTER GA", "Lo", 0),
("KIRAT RAI LETTER GHA", "Lo", 0),
("KIRAT RAI LETTER NGA", "Lo", 0),
("KIRAT RAI LETTER CA", "Lo", 0),
("KIRAT RAI LETTER CHA", "Lo", 0),
("KIRAT RAI LETTER JA", "Lo", 0),
("KIRAT RAI LETTER JHA", "Lo", 0),
("KIRAT RAI LETTER NYA", "Lo", 0),
("KIRAT RAI LETTER TTA", "Lo", 0),
("KIRAT RAI LETTER TTHA", "Lo", 0),
("KIRAT RAI LETTER DDA", "Lo", 0),
("KIRAT RAI LETTER DDHA", "Lo", 0),
("KIRAT RAI LETTER TA", "Lo", 0),
("KIRAT RAI LETTER THA", "Lo", 0),
("KIRAT RAI LETTER DA", "Lo", 0),
("KIRAT RAI LETTER DHA", "Lo", 0),
("KIRAT RAI LETTER NA", "Lo", 0),
("KIRAT RAI LETTER PA", "Lo", 0),
("KIRAT RAI LETTER PHA", "Lo", 0),
("KIRAT RAI LETTER BA", "Lo", 0),
("KIRAT RAI LETTER BHA", "Lo", 0),
("KIRAT RAI LETTER MA", "Lo", 0),
("KIRAT RAI LETTER YA", "Lo", 0),
("KIRAT RAI LETTER RA", "Lo", 0),
("KIRAT RAI LETTER LA", "Lo", 0),
("KIRAT RAI LETTER VA", "Lo", 0),
("KIRAT RAI LETTER SA", "Lo", 0),
("KIRAT RAI LETTER SHA", "Lo", 0),
("KIRAT RAI LETTER HA", "Lo", 0),
("KIRAT RAI VOWEL SIGN AA", "Lo", 0),
("KIRAT RAI VOWEL SIGN I", "Lo", 0),
("KIRAT RAI VOWEL SIGN U", "Lo", 0),
("KIRAT RAI VOWEL SIGN UE", "Lo", 0),
("KIRAT RAI VOWEL SIGN E", "Lo", 0),
("KIRAT RAI VOWEL SIGN AI", "Lo", 0),
("KIRAT RAI VOWEL SIGN O", "Lo", 0),
("KIRAT RAI VOWEL SIGN AU", "Lo", 0),
("KIRAT RAI SIGN VIRAMA", "Lm", 0),
("KIRAT RAI SIGN SAAT", "Lm", 0),
("KIRAT RAI SIGN YUPI", "Po", 0),
("KIRAT RAI DANDA", "Po", 0),
("KIRAT RAI DOUBLE DANDA", "Po", 0),
("KIRAT RAI DIGIT ZERO", "Nd", 0),
("KIRAT RAI DIGIT ONE", "Nd", 0),
("KIRAT RAI DIGIT TWO", "Nd", 0),
("KIRAT RAI DIGIT THREE", "Nd", 0),
("KIRAT RAI DIGIT FOUR", "Nd", 0),
("KIRAT RAI DIGIT FIVE", "Nd", 0),
("KIRAT RAI DIGIT SIX", "Nd", 0),
("KIRAT RAI DIGIT SEVEN", "Nd", 0),
("KIRAT RAI DIGIT EIGHT", "Nd", 0),
("KIRAT RAI DIGIT NINE", "Nd", 0),
)
|
https://github.com/polarkac/MTG-Stories | https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/022%20-%20Commander%20(2015%20Edition)/001_All%20the%20Cairns%20of%20Jund.typ | typst | #import "@local/mtgstory:0.2.0": conf
#show: doc => conf(
"All the Cairns of Jund",
set_name: "Commander (2015 Edition)",
story_date: datetime(day: 04, month: 11, year: 2015),
author: "<NAME>",
doc
)
#emph[Far from Zendikar lies the fivefold world of Alara. Ages ago, Alara was divided into five separate worlds, each of which evolved its own customs, life forms, and forms of magic. On the dragon-worshiping shard-plane of Jund, shamans and warriors braved the tar pits and jungles, trying to stay alive amidst an endless cycle of predation. Necromancy was unknown on Jund, being the sole province of the hellish shard of Grixis.]
#emph[With the coming of the Conflux and the reunion of the shards, however, all that changed. Death magic came to Jund . . . with a vengeance.]
#figure(image("001_All the Cairns of Jund/01.png", width: 100%), caption: [], supplement: none, numbering: none)
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
The acrid smell of sulfur burned at her nostrils. Underfoot, sharp, dry grass bristled against her well-worn boots as a hot blast of wind swept across the valley. #emph[Savage. Instinct. Fire] . The words flickered across her mind as she scanned the horizon, taking in the untamed vistas that made up Jund. The wild pulse of the land thundered through everything here. It rumbled beneath the hooves of massive beasts with tough hides and sharpened tusks. It echoed in the roaring of distant dragons. It reverberated in the rumbling of volcanic peaks threatening eruption—and on more than one occasion in her memory, they'd made good on that threat.
#figure(image("001_All the Cairns of Jund/02.jpg", width: 100%), caption: [Savage Lands | Art by <NAME>aks], supplement: none, numbering: none)
Another word attempted to creep across her thoughts, slithering up from hidden recesses of memory. It caught in her throat and hooked at her heart, and she tore it from her mind, casting it aside, a fleeting shadow of a once-bright flame.
#emph[Home.]
She stepped forward, leaving the word behind, a discarded bone picked clean by scavengers of darker thoughts.
This was not home. Not anymore.
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
#emph[It's so . . . green.]
#emph[She looked at the vial before her, its contents pulsating with a strange phosphorescent glow. The wizened old shaman holding it gazed back at her, wispy gray hairs dancing wildly in the wind.]
#emph["We of the Circle of Nel Toth anoint you, Meren, child of stone and blood and bone, with the Dreamfire Draught." The old shaman's hand trembled ever so slightly as she held the vial out. Young fingers met old ones creased and spotted with age as she took the vial. Her gaze shifted from the potion to the shamans encircling her. She scanned the familiar faces of the Circle, finding her master, Kael, sitting tall and proud, watching closely.]
#emph[She stood a little taller to match.]
#emph["Drink, and prepare yourself."]
#emph[Doubt nibbled at her thoughts. Her whole life had led toward this moment, to this trial that would prove her worthy of a seat as a full-fledged shaman. Yet she had always struggled, always chased the shadows of her peers as they seemingly sprinted past her in their training. Spells that came so easily to the other young shamans baffled her. They seemed to wield life magic intuitively, while she struggled just to hear and find the pulse, much less channel and shape its form.]
#emph[Focus.]
#emph[The word reverberated in her thoughts, and she instinctively looked toward Kael again. The tall shaman wore a neutral expression, but his eyes locked with hers, glimmering with pride and hope. Kael believed in her strength, despite her struggles.]
#emph[You are not weak. You are Meren, shaman apprentice of the Nel Toth. You are a channeler of living magic, a wielder of blood and bone. You are on the dawn of your twelfth year of life, and you have a long way to go—but I have seen you fight to survive. Focus, Meren. Flourish. Triumph.]
#emph[She took a deep breath in. She would be one of the apprentices who completed the trial, or she would die trying.]
#emph[She tried not to think about the numerous other apprentice shamans who did, in fact, die trying.]
#emph[No turning back now.]
#emph[She tilted the vial against her lips and drained it of its contents.]
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
It was done.
It had taken her all morning. The work was slow, the magic required still new to her—a freshly crafted obsidian blade, its weight still unsure in the warrior's hand.
This weapon would be far more deadly than a sword, however.
She had been slow and methodical in her work. And though she was still inexperienced, the magic she shaped flowed naturally through her, almost subconscious in its rhythms. She was no longer the struggling child, grasping at power that slipped awkwardly out of her hands. The spell she cast, though new to her, felt heavy with age and power, and she knew as she neared its completion that she was successful. Around her, the sludge of the bog flowed away in rivulets, pushed by the sheer force of mana gathered around her. Swamp water and dreck gave way as her weapon took shape, re-formed and extracted from the eons that clung to its once-mighty form.
She admired its beauty, its ancient perfection. All throughout Jund, she knew great forces like this lay dormant, waiting for rediscovery. She gazed upon its shape: a sleek instrument of death, returned to its former glory—no, made #emph[greater] —by her hand. Almost certainly, it had a name once—lost to the bygone years. She would give it meaning anew.
"#emph[<NAME>] ."
Her voice rumbled with a gravelly weight that surprised her. It didn't occur to her until just then how long it had been since she last spoke words out loud.
"You are #emph[<NAME>] . Unearthed and bound to me."
She spoke again, her voice gaining strength, her hands shaping into intuitive gestures, binding the weapon to her magic and her will, one single word echoing in the chambers of her mind:
#emph[Vengeance.]
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
#emph[Everything inside her burned.]
#emph[Instantly she understood how the Dreamfire Draught got its name. Delirium and nausea hit her, a solid wall of dizziness plowing into her consciousness and threatening to knock her off her feet. Her insides felt like a master shaman had lit her up with banefire—inexplicable, searing pain racked her body. The vial dropped to the ground, but she did not even notice.]
#emph[Poison.]
#emph[The word leapt to her mind, and she knew it to be true. It was clear—without intervention, the Dreamfire Draught would kill her. She tried to breathe through the pain, tried to wrest control of her muscles as they spasmed against the concoction coursing through her veins.]
#emph[At the same time, she heard the hum of voices rising and falling, their tones overlapping in a pulsating rhythm. The Circle was chanting, she realized, weaving a spell in tandem.]
#emph[Then she looked forward.]
#emph[Before her, coalescing from the earth, a massive elemental reared up. Snaking vines wound their way around boulders snapping into place, forming strange and thick limbs. She peered up at the monstrosity. The being towered easily to fifteen feet, its enormous frame casting her into shadow. With a crackle, the large stone that comprised its head split, revealing a jagged mouth. Flickers of static bolts danced across its rock-fangs, leaping above to alight into two eyes.]
#emph[The elemental reared back, its aurochs-sized forearms stretched wide, and its gaping mouth yawned open, letting loose a primal roar. It smashed its forelimbs down, sending shockwaves rumbling through the ground as its lightning eyes gazed down at her—small, frail, insignificant.]
#figure(image("001_All the Cairns of Jund/03.jpg", width: 100%), caption: [Rockslide Elemental | Art by <NAME>], supplement: none, numbering: none)
#emph["Commune with the spirit, child!" The elder shaman shouted above the now howling wind, her voice cutting surprisingly clearly across the noise. "Bind it to your will."]
#emph[Right. So this was her test. Commune. Connect. Bind. Command.]
#emph[She turned her full attention to the elemental, and slowly raised her hands, a practiced chant already tumbling from her lips. The Dreamfire Draught burned hot in her veins, and she realized it wasn't just a poison—the potion augmented her senses, even adding a few new ones. She could see—just a little—the elemental energies that extended beyond the creature's physical body. She could feel the pulse of mana feeding its existence.]
#emph[All it cost her was her life force.]
#emph[Such was the shamanism of Nel Toth—blood and flesh sacrifice as raw materials for savage power. She knew she needed to bind this elemental, and quickly. With the power of the elemental, she might be able to purge the poison before she was consumed from the inside out. Or before she was consumed from the outside in by the unbound, wild elemental itself.]
#emph[She was beginning to understand why the ascension rate of new shamans was so low.]
#emph[She reached a tendril of her energy out toward the elemental. First contact. A greeting. She gritted her teeth, holding steady despite the burning in her veins, despite the stinging of the winds, despite the loud drone of the Circle's continued chant.]
Hello.
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
She spotted the Nel Toth hunters long before they saw her. Two warriors and a shaman, picking their way through the brush. She watched as they made their way across the terrain, coming closer to her hidden perch. The earth-tone garb. The familiar shape of their weapons. A tingle ran down her spine. #emph[It's been so long.] It was all familiar yet foreign, things once intimate made strange by intervening years. It seemed incredible, almost unreal, seeing them now, here, in living flesh. The tribe yet survived.
That was about to change.
The first warrior was dead before he knew what happened. Green fire erupted from his chest, burning fierce and hot. Within seconds, his entire body ignited, collapsing in a heap before a scream could escape his lips. The second warrior let out a yell, leaping back and drawing his blade, eyes desperately searching for an enemy. Instead, he found a bone dagger piercing his chest. She leaned into the blade and against his back as she pressed it further through his sternum.
#figure(image("001_All the Cairns of Jund/04.jpg", width: 100%), caption: [Jund Battlemage | Art by <NAME>], supplement: none, numbering: none)
A torrent of fire roared her way, and she pivoted, throwing the warrior in the way to take the brunt of the attack. As the fire abated, she pulled her knife back and pushed the remains of the warrior forward, another scorched corpse hitting the ground. She looked up as the remaining shaman charged, letting loose a scream of rage. The shaman raised her staff as she ran, summoning two thornling elementals—quick, vicious, sharp.
Meren watched the shaman's approach, taking a step back while wiping the blood from her bone blade with a finger. She quickly drew a glyph on the wrist holding the knife, finishing just as the thornlings leapt at her. With a quick flick of her hands, the two elementals burst aflame with green fire then disappeared into fine ash, blowing away on the wind.
The charging shaman cried out in anguish but didn't slow, drawing back her staff, fire sparking around the teeth and bone jangling wildly on the end—when a charred hand reached up and grasped her ankle. She fell hard, staff flying out of her hands, hitting the ground with a crunch, her wrist snapping under the sudden impact.
The shaman looked back in horror at the immolated, blank eyes of the dead warrior staring lifelessly at her, glowing the same putrid green as the fires that consumed her elementals. The warrior's other arm reached out and grasped the shaman's leg, the bone of fingers poking through where flesh had melted away, digging in and piercing skin, eliciting a horrified scream.
Through all of this, Meren stepped closer, watching the panicked, struggling shaman with cold contempt. Another sickening crunch rang out as she stomped down hard on the shaman's injured hand. The shaman howled in pain, writhing to free herself, until the bone blade flicked against her jugular.
"Where are they?"
The shaman looked up into cold grey eyes, blank of emotion. "Abomination!" she hissed. "What foul magic do you—"
The shaman's words turned to a gurgle as blood welled up, bubbling out of her mouth and pouring out of the gash where the bone knife had neatly cut through her throat.
She stood, again wiping the blood from her bone blade with a finger. With a gesture, she commanded the dead warrior to release the shaman's ankles and stand. With the bloody finger, she drew another glyph, this one far more complex, on the back of her dagger hand. As her fingers finished tracing the pattern, a green glow permeated the shaman's corpse. The shaman rose from the ground and stood haltingly. Blank eyes, now tinged green, stared at Meren.
"Where are they," she asked again. "Where are the new convening caves of the Circle."
The shaman made a sound that seemed like an attempt to speak, but all that escaped was a low hiss of air. Blood slowly bubbled from its slit throat.
"Point," she commanded.
The shaman stared back for a moment. Then, slowly, slowly, its hand raised, and an earth- and blood-caked finger gestured east.
"Lead." The shaman slowly began to walk.
"Wait." The shaman stopped.
Meren gestured to the staff lying on the ground. "Retrieve your weapon."
The shaman bent and struggled to pick up the staff with its one good hand.
She watched for a moment, amused, then gazed back at the last warrior. The green fire had consumed most of the flesh, but some sinew and skin still clung stubbornly to the bones. She shrugged. #emph[Waste not.]
She reached a hand out and called to the magic in her veins. The experience from crafting of the weapon that morning, along with the blood bonds she cast now, made animating the skeleton easy in comparison. She wouldn't even need any physical viscera for this spell.
#figure(image("001_All the Cairns of Jund/05.jpg", width: 100%), caption: [Terminate | Art by <NAME>], supplement: none, numbering: none)
The skeleton rose as the shaman finally succeeded in getting a good grasp on its staff. With a gesture, the party again marched, the shaman in the lead, flanked by the two warriors. Meren followed a few paces back, sheathing her knife.
#emph[Vengeance begins.]
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
#emph[This is the end].
#emph[She spat blood from her mouth and ran a hand against her lips, breathing heavily. One eye was swollen shut and an arm dangled, useless and broken. Her vision was mostly gone, everything reduced to blurry patches. Her body was beyond pain, her insides felt like a melted jelly slush, and she suspected the only reason she still held onto consciousness was due to the very thing that was killing her from the inside.]
#emph[Things hadn't gone so well with the elemental binding.]
#emph[To put it mildly.]
#emph[Her greeting, her tendril of mana to initiate contact, did not have the effect she expected. Sure, she had never been good at the summoning or binding of elementals. But she had never had an elemental react with the rage that this one had.]
#emph[As soon as she made contact, the elemental recoiled with a roar, then swatted her away with a vicious swing of its limb—which happened to be an aurochs-sized boulder. The speed of the assault caught her off guard, and she was sent sprawling, the arm she threw up in defense instantly broken. The only good news was the new pain had momentarily dulled the constant burn of the poison coursing through her veins.]
#emph[She had struggled to her feet, winded and coughing, gazing frantically to the Circle for any clue or guidance. The open looks of shock on many faces was no comfort. She looked to Kael. His face was as stoic as ever, but his clenched fists gave him away. A loud bellow drew her focus back on the elemental as it primed itself to charge. She tried to breathe, to center herself, and a new sharp pain told her a few ribs were probably also cracked, if not broken.]
#emph[Don't panic. Focus. If a willing bond cannot be negotiated, a forcible chain must be forged.]
#emph[It didn't matter that she had never succeeded in binding an elemental against its will before, much less one of such size. She had to succeed. She will succeed. She would not perish here.]
#emph[Nearly thirty minutes later, it seemed like she was wrong. She would perish today. It was only force of will and a generous dose of luck that kept her alive. The few binding rituals she knew didn't come close to having the power needed to chain the wild elemental, even with her abilities augmented by the Dreamfire Draught. Now, she cowered within a dome of stone she had raised around her as the elemental raged and smashed from the outside. Chips of rock rained down on her with each reverberating blow, and she knew her shelter would not last long. Even if it did . . . another twinge in her gut reminded her of the other clock she fought against. It looked like death by wrathful elemental would get her before death by willingly imbibed poison.]
#emph[Yet, more horrible than the threat of death chipping ever closer was her dismal performance before the Circle. She not only failed to bind the elemental, all of her shamanic spells and attempts at the binding were sloppy and weak. Even with the Dreamfire Draught beating through her veins, she still struggled to hear the pulse of Jund, the heartbeat of life magic that she was supposed to wield. This stone shield was her most successful spell, and only because she used her own spilled blood to augment its power.]
You are not weak.
#emph[Despair and anger churned in the pit of her stomach. Kael was wrong, apparently.]
#emph[Another crashing blow, and a beam of daylight pierced into the dome. The end was nigh.]
#emph[But it was not yet here.]
#emph[She breathed, pushing air into her battered lungs, ignoring the protests of her fractured rib cage. She breathed, centering on the wild beating of her own heart as entry into a deeper pulse, the pulse of life, of fire, of Jund. She breathed, and time seemed to slow as she strained to listen, to grab hold of the elusive rhythm, to tap into the pulse of magic for one final attempt at . . . something. Anything. She listened, straining, seeking power, seeking the surge.]
#emph[A rush, then silence. A rush, then silence.]
#emph[She frowned, her brow wrinkling. She had always struggled to catch that rush, the surge in life force that all shamans tapped for their power. She had always fumbled, trying to synchronize her words and gestures with the pulse, its timing foreign and ungraspable to her. But now, as she listened, as she felt her own life draining away, the roaring gaps of silence between the rushes called to her. The lack of sound, the infinite void, pulled her in. She never noticed that space, the quiet ever-present darkness, quite as she did now. Its expanse seemed wider, she realized, more ever-present than the rush—it took up all the space that was not the pulse, that was not life.]
#emph[A distant boom and the crackle of breaking stone reminded her that time still flowed outside herself. The otherworldly feel of hot Jund air and sunlight against her skin told her that time was up. She had to do something, now.]
#emph[She reached into the darkness, and pulled.]
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
#emph[The Circle was certain they were witnessing another failed shaman trial—and a spectacular one at that. The young girl somehow provoked the elemental she was charged with binding, which was in and of itself quite unusual. The trial was not supposed to be one of combat, but rather a race against time. Instead, this trial somehow devolved into a young girl trying to fight a fully enraged avalanche elemental.]
#emph[When the girl forged a stone shelter using her own blood as conduit to enhance her spell, a few sat forward, intrigued. A few murmurs of her tenacity and resourcefulness rippled amongst the Circle.]
#emph[No one was prepared for what happened next.]
#emph[As the elemental tore away a large chunk of the stone dome, a flash of green beamed out from within. A second later, a pillar of emerald flames erupted, spouting high into the air. The massive column of fire flickered wildly—but there was no blast of heat, no fiery burn, no roar, no pulse. The Circle looked on in horror at the unnatural fire. The avalanche elemental stumbled back, also watching the flames.]
#emph[Then it turned and attempted to flee.]
#emph[The flames flared down and the silhouette of a girl emerged, hidden behind sheets of green fire. The Circle saw hands raise—and fire danced out in a jet at the elemental. Some shaman would claim later they saw the flames flicker into the shape of skulls as it barreled through the air, catching the avalanche elemental in its tracks.]
#figure(image("001_All the Cairns of Jund/06.jpg", width: 100%), caption: [Meren of <NAME> | Art by <NAME>], supplement: none, numbering: none)
#emph[A tortured shriek rang out as the fires enveloped the elemental. It writhed, bathed in jade light, withering away. Within seconds, it was done. Boulders and stone crashed into a heap of rubble. The fire had devoured the vines, lightning, mana—anything living. Nothing remained but a heap of broken rocks.]
#emph[All eyes pivoted to the girl, just in time to see her knees buckle as she collapsed.]
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
#emph[Muffled voices echoed loudly in her head as swirls of senseless colors floated by her. She swam desperately upstream against exhaustion and delirium, fighting back toward consciousness. She had tried to channel a fire spell like Kael taught her, only pulling from the void instead of the pulse. She had no idea if she was successful, but she was still having thoughts, which meant she was alive, which must have meant something good happened.]
Focus. FOCUS. Open your eyes.
#emph[Slowly, slowly, her body acquiesced. Her eyelids fluttered open, heavy and lethargic.]
#emph[The scene that greeted her was . . . confusing.]
#emph[Two shamans of the Circle stood over her, eyes hard and staring, their staves pointed at her. Behind them, she could see the blurry outlines of others engaged in heated argument. Their words still sounded muffled, a garbled mash of syllables. She willed her brain to separate out the words, shaking her head to try and clear some of the haze of the Dreamfire Draught.]
#figure(image("001_All the Cairns of Jund/07.jpg", width: 100%), caption: [Corrupted Roots | Art by <NAME>], supplement: none, numbering: none)
#emph["Do not move."]
#emph[She looked at the end of the staff, confused. The shaman holding the weapon glared at her, and she could see mistrust in his eyes—and also a glimmer of fear.]
#emph[The arguments ceased, and she felt the weighty gaze of the Circle upon her.]
#emph["I say we kill her now."]
#emph[The words from an unseen voice rang out with clarity, piercing the haze in her brain.]
#emph["That was . . . I don't know what that was. Certainly not life magic."]
#emph["Her power is undeniable, though," another voice countered.]
#emph["But she destroyed an elemental!" A third voice. "Not banished. Not defeated. Annihilated."]
#emph["Did you see the way it rejected her initial binding?"]
#emph["It knew."]
#emph["She's unstable."]
#emph["Dangerous."]
#emph[The second voice spoke again, more urgent. "But powerful. Have you ever seen anything like it? She could become a great shaman—"]
#emph["She is no shaman."]
#emph[That last voice. She knew that voice.]
#emph[Kael.]
#emph["We bind life. We preserve balance. That fire was not of shamanic nature."]
#emph[She turned to look for him, but the shaman standing guard jutted his staff into her throat, stopping her movement.]
#emph["She is an abomination. She'll never be a shaman."]
#emph[All had fallen silent save the shrill whistle of the wind and Kael's booming voice. She saw shamans move aside as he stepped forward through the people, into her view. His eyes stared into hers, unmoving, unyielding, cold. Droplets of moisture hit her hands, and she realized she was crying.]
#emph["We must kill her now."]
#emph[Murmurs of agreement rippled through the members of the Circle.]
#emph[Kael raised his staff. She lowered her eyes. Even when she thought she found success, she failed. And here she was, betrayed by the one who believed in her, who thought her worthy and strong, even when she did not believe it herself.]
#emph["You cannot kill her."]
#emph[All heads of the circle turned to look at the unexpected speaker. The elder shaman, the one who administered the Dreamfire Draught, leaned on her staff, hair fluttering wild as ever.]
#emph["The trial is still not over."]
#emph["She killed her elemental. It is over," Kael countered.]
#emph["The trial ends only when the young one purges the Dreamfire Draught, or when she succumbs to the eternal dream." The elder shaman's words were simple, but they reverberated with the magical weight of ancient ritual.]
#emph["She is an anomaly. Dangerous," Kael pressed on.]
#emph["We will not let her break our traditions and our oaths. You will not sully your hands." The elder held Kael's gaze until he looked away.]
#emph["Besides, she is not long for the living."]
#emph[The Circle again turned to look at her, and she stared into the stony ground beneath her, hating her rattling, uneven breath for betraying her and confirming the elder's words.]
#emph[The elder spread her arms wide, addressing the whole Circle. "Let her conclude the trial alone. May the draught consume her mind and dragon whelplings consume her flesh, much as her fires consumed the elemental."]
#emph[The elder held her staff high in the air. Slowly, one by one, each member of the Circle joined her gesture in consent.]
#emph[Soon, Kael's staff was the only one that remained by his side.]
#emph[The elder shaman nodded in acknowledgement of Kael's dissenting vote, then turned to leave. One by one, the shamans of the Circle followed, until only Kael remained with her.]
#emph[She again looked up at him, waiting for him to crack a smile like he did, what seemed eons ago, when he first chose her, first spotted the flare of magical potential in her. He looked back, unmoving.]
#emph["Die, Meren. Die quiet and alone."]
#emph[Kael turned and left.]
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
She did not die.
It would have been easy. Sprawled on that flat rock bed, poison coursing through her veins, burning up the last of her life. Discarded, broken and alone, abandoned by everything she knew.
But something in her had awoken. She had found power. She had found strength.
She had found #emph[focus].
In the silent space between. In the dark, hushed void. She had found it, a force as natural as breathing. Like the other shamans, for whom casting with the pulse was easy, she had finally found her true calling, in the midst of her trial.
And they had consigned her to death for it.
No. She did not die. She did not cower and break. She refused.
On that day, she did not need to struggle to fit their rules anymore. Kael was wrong in the end. She #emph[was] a shaman.
She was just a shaman the Nel Toth had never seen before.
She did not die, for on that day, she wielded death itself. As a weapon, manifest as flame. Then, as a source of sustenance to her own life, reshaping the inky fabric between heartbeats. On that day, she closed her eyes, returning again to the place of infinite silence, and drew on the cool darkness, calling upon her newfound well of power to quench the Dreamfire burning in her veins.
She had dragged her body, broken and beaten, to shelter, and began the slow process of healing. When the cold grasp of death clawed at her, she did not panic or fight as a life shaman would. She caressed Death's hand with her own, familiarizing herself with Death's embrace. When passing beasts threatened, she called upon Death like an old friend, taking down game she only dreamed of hunting in her former life, all with mere thoughts and flicks of her hands.
And when her physical wounds healed such that she could walk, she left.
Left the rocky highlands, the fiery volcano peaks. Walked through the dense brush of the lowlands. Trudged through the marshy swamplands, until she had left it all behind.
She continued through the bog, following the call of the darkness. In it, she found focus. In it, she found purpose. #emph[Grow strong. Embrace power. Seek vengeance.]
Jund was too noisy. The ever-present pulse, the beat she worked so hard to hear before, was now a loud throbbing ache, ceaseless in its cacophony, interrupting the sweet silent darkness. She sought a quiet place, a shadowy corner of the world—a place empty of the living.
Her march was slow. It took her years of searching, but eventually, whispers of strange patches of deadlands began to reach her—places where entropic death gales replaced hot Jundian winds and where no beasts could survive. Places where corpses somehow walked again, climbing out of tar pits and crawling from the depths of swamps. The first time she found such a place—a squat little patch of swamp where the ground itself was somehow more like decaying flesh than natural soil—she knew she had found her anchor. There, in that alien blemish intruding on her world, was the familiar darkness, a brief respite of silence among the clamorous din of Jund. Her spells flourished there, and she sought out more places like that one, larger patches of dead earth, places where demons replaced dragons as lords of the realm.
#figure(image("001_All the Cairns of Jund/08.jpg", width: 100%), caption: [Grixis Panorama | Art by <NAME>], supplement: none, numbering: none)
In her journeys, she encountered others like herself, mages who wielded the darkness, who called themselves by another title—#emph[necromancer]—and the land itself by a strange name—#emph[Grixis]. She learned their skills and defeated their undead, for she had something they lacked—understanding of the living. While they simply animated the corpses of the dead as soldiers and weapons, she learned to revitalize the deceased—to Borrow the individuals from death itself.
And now, years later, she had returned. Now, she marched back from the grasps of Grixis, back through Jund with singular purpose. Now, three of her Borrowed Ones had led her to the current dwelling of Clan <NAME>'s shamans.
Now, she brought death to those who had tried to deliver it unto her.
#emph[Vengeance is almost too easy] , she mused as she strode through the camp. The two Borrowed warriors had dispatched the young shaman standing watch, preventing the raising of any alarms, while her Borrowed shaman summoned its own elemental thornlings to assault the Circle. The result was a bloodbath. Nel Toth shamans scrambled in surprise, attacked by their dead friends and vicious elementals they assumed were allies. With each fallen shaman, her ranks grew, as did the chaos and panic. It was all she could do to keep pace with raising the bodies.
In the madness, she hunted for one face in particular. She wanted to make sure he didn't die at the anonymous hands of a Borrowed. She wanted to see his face, to witness his fear, to make him regret.
She needn't have worried.
Three massive torrents of fire swept across her growing ranks, melting flesh and bone alike. She shielded her eyes, at the same time enshrouding herself in protective shadow, and willed the Borrowed to stand again.
#figure(image("001_All the Cairns of Jund/09.jpg", width: 100%), caption: [Skeletonize | Art by <NAME>ski], supplement: none, numbering: none)
As they rose, however, the fires persisted, melting away sinew and reducing bone into an inanimate charred mass.
She grinned. Only a master shaman of Nel Toth could sustain such a blaze.
Indeed, stepping through the flame, two elder shamans wielding staves heavy with dragon fangs and talons of thrinaxes strode forth. They flanked a tall figure, stoic and harsh. Streaks of gray now ran through his hair, and his face held more lines than she remembered, but beyond that, Kael looked unchanged.
"Your rampage ends here, death mage!" Kael roared. "Clan Nel Toth will not fall before such Grixis scum! Your kind has crawled here before, only to end in blood and ash."
"I'm disappointed you don't recognize me, Kael." She lowered her shadowy defense, revealing her face. She watched as his brows furrowed. She reveled in the slow dawning of recognition, savored the flitting twinges of shock behind his eyes.
Kael raised his hands to cast, but she was faster. Two screaming skulls made of green fire burst from her palms, smashing into the two elders on either side of Kael, immolating them in pyres of flame. She grinned as she watched them burn—a grin that transformed to surprise as the two shamans seemed to ignore the fire, walking then breaking into a run toward her. She focused on the duo, intensifying the fire still burning their flesh, but they remained unfazed, charging forward, their forms shifting from humans into beasts. Suddenly, she found herself facing down a massive bear and a long-fanged thrinax, both alight with flames that should have killed them on contact.
The bear swung a massive paw at her while the thrinax tried to clamp onto her legs. She ducked out of the way of the first assault, but powerful jaws managed to ensnare her calf, sharp teeth tearing into her flesh, and she cried out in pain.
#figure(image("001_All the Cairns of Jund/10.jpg", width: 100%), caption: [Scarland Thrinax | Art by Daarken], supplement: none, numbering: none)
Before the thrinax could pull back, she reached down and grasped its tusk with both hands. Black corruption spread across its flesh as the beast tore itself free from her leg. The combination of fire and corruptive ichor finally brought it crashing down.
She whipped around just as the bear-shaman swung back around with a vicious paw, mauling her across the shoulder. She gritted her teeth, turning and burying it under sustained blasts of emerald fire. Finally, it collapsed in a smoking heap at her feet.
The crackle of lightning and boom of stone told her she had taken too long in dispatching the two elders. A quick glance at Kael confirmed her fears—three massive avalanche elementals were coalescing around him, lightning eyes turning toward her, vine and stone limbs grasping, ready to crush. Storm clouds gathered overhead, charged by the raw energies of the hulking elementals. Hastily, she reawakened the two shamans before her, their forms barely serviceable after the spells she had just wrought against them.
She looked up at Kael, fists ready with emerald fire, and hesitated. Kael looked back at her, but rather than the fury or resolve she expected to see, his gaze looked pensive and sad. The air crackled with the static charge from the elementals, but Kael did not send them to attack.
"You should have died, Meren."
"You should have killed me, Kael," she snarled. "Like you wanted."
"You're right. I should have. So you would not have become this."
She laughed, hollow, empty of any mirth. "You told me I would be a wielder of blood and bone. Now I am that and so much more."
Kael shook his head. "Not like this. You are no channeler of life. You are merely death's tool, its puppet."
"You're wrong. Death bends to me."
Kael sighed. At one time, it might have sounded paternal to her. Now it just rang of patronizing condescension.
"If you insist. Return to the earth, <NAME>."
Kael pointed, and the three massive elementals charged at her, bellowing roars filling the air and trembling steps sending shockwaves through the earth.
She smiled, looking at the two mostly destroyed corpses standing at her defense. The smile grew into a laugh, manic and shrill, as the elementals barreled ever closer.
With a sudden gesture, she tore the sparks of life from the two Borrowed. She thrust her hands in the air, sending the sparks skyward.
"Strike, #emph[Skaal Kesh.]"
#figure(image("001_All the Cairns of Jund/11.jpg", width: 100%), caption: [Art by <NAME>], supplement: none, numbering: none)
She felt rather than saw her perfect weapon descend from the sky. A massive shadowy form struck with impossible speed, leaving in its wake a trail of clouds, blasting out a rush of wind from where it landed. The three elementals fell apart mid-charge, their anchors to this plane broken as her instrument of vengeance struck Kael. She ducked as giant boulders came hurtling and bouncing past her, dodging out of the way as they kicked up shards of stone and clouds of dust.
As the rocks settled, she looked back to where Kael had stood. An enormous, shadowy dragon crouched in his place, holding the broken form of Kael impaled on its claws. Its eyes glowed green, and clouds of vile smoke billowed from its maw. Its muscles rippled as it shifted its weight, and its long tail swept across the stone, sending little flinty sparks into the air. #emph[Skaal Kesh]. Scourgeclaw.
To her surprise, she found that Kael was still conscious, despite the dragon talons piercing his abdomen and holding him aloft. He looked down at her as she approached.
"You tamed . . . a dragon . . . impossible," he mumbled.
"I #emph[reforged] a dragon," she corrected.
A wheezing sound escaped from Kael. She guessed it was supposed to be a laugh.
"I was right after all. You #emph[are] strong, Meren."
She stared at him, her face a stony mask.
"Devour, #emph[Skaal Kesh."]
The gruesome crunch of teeth against flesh and bone echoed across the steppes. Blood poured down in great bursts as she watched the dragon consume the last remnants of her past in two great bites. Vengeance served.
But she wasn't satisfied.
She turned, gazing at the horizon, the untamed vistas of Jund. Faintly, she could hear the wild pulse of the land, a pulse she once sought to tame. Now, she wanted to destroy it.
Her vengeance would continue. She would silence the pounding rhythm of this land. She would show it the beautiful darkness, the serenity of death.
She was Meren, last of <NAME>.
And she would not stop until all of the cairns of Jund were toppled.
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
#figure(image("001_All the Cairns of Jund/12.png", width: 100%), caption: [], supplement: none, numbering: none)
|
|
https://github.com/Myriad-Dreamin/tinymist | https://raw.githubusercontent.com/Myriad-Dreamin/tinymist/main/crates/tinymist-query/src/fixtures/semantic_tokens/fn.typ | typst | Apache License 2.0 | #let f(x, y) = { x += y } |
https://github.com/Myriad-Dreamin/typst.ts | https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/math/alignment_02.typ | typst | Apache License 2.0 |
#import "/contrib/templates/std-tests/preset.typ": *
#show: test-page
// Test no alignment.
$
"right" \
"a very long line" \
"left" \
$
|
https://github.com/fenjalien/metro | https://raw.githubusercontent.com/fenjalien/metro/main/tests/unit/inter-unit-product/test.typ | typst | Apache License 2.0 | #import "/src/lib.typ": unit
// Shrink the page to fit the rendered output.
#set page(width: auto, height: auto)
// Render the units with the default inter-unit product.
#unit("farad squared lumen candela")
// Render the same units joined by a centered dot (sym.dot.c) instead.
#unit("farad squared lumen candela", inter-unit-product: sym.dot.c)
https://github.com/Enter-tainer/typstyle | https://raw.githubusercontent.com/Enter-tainer/typstyle/master/README.md | markdown | Apache License 2.0 | # typstyle
[![Packaging status](https://repology.org/badge/vertical-allrepos/typstyle.svg)](https://repology.org/project/typstyle/versions)
## Usage
### Use as a CLI
#### Installation
1. Download the binary from the [release page](https://github.com/Enter-tainer/typstyle/releases/)
2. Install it from your package manager: <https://repology.org/project/typstyle/versions>
3. Install using [`cargo-binstall`](https://github.com/cargo-bins/cargo-binstall): `cargo binstall typstyle`
4. Install it using cargo: `cargo install typstyle --locked`
Usage:
```txt
Beautiful and reliable typst code formatter
Usage: typstyle.exe [OPTIONS] [INPUT]... [COMMAND]
Commands:
format-all Format all files in-place in the given directory
help Print this message or the help of the given subcommand(s)
Arguments:
[INPUT]... Path to the input files, if not provided, read from stdin. If multiple files are provided, they will be processed in order
Options:
-c, --column <COLUMN> The column width of the output [default: 80]
-a, --ast Print the AST of the input file
-p, --pretty-doc Print the pretty document
-i, --inplace Format the file in place
--check Run in 'check' mode. Exits with 0 if input is formatted correctly. Exits with 1 if formatting is required
-h, --help Print help
-V, --version Print version
```
Typical usage:
- Inplace format a file:
```sh
typstyle -i file.typ
```
- Format a file and print the result to stdout:
```sh
typstyle file.typ
```
- Inplace format file list:
```sh
typstyle -i file1.typ file2.typ file3.typ
```
- Format all files in a directory. If the directory is not provided, it will recursively format all files in the current directory:
```sh
typstyle format-all dir
```
- Read from stdin and print the result to stdout:
```sh
cat file.typ | typstyle > file-formatted.typ
```
### Use in your editor
typstyle has been integrated into [tinymist](https://github.com/Myriad-Dreamin/tinymist). You can use it in your editor by installing the tinymist plugin and set `tinymist.formatterMode` to `typstyle`.
### Use as a web app
There is an online version of the formatter at <https://enter-tainer.github.io/typstyle/> that you can see how it formats your code.
### Use with [pre-commit](https://github.com/pre-commit/pre-commit)
Add this to your `.pre-commit-config.yaml`:
```yaml
- repo: https://github.com/Enter-tainer/typstyle
    rev: '' # The revision or tag you want to use
hooks:
- id: typstyle
```
## Escape Hatch
If you find typstyle is not working as expected, you can use `// @typstyle off` or `/* @typstyle off */` to disable the formatter on the next node of code.
typstyle also gives up formatting **part** of the code if it is not able to format it correctly. Specifically, it will print that part as is if:
- contains syntax error
- contains syntaxes that are not supported by the formatter
Please let us know about any problems by creating an issue on the [GitHub repository](https://github.com/Enter-tainer/typstyle).
## Design Goals
1. Opinionated: We want to have a consistent style across all codebases.
2. Code only: We want to format only the code. Contents should be left untouched as much as possible.
3. Convergence: Running the formatter twice should not change the code.
4. Correctness: The formatter should not change the looking of the rendered output.
## Test
```sh
cargo nextest run -E 'not test(~e2e)' --no-fail-fast
cargo insta review
```
We have set up multiple tests:
1. Convergence tests: format result must be the same when applied twice
2. Snapshot tests: format results are stored in the `snapshots` directory and are compared to the current result when running the tests
3. Correctness test: We compare the rendered output of the code before and after formatting and ensure they are the same
4. E2E Correctness test: We collect a bunch of typst code repos including tablex, cetz, fletcher... and format them to ensure (a) the format result converges and (b) the rendered output is the same.
## Use as a library
- npm: <https://www.npmjs.com/package/typstyle>
- rust: <https://crates.io/crates/typstyle>
## Known issues
You tell us! Bad formatting? Incorrect output? Please create an issue on the [GitHub repository](https://github.com/Enter-tainer/typstyle)!
We've set up comprehensive test suites to ensure the correctness of the formatter. If you find any issues, please let us know! And we can add more tests to prevent the issue from happening again.
## Why another formatter?
Why is there a need for another formatter? We already have [typstfmt](https://github.com/astrale-sharp/typstfmt), [typstfmt](https://github.com/jeffa5/typstfmt), [prettypst](https://github.com/antonWetzel/prettypst). Why another one?
typstyle started because I had a bunch of ideas on how to improve typst source code formatting but kept finding typstfmt wasn't a good codebase to explore them with. Namely:
- I wanted to use Wadler's pretty printer to get consistent and beautiful output for any width. (Note that it is the same technique used in the prettier formatter)
- I didn't have much energy to maintain a bunch combination of configuration options. It turns out to be very hard to make everything correct. So I decided to make it opinionated.
- I wanted to experiment with more testing techniques and make sure the formatter is correct.
So I decided to write something from scratch. I started it about half a year ago and kept working on it in my spare time. Currently it lacks some advanced features but it is already usable for most of the cases. I hope you like it!
|
https://github.com/AHaliq/CategoryTheoryReport | https://raw.githubusercontent.com/AHaliq/CategoryTheoryReport/main/chapters/chapter5/notes.typ | typst | #import "../../preamble/lemmas.typ": *
#import "../../preamble/catt.typ": *
#import "@preview/fletcher:0.5.1" as fletcher: diagram, node, edge
#definition(name: "Exponentials")[@sa[Definition 6.1]
$
"isExponential"(C^B, epsilon) =&
forall A, (arr(f,A times B, C)). exists! (arr(tilde(f),A,C^B)). epsilon comp (tilde(f) times 1_B) = dash(tilde(f))=f
$
]
#definition(name: "Cartesian Closed Category (CCC)")[@sa[Definition 6.2]
$
"isCCC"(Ob(""), Hom("")) =&
"isCategory"(Ob(""), Hom("")) \
&and forall A,B. exists! A times B. "UMP"_"product" (arr(p_1,A times B, A), arr(p_2,A times B, B)) \
&and forall B,C. "isExponential"(C^B, epsilon)
$
]
todo:
categorical logic
- Heyting Algebra $~$ Intuitionistic Propositional Calculus
- CCC $~$ $lambda$-calculus
- Kripke models of logic; variable sets
- theory: a set of basic types and terms and equations between them (generators; recall the section on coequalizer) |
|
https://github.com/lucifer1004/leetcode.typ | https://raw.githubusercontent.com/lucifer1004/leetcode.typ/main/problems/p0006.typ | typst | #import "../helpers.typ": *
#import "../solutions/s0006.typ": *
= Zigzag Conversion
The string "PAYPALISHIRING" is written in a zigzag pattern on a given number of rows like this:
```
P A H N
A P L S I I G
Y I R
```
And then read line by line: "PAHNAPLSIIGYIR"
Write the code that will take a string and make this conversion given a number of rows.
// Placeholder for the reader's own solution to "Zigzag Conversion".
// Should return the string obtained by writing `s` in a zigzag over
// `numRows` rows and reading the rows left to right; the reference
// implementation is imported above from `solutions/s0006.typ` and is
// compared against this one by `#testcases` below.
#let zigzag-conversion(s, numRows) = {
  // Solve the problem here
}
#testcases(
zigzag-conversion,
zigzag-conversion-ref, (
(s: "PAYPALISHIRING", numRows: 3),
(s: "PAYPALISHIRING", numRows: 4),
(s: "A", numRows: 1),
)
)
|
|
https://github.com/loqusion/typix | https://raw.githubusercontent.com/loqusion/typix/main/checks/overlapping-virtual-paths/main.typ | typst | MIT License | #lorem(100)
#image("icons/link.svg")
|
https://github.com/ivaquero/book-control | https://raw.githubusercontent.com/ivaquero/book-control/main/08-奈奎斯特稳定性.typ | typst | #import "@local/scibook:0.1.0": *
#show: doc => conf(
title: "Nyquist 稳定性",
author: ("ivaquero"),
header-cap: "现代控制理论",
footer-cap: "github@ivaquero",
outline-on: false,
doc,
)
= Nyquist 稳定性
== 定义
对如下系统
#figure(
image("images/block/sensor.drawio.png", width: 40%),
caption: "传感器",
supplement: "图",
)
- 开环传递函数:$G(s)H(s)$
- 闭环传递函数:$G(s)/(1+G(s)H(s))$
令
- $G(s) = N_G(s) / D_G(s)$
- $H(s) = N_H(s) / D_H(s)$
可得
$
G(s)H(s) = frac(N_G N_H, D_G D_H) \
1 + G(s)H(s) = frac(D_G D_H + N_G N_H, D_G D_H)\
frac(G(s), 1 + G(s)H(s)) = frac(N_G D_H, D_G D_H + N_G N_H)
$
不难得到
- 开环传递函数的极点 = 媒介函数的极点
- 闭环传递函数的极点 = 媒介函数的零点
若有映射$F(s) = 1 + G(s)H(s)$,将平面$A$中的闭合曲线,映射到平面$B$中,则对新的闭合曲线逆时针绕原点的圈数$N$有
$ N = P - Z $
其中
- $P$为 Nyquist 闭合区内,$F(s)$的极点(开环传递函数的极点)个数
- $Z$为 Nyquist 闭合区内,$F(s)$的零点(闭环传递函数的极点)个数
> Nyquist 闭合区:复平面的右半平面
变换映射函数为$F(s) - 1 = G(s)H(s)$,闭合曲线$B$将整体左移,中心点变为$(-1, 0)$,绘制出的图形称为 Nyquist Plot。
#theorem("Nyquist 稳定性")[
若系统稳定,则其闭环传递函数在 Nyquist 闭合区没有极点,即
$ P = N $
]
#tip[
现实生活中,传递函数均为真分数,即分母≥分子。
]
== 裕度分析
由于$G(j ω)$和$G(-j ω)$共轭,其模相等,角度互为相反数,故其映射关于实轴对称。此时,分析只需绘制正虚轴部分。
=== 幅值裕度
幅值裕度(gain magin)表示开环增益$K$在系统变得不稳定前,还能增加的比例。
=== 相位裕度
相位裕度(phase magin)表示相位角在系统变得不稳定前,还能延迟的比例。
|
|
https://github.com/Wh4rp/Typst-PUC | https://raw.githubusercontent.com/Wh4rp/Typst-PUC/master/README.md | markdown | # Template para ayudantías PUC
Este repositorio contiene un template para ayudantías de la PUC en [Typst](https://typst.app/).
## ¿Cómo usarlo?
El modulo de template está en la carpeta `modules`. Copia dicha carpeta en tu proyecto y crea tu archivo typst más o menos así:
```typst
#import "modules/template.typ": *
#show: doc => project(
curso: (
sigla: "IIC2213",
nombre: "Lógica para ciencia de la computación",
departamento: "Departamento de Ciencia de la Computación",
),
autor: (
nombre: "Fulano",
apellido: "<NAME>",
email: "<EMAIL>",
),
numero_de_ayudantia: 4,
fecha: "25 de septiembre de 2021",
body: doc
)
#problem[
#lorem(50)
+ #lorem(10)
+ #lorem(10)
#solution[
#lorem(20)
+ #[
#lorem(4)
$ integral_0^x^2 = 1/3 x^3 $
]
+ #[
#lorem(10)
$ sum_(i=1)^n i = 1/2 n (n+1) $
]
]
]
```
![Example](./example.png)
## Módulos utilizados
- [typst-theorems](https://github.com/sahasatvik/typst-theorems)
|
|
https://github.com/tedaco1/typst-example | https://raw.githubusercontent.com/tedaco1/typst-example/main/README.md | markdown | MIT License | # typst-example
Typst example for a presentation |
https://github.com/dismint/docmint | https://raw.githubusercontent.com/dismint/docmint/main/template.typ | typst | // | 🙑 dismint
// | YW5uaWUgPDM=
// FUNCTIONS //
// Titled, colored callout box: a tinted header bar (bold title on the
// left, small-caps category label on the right) stacked above a bordered
// content area. `color` is a hex string without the leading `#`.
#let colorbox(
  title: "Title",
  color: "95b8d1",
  type: "DEFAULT",
  body
) = {
  // Header bar with the accent fill and a heavy black rule on the left.
  let header = block(
    width: 100%,
    fill: rgb(color),
    stroke: (left: (black + 0.2em), right: (rgb(color) + 0.2em)),
    inset: (left: 1em, top: 0.6em, bottom: 0.6em, right: 1em),
  )[*#title* #h(1fr) #smallcaps(text(size: 0.7em)[#type])]
  // Content area: accent-colored border on the right/bottom, rounded
  // bottom corners so the box visually closes.
  let content = block(
    radius: (bottom: 0.3em),
    width: 100%,
    stroke: (left: (black + 0.2em), right: (rgb(color) + 0.2em), bottom: (rgb(color) + 0.2em)),
    inset: (left: 1em, top: 0.8em, bottom: 0.8em, right: 1em),
  )[#body]
  // Wrap in a box so header and content never split across pages oddly.
  box(stack(header, content))
}
// Preset of `colorbox` for step-by-step walkthroughs (lavender accent).
#let walkthrough(
  title: "Walkthrough",
  body
) = colorbox(title: title, color: "d1cfe2", type: "WALKTHROUGH")[#body]
// Preset of `colorbox` for definitions (green accent).
#let define(
  title: "Definition",
  body
) = colorbox(title: title, color: "b8e0d2", type: "DEFINITION")[#body]
// Preset of `colorbox` for side notes (blue accent).
#let note(
  title: "Note",
  body
) = colorbox(title: title, color: "a7c7e7", type: "NOTE")[#body]
// Two equal-width columns (47% each with a 6% gutter), top-aligned.
#let twocol(
  body_l,
  body_r
) = grid(
  columns: (47%, 47%),
  column-gutter: 6%,
  body_l,
  body_r,
)
// Like `twocol`, but both cells are vertically centered relative to
// each other (`horizon` alignment).
#let twocola(
  body_l,
  body_r
) = grid(
  columns: (47%, 47%),
  column-gutter: 6%,
  align(horizon)[#body_l],
  align(horizon)[#body_r],
)
// Display the given math content as a block equation inside a
// rectangular frame.
#let boxed(
  body
) = {
  rect()[$ #body $]
}
// Centered image with a thick rounded border; `width` is relative to
// the enclosing block.
#let bimg(
  path,
  width: 50%
) = align(
  center,
  rect(image(path, width: width), stroke: 0.2em, radius: 0.2em),
)
// TEMPLATE //
// Page template for notes and problem sets.
//
// Parameters:
//   title:    document title, shown in the title banner and the `pset`
//             running header.
//   subtitle: grey subtitle rendered under the title.
//   author:   name shown on the right of the running header in `pset`
//             mode. (Previously hard-coded in the header; the default
//             preserves the old output, so existing callers are
//             unaffected.)
//   pset:     problem-set mode — wider margins and a running header on
//             every page after the first.
//   toc:      whether to render a table of contents plus the dividing
//             line under it.
//   body:     the document content.
#let template(
  title: "Notes",
  subtitle: "Class",
  author: "<NAME>",
  pset: false,
  toc: true,
  body
) = {
  // SHOWS //
  // Code blocks: two grids overlaid via a -100% column gutter — the
  // first renders right-aligned line numbers with the code text hidden
  // (so both layers have identical line heights), the second renders
  // the highlighted code on a grey rounded block.
  let fsize = 0.9em
  show raw.where(block: true): it => { set par(justify: false); grid(
    columns: (100%, 100%),
    column-gutter: -100%, // overlap the two layers exactly
    par(
      leading: 0.585em,
      block(width: 100%, inset: 1.0em, for (i, line) in it.text.split("\n").enumerate() {
        box(width: 0em, align(right, text(font: "Cascadia Mono", str(i + 1), size: fsize) + h(1.5em)))
        hide(text(size: fsize)[#line])
        linebreak()
      }),
    ),
    par(
      block(radius: 1em, fill: luma(246), width: 100%, inset: 1em, text(size: fsize)[#it])),
  )}

  // Inline code: light grey pill background.
  show raw.where(block: false): it => {
    box(fill: rgb("#EEEEEE"), outset: (y: 0.3em, x: 0.1em), radius: 0.2em, it)
  }

  // links
  show link: underline

  // SETS //
  set page(
    paper: "a4",
    margin: (
      x: if pset { 10% } else { 7% },
      y: if pset { 10% } else { 5% },
    ),
    numbering: "1 / 1",
  )
  // only set the header on the second page onward (problem sets only)
  set page(header: locate(loc => {
    if counter(page).at(loc).first() > 1 and pset [
      *#title*
      #h(1fr)
      #author
      #box(line(length: 100%, stroke: 0.1em))
    ]
  }))
  set par(
    justify: true,
  )

  // TITLE: headline with a thick rule filling the rest of the line,
  // subtitle in grey below.
  par(leading: 1em)[
    #box(align(left, text(size: 2.5em)[*#title*]))
    #box(width: 1fr, line(start: (0.3em, -0.70em), length: 100%, stroke: 1em))
    #linebreak()
    // subtitle
    #box(align(left, text(size: 1.5em, fill: rgb("808080"))[*#subtitle*]))
  ]

  // TABLE OF CONTENTS //
  if toc {
    // Bold, extra-spaced top-level entries.
    show outline.entry.where(
      level: 1
    ): it => {
      v(1em, weak: true)
      strong(it)
    }
    outline(
      indent: 1em,
      title: text(size: 1.2em)[Contents]
    )
  }

  // DIVIDING LINE //
  v(1em)
  if toc {
    line(start: (0em, -0.70em), length: 100%, stroke: 0.25em)
    v(-0.5em)
  }

  body
}
|
|
https://github.com/jgm/typst-hs | https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/show-text-04.typ | typst | Other | // This is a fun one.
#set par(justify: true)
#show regex("\S"): letter => box(stroke: 1pt, inset: 2pt, upper(letter))
#lorem(5)
|
https://github.com/glocq/typst-forthright-cv | https://raw.githubusercontent.com/glocq/typst-forthright-cv/master/src/template.typ | typst | MIT License | #import "settings.typ": *
/*****************************************
* Helper functions; to be hidden *
* once hiding becomes possible in Typst *
*****************************************/
// Layout two texts side by side,
// aligned respectively left and right.
// Make sure that the right content is not too long,
// otherwise lines will wrap in a weird way.
// Optional arguments:
// alignment: set to `horizon` to have both pieces of content
// centered vertically relative to each other
// gap: separate both pieces of content by a given distance
#let sideBySide(contentLeft, contentRight, alignment: auto, gap: 0pt) = table(
  // Left cell stretches; right cell shrinks to its content, so keep the
  // right content short to avoid awkward wrapping.
  columns: (1fr, auto),
  inset: 0pt, // spacing is controlled solely by `gap`
  gutter: gap,
  align: alignment,
  stroke: none, // comment this to see layout boxes!
  align(left, contentLeft),
  align(right, contentRight),
)
/********************
* Individual items *
********************/
// Headline with the candidate's first and last name (styles come from
// settings.typ), followed by a weak vertical gap of `nameSpace`.
#let name(first, last) = [
  #firstNameStyle(first)
  #lastNameStyle(last)
  #v(nameSpace, weak: true)
]
// Short tagline/summary shown under the name, followed by a weak
// vertical gap of `descriptionSpace`.
#let description(content) = [
  #descriptionStyle(content)
  #v(descriptionSpace, weak: true)
]
// This function takes both named and unnamed arguments.
// Named arguments appear first; if their name matches one of the predefined ones,
// It will get formatted accordingly (with an icon on the left, and/or a link).
// Otherwise, the argument will just appear as is.
// Unnamed arguments will appear after all named arguments, so if you want
// to enforce a specific order, you should name all your arguments,
// even if some names are arbitrary.
// Render a single row of contact details separated by 12pt gaps.
// Recognized named keys (email/github/gitlab/homepage/linkedin/
// location/phone) get a link and/or icon prefix; unrecognized named
// arguments and all positional arguments are rendered verbatim.
// NOTE(review): several content blocks below appear to begin with a
// glyph from an icon font (possibly invisible in some editors) — keep
// those blocks byte-for-byte when editing.
#let contactDetails(..arguments) = {
  let namedArgs = arguments.named()
  let unnamedArgs = arguments.pos()
  // Total count is used to suppress the separator after the last item.
  let numArgs = namedArgs.len() + unnamedArgs.len()
  let counter = 0
  for (key, value) in namedArgs {
    if (key == "email") {
      detailsStyle([ #h(5pt) #link("mailto:" + value)[#value]])
    } else if (key == "github") {
      detailsStyle([ #h(5pt) #link("https://github.com/" + value)[#value]])
    } else if (key == "gitlab") {
      detailsStyle([ #h(5pt) #link("https://gitlab.com/" + value)[#value]])
    } else if (key == "homepage") {
      // `value` is given without a scheme; https:// is prepended here.
      detailsStyle([ #h(5pt) #link("https://" + value)[#value]])
    } else if (key == "linkedin") {
      detailsStyle([#link("https://www.linkedin.com/in/" + value)[#value]])
    } else if (key == "location") {
      detailsStyle([ #h(5pt) #value])
    } else if (key == "phone") {
      detailsStyle([ #h(5pt) #value])
    } else {
      // Unknown key: render the value as-is, without icon or link.
      detailsStyle(value)
    }
    // Add some separating space, unless this was the last item
    counter = counter + 1
    if (counter != numArgs) {
      h(12pt)
    }
  }
  // Positional arguments always come after all named arguments.
  for value in unnamedArgs {
    detailsStyle(value)
    // Add some separating space, unless this was the last item
    counter = counter + 1
    if (counter != numArgs) {
      h(12pt)
    }
  }
  v(detailSpace)
}
// Section heading: styled title followed by a horizontal rule filling
// the remaining line width, then a weak gap before the first entry.
#let sectionTitle(title) = {
  sectionStyle(title)
  h(4pt)
  box(width: 1fr, line(stroke: 0.9pt, length: 100%))
  v(interEntry, weak: true)
}
// One CV entry: heading row(s) followed by a description paragraph.
// With `short: true` the title, organisation and location share a
// single line; otherwise they are laid out on two stacked lines.
#let cvEntry(
  title,
  organisation,
  location,
  date,
  description,
  short: false // set to true for a slightly more visually compact result
) = {
  if short {
    // Compact heading: everything on one line, date flushed right.
    sideBySide(
      titleStyle(title + ", ")
        + organisationStyle(organisation + ", ")
        + locationStyle(location),
      dateStyle(date),
    )
  } else {
    // Two-line heading: title/date, then organisation/location.
    sideBySide(titleStyle(title), dateStyle(date))
    v(interItem, weak: true)
    sideBySide(organisationStyle(organisation), locationStyle(location))
  }
  v(interItem, weak: true)
  bodyStyle(description)
  v(interEntry, weak: true)
}
// Minimal entry (no organisation/location/date): just a styled title
// and a description paragraph, with the usual entry spacing.
#let miscEntry(title, description) = {
  titleStyle(title)
  v(interItem, weak: true)
  bodyStyle(description)
  v(interEntry, weak: true)
}
/******************
* General layout *
******************/
// `layout` takes the following arguments:
// header, phot, body, sidebar.
// `body` is mandatory, the rest are optional.
// If provided, `photo` should be a string, the path
// to the photo. An empty string will be interpreted
// as the absence of a photo.
// Assemble the full CV page from named arguments: `body` (mandatory),
// `sidebar`, `header`, and `photo` (a path string; "" means no photo).
// Fixes over the previous version:
//   * the no-sidebar branch called `arguments.at("body", "")`, passing
//     the default positionally — `at`'s default is a *named* parameter
//     in Typst, so that call errors out; the bound `body` is used now.
//   * the photo-only header branch rebuilt the image with a hard-coded
//     2.8cm height instead of reusing `photo` (built with `imageSize`).
#let layout(..args) = {
  // Bind the sections once; absence of an optional section is `none`.
  let arguments = args.named()
  let body = arguments.at("body", default: none)
  let sidebar = arguments.at("sidebar", default: none)
  let header = arguments.at("header", default: none)
  // An empty path string is interpreted as the absence of a photo.
  let photo = if (arguments.at("photo", default: "") != "") {
    image(arguments.at("photo"), height: imageSize)
  } else {
    none
  }

  // Text appearance
  show link: set text(linkColor)
  set strong(delta: strongDelta)
  set par(leading: interLine)
  // We remove any automatic spacing to provide our own:
  show par: set block(above: 0pt, below: 0pt)

  // Layout setup
  set align(left)
  set page(
    paper: "a4",
    margin: (
      left: pageMargin,
      right: pageMargin,
      top: pageMargin,
      bottom: pageMargin,
    ),
  )

  // Main column, optionally paired with a fixed-width sidebar.
  let bodyAndSidebar = if (sidebar != none) {
    table(
      columns: (auto, sidebarSize),
      inset: 0pt, // We remove any automatic spacing to provide our own
      stroke: none, // comment this to see layout boxes!
      gutter: sidebarSpace,
      body,
      sidebar
    )
  } else {
    body
  }

  // Header section: header text and/or photo, in all four combinations.
  let headerSection = none
  if (header != none) and (photo != none) {
    headerSection = sideBySide(
      header,
      photo,
      alignment: horizon,
      gap: photoSpace
    )
  } else if (header != none) {
    headerSection = header
  } else if (photo != none) {
    headerSection = align(center, photo)
  }

  if (headerSection != none) {
    table(
      columns: (auto),
      inset: 0pt, // We remove any automatic spacing to provide our own
      stroke: none, // comment this to see layout boxes!
      gutter: headerSpace,
      headerSection,
      bodyAndSidebar
    )
  } else {
    bodyAndSidebar
  }
}
https://github.com/EunTilofy/Compiler2024 | https://raw.githubusercontent.com/EunTilofy/Compiler2024/main/lab2/Report_of_Lab2.typ | typst | #import "../template.typ": *
#show: project.with(
course: "编译原理",
title: "Compilers Principals - Lab2",
date: "2024.4.21",
authors: "<NAME>, 3210106357",
has_cover: false
)
= 实验内容
本次实验,我们基于 lab1 的语法分析,
实现了代码的语法树的构建,
并基于语法树,构建出了符号表,实现了更加复杂的语义分析功能,包括类型检查,数组初始化检查。
通过
```
make compiler
./compiler <input file>
```
可以对输入的 sy 文件进行语法和语义的检查,
如果可以正确解析出语法树并且通过类型检查和数组检查,程序将正常退出并返回 0。同时在错误流中输出程序的语法树,并且显示:
```
Parse success!
```
否则,程序将汇报错误,一个错误的代码的解析输出如下:
```
DEBUG: type error at src/semantic.hpp:219
DEBUG: type error at src/semantic.hpp:105
DEBUG: type error at src/semantic.hpp:39
```
报错信息表示语义分析错误在源程序中的位置,
在这里,我们并未实现面向用户的报错信息,仅用于个人调试。
= 代码实现
== 主接口
main.cc 在lab1的基础上,增加了语法树的输出和语义分析。
```cpp
Root->print(0);
Checker checker;
if(!checker.check(Root))
{
std::cerr << "Failed in semantic analysis : " << argv[1] << std::endl;
return 1;
}
std::cerr << "\nParse success !" << std::endl;
```
== 类型检查的依据:class Type
为了更方便的对比函数,表达式,变量的类型,我们用一个“类型类”来封装一个对象类型的所有信息。
比如对于一个函数,它应当包括的信息有:返回类型,参数类型。
对于一个数组变量,应当包括:数组类型,每维的宽度等等。
class Type 对类型信息进行了很好的封装,并且添加了基本的比较算子,使其可以用常用的 STL 容器进行存储,使之后的处理更加方便。
```cpp
class Type // to recognize variables and functions
{
public:
bool isfunc; // whether the object is a function or not.
string type;
vector<Type> args; // if it is a function, it will have params
deque<int> wid; // width for array
...
};
```
== 类型检查
类型检查基于语法树实现,在语法树上通过深度优先搜索的方式,
对所有节点进行检查。
=== 符号表的实现
我们按照 dfs 的顺序对所有被定义的 ident(函数,单变量,数组)标号,
对于每一个 ident 字符串,维护一个栈作为其符号表,栈中维护的 ident 的标号。为了方便查找,我们用 map 存储符号表。
```cpp
map<string, stack<int>> get_var; // get the position of the variable in the stack
```
同时,为了方便查询变量所绑定的类型,我们按照 ident 的标号,存储其类型。
```cpp
map<int, Type> get_type;
```
=== check 函数
```cpp
int check(Node* o, int L = 0);
```
用于检查语法树上的一个节点。
基本的思路为,首先递归扫描所有子节点,判断其是否合法。
然后根据当前节点的类型分别进行特殊的判断。
```cpp
for(auto &x : o->child)
if(!check(x, L)) { DEBUG("type error"); return 0;}
```
对于函数、变量的定义,要同时维护符号表。在 dfs 的同时还需要
维护当前的作用域(可以直接维护当前作用域内变量的标号的最小值),
离开当前作用域的时候,需要对所有符号表的栈,弹出当前作用域的所有变量。
```cpp
for(auto &[_, t] : get_var)
if(t.size() && t.top() > L && !get_type[t.top()].isfunc) t.pop();
```
对于作用域还有一个特殊的需要考虑的问题,就是函数的参数也应当属于当前
函数的作用域,因此,函数中节点中的 block 不应当更新作用域的范围。
因此对于 block,需要额外传入一个变量来表示是否使用最新的作用域。
```cpp
int check_Block(Node* o, int L, bool modify = 1)
{
if(modify) L = num_var;
// ...
}
```
对于类型检查,我们以函数调用为例:
需要判断传入的参数类型是否与函数参数本身相同。
```cpp
for(int i = 0; i < args->child.size(); ++i)
{
if(!check(args->child[i], L)) { DEBUG("type error"); return 0;}
Type arg_type = args->child[i]->exp_type;
if (arg_type != type.args[i]) { DEBUG("type error"); return 0; }
}
```
== 数组范围检查
这里数组范围检查主要指的是在初始化阶段的检查,
对于程序执行过程中的数组越界,属于段错误(不在编译阶段处理)。
数组初始化相关样例见:/test/lab2/arr_defn2.sy、arr_defn3.sy、array_init_error2.sy 等。
比较复杂的情况是初始化数组的格式正确,但是超过了原定数组的大小,
我们需要处理初始化阶段的数组元素补齐操作(将部分位置设置为0)。
具体的,考虑大括号的层数可以用语法树上 initVal 的层数来表示,
所以,我们在对 initVal 类型的节点的check时维护一下当前的的大括号的层数,对于层数为1,也就是最外层的值,它有可能是一个单值,
也有可能是一个内部的大括号,我们根据它的最大层数,
作为这个元素的贡献。
该处理方法并不完全匹配 sy 语法,但是可以处理绝大多数的数组范围检查。
```cpp
if(layer >= 0)
{
num[layer] = 1;
if(layer == 0)
{
int mx = 0;
for(int i = 1; i < MAXLAYER; ++i) if(num[i]) mx = i, num[i] = 0;
calc_sum += widths[mx];
}
}
```
= 测试结果
```
python3 test.py ./compiler lab2
```
tests 下的测试样例全部通过:
#figure(
image("1.png", width: 50%),
caption: [
All tests passed!
],
) |
|
https://github.com/yhtq/Notes | https://raw.githubusercontent.com/yhtq/Notes/main/抽象代数/作业/hw8.typ | typst | #import "../../template.typ": *
// Take a look at the file `template.typ` in the file panel
// to customize this template and discover how it works.
#show: note.with(
title: "作业8",
author: "YHTQ ",
date: none,
logo: none,
withOutlined: false
)
= P36
== 14
设 $a^n = 0, b^m = 0$ ,只需验证:
- $r a$ 幂零,$forall r in R$:
$
(r a)^n = r^n a^n = 0
$
- $a + b$ 幂零:
$
(a+b)^(n+m+1) = sum_(i=0)^(n+m+1) vec(n+m+1, i) a^i b^(n+m+1-i) = 0
$
== 15
只证右分配律。注意到:
$
x in (I + J)K <=> x = sum_i (a_i + b_i)k_i <=> x = sum_i a_i k_i + sum_i b_i k_i <=> x in I K + J K
$
== 16
考虑商环 $quotient(R, I)$,取自然同态 $pi: R -> quotient(R, I)$,并设 $I'$ 是 $quotient(R, I)$ 的幂零根,则有:
$
"rad"I = Inv(pi)(I')
$
由于同态 $R -> quotient(R, I) -> quotient((quotient(R, I)), I')$ 的核恰为 $"rad"I$,故 $"rad"I$ 是理想
== 18
#set enum(numbering: "(1.")
+ $a = a^n => phi(a) = phi(a^n) = (phi(a))^n$ 成立
+ 考虑映射:
$
funcDef(phi, ZZ_12, ZZ_3, a mod 12, a mod 3)
$
容易验证它是将单位元映成单位元的满同态,但 $2$ 是 $ZZ_12$ 中的零因子($2 dot 6 = 6 dot 2 = 0$),它的像 $2$ 不是零因子,故结论不成立。
+ 首先同态把交换环映成交换环成立:
$
phi(b) phi(a) = phi(b a) = phi(a b) = phi(a) phi(b)
$
但考虑 $ZZ -> ZZ_12$ 的自然同态,显然是满的,但前者是整环,后者有零因子
+ (2)中已经给出反例
+ $phi(a Inv(a)) = phi(a) phi(Inv(a)) = 1$,成立
+ 考虑自然同态 $ZZ -> ZZ_3$,后者是域而前者可逆元只有 $plus.minus 1$,故不成立
== 21
若 $r_i in I sect R_i$,显有 $sum_i r_i in I$\
而对 $I$ 中任何一个元素 $x$,设:
$
1 = sum_i r_i space, r_i in R_i
$
自然有:
$
x = sum_i x r_i
$
而 $x r_i in I, R_i => x r_i in I sect R_i$,这就完成了证明。
= 补充题
==
对于前者,由于 $1_6 -> 1_3$,故加法群已经给出所有元素的像,容易验证它是环同态。\
对于后者,同样考虑加法群,将有:
$
phi(0) = phi(1_3 + 1_3 + 1_3) = 3_6
$
矛盾!因此没有群同态。\
一般的,断言 $Z_m -> Z_n$ 存在同态当且仅当 $n | m$
- 先证明必要性,设 $d = gcd(m, n)$,$u m + v n = d$,则有:
$
phi(d) = phi(u m + v n) = phi(v n) = n phi(v) = 0
$
而 $d <= m$,从而这当且仅当 $d = m$,也即 $n | m$ 时成立。
- 再证明充分性,设 $n | m$,则有 $m = n k$,考虑映射:
$
funcDef(phi, ZZ_m, ZZ_n, a mod m, a mod n)
$
由于 $a_1 = a_2 mod m => a_1 = a_2 mod n$,因此它是良定义的。容易验证这是环同态。
==
容易发现两者都对加减乘封闭。另一方面:
$
(a + b d)(a- b d) = a^2 -b^2 d^2 in R \
=> (a + b d)(1/(a^2 -b^2 d^2 )(a- b d)) = 1
$
表明对逆元也封闭(注意到不存在有理数 $a, b$ 使得 $a^2 = 2 b^2$ 或 $a^2 = 3 b^2$)
假设 $phi$ 是之间的环同态,显然:
$
phi(n) = n phi(1) = n\
phi(sqrt(2))phi(sqrt(2)) = phi(2) = 2
$
但 $QQ[sqrt(3)]$ 中没有平方为 $2$ 的元素,矛盾!
==
由题意知 $ker(phi)$ 是平凡理想,且 $ker(phi) != R$ 否则 $R'$ 将成为零环,因此 $ker(phi) = {0}$,$phi$ 是单同态
==
域当然是单环,只证充分性:
设 $a in R$ 不可逆,考虑:
$
(a) = a R
$
将成为非平凡理想,矛盾!
==
任取 $y in phi(I sect J), x in I sect J and phi(x) = y$,显有:
$
x in I => y = phi(x) in phi(I)\
x in J => y = phi(x) in phi(J)\
$
进而 $phi(I sect J) subset phi(I) sect phi(J)$。
考虑 $ZZ -> ZZ_6$ 的自然同态,取 $I = 7ZZ, J = 5ZZ$,将有:
$
I sect J = {0} => phi(I sect J) = {0}\
phi(I) = phi(J) = ZZ_6 => phi(I) sect phi(J) = ZZ_6
$
==
$Z(R)$ 是子环:显然$0, 1 in Z(R)$,且:
$
a, b in Z(R) => r a = a r, r b = b r => r(a + b) = (a + b)r => a + b in Z(R)\
a in Z(R) => r a = a r => r(-a) = (-a) r => -a in Z(R)\
a, b in Z(R) => r a = a r, r b = b r => r(a b) = (a b)r => a b in Z(R)
$
一个子环成为理想当且仅当它是原来的环,因此除非环是交换环否则它不是理想
==
$
phi(f + g) = (f+g)(sqrt(2)) = f(sqrt(2)) + g(sqrt(2)) = phi(f) + phi(g)\
phi(f g) = (f g)(sqrt(2)) = f(sqrt(2)) g(sqrt(2)) = phi(f) phi(g)\
phi(1) = 1(sqrt(2)) = 1
$
从而这是同态。
断言:$ker(phi) = (x^2 -2)$。
事实上,$QQ[x]$ 上所有满足 $f(sqrt(2)) = 0$ 的多项式集合恰为 $(x^2 -2)$,因此设 $g in ZZ[x], g(sqrt(2)) = 0$,则有:
$
g = f_q (x^2 - 2) and f_q in QQ [x]\
$
设 $f_q$ 中所有系数的公分母为 $q$,则有:
$
q g = q f_q (x^2 - 2) and q f_q in ZZ[x]
$
==
设 $I_M$ 是 $M_n(R)$ 的某个理想。记 $I_(i, j)$ 表示 $I_M$ 中所有矩阵的 $i j$ 元出现元素之并,容易验证它们都是 $R$ 的理想。\
注意到,通过左右乘也可以任意调换行列,因此任何两个 $I(i, j)$ 都相等,记为 $I$。而\
$
(0, 0, ..., 0, epsilon_i, 0...,0)(a_(i j))(0, 0, ..., 0, epsilon_j^T, 0...,0)
$
也就是给矩阵左乘只有 $i i$ 元为 $1$,其他都为零的矩阵,再右乘只有 $j j$ 元为 $1$,其他都为零的矩阵,将得到只有 $i, j$ 元为 $a_(i j)$,其他元素都为零的矩阵,由理想定义这个矩阵将在 $I_M$ 中。\
故 $I epsilon_i epsilon_j^T subset I_M$,对任意 $i, j$ 都成立。\
进一步,由加法封闭性,显然 $I_M = M_n (I)$
==
注意到 $1/(a^2 + b^2) (a - b i)$ 是 $a + b i$ 在 $CC$ 中的逆元,它在 $ZZ[i]$ 中当且仅当 $a^2 +b 2 = 1$,也即可逆元仅有 $1, -1, i, -i$\
为了证明 $ZZ[i]$ 是主理想环,只需证明它是 Euclid 环即可。事实上,令 $N(a + b i) = a^2 + b^2$,对任意 $a, b in ZZ[i]$,取 $k = x + y i$ 满足:
$
|x - "Re"(a/b)| <= 1/2\
|y - "Im"(a/b)| <= 1/2
$
则有:
$
N(a - b(x+y i))\
= N(a/b - (x + y i))N(b)\
<= (1/4 + 1/4) N(b) = 1/2 N(b) < N(b)
$
从而满足 Euclid 环的定义,进而是主理想整环。
==
假设 $(f) = (2, x)$,则:
$
2 = f g, x = f h , space g, h in ZZ[x]
$
显然 $f != 1$,上式表明 $f = 2$,进而 $x = 2 h$,这是荒谬的,因此 $(2, x)$ 不是主理想。
==
由直和的运算定义,$I_i$ 显然是理想。而当 $n > 1$ 时,$1 in.not I_i$,因此不是子环。
==
(1):
$
(a, b)^2 = (a^2, b^2) = (a, b) => a^2 = a, b^2 = b
$
从而 $a, b$ 取值当然为 $0, 1, -1$
同理,分别考虑 $ZZ_4$ 和 $ZZ_6$ 中幂等元。
在 $ZZ_4$ 中:
$
0^2 = 0\
1^2 = 1\
2^2 = 0\
3^2 = 1
$
在 $ZZ_6$ 中:
$
0^2 = 0\
1^2 = 1\
2^2 = 4\
3^2 = 3\
4^2 = 4\
5^2 = 1
$
因此所有的幂等元就是 ${0,1} plus.circle {0, 1, 3, 4}$
(2)
$
(1-e)^2 = 1 - 2 e + e^2 = 1 - e
$
若 $e = 0, 1$,结论显然成立,下设 $e != 0, 1$
对任意 $x in R$,有:
$
x = x e + x (1-e)
$
同时:
$
x e = y (1 - e) => x e^2 = y(1-e)e = 0 => x e = y (1 - e) = 0
$
这就验证了确实是直和
= P89
== 1
显然 $I J subset I sect J$。\
任取 $x in I sect J$,由互素知:
$
exists i in I, j in J: i + j = 1\
=> x i + x j = x
$
而 $x i in I J, x j in I J$,故 $x in I J$,证毕。
== 3
由题意,取:
$
i in I, k in K : i + k = 1
$
从而对任意 $j in J$:
$
j = i j + k j in I J + K J subset K + K J subset K
$
表明结论成立
== 4
由 1 中结论,$I sect J = I J$,而当然有 $I sect J subset K$,证毕
= 补充题
==
- 第二同构定理,定义:
$
funcDef(phi, H, quotient(R, I), h , h + I)
$
容易验证它是同态,且:
$
ker(phi) = H sect I\
im(phi) = {h + I | h in H} = H + I
$
从而由 $ker, im$ 的结论及第一同构定理知所有结论成立
- 第三同构定理,定义:
$
funcDef(phi, quotient(R, I), quotient(R, J), r + I, r + J)
$
验证:
- 良定义性由 $I subset J$ 给出
- 同态显然,同时是满同态
- $ker(phi) = {r + I| r in J} = quotient(J, I)$,从而结论成立
- 对应定理:
+ 正面就是第三同构定理,而反面直接验证定义:
$
x, y in Inv(phi)(L) => phi(x), phi(y) in L => phi(x) + phi(y) in L => phi(x + y) in L \
=> x + y in Inv(phi)(L)\
x in R, y in Inv(phi)(L) => phi(x) in R', phi(y) in L => phi(x) phi(y) in L => phi(x y) in L \
=> x y in Inv(phi)(L)
$
+ $phi(Inv(phi)(L)) = L$ 由映射的性质是显然的,而 $Inv(phi)phi(J) = J$ 由加法交换群的对应定理直接成立
+ 上面命题已经给出证明
==
$
(x+y)^p = x^p + y^p\
(x y)^p = x^p y^p
$
从而 $x -> x^p$ 是环同态
==
- 取自然同态 $pi$,则:
$
0 = pi (sum_i a_i 10^i) = sum_i pi(a_i) pi(10)^i = sum_i a_i
$
- 类似有:
$
0 = pi (sum_i a_i 10^i) = sum_i pi(a_i) pi(10)^i = sum_i a_i (-1)^i
$
==
验证:
- $phi(f + g) = (f+g)(x^2, x^3) = f(x^2 + x^3) + g(x^2+x^3) = phi(f) + phi(g)$
- $phi(f g) = (f g)(x^2, x^3) = f(x^2 + x^3) g(x^2 + x^3) = phi(f) phi(g)$
- $phi(1) = 1$
显然 $phi(y^2 - x^3) = 0$,故 $(y^2 - x^3) subset ker(phi)$
另一方面,取自然同态 $pi: F[x, y] -> quotient(F[x, y], (y^2 - x^3))$ 设 $f = sum_i (a_i (y^2) + y b_i (y^2))x^i$,则有:
$
&f in ker(phi) \
<=>&f(x^2, x^3) = 0 \
<=>&sum_i (a_i (x^6) + x^3 b_i (x^6))x^(2i) = 0\
<=>&sum_i a_i (x^6) x^(2i) + x^3 sum_i b_i (x^6)x^(2i)= 0\
$
注意到前项中 $x$ 指数均为偶数,后者中均为奇数,因此:
$
0 = sum_i a_i (x^6) x^(2i) = sum_i b_i (x^6)x^(2i)
$
这表明:
$
pi(sum_i a_i (y^2 - x^3 + x^3) x^(i)) \
= sum_i a_i (pi(y^2 - x^3 + x^3)) (pi(x))^(i)\
= sum_i a_i (pi(x^3)) (pi(x))^(i)\
= pi(sum_i a_i (x^3) x^(i))\
$
另一方面,$sigma = [f | f(x) -> f(x^2)]$ 是 $F[x]$ 上的单自同态,前式已给出 $sigma(sum_i a_i (x^3) x^(i)) = 0 $,进而 $sum_i a_i (x^3) x^(i) = 0$,故:
$
sum_i a_i (y^2) x^(i) in ker(pi) = (y^2 - x^3)
$
同理 $sum_i b_i (y^2) x^(i) in ker(pi)$,因此 $f in (y^2 - x^3)$,证毕。
对于 $im(phi)$,注意到:
$
phi(x^i y^j) = x^(2i +3j) in im(phi), i, j >=0
$
容易验证这给出 $forall i > 1, x^i in im(phi)$\
同时,显然 $1 in im(phi)$,而 $x in.not im(phi)$ 也是显然的,故结论成立。
==
一方面,显然有 $phi(a_i) in phi(J)$\
另一方面,若 $phi(a_i) in J'$,则有 $a_i in Inv((J'))$,从而:
$
J subset Inv((J')) => phi(J) subset phi(Inv((J'))) = J'
$
有定义知 $phi(J)$ 由 $phi(a_i)$ 生成。
==
由对应定理:
$
quotient(B, phi(ker psi)) tilde.eq quotient(Inv(phi)(B), Inv(phi)(phi(ker psi)))) = quotient(A, ker phi + ker psi)
$
另一侧同理
==
由对应定理:
$
quotient(pi(R), pi((a))) tilde.eq quotient(R, Inv(pi)(pi((a))))
$
另一方面:
$
Inv(pi)(pi((a))) = (a) + (b) = (a, b)
$
故结论成立
==
考虑代入同态 $phi := [f in ZZ[x] | f -> f(i) in ZZ[i]]$,显然它是满同态,且若设 $f(x) = sum_k a_k x^(2k) + sum_k b_k x^(2k+1)$,则
$
&f(i) = 0\
<=>& sum_k a_k i^(2k) + sum_k b_k i^(2k+1) = 0\
<=>& sum_k a_k (-1)^k + sum_k b_k (-1)^k i = 0\
<=>& sum_k a_k (-1)^k = sum_k b_k (-1)^k = 0
$
取自然同态 $pi: ZZ[x] -> quotient(ZZ[x], (x^2 + 1))$,则:
$
&pi(sum_k a_k x^(2k))\
= &sum_k a_k pi((x^2 + 1 - 1)^k)\
= &sum_k a_k pi((- 1)^k)\
= &pi(sum_k a_k (- 1)^k)\
= &0\
$
因此 $ker(phi) subset (x^2 + 1)$,而 $(x^2 + 1) subset ker(phi)$ 是显然的,故 $ker(phi) = (x^2 + 1)$,从而由第一同构定理知结论成立。
==
首先,代入同态 $sigma := [f in RR[x]| f(x) -> f(x+1)]$ 给出 $RR[x]$ 上的一个自同构,因此由对应定理:
$
quotient(RR[x], (x^2 -2x +2)) tilde.eq quotient(RR[x], sigma(x^2 -2x +2)) = quotient(RR[x], (x^2 + 1))
$
另一方面,取代入同态 $phi: [f in RR[x] | f(x) -> f(i)]$,由于 $RR[x]$ 是主理想整环,$ker(phi)$ 中次数最低的非零多项式显然是 $x^2 + 1$,因此 $ker(phi) = (x^2 + 1)$,由第一同构定理知结论成立。
==
令 $pi: ZZ -> quotient(ZZ, a^2 + b^2)$ 是自然同态。\
按照如下规则构造 $phi: quotient(ZZ[i], a + b i) -> quotient(ZZ, a^2 + b^2)$:\
任取 $x + y i + (a + b i) in quotient(ZZ[i], a + b i)$,由 $a, b$ 互素知存在 $u, v$ 使得:
$
y = u a + v b\
$
从而选取 $x + y i + (a + b i)$ 中代表元:
$
x + y i - (v + u i)(a + b i) = x - a v + b u in x + y i + (a + b i)
$
令 $phi(x + y i + (a + b i)) = pi(x - a v + b u)$\
为了验证定义合理,需要以下事实:
- 每个 $quotient(ZZ[i], a + b i)$ 等价类中有且只有一个元素恰在 $quotient(ZZ, a^2 + b^2)$ 中
先说明唯一性,设 $x_1, x_2 in ZZ, x_1 + (a + b i) = x_2 + (a + b i)$,将有:
$
x_1 - x_2 = (u + v i)(a + b i) => a v + b u = 0, a u - b v = x_1 - x_2
$
但 $a v + b u = 0 <=> vec(u, v) = k vec(-a, b)$,进而:
$
x_1 - x_2 = a u - b v = k (a^2 + b^2) => pi(x_1) = pi(x_2)
$
这说明了唯一性,而存在性由上面的构造已经给出,这也说明了 $phi$ 是良定义的。
- $phi$ 是同态,注意到若 $x_1 , x_2 in ZZ$,则当然有:
$
(x_1 + (a + b i)) + (x_2 + (a + b i)) = (x_1 + x_2) + (a + b i)\
(x_1 + (a + b i)) (x_2 + (a + b i)) = x_1 x_2 + (a + b i)
$
且 $x_1 + x_2, x_1 x_2 in ZZ$,这表明:
$
phi((x_1 + (a + b i)) + (x_2 + (a + b i))) = pi(x_1 + x_2) = pi(x_1) + pi(x_2) \
= phi(x_1 + (a + b i)) + phi(x_2 + (a + b i))\
phi((x_1 + (a + b i)) (x_2 + (a + b i))) = pi(x_1 x_2) = pi(x_1) pi(x_2) \
= phi(x_1 + (a + b i)) phi(x_2 + (a + b i))\
$
从而确实是同态
//$
//u_1 a + v_1 b = y_1\
//u_2 a + v_2 b = y_2
//$
//则:
//$
//(u_1 + u_2) a + (v_1 + v_2) b = y_1 + y_2\
//(x_1 u_2 + x_2 u_1) a + (x_1 v_2 + x_2 v_1) b = x_1 y_2 + x_2 y_1
//$
//这表明:
//$
//phi(x_1 + x_2 + y_1 i +y_2 i + (a + b i)) = pi(x_1 + x_2 + (u_1 + u_2)b - (v_1 + v_2)a)\
//= pi(x_1 - v_1 a + u_1 b) + pi(x_2 - v_2 a + u_2 b) \ = phi(x_1 + y_1 i + (a + b i)) + phi(x_2 + y_2 i + (a + b i))
//$
//说明保持加法
//$
//phi((x_1 + y_1 i + (a + b i))(x_2 + y_2 i + (a + b i))) \
//= phi((x_1 x_2 - y_1 y_2) + (x_1 y_2 + x_2 y_1)i + (a + b i))\
//= pi((x_1 x_2 - y_1 y_2) + (x_1 u_2 + x_2 u_1)b - (x_1 v_2 + x_2 v_1)a)\
//= pi((x_1 x_2 - (u_1 a + v_1 b)(u_2 a + v_2 b)) + (x_1 u_2 + x_2 u_1)b - (x_1 v_2 + x_2 v_1)a)\
//= pi((x_1 x_2 - (u_1 u_2 a^2 + v_1 v_2 b^2 + (u_1 v_2 + u_2 v_1)a b)) + (x_1 u_2 + x_2 u_1)b - (x_1 v_2 + x_2 v_1)a)\
//= pi((x_1 x_2 + (u_1 u_2 + v_1 v_2)(a^2 + b^2) - (u_1 u_2 a^2 + v_1 v_2 b^2 + (u_1 v_2 + u_2 v_1)a b)) + (x_1 u_2 + x_2 u_1)b - (x_1 v_2 + x_2 v_1)a)\
//= pi((x_1 x_2 + (v_1 v_2 a^2 + u_1 u_2 b^2 - (u_1 v_2 + u_2 v_1)a b)) + (x_1 u_2 + x_2 u_1)b - (x_1 v_2 + x_2 v_1)a)\
//= pi((x_1 x_2 + (v_1 a - u_1 b)(v_2 a - u_2 b)) + (x_1 u_2 + x_2 u_1)b - (x_1 v_2 + x_2 v_1)a)\
//= pi(x_1 - (v_1 a - u_1 b)) pi(x_2 - (v_2 a - u_2 b)) \
//= phi(x_1 + y_1 i + (a + b i)) phi(x_2 + y_2 i + (a + b i))
//$
//说明保持乘法
- $phi$ 是满射:显然 $phi(n + (a + b i)) = pi(n), forall n in ZZ$,故当然是满射
- $phi$ 是单射,由于每个陪集中有且只有一个元素属于 $ZZ$,故:
$
phi(X) = 0
$ 当且仅当 $k(a^2 + b^2) = k(a+ b i)(a - b i) in X <=> X = 0 + (a + b i)$
综上,结论成立
==
先逆用中国剩余定理,将方程组拆解为:
$
cases(
5x = 3 mod 8,
x = 1 mod 3,
x = 1 mod 5,
3x = 1 mod 4,
3x = 3 mod 5
)
$
化简为:
$
cases(
5x = 3 mod 8,
x = 1 mod 3,
x = 1 mod 5,
x = 3 mod 4,
)
<=>
cases(
x = 7 mod 8,
x = 1 mod 3,
x = 1 mod 5,
)
$
而 $3 dot 5 dot 8 = 120$,$31$ 是一个特解,因此所有解为:
$
x = 31 mod 120
$
==
显然,$x^2 = 1$ 表明 $x$ 与 $600$ 互素。\
另一方面,由中国剩余定理,$x^2 = 1 mod 600$ 等价于:
$
cases(
x^2 = 1 mod 8,
x^2 = 1 mod 3,
x^2 = 1 mod 25,
)
$
枚举得:
$
cases(
x = 1\, 3\, 5\, 7 mod 8,
x = 1\, 2 mod 3,
x = 1\, 24 mod 25,
)
$
从而所有可能答案为:\
#let temp1(x, y, z) = calc.rem(x*225 + y *400 + z * (24 * 24), 600)
#let print_enum = {
let totalprod = 1
let process = ()
for x in (1, 3, 5, 7) {
for y in (1, 2) {
for z in (1, 24) {
[$(#x, #y, #z) : #temp1(x, y, z) mod 600 $\ ]
totalprod = calc.rem(totalprod * temp1(x, y, z), 600)
process = process + (temp1(x, y, z),)
}
}
}
[最终的乘积为:
$
&#(process.map(str).join(sym.times))\ = &#totalprod mod 600
$ ]
}
#print_enum
|
|
https://github.com/dashuai009/dashuai009.github.io | https://raw.githubusercontent.com/dashuai009/dashuai009.github.io/main/src/content/blog/045.typ | typst |
#let date = datetime(
year: 2023,
month: 10,
day: 11,
)
#metadata((
title: "gltf-transform使用记录",
subtitle: [gltf],
author: "dashuai009",
description: "",
pubDate: date.display(),
))<frontmatter>
#import "../__template/style.typ": conf
#show: conf
```text
-> ~ gltf-transform --help
node:internal/modules/cjs/loader:1075
const err = new Error(message);
^
Error: Cannot find module 'call-bind'
Require stack:
- /home/dashuai/.nvm/versions/node/v18.15.0/lib/node_modules/@gltf-transform/cli/node_modules/@ljharb/through/index.js
at Module._resolveFilename (node:internal/modules/cjs/loader:1075:15)
at Module._load (node:internal/modules/cjs/loader:920:27)
at Module.require (node:internal/modules/cjs/loader:1141:19)
at require (node:internal/modules/cjs/helpers:110:18)
at Object.<anonymous> (/home/dashuai/.nvm/versions/node/v18.15.0/lib/node_modules/@gltf-transform/cli/node_modules/@ljharb/through/index.js:4:16)
at Module._compile (node:internal/modules/cjs/loader:1254:14)
at Module._extensions..js (node:internal/modules/cjs/loader:1308:10)
at Module.load (node:internal/modules/cjs/loader:1117:32)
at Module._load (node:internal/modules/cjs/loader:958:12)
at ModuleWrap.<anonymous> (node:internal/modules/esm/translators:169:29) {
code: 'MODULE_NOT_FOUND',
requireStack: [
'/home/dashuai/.nvm/versions/node/v18.15.0/lib/node_modules/@gltf-transform/cli/node_modules/@ljharb/through/index.js'
]
}
Node.js v18.15.0
```
以上错误是因为call-bind没有找到,只需要`npm install call-bind`即可 |
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/octique/0.1.0/sample/sample.typ | typst | Apache License 2.0 | #import "@preview/octique:0.1.0": *
= typst-octique
https://github.com/0x6b/typst-octique
== Installation
```typst
#import "@preview/octique:0.1.0": *
```
== Usage
```typst
// Returns an image for the given name.
octique(name, color: rgb("#000000"), width: 1em, height: 1em)
// Returns a boxed image for the given name.
octique-inline(name, color: rgb("#000000"), width: 1em, height: 1em, baseline: 25%)
// Returns an SVG text for the given name.
octique-svg(name)
```
== List of Available Icons
#show: rest => columns(2, rest)
#table(
columns: (auto, auto),
stroke: none,
align: (x, y) => (left, center).at(x),
[```typst#octique("accessibility-inset")```],[#octique("accessibility-inset")],
[```typst#octique("accessibility")```],[#octique("accessibility")],
[```typst#octique("alert-fill")```],[#octique("alert-fill")],
[```typst#octique("alert")```],[#octique("alert")],
[```typst#octique("apps")```],[#octique("apps")],
[```typst#octique("archive")```],[#octique("archive")],
[```typst#octique("arrow-both")```],[#octique("arrow-both")],
[```typst#octique("arrow-down-left")```],[#octique("arrow-down-left")],
[```typst#octique("arrow-down-right")```],[#octique("arrow-down-right")],
[```typst#octique("arrow-down")```],[#octique("arrow-down")],
[```typst#octique("arrow-left")```],[#octique("arrow-left")],
[```typst#octique("arrow-right")```],[#octique("arrow-right")],
[```typst#octique("arrow-switch")```],[#octique("arrow-switch")],
[```typst#octique("arrow-up-left")```],[#octique("arrow-up-left")],
[```typst#octique("arrow-up-right")```],[#octique("arrow-up-right")],
[```typst#octique("arrow-up")```],[#octique("arrow-up")],
[```typst#octique("beaker")```],[#octique("beaker")],
[```typst#octique("bell-fill")```],[#octique("bell-fill")],
[```typst#octique("bell-slash")```],[#octique("bell-slash")],
[```typst#octique("bell")```],[#octique("bell")],
[```typst#octique("blocked")```],[#octique("blocked")],
[```typst#octique("bold")```],[#octique("bold")],
[```typst#octique("book")```],[#octique("book")],
[```typst#octique("bookmark-slash")```],[#octique("bookmark-slash")],
[```typst#octique("bookmark")```],[#octique("bookmark")],
[```typst#octique("briefcase")```],[#octique("briefcase")],
[```typst#octique("broadcast")```],[#octique("broadcast")],
[```typst#octique("browser")```],[#octique("browser")],
[```typst#octique("bug")```],[#octique("bug")],
[```typst#octique("cache")```],[#octique("cache")],
[```typst#octique("calendar")```],[#octique("calendar")],
[```typst#octique("check-circle-fill")```],[#octique("check-circle-fill")],
[```typst#octique("check-circle")```],[#octique("check-circle")],
[```typst#octique("check")```],[#octique("check")],
[```typst#octique("checkbox")```],[#octique("checkbox")],
[```typst#octique("checklist")```],[#octique("checklist")],
[```typst#octique("chevron-down")```],[#octique("chevron-down")],
[```typst#octique("chevron-left")```],[#octique("chevron-left")],
[```typst#octique("chevron-right")```],[#octique("chevron-right")],
[```typst#octique("chevron-up")```],[#octique("chevron-up")],
[```typst#octique("circle-slash")```],[#octique("circle-slash")],
[```typst#octique("circle")```],[#octique("circle")],
[```typst#octique("clock-fill")```],[#octique("clock-fill")],
[```typst#octique("clock")```],[#octique("clock")],
[```typst#octique("cloud-offline")```],[#octique("cloud-offline")],
[```typst#octique("cloud")```],[#octique("cloud")],
[```typst#octique("code-of-conduct")```],[#octique("code-of-conduct")],
[```typst#octique("code-review")```],[#octique("code-review")],
[```typst#octique("code")```],[#octique("code")],
[```typst#octique("code-square")```],[#octique("code-square")],
[```typst#octique("codescan-checkmark")```],[#octique("codescan-checkmark")],
[```typst#octique("codescan")```],[#octique("codescan")],
[```typst#octique("codespaces")```],[#octique("codespaces")],
[```typst#octique("columns")```],[#octique("columns")],
[```typst#octique("command-palette")```],[#octique("command-palette")],
[```typst#octique("comment-discussion")```],[#octique("comment-discussion")],
[```typst#octique("comment")```],[#octique("comment")],
[```typst#octique("container")```],[#octique("container")],
[```typst#octique("copilot-error")```],[#octique("copilot-error")],
[```typst#octique("copilot")```],[#octique("copilot")],
[```typst#octique("copilot-warning")```],[#octique("copilot-warning")],
[```typst#octique("copy")```],[#octique("copy")],
[```typst#octique("cpu")```],[#octique("cpu")],
[```typst#octique("credit-card")```],[#octique("credit-card")],
[```typst#octique("cross-reference")```],[#octique("cross-reference")],
[```typst#octique("dash")```],[#octique("dash")],
[```typst#octique("database")```],[#octique("database")],
[```typst#octique("dependabot")```],[#octique("dependabot")],
[```typst#octique("desktop-download")```],[#octique("desktop-download")],
[```typst#octique("device-camera")```],[#octique("device-camera")],
[```typst#octique("device-camera-video")```],[#octique("device-camera-video")],
[```typst#octique("device-desktop")```],[#octique("device-desktop")],
[```typst#octique("device-mobile")```],[#octique("device-mobile")],
[```typst#octique("devices")```],[#octique("devices")],
[```typst#octique("diamond")```],[#octique("diamond")],
[```typst#octique("diff-added")```],[#octique("diff-added")],
[```typst#octique("diff-ignored")```],[#octique("diff-ignored")],
[```typst#octique("diff-modified")```],[#octique("diff-modified")],
[```typst#octique("diff-removed")```],[#octique("diff-removed")],
[```typst#octique("diff-renamed")```],[#octique("diff-renamed")],
[```typst#octique("diff")```],[#octique("diff")],
[```typst#octique("discussion-closed")```],[#octique("discussion-closed")],
[```typst#octique("discussion-duplicate")```],[#octique("discussion-duplicate")],
[```typst#octique("discussion-outdated")```],[#octique("discussion-outdated")],
[```typst#octique("dot-fill")```],[#octique("dot-fill")],
[```typst#octique("dot")```],[#octique("dot")],
[```typst#octique("download")```],[#octique("download")],
[```typst#octique("duplicate")```],[#octique("duplicate")],
[```typst#octique("ellipsis")```],[#octique("ellipsis")],
[```typst#octique("eye-closed")```],[#octique("eye-closed")],
[```typst#octique("eye")```],[#octique("eye")],
[```typst#octique("feed-discussion")```],[#octique("feed-discussion")],
[```typst#octique("feed-forked")```],[#octique("feed-forked")],
[```typst#octique("feed-heart")```],[#octique("feed-heart")],
[```typst#octique("feed-issue-closed")```],[#octique("feed-issue-closed")],
[```typst#octique("feed-issue-draft")```],[#octique("feed-issue-draft")],
[```typst#octique("feed-issue-open")```],[#octique("feed-issue-open")],
[```typst#octique("feed-issue-reopen")```],[#octique("feed-issue-reopen")],
[```typst#octique("feed-merged")```],[#octique("feed-merged")],
[```typst#octique("feed-person")```],[#octique("feed-person")],
[```typst#octique("feed-plus")```],[#octique("feed-plus")],
[```typst#octique("feed-public")```],[#octique("feed-public")],
[```typst#octique("feed-pull-request-closed")```],[#octique("feed-pull-request-closed")],
[```typst#octique("feed-pull-request-draft")```],[#octique("feed-pull-request-draft")],
[```typst#octique("feed-pull-request-open")```],[#octique("feed-pull-request-open")],
[```typst#octique("feed-repo")```],[#octique("feed-repo")],
[```typst#octique("feed-rocket")```],[#octique("feed-rocket")],
[```typst#octique("feed-star")```],[#octique("feed-star")],
[```typst#octique("feed-tag")```],[#octique("feed-tag")],
[```typst#octique("feed-trophy")```],[#octique("feed-trophy")],
[```typst#octique("file-added")```],[#octique("file-added")],
[```typst#octique("file-badge")```],[#octique("file-badge")],
[```typst#octique("file-binary")```],[#octique("file-binary")],
[```typst#octique("file-code")```],[#octique("file-code")],
[```typst#octique("file-diff")```],[#octique("file-diff")],
[```typst#octique("file-directory-fill")```],[#octique("file-directory-fill")],
[```typst#octique("file-directory-open-fill")```],[#octique("file-directory-open-fill")],
[```typst#octique("file-directory")```],[#octique("file-directory")],
[```typst#octique("file-directory-symlink")```],[#octique("file-directory-symlink")],
[```typst#octique("file-moved")```],[#octique("file-moved")],
[```typst#octique("file-removed")```],[#octique("file-removed")],
[```typst#octique("file")```],[#octique("file")],
[```typst#octique("file-submodule")```],[#octique("file-submodule")],
[```typst#octique("file-symlink-file")```],[#octique("file-symlink-file")],
[```typst#octique("file-zip")```],[#octique("file-zip")],
[```typst#octique("filter")```],[#octique("filter")],
[```typst#octique("fiscal-host")```],[#octique("fiscal-host")],
[```typst#octique("flame")```],[#octique("flame")],
[```typst#octique("fold-down")```],[#octique("fold-down")],
[```typst#octique("fold")```],[#octique("fold")],
[```typst#octique("fold-up")```],[#octique("fold-up")],
[```typst#octique("gear")```],[#octique("gear")],
[```typst#octique("gift")```],[#octique("gift")],
[```typst#octique("git-branch")```],[#octique("git-branch")],
[```typst#octique("git-commit")```],[#octique("git-commit")],
[```typst#octique("git-compare")```],[#octique("git-compare")],
[```typst#octique("git-merge-queue")```],[#octique("git-merge-queue")],
[```typst#octique("git-merge")```],[#octique("git-merge")],
[```typst#octique("git-pull-request-closed")```],[#octique("git-pull-request-closed")],
[```typst#octique("git-pull-request-draft")```],[#octique("git-pull-request-draft")],
[```typst#octique("git-pull-request")```],[#octique("git-pull-request")],
[```typst#octique("globe")```],[#octique("globe")],
[```typst#octique("goal")```],[#octique("goal")],
[```typst#octique("grabber")```],[#octique("grabber")],
[```typst#octique("graph")```],[#octique("graph")],
[```typst#octique("hash")```],[#octique("hash")],
[```typst#octique("heading")```],[#octique("heading")],
[```typst#octique("heart-fill")```],[#octique("heart-fill")],
[```typst#octique("heart")```],[#octique("heart")],
[```typst#octique("history")```],[#octique("history")],
[```typst#octique("home")```],[#octique("home")],
[```typst#octique("horizontal-rule")```],[#octique("horizontal-rule")],
[```typst#octique("hourglass")```],[#octique("hourglass")],
[```typst#octique("hubot")```],[#octique("hubot")],
[```typst#octique("id-badge")```],[#octique("id-badge")],
[```typst#octique("image")```],[#octique("image")],
[```typst#octique("inbox")```],[#octique("inbox")],
[```typst#octique("infinity")```],[#octique("infinity")],
[```typst#octique("info")```],[#octique("info")],
[```typst#octique("issue-closed")```],[#octique("issue-closed")],
[```typst#octique("issue-draft")```],[#octique("issue-draft")],
[```typst#octique("issue-opened")```],[#octique("issue-opened")],
[```typst#octique("issue-reopened")```],[#octique("issue-reopened")],
[```typst#octique("issue-tracked-by")```],[#octique("issue-tracked-by")],
[```typst#octique("issue-tracks")```],[#octique("issue-tracks")],
[```typst#octique("italic")```],[#octique("italic")],
[```typst#octique("iterations")```],[#octique("iterations")],
[```typst#octique("kebab-horizontal")```],[#octique("kebab-horizontal")],
[```typst#octique("key-asterisk")```],[#octique("key-asterisk")],
[```typst#octique("key")```],[#octique("key")],
[```typst#octique("law")```],[#octique("law")],
[```typst#octique("light-bulb")```],[#octique("light-bulb")],
[```typst#octique("link-external")```],[#octique("link-external")],
[```typst#octique("link")```],[#octique("link")],
[```typst#octique("list-ordered")```],[#octique("list-ordered")],
[```typst#octique("list-unordered")```],[#octique("list-unordered")],
[```typst#octique("location")```],[#octique("location")],
[```typst#octique("lock")```],[#octique("lock")],
[```typst#octique("log")```],[#octique("log")],
[```typst#octique("logo-gist")```],[#octique("logo-gist")],
[```typst#octique("logo-github")```],[#octique("logo-github")],
[```typst#octique("mail")```],[#octique("mail")],
[```typst#octique("mark-github")```],[#octique("mark-github")],
[```typst#octique("markdown")```],[#octique("markdown")],
[```typst#octique("megaphone")```],[#octique("megaphone")],
[```typst#octique("mention")```],[#octique("mention")],
[```typst#octique("meter")```],[#octique("meter")],
[```typst#octique("milestone")```],[#octique("milestone")],
[```typst#octique("mirror")```],[#octique("mirror")],
[```typst#octique("moon")```],[#octique("moon")],
[```typst#octique("mortar-board")```],[#octique("mortar-board")],
[```typst#octique("move-to-bottom")```],[#octique("move-to-bottom")],
[```typst#octique("move-to-end")```],[#octique("move-to-end")],
[```typst#octique("move-to-start")```],[#octique("move-to-start")],
[```typst#octique("move-to-top")```],[#octique("move-to-top")],
[```typst#octique("multi-select")```],[#octique("multi-select")],
[```typst#octique("mute")```],[#octique("mute")],
[```typst#octique("no-entry")```],[#octique("no-entry")],
[```typst#octique("north-star")```],[#octique("north-star")],
[```typst#octique("note")```],[#octique("note")],
[```typst#octique("number")```],[#octique("number")],
[```typst#octique("organization")```],[#octique("organization")],
[```typst#octique("package-dependencies")```],[#octique("package-dependencies")],
[```typst#octique("package-dependents")```],[#octique("package-dependents")],
[```typst#octique("package")```],[#octique("package")],
[```typst#octique("paintbrush")```],[#octique("paintbrush")],
[```typst#octique("paper-airplane")```],[#octique("paper-airplane")],
[```typst#octique("paperclip")```],[#octique("paperclip")],
[```typst#octique("passkey-fill")```],[#octique("passkey-fill")],
[```typst#octique("paste")```],[#octique("paste")],
[```typst#octique("pencil")```],[#octique("pencil")],
[```typst#octique("people")```],[#octique("people")],
[```typst#octique("person-add")```],[#octique("person-add")],
[```typst#octique("person-fill")```],[#octique("person-fill")],
[```typst#octique("person")```],[#octique("person")],
[```typst#octique("pin-slash")```],[#octique("pin-slash")],
[```typst#octique("pin")```],[#octique("pin")],
[```typst#octique("pivot-column")```],[#octique("pivot-column")],
[```typst#octique("play")```],[#octique("play")],
[```typst#octique("plug")```],[#octique("plug")],
[```typst#octique("plus-circle")```],[#octique("plus-circle")],
[```typst#octique("plus")```],[#octique("plus")],
[```typst#octique("project-roadmap")```],[#octique("project-roadmap")],
[```typst#octique("project")```],[#octique("project")],
[```typst#octique("project-symlink")```],[#octique("project-symlink")],
[```typst#octique("project-template")```],[#octique("project-template")],
[```typst#octique("pulse")```],[#octique("pulse")],
[```typst#octique("question")```],[#octique("question")],
[```typst#octique("quote")```],[#octique("quote")],
[```typst#octique("read")```],[#octique("read")],
[```typst#octique("redo")```],[#octique("redo")],
[```typst#octique("rel-file-path")```],[#octique("rel-file-path")],
[```typst#octique("reply")```],[#octique("reply")],
[```typst#octique("repo-clone")```],[#octique("repo-clone")],
[```typst#octique("repo-deleted")```],[#octique("repo-deleted")],
[```typst#octique("repo-forked")```],[#octique("repo-forked")],
[```typst#octique("repo-locked")```],[#octique("repo-locked")],
[```typst#octique("repo-pull")```],[#octique("repo-pull")],
[```typst#octique("repo-push")```],[#octique("repo-push")],
[```typst#octique("repo")```],[#octique("repo")],
[```typst#octique("repo-template")```],[#octique("repo-template")],
[```typst#octique("report")```],[#octique("report")],
[```typst#octique("rocket")```],[#octique("rocket")],
[```typst#octique("rows")```],[#octique("rows")],
[```typst#octique("rss")```],[#octique("rss")],
[```typst#octique("ruby")```],[#octique("ruby")],
[```typst#octique("screen-full")```],[#octique("screen-full")],
[```typst#octique("screen-normal")```],[#octique("screen-normal")],
[```typst#octique("search")```],[#octique("search")],
[```typst#octique("server")```],[#octique("server")],
[```typst#octique("share-android")```],[#octique("share-android")],
[```typst#octique("share")```],[#octique("share")],
[```typst#octique("shield-check")```],[#octique("shield-check")],
[```typst#octique("shield-lock")```],[#octique("shield-lock")],
[```typst#octique("shield-slash")```],[#octique("shield-slash")],
[```typst#octique("shield")```],[#octique("shield")],
[```typst#octique("shield-x")```],[#octique("shield-x")],
[```typst#octique("sidebar-collapse")```],[#octique("sidebar-collapse")],
[```typst#octique("sidebar-expand")```],[#octique("sidebar-expand")],
[```typst#octique("sign-in")```],[#octique("sign-in")],
[```typst#octique("sign-out")```],[#octique("sign-out")],
[```typst#octique("single-select")```],[#octique("single-select")],
[```typst#octique("skip-fill")```],[#octique("skip-fill")],
[```typst#octique("skip")```],[#octique("skip")],
[```typst#octique("sliders")```],[#octique("sliders")],
[```typst#octique("smiley")```],[#octique("smiley")],
[```typst#octique("sort-asc")```],[#octique("sort-asc")],
[```typst#octique("sort-desc")```],[#octique("sort-desc")],
[```typst#octique("sparkle-fill")```],[#octique("sparkle-fill")],
[```typst#octique("sponsor-tiers")```],[#octique("sponsor-tiers")],
[```typst#octique("square-fill")```],[#octique("square-fill")],
[```typst#octique("square")```],[#octique("square")],
[```typst#octique("squirrel")```],[#octique("squirrel")],
[```typst#octique("stack")```],[#octique("stack")],
[```typst#octique("star-fill")```],[#octique("star-fill")],
[```typst#octique("star")```],[#octique("star")],
[```typst#octique("stop")```],[#octique("stop")],
[```typst#octique("stopwatch")```],[#octique("stopwatch")],
[```typst#octique("strikethrough")```],[#octique("strikethrough")],
[```typst#octique("sun")```],[#octique("sun")],
[```typst#octique("sync")```],[#octique("sync")],
[```typst#octique("tab-external")```],[#octique("tab-external")],
[```typst#octique("table")```],[#octique("table")],
[```typst#octique("tag")```],[#octique("tag")],
[```typst#octique("tasklist")```],[#octique("tasklist")],
[```typst#octique("telescope-fill")```],[#octique("telescope-fill")],
[```typst#octique("telescope")```],[#octique("telescope")],
[```typst#octique("terminal")```],[#octique("terminal")],
[```typst#octique("three-bars")```],[#octique("three-bars")],
[```typst#octique("thumbsdown")```],[#octique("thumbsdown")],
[```typst#octique("thumbsup")```],[#octique("thumbsup")],
[```typst#octique("tools")```],[#octique("tools")],
[```typst#octique("tracked-by-closed-completed")```],[#octique("tracked-by-closed-completed")],
[```typst#octique("tracked-by-closed-not-planned")```],[#octique("tracked-by-closed-not-planned")],
[```typst#octique("trash")```],[#octique("trash")],
[```typst#octique("triangle-down")```],[#octique("triangle-down")],
[```typst#octique("triangle-left")```],[#octique("triangle-left")],
[```typst#octique("triangle-right")```],[#octique("triangle-right")],
[```typst#octique("triangle-up")```],[#octique("triangle-up")],
[```typst#octique("trophy")```],[#octique("trophy")],
[```typst#octique("typography")```],[#octique("typography")],
[```typst#octique("undo")```],[#octique("undo")],
[```typst#octique("unfold")```],[#octique("unfold")],
[```typst#octique("unlink")```],[#octique("unlink")],
[```typst#octique("unlock")```],[#octique("unlock")],
[```typst#octique("unmute")```],[#octique("unmute")],
[```typst#octique("unread")```],[#octique("unread")],
[```typst#octique("unverified")```],[#octique("unverified")],
[```typst#octique("upload")```],[#octique("upload")],
[```typst#octique("verified")```],[#octique("verified")],
[```typst#octique("versions")```],[#octique("versions")],
[```typst#octique("video")```],[#octique("video")],
[```typst#octique("webhook")```],[#octique("webhook")],
[```typst#octique("workflow")```],[#octique("workflow")],
[```typst#octique("x-circle-fill")```],[#octique("x-circle-fill")],
[```typst#octique("x-circle")```],[#octique("x-circle")],
[```typst#octique("x")```],[#octique("x")],
[```typst#octique("zap")```],[#octique("zap")],
[```typst#octique("zoom-in")```],[#octique("zoom-in")],
[```typst#octique("zoom-out")```],[#octique("zoom-out")],
)
|
https://github.com/hei-templates/hevs-typsttemplate-thesis | https://raw.githubusercontent.com/hei-templates/hevs-typsttemplate-thesis/main/main.typ | typst | MIT License | //
// Description: Main document to stitch everything together
//
#import "00-templates/template-thesis.typ": *
#import "01-settings/metadata.typ": *
//-------------------------------------
// Template config
//
// Apply the thesis template; every value below is defined in
// 01-settings/metadata.typ (imported above).
#show: thesis.with(
  title: title,
  subtitle: subtitle,
  version: version,
  author: author,
  professor: professor,
  expert: expert,
  school: school,
  date: date,
  tableof: tableof,
  icons: icons,
)
//-------------------------------------
// Content
//
// Main chapters, included in reading order.
#include "02-main/00-acknowledgements.typ"
#include "02-main/01-abstract.typ"
#include "02-main/02-introduction.typ"
#include "02-main/03-analysis.typ"
#include "02-main/04-design.typ"
#include "02-main/05-implementation.typ"
#include "02-main/06-validation.typ"
#include "02-main/07-conclusion.typ"
//-------------------------------------
// Appendix
//
#include "03-tail/a-appendix.typ"
//-------------------------------------
// Bibliography
//
// Only emitted when the `bib` flag (from metadata.typ) is enabled.
#if bib == true {
  include "03-tail/bibliography.typ"
}
https://github.com/kdog3682/2024-typst | https://raw.githubusercontent.com/kdog3682/2024-typst/main/src/block-math.typ | typst | #import "base-utils.typ": *
// Render an equation string as an inline box, raised so it lines up
// with adjacent block graphics (baseline pulled up by 50%).
// NOTE(review): the `spacing` parameter is accepted but never used by the
// body — callers below pass `spacing: 1` to no effect; confirm whether
// horizontal padding was intended here.
#let eq(s, spacing: 0) = {
  return box(baseline: -50%, equation(s))
}
#let parser(s) = {
let get-type(x) = {
return if x == " " {
none
} else if test(x, "\d") {
"number"
} else if test(x, "[+-/*]") {
"operator"
} else if test(x, "[()]") {
"paren"
} else {
"variable"
}
}
let runner(x) = {
return (type: get-type(x), value: x)
}
return split-singles(remove-spaces(s)).map(runner)
}
#let block-arithmetic(s) = {
let parts = parser(s)
let block-type = "circle"
// you can have paddings and arrays
// you can have circles
// you can have triangles
// id do the parsing in javascript
// typst should be for layout
for part in parts {
if part.type == "integer" {
block-stack(part.value)
}
else if part.type == "paren" {
eq(part.value)
}
else if part.type == "operator" {
eq(part.value, spacing: 1)
}
else if part.type == "variable" {
block-stack(part.value)
}
}
}
#let block-sum(..args) = {
let (integers, attrs) = handle-args(args, base: "block-sum")
let store = ()
for (index, integer) in integers.enumerate() {
let block = block-stack(integer)
store.push(block)
if is-last(index, integers) {
let sum = block-answer(integers, "sum")
store.push(eq("="))
store.push(sum)
} else {
store.push(eq("+"))
}
}
return store
}
#block-math((1,1,3)).join(" ")
#panic(parser("1 + 2"))
// handle-args is a way of doing something ...
|
|
https://github.com/ngoetti/knowledge-key | https://raw.githubusercontent.com/ngoetti/knowledge-key/master/template/main.typ | typst | MIT License | #import "@preview/knowledge-key:1.0.0": *
#show: knowledge-key.with(
title: [Title],
authors: "Author1, Author2"
)
#include "sections/01-introduction.typ"
#include "sections/02-devops-with-gitlab.typ"
#include "sections/03-terraform.typ" |
https://github.com/frectonz/the-pg-book | https://raw.githubusercontent.com/frectonz/the-pg-book/main/book/203.%20fn.html.typ | typst | fn.html
<NAME>
May 2021

Most people think of nerds as quiet, diffident people. In ordinary
social situations they are — as quiet and diffident as the star
quarterback would be if he found himself in the middle of a physics
symposium. And for the same reason: they are fish out of water.
But the apparent diffidence of nerds is an illusion due to the fact
that when non-nerds observe them, it's usually in ordinary social
situations. In fact some nerds are quite fierce.The fierce nerds are a small but interesting group. They are as a
rule extremely competitive — more competitive, I'd say, than highly
competitive non-nerds. Competition is more personal for them. Partly
perhaps because they're not emotionally mature enough to distance
themselves from it, but also because there's less randomness in the
kinds of competition they engage in, and they are thus more justified
in taking the results personally.Fierce nerds also tend to be somewhat overconfident, especially
when young. It might seem like it would be a disadvantage to be
mistaken about one's abilities, but empirically it isn't. Up to a
point, confidence is a self-fulfilling prophecy.

Another quality you find in most fierce nerds is intelligence. Not
all nerds are smart, but the fierce ones are always at least
moderately so. If they weren't, they wouldn't have the confidence
to be fierce.
[1]There's also a natural connection between nerdiness and
independent-mindedness. It's hard to be
independent-minded without
being somewhat socially awkward, because conventional beliefs are
so often mistaken, or at least arbitrary. No one who was both
independent-minded and ambitious would want to waste the effort it
takes to fit in. And the independent-mindedness of the fierce nerds
will obviously be of the aggressive
rather than the passive type:
they'll be annoyed by rules, rather than dreamily unaware of them.I'm less sure why fierce nerds are impatient, but most seem to be.
You notice it first in conversation, where they tend to interrupt
you. This is merely annoying, but in the more promising fierce nerds
it's connected to a deeper impatience about solving problems. Perhaps
the competitiveness and impatience of fierce nerds are not separate
qualities, but two manifestations of a single underlying drivenness.When you combine all these qualities in sufficient quantities, the
result is quite formidable. The most vivid example of fierce nerds
in action may be <NAME>'s The Double Helix. The first sentence
of the book is "I have never seen <NAME> in a modest mood,"
and the portrait he goes on to paint of Crick is the quintessential
fierce nerd: brilliant, socially awkward, competitive, independent-minded,
overconfident. But so is the implicit portrait he paints of himself.
Indeed, his lack of social awareness makes both portraits that much
more realistic, because he baldly states all sorts of opinions and
motivations that a smoother person would conceal. And moreover it's
clear from the story that Crick and Watson's fierce nerdiness was
integral to their success. Their independent-mindedness caused them
to consider approaches that most others ignored, their overconfidence
allowed them to work on problems they only half understood (they
were literally described as "clowns" by one eminent insider), and
their impatience and competitiveness got them to the answer ahead
of two other groups that would otherwise have found it within the
next year, if not the next several months.
[2]The idea that there could be fierce nerds is an unfamiliar one not
just to many normal people but even to some young nerds. Especially
early on, nerds spend so much of their time in ordinary social
situations and so little doing real work that they get a lot more
evidence of their awkwardness than their power. So there will be
some who read this description of the fierce nerd and realize "Hmm,
that's me." And it is to you, young fierce nerd, that I now turn.I have some good news, and some bad news. The good news is that
your fierceness will be a great help in solving difficult problems.
And not just the kind of scientific and technical problems that
nerds have traditionally solved. As the world progresses, the number
of things you can win at by getting the right answer increases.
Recently getting rich became
one of them: 7 of the 8 richest people
in America are now fierce nerds.Indeed, being a fierce nerd is probably even more helpful in business
than in nerds' original territory of scholarship. Fierceness seems
optional there. Darwin for example doesn't seem to have been
especially fierce. Whereas it's impossible to be the CEO of a company
over a certain size without being fierce, so now that nerds can win
at business, fierce nerds will increasingly monopolize the really
big successes.The bad news is that if it's not exercised, your fierceness will
turn to bitterness, and you will become an intellectual playground
bully: the grumpy sysadmin, the forum troll, the
hater, the shooter
down of new ideas.How do you avoid this fate? Work on ambitious projects. If you
succeed, it will bring you a kind of satisfaction that neutralizes
bitterness. But you don't need to have succeeded to feel this;
merely working on hard projects gives most fierce nerds some
feeling of satisfaction. And those it doesn't, it at least keeps
busy.
[3]Another solution may be to somehow turn off your fierceness, by
devoting yourself to meditation or psychotherapy or something like
that. Maybe that's the right answer for some people. I have no idea.
But it doesn't seem the optimal solution to me. If you're given a
sharp knife, it seems to me better to use it than to blunt its edge
to avoid cutting yourself.If you do choose the ambitious route, you'll have a tailwind behind
you. There has never been a better time to be a nerd. In the past
century we've seen a continuous transfer of power from dealmakers
to technicians — from the charismatic to the competent — and I
don't see anything on the horizon that will end it. At least not
till the nerds end it themselves by bringing about the singularity.Notes[1]
To be a nerd is to be socially awkward, and there are two
distinct ways to do that: to be playing the same game as everyone
else, but badly, and to be playing a different game. The smart nerds
are the latter type.[2]
The same qualities that make fierce nerds so effective can
also make them very annoying. Fierce nerds would do well to remember
this, and (a) try to keep a lid on it, and (b) seek out organizations
and types of work where getting the right answer matters more than
preserving social harmony. In practice that means small groups
working on hard problems. Which fortunately is the most fun kind
of environment anyway.[3]
If success neutralizes bitterness, why are there some people
who are at least moderately successful and yet still quite bitter?
Because people's potential bitterness varies depending on how
naturally bitter their personality is, and how ambitious they are:
someone who's naturally very bitter will still have a lot left after
success neutralizes some of it, and someone who's very ambitious
will need proportionally more success to satisfy that ambition.So the worst-case scenario is someone who's both naturally bitter
and extremely ambitious, and yet only moderately successful.
Thanks to <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> for reading drafts of this.Chinese Translation
|
|
https://github.com/ClazyChen/Table-Tennis-Rankings | https://raw.githubusercontent.com/ClazyChen/Table-Tennis-Rankings/main/history/2010/MS-10.typ | typst |
#set text(font: ("Courier New", "NSimSun"))
#figure(
caption: "Men's Singles (1 - 32)",
table(
columns: 4,
[Ranking], [Player], [Country/Region], [Rating],
[1], [<NAME>], [CHN], [3264],
[2], [ZHANG Jike], [CHN], [3142],
[3], [WANG Hao], [CHN], [3134],
[4], [<NAME>], [GER], [3115],
[5], [<NAME>], [CHN], [3108],
[6], [<NAME>], [CHN], [3073],
[7], [XU Xin], [CHN], [3019],
[8], [SAMSONOV Vladimir], [BLR], [2993],
[9], [<NAME>], [KOR], [2962],
[10], [<NAME>], [CHN], [2939],
[11], [CHEN Qi], [CHN], [2910],
[12], [MIZUTANI Jun], [JPN], [2871],
[13], [<NAME>], [GER], [2857],
[14], [<NAME>], [ROU], [2793],
[15], [<NAME>], [DEN], [2791],
[16], [OVTCHAROV Dimitrij], [GER], [2781],
[17], [<NAME>], [GER], [2770],
[18], [APOLONIA Tiago], [POR], [2759],
[19], [GAO Ning], [SGP], [2743],
[20], [TANG Peng], [HKG], [2731],
[21], [<NAME>], [KOR], [2725],
[22], [CHUANG Chih-Yuan], [TPE], [2717],
[23], [KISHIKAWA Seiya], [JPN], [2716],
[24], [KO Lai Chak], [HKG], [2700],
[25], [LEE Jungwoo], [KOR], [2684],
[26], [<NAME>], [FRA], [2678],
[27], [<NAME>], [SLO], [2671],
[28], [<NAME>], [AUT], [2653],
[29], [LI Ching], [HKG], [2646],
[30], [OH Sangeun], [KOR], [2640],
[31], [YOSHIDA Kaii], [JPN], [2614],
[32], [HOU Yingchao], [CHN], [2610],
)
)#pagebreak()
#set text(font: ("Courier New", "NSimSun"))
#figure(
caption: "Men's Singles (33 - 64)",
table(
columns: 4,
[Ranking], [Player], [Country/Region], [Rating],
[33], [SMIRNOV Alexey], [RUS], [2605],
[34], [CHTCHETININE Evgueni], [BLR], [2603],
[35], [<NAME>], [AUT], [2603],
[36], [SEO Hyundeok], [KOR], [2591],
[37], [GERELL Par], [SWE], [2580],
[38], [<NAME>], [CRO], [2579],
[39], [GIONIS Panagiotis], [GRE], [2579],
[40], [JIANG Tianyi], [HKG], [2578],
[41], [JEOUNG Youngsik], [KOR], [2576],
[42], [YOON Jaeyoung], [KOR], [2575],
[43], [UEDA Jin], [JPN], [2575],
[44], [SIMONCIK Josef], [CZE], [2574],
[45], [<NAME>], [RUS], [2573],
[46], [KORBEL Petr], [CZE], [2563],
[47], [KIM Junghoon], [KOR], [2554],
[48], [<NAME>], [SWE], [2553],
[49], [<NAME>], [GRE], [2546],
[50], [<NAME>], [AUT], [2537],
[51], [<NAME>], [POR], [2536],
[52], [<NAME>], [QAT], [2531],
[53], [<NAME>], [KOR], [2530],
[54], [<NAME>], [AUT], [2528],
[55], [SALIFOU Abdel-Kader], [FRA], [2523],
[56], [KOSOWSKI Jakub], [POL], [2519],
[57], [<NAME>], [GER], [2502],
[58], [PROKOPCOV Dmitrij], [CZE], [2501],
[59], [<NAME>], [BEL], [2496],
[60], [<NAME>], [KOR], [2496],
[61], [<NAME>], [SWE], [2494],
[62], [<NAME>], [POL], [2494],
[63], [<NAME>], [DEN], [2492],
[64], [LEGOUT Christophe], [FRA], [2477],
)
)#pagebreak()
#set text(font: ("Courier New", "NSimSun"))
#figure(
caption: "Men's Singles (65 - 96)",
table(
columns: 4,
[Ranking], [Player], [Country/Region], [Rating],
[65], [<NAME>], [FRA], [2471],
[66], [<NAME>], [IND], [2470],
[67], [<NAME>], [JPN], [2469],
[68], [<NAME>], [ESP], [2461],
[69], [<NAME>], [HUN], [2459],
[70], [<NAME>], [SRB], [2455],
[71], [BLASZCZYK Lucjan], [POL], [2452],
[72], [<NAME>], [AUT], [2452],
[73], [MONTEIRO Joao], [POR], [2449],
[74], [CHEUNG Yuk], [HKG], [2448],
[75], [<NAME>hiro], [JPN], [2448],
[76], [<NAME>], [KOR], [2448],
[77], [<NAME>], [POL], [2446],
[78], [KIM Hyok Bong], [PRK], [2441],
[79], [LIN Ju], [DOM], [2438],
[80], [<NAME>], [SVK], [2437],
[81], [<NAME>], [GER], [2433],
[82], [<NAME>], [CRO], [2427],
[83], [<NAME>], [ESP], [2426],
[84], [<NAME>], [CHN], [2424],
[85], [HE Zhiwen], [ESP], [2423],
[86], [<NAME>], [HUN], [2423],
[87], [<NAME>], [JPN], [2422],
[88], [<NAME>], [ARG], [2421],
[89], [CANTERO Jesus], [ESP], [2417],
[90], [<NAME>], [SRB], [2417],
[91], [RUBTSOV Igor], [RUS], [2411],
[92], [<NAME>], [RUS], [2408],
[93], [HUANG Sheng-Sheng], [TPE], [2394],
[94], [JUZBASIC Ivan], [CRO], [2391],
[95], [<NAME>], [KOR], [2390],
[96], [<NAME>], [CRO], [2385],
)
)#pagebreak()
#set text(font: ("Courier New", "NSimSun"))
#figure(
caption: "Men's Singles (97 - 128)",
table(
columns: 4,
[Ranking], [Player], [Country/Region], [Rating],
[97], [WU Chih-Chi], [TPE], [2382],
[98], [SHIBAEV Alexander], [RUS], [2376],
[99], [<NAME>], [KOR], [2373],
[100], [<NAME>], [AUS], [2372],
[101], [<NAME>], [SRB], [2369],
[102], [MATSUDAIRA Kenji], [JPN], [2367],
[103], [<NAME>], [CZE], [2366],
[104], [<NAME>], [EGY], [2358],
[105], [<NAME>], [MEX], [2356],
[106], [<NAME>], [PRK], [2355],
[107], [<NAME>], [POL], [2352],
[108], [<NAME>], [SWE], [2352],
[109], [Y<NAME>], [SGP], [2352],
[110], [<NAME>], [SGP], [2347],
[111], [<NAME>], [RUS], [2340],
[112], [<NAME>], [JPN], [2337],
[113], [<NAME>], [SVK], [2334],
[114], [<NAME>], [CZE], [2331],
[115], [<NAME>], [KOR], [2326],
[116], [<NAME>], [SVK], [2324],
[117], [<NAME>lav], [UKR], [2322],
[118], [<NAME>], [BRA], [2321],
[119], [<NAME>], [BRA], [2319],
[120], [<NAME>], [GER], [2319],
[121], [<NAME>], [HKG], [2318],
[122], [LASAN Sas], [SLO], [2317],
[123], [<NAME>], [FRA], [2312],
[124], [<NAME>], [LAT], [2307],
[125], [<NAME>], [TUR], [2304],
[126], [JANG Song Man], [PRK], [2304],
[127], [#text(gray, "<NAME>")], [DEN], [2296],
[128], [<NAME>], [ROU], [2296],
)
) |
|
https://github.com/Dr00gy/Typst-thesis-template-for-VSB | https://raw.githubusercontent.com/Dr00gy/Typst-thesis-template-for-VSB/main/thesis_template/template.typ | typst | #import "show_rules.typ": *
#import "pages.typ": *
#import "outlines.typ": *
#import "misc.typ": *
// TODO support for thesis other than bachelor's is not finished
// PhD thesis has some additional requirements
/*
Template author: Dr00g on Discord / Dr00gy on GitHub and Astra3
Sources and references for the template:
1. http://www.cs.vsb.cz/dvorsky/LaTeX.html -- official LaTeX template
2. https://www.fei.vsb.cz/cs/student/zaverecne-prace -- official styleguide
3. https://vizual.vsb.cz/cs/sablony-a-loga/psani-dokumentu
4. https://vizual.vsb.cz/cs/sablony-a-loga/loga/ -- logos
Typst documentation can be found in "Help" or at:
https://typst.app/docs/reference/
*/
|
|
https://github.com/jgm/typst-hs | https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/layout/pad-02.typ | typst | Other | // Test that the pad element doesn't consume the whole region.
#set page(height: 6cm)
#align(left)[Before]
#pad(10pt, image("test/assets/files/tiger.jpg"))
#align(right)[After]
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/frackable/0.1.0/src/impl.typ | typst | Apache License 2.0 | #let _generate-symbol(
super,
sub
) = {
(:
"superscript": super,
"subscript": sub,
)
}
#let symbols = (:
"0": _generate-symbol("⁰", "₀"),
"1": _generate-symbol("¹", "₁"),
"2": _generate-symbol("²", "₂"),
"3": _generate-symbol("³", "₃"),
"4": _generate-symbol("⁴", "₄"),
"5": _generate-symbol("⁵", "₅"),
"6": _generate-symbol("⁶", "₆"),
"7": _generate-symbol("⁷", "₇"),
"8": _generate-symbol("⁸", "₈"),
"9": _generate-symbol("⁹", "₉"),
)
#let premade = (:
"1/2": "½",
"1/3": "⅓",
"2/3": "⅔",
"1/4": "¼",
"3/4": "¾",
"1/5": "⅕",
"2/5": "⅖",
"3/5": "⅗",
"4/5": "⅘",
"1/6": "⅙",
"5/6": "⅚",
"1/7": "⅐",
"1/8": "⅛",
"3/8": "⅜",
"5/8": "⅝",
"7/8": "⅞",
"1/9": "⅑",
"1/10": "⅒",
)
#let _assert-keys = symbols.keys()
#let _assert-valid(s) = {
for char in s.codepoints() {
if char not in _assert-keys {
panic("Unsupported character " + char)
}
}
}
/// Create vulgar fractions using unicode
/// #example(```typ
/// #frackable()
/// #frackable(1, 3)
/// #frackable(9, 16)
/// #frackable(31, 32)
/// #frackable(0, "000")
/// ```, scale-preview: 200%)
///
/// - numerator (integer, string): The top part of the fraction.
/// - denominator (integer, string): The bottom part of the fraction.
/// - use-predefined (boolean): While this function can typeset arbitrary vulgar
/// fractions, there are some for which there is a predefined unicode
/// codepoint that is prioritized. Set this value to false to prevent
/// predefined codepoints being used.
///
/// #example(```typ
/// #frackable() \
/// #frackable(use-predefined: false)
/// ```, scale-preview: 75%)
/// -> content
#let frackable(
numerator,
denominator,
use-predefined: true
) = {
(numerator, denominator) = (str(numerator), str(denominator))
_assert-valid(numerator)
_assert-valid(denominator)
if use-predefined {
let predefined = premade.at(
numerator + "/" + denominator,
default: none
)
if predefined != none {return predefined}
}
box({
numerator.codepoints().map(char=>{symbols.at(char).superscript}).join()
[⁄]
denominator.codepoints().map(char=>{symbols.at(char).subscript}).join()
})
}
|
https://github.com/Myriad-Dreamin/typst.ts | https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/text/quotes_06.typ | typst | Apache License 2.0 |
#import "/contrib/templates/std-tests/preset.typ": *
#show: test-page
// Test nested double and single quotes.
"'test statement'" \
"'test' statement" \
"statement 'test'"
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-11FC0.typ | typst | Apache License 2.0 | #let data = (
("TAMIL FRACTION ONE THREE-HUNDRED-AND-TWENTIETH", "No", 0),
("TAMIL FRACTION ONE ONE-HUNDRED-AND-SIXTIETH", "No", 0),
("TAMIL FRACTION ONE EIGHTIETH", "No", 0),
("TAMIL FRACTION ONE SIXTY-FOURTH", "No", 0),
("TAMIL FRACTION ONE FORTIETH", "No", 0),
("TAMIL FRACTION ONE THIRTY-SECOND", "No", 0),
("TAMIL FRACTION THREE EIGHTIETHS", "No", 0),
("TAMIL FRACTION THREE SIXTY-FOURTHS", "No", 0),
("TAMIL FRACTION ONE TWENTIETH", "No", 0),
("TAMIL FRACTION ONE SIXTEENTH-1", "No", 0),
("TAMIL FRACTION ONE SIXTEENTH-2", "No", 0),
("TAMIL FRACTION ONE TENTH", "No", 0),
("TAMIL FRACTION ONE EIGHTH", "No", 0),
("TAMIL FRACTION THREE TWENTIETHS", "No", 0),
("TAMIL FRACTION THREE SIXTEENTHS", "No", 0),
("TAMIL FRACTION ONE FIFTH", "No", 0),
("TAMIL FRACTION ONE QUARTER", "No", 0),
("TAMIL FRACTION ONE HALF-1", "No", 0),
("TAMIL FRACTION ONE HALF-2", "No", 0),
("TAMIL FRACTION THREE QUARTERS", "No", 0),
("TAMIL FRACTION DOWNSCALING FACTOR KIIZH", "No", 0),
("TAMIL SIGN NEL", "So", 0),
("TAMIL SIGN CEVITU", "So", 0),
("TAMIL SIGN AAZHAAKKU", "So", 0),
("TAMIL SIGN UZHAKKU", "So", 0),
("TAMIL SIGN MUUVUZHAKKU", "So", 0),
("TAMIL SIGN KURUNI", "So", 0),
("TAMIL SIGN PATHAKKU", "So", 0),
("TAMIL SIGN MUKKURUNI", "So", 0),
("TAMIL SIGN KAACU", "Sc", 0),
("TAMIL SIGN PANAM", "Sc", 0),
("TAMIL SIGN PON", "Sc", 0),
("TAMIL SIGN VARAAKAN", "Sc", 0),
("TAMIL SIGN PAARAM", "So", 0),
("TAMIL SIGN KUZHI", "So", 0),
("TAMIL SIGN VELI", "So", 0),
("TAMIL WET CULTIVATION SIGN", "So", 0),
("TAMIL DRY CULTIVATION SIGN", "So", 0),
("TAMIL LAND SIGN", "So", 0),
("TAMIL SALT PAN SIGN", "So", 0),
("TAMIL TRADITIONAL CREDIT SIGN", "So", 0),
("TAMIL TRADITIONAL NUMBER SIGN", "So", 0),
("TAMIL CURRENT SIGN", "So", 0),
("TAMIL AND ODD SIGN", "So", 0),
("TAMIL SPENT SIGN", "So", 0),
("TAMIL TOTAL SIGN", "So", 0),
("TAMIL IN POSSESSION SIGN", "So", 0),
("TAMIL STARTING FROM SIGN", "So", 0),
("TAMIL SIGN MUTHALIYA", "So", 0),
("TAMIL SIGN VAKAIYARAA", "So", 0),
(),
(),
(),
(),
(),
(),
(),
(),
(),
(),
(),
(),
(),
("TAMIL PUNCTUATION END OF TEXT", "Po", 0),
)
|
https://github.com/protohaven/printed_materials | https://raw.githubusercontent.com/protohaven/printed_materials/main/common-tools/hydraulic_press.typ | typst |
#import "/meta-environments/env-templates.typ": *
= TOOL
(Overview paragraph(s))
== Notes
=== Safety
=== Common Hazards
=== Care
=== Use
=== Consumables
=== Tooling
=== Materials
== Parts of the TOOL
===
== Basic Operation
=== Setting Up
=== Workholding
=== USE
=== Cleaning Up
=== SPECIAL TOPICS |
|
https://github.com/dismint/docmint | https://raw.githubusercontent.com/dismint/docmint/main/comptheory/pset2.typ | typst | #import "template.typ": *
#show: template.with(
title: "PSET 2",
subtitle: "18.404",
pset: true,
)
= Problem 1
Recall that the CFL Pumping Lemma specifies that for a CFL $bold(L)$, there is a string of length at least $p$ in the language such that:
+ $u v^i x y^i z$ is in $bold(L)$ for all $i >= 0$
+ $v y != epsilon$
+ $|v x y| <= p$
== (a)
Let us apply the Pumping Lemma to the string $s = 1^p 3^p 2^p 4^p$
Because $|v x y| <= p$, this means that no matter how we choose the slice, we will never be able to get both a $(1, 2)$ or $(3, 4)$ in the same $v x y$ slice. Therefore, since $v y != epsilon$, it must be the case that when we pump the string up or down, there will be a pair, perhaps even two, that become uneven. Thus this language cannot be context free.
== (b)
Suppose we have the languages:
$
L_1 = {w | w "has an equal number of" 1"s and" 2"s"}\
L_3 = {w | w "has an equal number of" 3"s and" 4"s"}
$
Let us show that these languages of dual equality are context free. The following grammar generates a language that recognizes any string with an equal number of $1$s and $0$s:
$
S -> S S | 1 S 0 | 0 S 1 | epsilon
$
It is immediately evident why this grammar generates strings that have an equal number of $1$s and $0$s. However, we also need to show that this language generates *all* strings that are a part of the language of equal $1$s and $0$s. We can do this by induction. The base case is trivial with the empty string. For our inductive step, assume that we have string of length $2n$ for $n >= 1$
WLOG assume that the first character is a $1$. We must now consider two cases:
+ If the last character is a $0$, then we can write the string as $1 s 0$, where we know the middle $s$ can be generated by induction.
+ If the last character is a $1$, then it must be the case that we can split the string into two pieces such that each of them have an equal number of $1$s and $0$s. This is because we start out with one too many $1$s, but then by the $2n - 1$ character, are lacking a $1$. Because each character we read can only change this difference by 1, there must have been a point in the middle where we had an equal number of $1$s and $0$s, similar to how the intermediate value theorem works in calculus. Thus it can be represented by the string $s_1 s_2$ where neither $s$ is empty, which we know can be generated from induction.
Thus we show how using the grammar specified above, we can generate all strings that have an equal number of $1$s and $0$s.
We now know that these languages are CFLs. It should then be the case that $L_1 sect L_3 = C$. However, we have just shown that $C$ is not context free, which means that CFLs can't be closed under the intersection operator.
Additionally, if we know that CFLs aren't closed under the intersection operator, it follows from DeMorgan's Law that they can't be closed under the complement operator as well.
== (c)
Note that $C$ is a language that only contains even length strings since the matching nature means we will never have an odd length. Thus $C$ is entirely contained within the regular language $(Sigma Sigma)^*$ - that is, $C union (Sigma Sigma)^* = (Sigma Sigma)^*$
The latter is a regular expression, which we know from lecture is less powerful, and can be simulated by a CFL. Thus the resulting union is a CFL.
== (d)
Contrary to above, the right hand side of the union contains a regular expression of all odd length strings. These two languages are completely disjoint from one another. However, it might be the case that the union allows for new possibilities that make the resulting language a CFL. However, this is not the case, and we can show it with the exact same Pumping Lemma argument as in *(a)*
With the presence of the union, the Pumping Lemma might be saved if we can pump the string $1^p 3^p 2^p 4^p$ to be in the language $Sigma (Sigma Sigma)^*$ (we have already shown that it can't pump to $C$). Unfortunately, this cannot be the case. Assume that $|v y|$ is odd. If we pump the string an odd number of times, we are okay because that falls within the $Sigma (Sigma Sigma)^*$ language. However, if we pump the string an even number of times, we will have an even length string. The only part of this new language that could accept an even length string is the original language $C$, which cannot be the case since we have shown that $C$ is not context free. An even simpler argument can be made for even length $|v y|$ since that will always pump up or down to an even length string. In order for the Pumping Lemma to work, we must be able to find a split that works for all $i$ on the pump, which is not the case.
Thus the resulting union cannot be a CFL.
= Problem 2
== (a)
#twocola(
bimg("imgs/p3p2.png", width: 80%),
[
The two trees on the left show two different parse trees for the grammar that produces the string:
```
if condition then
if condition then a := 1 else a := 1
```
The problem arises in the grammar when trying to tell which `if` the `else` belongs to. This is the realized problem that comes from the ambiguity in the grammar.
]
)
== (b)
```
STMT => <ASSIGN>
| <IFTHEN>
IFTHEN => if condition then <NEXT>
NEXT => <IFTHEN>
| <ASSIGNNEXT>
ASSIGNNEXT => <ASSIGN>
| <ASSIGN> else <IFTHEN>
ASSIGN => a := 1
```
There are several observations which make this new grammar possible. First, note that every `else` must be preceded by `a := 1`. Then also note that The only thing that can come after an `if` block is another `if` block or an `assign`. Lastly, note that the generated string will always terminate on `a := 1`. With these in mind, we can generate this new grammar which is gives the exact same language.
To make the reasoning more explicit, I have split some steps up into smaller sub-steps in the language. The idea for why this grammar is not ambiguous is that any expansion always has a terminal to the left, then variables to the right. Thus, the parse tree will always grow in the same way, with a heavy right tree that keeps adding terminals to the right end of the generated string. Thus it becomes impossible to have the same string come from different parse trees because there is only one way to generate a string from left to right with the grammar.
= Problem 3
== Forwards Direction
If a language is recognized by a deterministic queue automaton, let us show that it is Turing-recognizable.
To show this, we can simply simulate the queue automaton on a two-tape TM, which we showed in lecture to be equivalent to any other TM. The first tape will contain the input tape, and the second tape will simulate the queue. We will additionally add a special character to the tape alphabet $epsilon$ that marks the beginning of the queue. The second tape will simply start with a singular $epsilon$
- When a character is pushed, the head of the tape moves to the right until it finds a blank slot (not $epsilon$!), after which it then writes the character to be pushed.
- When a character is popped, the head of the tape moves to the left to the first $epsilon$ it finds, then replaces the first character after $epsilon$ with $epsilon$.
We will then end up with a tape that has leading $epsilon$ as we pop more and more values off, and the back of the queue will continually move to the right. This is perfectly fine in a TM as the tape goes on infinitely towards the right. Thus we provide a way to simulate a deterministic queue automaton on a TM with two tapes, meaning that it can be simulated on TMs in general.
== Backwards Direction
If a language is Turing-recognizable, let us show that it is recognized by a deterministic queue automaton.
Our objective will be to show that we can simulate the tape of a TM with a queue. We can do this by showing how to do two operations on a queue where we imagine the front (where we pop from) to be at the left:
+ Optionally change a character and rotate to the right
+ Optionally change a character and rotate to the left
We imagine the queue can trivially read the character at the head. The head of the queue will take place as the pointer location for a TM. With these operations, we can simulate all the necessarily functionality that a TM allows us to do. Let us now show that we can perform both of these rotations and changes. Note that similar to above, we will have a special symbol $epsilon$ that marks the beginning of the tape.
== (Change) and Rotate Right
Imagine our queue looks like:
$ a b c epsilon $
This means that our tape is $a b c$, with the head of the tape currently at $a$. To simulate rotating to the left, all we need to do is pop the head of the queue and push either it or another symbol to the back, depending on whether we want to rewrite or simply move without changing the current value. Let's say that we wanted to rewrite a new value $d$ to the current location and rotate left. We would then have:
$ b c epsilon d $
We can see that functionally, we have written a new value (which could also have been kept as $a$), and moved our tape head to the right so we are now at $b$
== (Change) and Rotate Left
Because this direction goes against the natural flow of the queue, there is a bit more work that needs to go on in order for the rotation to happen correctly. Again suppose that our tape simulation looks like the following:
$ a b c epsilon $
Let's say we want to rewrite the current value to $d$ and rotate left. It's tempting to say that we should just push the new value $d$ to the queue, then repeatedly rotate until $d$ is at the head of the queue. The problem is that there is no way for us to know where the new value is, and therefore where to stop ($d$). Instead, we need to keep track of where our current location on the tape is. We will do this by adding a new special symbol $epsilon'$ that marks the current location of the tape head. We will then have:
$ b c epsilon epsilon' d $
Now we will (in the state automata), enter a special state that says we are rotating to the left, which will keep popping and pushing the same value until we reach the special $epsilon'$, at which point we will stop and remove the $epsilon'$ from the queue. We will then have:
$ epsilon d b c $
We now possess the ability to rotate both ways, meaning we can simulate a TM! There is one slightly special case that I have not covered, which is the possibility that we wish to write to the right of the tape in a location that we have not written in. For this case, after we rotate right, if we notice the $epsilon$, then if the next instruction for the tape is to write then move to the right again, we can simply push the value, then not cycle, since we are already at the correct head location of $epsilon$. Otherwise, if we are moving to the left, we can simulate the same process described above to simulate the TM.
= Problem 4
== Forwards Direction
If there exists a decidable language, let us show that there exists some enumerator that enumerates the language in string order.
To create such an enumerator, we can simulate running the decider on a TM for all strings of the alphabet in string order. If the decider accepts, we output the string. Because we have a decider, the TM will never get stuck on a particular string, and thus, if we run each of the inputs sequentially and not in parallel, we produce an enumerator that enumerates the language in string order.
== Backwards Direction
If there exists an enumerator that enumerates the language in string order, let us show that there exists a decider for that language.
To create such a decider, for string $s$, we can run the enumerator sequentially on a TM until we reach $|s|$. At each point, compare the enumerated string to $s$. If we find a match, accept, and otherwise reject since we know the enumerator goes in order. Because we have a fixed length to which we search, we know the decider will terminate in a finite number of steps, and is thus, a decider and not a recognizer.
= Problem 5
== Forwards Direction
If there exists a Turing-recognizable language $C$, let us show that there exists a decidable language $D$ as specified.
We want to encode some information in the second part of $angle.l x, y angle.r$ such that we can confirm that $x in C$ without looping. We could do this by making a decider that takes the following form:
$ D = {angle.l x, y angle.r | M "accepts" x "with steps" y} $
The implication here is that $|y|$ is equal to the length of the steps that the TM takes to accept $x$. Because we can prescribe a finite length to each string, this $D$ is clearly decidable, and thus we have shown that the decidable counterpart to $C$ exists.
== Backwards Direction
If there exists a decidable language $D$ as specified, let us show that there exists a Turing-recognizable language $C$ for it.
This direction is much easier - given that the decider exists, and that $C$ does not have to be a decider itself, to check any input string $S$, we can take all strings in order and check the concatenation with $S$ with the decider $D$. If we ever find a match that is in $D$, then we accept, otherwise we reject by looping which is allowed in a recognizer.
= Problem 6
In the emptiness proof for DFAs, we did a graph search from the start state to explore all reachable states. We will do a similar procedure here.
We want to make a decider $D$ such that it takes in a PDA and confirms that it uses the stack to push at some point.
$D$ = "On input $P$ where $P$ is a PDA
+ *Mark* start state
+ *Repeat* until no new states are marked:
- *Mark* any state $q$ if there is a transition from a marked state to $q$
- *Specially Mark* any state $q$ if it fulfills the above but also comes from a transition that pushes to the stack
+ _Accept_ if there is a *specially* marked state\
_Reject_ otherwise"
Because there are a finite number of states $Q$ in the PDA, this graph traversal will terminate in a finite number of steps since we can't repeatedly mark nodes. Thus we have created a decider for this language $"PUSHER"$, and $"PUSHER"$ is decidable.
|
|
https://github.com/JulianJ-mtz/EvidenciaInvestigacion | https://raw.githubusercontent.com/JulianJ-mtz/EvidenciaInvestigacion/main/main.typ | typst | // #import "@preview/in-dexter:0.5.3": *
#import "content/content.typ": Introduccion, MarcoTeorico, ObjetivosDelProyecto, ResultadosEsperados, PlanDeTrabajo, TrabajoRealizado, Concluciones, Bibliografia, Anexos
#import "content/portada.typ": Portada
// #import "content/content2.typ": Bibliografia
#set page(
width: 21.5cm,
height: 28cm,
margin: (
left: 3.5cm,
right: 2.5cm,
top: 3cm,
bottom: 2.5cm,
),
)
#set text(lang: "es", size: 11pt, font: "Linux Libertine", spacing: 2pt)
#set par(
justify: true,
)
#Portada
#outline( title: [Índice
#{linebreak()}])
#set page(numbering: "1", number-align: right)
#counter(page).update(1)
#{pagebreak()}
== Introducción
#{1 * linebreak()}
#Introduccion
#{1 * linebreak()}
== <NAME>órico
#{1 * linebreak()}
#MarcoTeorico
#{1 * linebreak()}
== Objetivos del proyecto
#{1 * linebreak()}
#ObjetivosDelProyecto
== Resultados esperados
#{1 * linebreak()}
#ResultadosEsperados
== Plan de trabajo
#{1 * linebreak()}
#PlanDeTrabajo
#{1 * linebreak()}
== Trabajo Realizado
#{1 * linebreak()}
#TrabajoRealizado
#{1 * linebreak()}
== Conclusiones
#{1 * linebreak()}
#Concluciones
#{1 * linebreak()}
== Bibliografía
#{1 * linebreak()}
#Bibliografia
== Anexos
#Anexos |
|
https://github.com/drupol/master-thesis | https://raw.githubusercontent.com/drupol/master-thesis/main/resources/typst/my-app-graph-not-ok.typ | typst | Other | #import "../../src/thesis/imports/preamble.typ": *
#{
set text(
font: "Inconsolata Nerd Font Mono",
size: 1em,
)
render(
read("../../resources/graphviz/my-app-not-ok.dot"),
width: 100%,
labels: (
"pzf6dnxg8gf04xazzjdwarm7s03cbrgz-python3-3.10.12": [my-app-1.2.3],
"6947mfg2jlid97cnvzvc6cvv6wpj2yhg-bzip2-1.0.8": [bzip2-1.0.8],
"d48d0ppksa6gwxjlkwf2i93rilyv9jvq-ncurses-6.4": [ncurses-6.4],
"fmh3s032bcsbfcdp82zsjlmkj1kp72j6-sqlite-3.43.1": [sqlite-3.43.1],
"g3dx6xjlvkg2njyxjsx9dswx5wjvkrm5-readline-8.2p1": [readline-8.2p1],
"ig0kkzw4n2pws12dj7szjm71f1a43if6-zlib-1.3": [xz-5.6.1],
"jhqflhc7k4jwz5s13cj219pvwywzc6j9-gdbm-1.23": [gdbm-1.23],
"l7f1pf2dysadqpdxhsb9li01h5jwn5xr-openssl-3.0.10": [openssl-3.0.10],
"ld03l52xq2ssn4x0g5asypsxqls40497-glibc-2.37-8": [glibc-2.37-8],
"8ny01r2xa5mv5brk9srdmv91wrjvxila-libidn2-2.3.4": [libidn2-2.3.4],
"br1p5pan2pgmgrm81kj43qawd9b9nns1-libunistring-1.1": [libunistring-1.1],
"ml12av0bi52w2nyrpay8l47xwm1m6i7b-libxcrypt-4.4.36": [libxcrypt-4.4.36],
"q7gkbmmxwai8idqigl9kyv2a7vhppz92-expat-2.5.0": [expat-2.5.0],
"rfckdjskd983ylf05jm9mlsw7y618hyr-xgcc-12.3.0-libgcc": [xgcc-12.3.0-libgcc],
"xa1bg4dk78cx7g9zqqs0akhv0my9l7w5-xz-5.4.4": [zlib-1.3],
"xdqlrixlspkks50m9b0mpvag65m3pf2w-bash-5.2-p15": [bash-5.2-p15],
"xq05361kqwzcdamcsxr4gzg8ksxrb8sg-gcc-12.3.0-lib": [gcc-12.3.0-lib],
"xvxaw8q1b4dja27ljmynmc9818aagjz3-gcc-12.3.0-libgcc": [gcc-12.3.0-libgcc],
"35badg7gpxkhyzcrdyh2dfi9wfd43phz-libffi-3.4.4": [libffi-3.4.4],
),
)
}
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-13A0.typ | typst | Apache License 2.0 | #let data = (
("CHEROKEE LETTER A", "Lu", 0),
("CHEROKEE LETTER E", "Lu", 0),
("CHEROKEE LETTER I", "Lu", 0),
("CHEROKEE LETTER O", "Lu", 0),
("CHEROKEE LETTER U", "Lu", 0),
("CHEROKEE LETTER V", "Lu", 0),
("CHEROKEE LETTER GA", "Lu", 0),
("CHEROKEE LETTER KA", "Lu", 0),
("CHEROKEE LETTER GE", "Lu", 0),
("CHEROKEE LETTER GI", "Lu", 0),
("CHEROKEE LETTER GO", "Lu", 0),
("CHEROKEE LETTER GU", "Lu", 0),
("CHEROKEE LETTER GV", "Lu", 0),
("CHEROKEE LETTER HA", "Lu", 0),
("CHEROKEE LETTER HE", "Lu", 0),
("CHEROKEE LETTER HI", "Lu", 0),
("CHEROKEE LETTER HO", "Lu", 0),
("CHEROKEE LETTER HU", "Lu", 0),
("CHEROKEE LETTER HV", "Lu", 0),
("CHEROKEE LETTER LA", "Lu", 0),
("CHEROKEE LETTER LE", "Lu", 0),
("CHEROKEE LETTER LI", "Lu", 0),
("CHEROKEE LETTER LO", "Lu", 0),
("CHEROKEE LETTER LU", "Lu", 0),
("CHEROKEE LETTER LV", "Lu", 0),
("CHEROKEE LETTER MA", "Lu", 0),
("CHEROKEE LETTER ME", "Lu", 0),
("CHEROKEE LETTER MI", "Lu", 0),
("CHEROKEE LETTER MO", "Lu", 0),
("CHEROKEE LETTER MU", "Lu", 0),
("CHEROKEE LETTER NA", "Lu", 0),
("CHEROKEE LETTER HNA", "Lu", 0),
("CHEROKEE LETTER NAH", "Lu", 0),
("CHEROKEE LETTER NE", "Lu", 0),
("CHEROKEE LETTER NI", "Lu", 0),
("CHEROKEE LETTER NO", "Lu", 0),
("CHEROKEE LETTER NU", "Lu", 0),
("CHEROKEE LETTER NV", "Lu", 0),
("CHEROKEE LETTER QUA", "Lu", 0),
("CHEROKEE LETTER QUE", "Lu", 0),
("CHEROKEE LETTER QUI", "Lu", 0),
("CHEROKEE LETTER QUO", "Lu", 0),
("CHEROKEE LETTER QUU", "Lu", 0),
("CHEROKEE LETTER QUV", "Lu", 0),
("CHEROKEE LETTER SA", "Lu", 0),
("CHEROKEE LETTER S", "Lu", 0),
("CHEROKEE LETTER SE", "Lu", 0),
("CHEROKEE LETTER SI", "Lu", 0),
("CHEROKEE LETTER SO", "Lu", 0),
("CHEROKEE LETTER SU", "Lu", 0),
("CHEROKEE LETTER SV", "Lu", 0),
("CHEROKEE LETTER DA", "Lu", 0),
("CHEROKEE LETTER TA", "Lu", 0),
("CHEROKEE LETTER DE", "Lu", 0),
("CHEROKEE LETTER TE", "Lu", 0),
("CHEROKEE LETTER DI", "Lu", 0),
("CHEROKEE LETTER TI", "Lu", 0),
("CHEROKEE LETTER DO", "Lu", 0),
("CHEROKEE LETTER DU", "Lu", 0),
("CHEROKEE LETTER DV", "Lu", 0),
("CHEROKEE LETTER DLA", "Lu", 0),
("CHEROKEE LETTER TLA", "Lu", 0),
("CHEROKEE LETTER TLE", "Lu", 0),
("CHEROKEE LETTER TLI", "Lu", 0),
("CHEROKEE LETTER TLO", "Lu", 0),
("CHEROKEE LETTER TLU", "Lu", 0),
("CHEROKEE LETTER TLV", "Lu", 0),
("CHEROKEE LETTER TSA", "Lu", 0),
("CHEROKEE LETTER TSE", "Lu", 0),
("CHEROKEE LETTER TSI", "Lu", 0),
("CHEROKEE LETTER TSO", "Lu", 0),
("CHEROKEE LETTER TSU", "Lu", 0),
("CHEROKEE LETTER TSV", "Lu", 0),
("CHEROKEE LETTER WA", "Lu", 0),
("CHEROKEE LETTER WE", "Lu", 0),
("CHEROKEE LETTER WI", "Lu", 0),
("CHEROKEE LETTER WO", "Lu", 0),
("CHEROKEE LETTER WU", "Lu", 0),
("CHEROKEE LETTER WV", "Lu", 0),
("CHEROKEE LETTER YA", "Lu", 0),
("CHEROKEE LETTER YE", "Lu", 0),
("CHEROKEE LETTER YI", "Lu", 0),
("CHEROKEE LETTER YO", "Lu", 0),
("CHEROKEE LETTER YU", "Lu", 0),
("CHEROKEE LETTER YV", "Lu", 0),
("CHEROKEE LETTER MV", "Lu", 0),
(),
(),
("CHEROKEE SMALL LETTER YE", "Ll", 0),
("CHEROKEE SMALL LETTER YI", "Ll", 0),
("CHEROKEE SMALL LETTER YO", "Ll", 0),
("CHEROKEE SMALL LETTER YU", "Ll", 0),
("CHEROKEE SMALL LETTER YV", "Ll", 0),
("CHEROKEE SMALL LETTER MV", "Ll", 0),
)
|
https://github.com/jiamingluuu/notes | https://raw.githubusercontent.com/jiamingluuu/notes/main/cyber-security/notes.typ | typst | #set text(size: 12pt)
#set heading(numbering: "1.")
#set page(numbering: "1")
#outline(indent: auto)
#pagebreak()
= Definitions
- Safety: for reasonable inputs, get reasonable outputs.
- Security: for unreasonable inputs, get reasonable outputs.
*Security Theatre*
- Threat: Possibility of damage,
- Countermeasure: Limits possibility or consequence of damage,
- mitigates threats, disables attacks, removes/reduces vulnerabilities.
- Vulnerabilities: Weakness in the system,
- enables threats.
- Attacks: Exploitation of vulnerabilities to realize a threat.
*CIA*
- (C) Confidentiality: Information is disclosed to legitimate users.
- (I) Integrity: Information is created or modified by legitimate users.
- (A) Availability: Information is accessible to legitimate users.
Notice that CIA can be conflicting to each other in some scenarios.
== Risk Analysis & Policy, Mechanisms and Assurance
Risk analysis and security policy
- Goal: Infer what can go wrong with the system.
- Outcome: A set of security goals.
- Principle: You never prevent threats, you lower the risk.
Mechanisms
- Goal: Define a strategy to realize the security goals.
- Outcome: Set of security mechanisms.
- Principle: Deploying security mechanisms has a cost.
Assurance
- Goal: Make sure that the security mechanisms realize the security goals.
- Outcome: Methodology.
- Principle: Full assurance cannot be achieved.
== Risk analysis
Given
$ "Risk exposure" = "probability" times "impact" $
We can set up a risk table to list out all the possible risk with their
risk exposure, and determine which risks to mitigate.
#pagebreak()
= Cryptography
*Design Principles*
- Kerkoff Principle: The security of a crypto system must not rely on keeping the algorithm secret.
- Diffusion: Mixing-up symbols.
- Confusion: Replacing a symbol with another.
- Randomization: Repeated encryptions of the same text are different.
== Symmetric Cryptography
*Requirements*
We use the same key $k$ for encryption $E_k$ and decryption $D$:
- $D_k (E_k (m)) = m$ for every key $k$ and $E, D$.
- $E_k$ and $D_k$ are easy to compute.
- Given $c = E_k (m)$, it is hard to find the plaintext $m$.
*Attacks*
- *Exhaustive Search* (brute force)
- *Ciphertext only*: know one or several random ciphertext.
- *Known plaintext*: know one or several pairs of *random* plaintext and their corresponding ciphertext.
- *Chosen plaintext*: know one or several pairs of *chosen plaintext* and their corresponding ciphertext.
- *Chosen cipher text*: know one or several pairs of plaintext and their corresponding *chosen ciphertext*.
=== Stream Cipher
*XOR* cipher:
- Message and key are xor-ed together
$ E_k (m) = k xor m $
$ D_k (c) = k xor c $
However, this cipher is vulnerable to known-plaintext attack
$ k = (k xor m) xor m $
*Mauborgne Cipher*
- Use the key $k$ as a seed for random number generator and xor with the message
$ E_k (m) = m xor "RNG"(k) $
Vulnerable to key re-use attack:
$
C_1 &= k xor m_1\
C_2 &= k xor m_2\
C_1 xor C_2 &= m_1 xor m_2
$
=== Block Cipher
Ideal block cipher
- Combines confusion and diffusion.
- Changing single bit in plaintext block or key results in changes to approximately half the ciphertext bits.
*DES* (Data Encryption Standard)
DES is broken in 1998 and 2006. And Nesting encryption process is not a valid counter-measure.
$ 2"DES"_(k_1, k_2) (m) = E_(k_2)(E_(k_1) (m)) $
To break this scheme we can brute-force the result of $E_(k_1)(m)$ and $D_(k_2)(c)$ for every possible key pair $(k_1, k_2)$, then match the valid key candidates (a meet-in-the-middle attack). The effective key space is only doubled, from 56 bits to 57 bits.
However, triple DES is widely used
$ 3"DES"_(k_1, k_2, k_3) (m) = E_(k_3) (D_(k_2) (E_(k_1) (m))), $
with effective key length 112 bits.
*AES* (Advanced Encryption Standard)
It has different encryption modes:
- ECB: electronic code book. Each plaintext block is encrypted independently with the key
- Fast, easy to perform parallelization.
- But same block is encrypted to same ciphertext (violates diffusion)
#figure(
image("./img/ecb.png", width: 70%),
caption: "AES ECB mode"
)
- CBC: cipher block chaining
- Repeating plaintext blocks are not exposed in the ciphertext.
- No parallelism.
#figure(
image("./img/cbc.png", width: 70%),
caption: "AES ECB mode"
)
- CFB: cipher feedback
- CTR: counter
- High entropy and parallelism.
- Vulnerable to key-reused attack
#figure(
image("./img/ctr.png", width: 70%),
caption: "AES ECB mode"
)
#table(
columns: 4,
inset: 10pt,
align: horizon,
table.header(
[Name], [Type], [Key size (bits)], [Speed (cycle/byte)],
),
[RC4], [stream], [40-2048], [8],
[ChaCha20], [stream], [128/256], [4],
[DES], [block], [block: 64, key: 56], [50],
[Rijndael], [block], [block: 128, key: 128192256], [18-20]
)
The trade-off between stream cipher and block cipher:
- stream cipher is fast but has low diffusion, whereas
- block cipher is slow but has high diffusion.
== Asymmetric Cryptography
*Function Requirements*
- $D_(K s) (D_(K p) (m)) = D_(K p) (D_(K s) (m)) = m$ for every key pair $(K p, K s)$.
- Easy to generate the key pair.
- Encryption and decryption are easy to compute.
- Hard to matching key $K s$ given $K p$
#table(
columns: 4,
inset: 10pt,
align: horizon,
table.header(
[Name], [Speed (cycle/byte)], [Key size (bits)], [Effective key length (bits)]
),
[RSA], [$10^6$], [1024], [80],
[RSA], [$10^6$], [2048], [112],
[RSA], [$10^6$], [3072], [128],
[RSA], [$10^6$], [4096], [140],
[RSA], [$10^6$], [15360], [224-256],
[ECC], [$10^6$], [256], [128],
[ECC], [$10^6$], [448], [224-256],
)
*Summary between symmetric and asymmetric cryptography*:
- Symmetric is fast but has key agreement.
- That is, parties is able to generate a secrete key even if an eavesdropper is listening to the communication channel.
- Often used to encrypt message
- Asymmetric is slow but does not have key agreement.
- Used for encrypt shared key or hash.
== Hash Functions
$ H(m) = x $
An ideal hash function satisfies:
- PR (Preimage Resistance): given $x$ it is hard to find $m$.
- PR2 (Second Preimage Resistance): given $H, m, x$ it is hard to find $m'$ such that
$ H(m) = H(m') = x. $
- CR (Collision Resistance): given $H$, it is hard to find $m, m'$ such that
$ H(m) = H(m') $
=== Security Issue
Due to birthday paradox:
"There are 50% chance that 2
people have the same birthday in
a room of 23 people"
Therefore if given hash function of $n$-bits output, a collision can be found
in around $2^(n/2)$ evaluations. Hence SHA-256 has 128 bits security.
*SHA-2*
#figure(
image("./img/sha2.png", width: 70%),
caption: "MD5, SHA-1, SHA-2"
)
*SHA-3*
#figure(
image("./img/sha3.png", width: 70%),
caption: "SHA-3"
)
=== Hash as MAC
MAC stands for message authentication code, commonly used for key exchange,
certificate... Given message $m$ and key $k$, people often sends a whole message
$ m parallel "MAC"_k (m) $
together. One variant is HMAC, which use a hash function on the message
and the key.
- But in practice, if the HMAC is badly designed, for instance, using SHA2 and let $"MAC"_k (m) = H(k parallel m)$, then Mallory can perform hash length extension attack on the message sent.
- Good HMAC example
$
&"HMAC"_k (m) = H(k parallel m parallel k)\
&"HMAC"_k (m) = H((k xor "opad") parallel H((k xor "ipad") parallel m)\
$
#pagebreak()
= Key Exchange Protocols
== Digital Signature
MAC we have discussed provide a decent method of verification under the scenario of no prior trust is given to the connection. Whereas it is easy to be forged. To solve this issue, digital signature is introduced, where it is
- commonly used in key exchange
== Symmetric Protocols
Symmetric protocols utilize the advantage of symmetric cryptography to provide
communication with confidentiality and integrity. The protocol defines a
procedure of key exchange which ensures two parties are able to defend
themselves from the attack of a malicious party during communication. Once the
shared key $k$ is set up, they can communicate by:
- Encrypt and MAC (E&M)
$ "AE"_k (m) = E_k (m) parallel H_k (m) $
- MAC then Encrypt (MtE)
$ "AE"_k (m) = E_k (m parallel H_k (m)) $
- Encrypt then MAC (EtM)
$ "AE"_k (m) = E_k (m) parallel H_k (E_k (m)) $
#figure(
image("./img/symmetric-protocol.png", width: 70%),
caption: "Communication in symmetric protocol"
)
However, the primary issue with symmetric protocols is how to make an agreement
on the shared key used $K_(a b)$ used between to parties, say, Alice and Bob.
The naive way is for every connection in a network, a shared key is exchange
physically using a secure channel. Therefore, $1/2 n(n-1)$ keys are required for
a network with $n$ nodes.
A better solution is purposed as a KDC (key distributed center) manages all the
keys used, with premisses:
- the key exchange channel between KDC and each party is secure,
- KDC is trusted.
Before, there were a vulnerable version of key exchange protocol
#figure(
image("./img/vulnerable-kdc.png", width: 70%),
caption: "Vulnerable KDC"
)
If the key $K_(a b)$ is compromised, then Mallory is able to perform replay
attack.
#figure(
image("./img/kdc-the-fix.png", width: 70%),
caption: "Fixed KDC"
)
But the existence of KDC has drawbacks:
- it is a single point of failure,
- one cannot exchange key with another when zero knowledge.
To solve this, we have DH (Diffie-Hellman) protocol:
- Alice
- generate $g, p$ as public key, where $g$ is small (2, 5, 7...) and $p$ is at least 2048 bits.
- choose $a$, a 2048 bits private key
- compute $"dhA" = g^a mod p$
- send $p, "dhA", n_0$ to Bob
- Then Bob
- choose $b$, another 2048 bits private key
- compute $"dhB" = g^b mod p$
- send $"dhB", n_1$ back to Alice
- The session key $K$ is
$ K = g^(a b) mod p = (g^a mod p)^b mod p = (g^b mod p)^a mod p. $
#figure(
image("./img/dh-with-auth.png", width: 70%),
caption: "DH key exchange with authentication"
)
== Asymmetric Protocols
Asymmetric protocols are used in mutual authentication, where two parties want
to engage in the communication and confirm the opposite site is the party they
intended to talk to.
#figure(
image("./img/vulnerable-asymmetric.png", width: 70%),
caption: "Vulnerable key exchange asymmetric protocol"
)
However, this protocol is vulnerable to Man-in-the-Middle (MitM) attack:
- Alice greets to Mallory with ${N_A, A}_(K_(p m))$.
- Mallory receives Alice's greeting message.
- Mallory sends Alice's greeting message to Bob ${N_A, A}_(K_(p b))$.
- Bob replies Alice with his nonce ${N_A, N_B}_(K_(p a))$.
- Alice sends Mallory Bob's nonce ${N_B}_(K_(p m))$.
- Mallory sends the nonce to Bob ${N_B}_(K_(p b))$.
- Bob recognize Mallory as Alice.
The fix is simple, Bob adds his credentials when sending nonce to Alice
#figure(
image("./img/fixed-asymmetric-protocol.png", width: 70%),
caption: "Fix asymmetric protocol"
)
However, still, this key exchange protocol has KDC as a single point of failure.
A feasible alternative is to use DH key exchange, which is commonly seen in TLS
#figure(
image("./img/tls1.2.png", width: 70%),
caption: "TLS 1.2"
)
#figure(
image("./img/tls1.3.png", width: 70%),
caption: "TLS 1.3 with one-way authentication"
)
TLS 1.3 is better than 1.2 as it is:
- only have one round in the handshake
- faster (by using ECC)
- certificate is encrypted
- protocol has been formally proven
Note: both 1.3 and 1.2 does *not* has security issue, they are still used in
modern network protocols.
== Trust Models
Trust models establish the authenticity of the binding between someone and its
public key. We have:
- (Decentralized) Web of Trust
- The person trusted by your friend is trustworthy
- (Centralized) PKI - Public Key Infrastructure
- Trust the certificates assigned by certificate authority (CA)
- If one trust the upper CA, then all the subsequent and lower CA are trusted.
#pagebreak()
= Network Security
The possible attacks of an attacker contains:
- Scanning: survey the network and its hosts.
- Eavesdropping: read messages.
- Spoofing: forge illegitimate messages.
- DOS (Denial of Service): disrupt the communication.
== Network Layers and Vulnerabilities
#figure(
image("./img/network-layer.png", width: 45%),
caption: "Network Layers"
)
#box(
columns(2, gutter: 11pt)[
#figure(
image("./img/network-layering.png", width: 100%),
caption: "Network Layers"
)
#colbreak()
#figure(
image("./img/layer-details.png", width: 100%),
caption: "Network Layers"
)
]
)
Each layer has their own role:
=== Link Layer
Responsible for the reliable transmission of data across a physical link.
- MAC (media access control) addressing: enable devices to identify the source and destination of frames on the local network.
- A host can be connect to several hosts or networks through multiple interfaces. The connection can either be
- point-to-point, connected to a single host, or
- by bus link, connected to an entire network.
It is hard for a malicious party to attack in the point-to-point mode of
connection. Whereas for bus link (aka. LAN, local area network), the attack
becomes much easier.
*Packet Sniffing*
- An attacker sets its network interface is _promiscuous mode_ to capture all traffic.
=== Network Layer
Determine the path used for transferring data package from the source to
destination across multiple interconnected networks.
*ARP* (Address Resolution Protocol)
- Used between link and network layer.
- Map IP address to MAC address within a local network segment.
- *Attack*: ARP cache poisoning
- An attacker can broadcast fake IP-MAC mappings to the other hosts on the network.
*IP* (Internet Protocol)
- Each message has the IP address of the issuer and recipient.
- Routers route packets based on their routing table and a default route.
- *Attack*: IP Spoofing
- Router do not validate the source.
- Receiver cannot tell that the source has been spoofed.
- So an attacker can generate raw IP packets with custom IP source fields.
*ICMP* (Internet Control Message Protocol)
- Exchange information about the network.
- *Attack*: Host Discovery
- By default, hosts answer to ICMP echo request messages.
- So an attacker can scan the entire network to find IP address of active hosts.
- *Attack*: ICMP Ping Flood
- An attacker can overwhelm a host by sending multiple ICMP echo requests.
=== Transport Layer
Providing end-to-end communication services for applications running on
different hosts.
- Allows hosts to have multiple connections through ports.
- Allows messages to be fragmented in to small IP packets.
- Make sure that all packets are received.
*TCP* (Transmission Control Protocol)
- The sender divides the data stream into packets; a sequence number is attached to every packet.
- The receiver checks for packet errors, re-assembles packets in correct order to recreate stream.
- ACK are sent when packets are well received and lost/corrupt packets are re-sent.
- *Attack*: Port scanning
- Using the 3-way handshake, an attacker can scan for all open ports for a given host.
- *Attack*: TCP-syn flooding
- Overwhelm a host by sending multiple TCP SYN requests.
- *Attack*: TCP Connection Reset
- Each TCP connection has an associated state sequence number
- An attacker can guess (or sniff) the current sequence number for an existing connection and send packet with reset flag, which will close the connection.
*UDP* (User Datagram Protocol)
- Connectionless transport-layer protocol.
- No ack, no flow control, no message continuation, no reliability guarantees.
- *Attack*: UDP flood
- When a UDP packet is received on a non-opened port, the host replies with an ICMP Destination Unreachable
- An attacker can send a large number of UDP packets to all ports of target host.
=== Application Layer
Enabling communication between applications running on different hosts.
*BPG* (Border Gateway Protocol)
- Each router has a routing table to IP messages, BGP is the protocol for establishing routes.
- *Attack* Route hijacking
- An attacker can advertise fake routes.
*DNS* (Domain Name Server)
- Internet applications relies on canonical hostname rather than IP addresses
- DNS servers translates domain names into IP addresses.
- *Attack* DNS cache poisoning
- An attacker can advertise fake DNS information
*HTTP* (Hyper-Text Transfer Protocol)
- Governing the exchange of information between web clients and web servers.
== Countermeasures on Network Threats
=== TLS (Transport Layer Security)
HTTPS = HTTP + TLS (SSL)
- With TLS, a transport layer protection, the communication is prevent from all kinds of spoofing and eavesdropping.
- Integrity is guaranteed: authentication handshake.
- Confidentiality is guaranteed: end-to-end secure channel.
- *Attack*: SSL Stripping
- Webpages can be delivered either with HTTPS or HTTP.
- Browser can automatically switch between HTTP and HTTPS.
- An attacker can perform MitM attack and remove the SSL protection.
=== Preventing Packet Sniffing
Ethernet
- Isolate Ethernet traffics (no straightforward packet sniffing)
- Hub: broadcast all messages on all ports.
- Switch: forward messages on specific port based on their MAC addresses.
Wireless network
- Encrypt message before sending them over the air.
=== Preventing Spoofing
Preventing DNS spoofing
- DNSSEC: DNS Security Extensions, provides authentication (but not encryption) between DNS servers.
- not widely deployed
- DNS over HTTPS: provides authentication and encryption between client/server and server/server
- pushed by Google and Mozilla
Preventing route hijacking (BGP)
- Bogon Filtering: deny route advertised by hosts with spoofed addresses
=== Preventing DOS attacks
Preventing TCP-syn flooding
- TCP-syn cookie: prevents from maintaining a queue of half-opened TCP connections.
Preventing DOS and DDOS attacks in general
- Network Ingress Filtering
- Deny access to network traffic with spoofed DOS and DDOS.
- Ensure that traffic is traceable to its correct source network.
=== Preventing Scanning Attacks
Preventing host discovery and port-scanning
- Host Discovery: ICMP can be disabled or reserved to hosts on the same network
- Port Scanning: TCP connections can be rejected if a source attempts to initiate multiple connections on multiple ports simultaneously.
- However such countermeasures are impossible in practice because
- each host needs to have packet filtering capability across different hardware, OS, and versions,
- the admin needs to have administrative privilege on every host to push the packet filtering policy
== Firewall
Defines a logical defense parameter and acts an access control between two networks.
- packet filtering based on IP address
- inbound traffic from the Internet trying to get into the protected network
- outbound traffic going the other way
There are two types of firewalls
- Stateless packet filtering, purely based on the IP address and the port
- Stateful packet filtering, tracks the status of every connection (TCP 3 way handshake)
*DMS* (DeMilitarized Zone)
- isolates exposed public servers
#figure(
image("./img/dmz.png", width: 70%),
caption: "DMZ"
)
== Intrusion Detection
How do we know when malicious party enters our network?
*IDS* (Instruction Detection Systems)
- Often operate in stealth mode, connected in the local network.
- There are two ways to build an IDS
- Signature-based IDS, have a pre-defined malicious message pattern
- Heuristic-based IDS, builds a model of acceptable message exchange pattern (use machine learning).
- The IDS normally lookup the headers, packet contents (payload), the packet fragmentation.
- However, the main issue of IDS is that, if a nomad host is hacked, Mallory is able to use that machine to access the local network services and evade the supervision of the IDS.
*IPS* (Intrusion Prevention System)
- IPS = IDS + Firewall
== VPN (Virtual Private Network)
Tunneling protocol
- Alice's message is encapsulated and sent to the VPN server.
- The VPN extracts this traffic and sends it to the destination
- It provides anonymity (from the IP perspective) as no one knows Alice's IP address.
=== Tor (The Onion Router)
#figure(
image("./img/tor.png", width: 70%),
caption: "TOR"
)
In TOR, no one knows about Alice's IP and content at once.
- The content can be seen at the exit node, so whatever Alice does illegally on the Internet, the exit node might be blamed for it.
- TOR prevents client being identified by using IP address. However, TOR does not prevent client being identified based on the application information.
#pagebreak()
= Human Authentication
- Identification is assigning a set of data to a person or an organization
- Authentication is making a safe link between a subject and one or several of its identities.
== Authentication Factors
In general, it can be classified into three types:
- Something that you know:
- Password, PIN number, secret questions ...
- *Good as long as* the user remembers the secret and nobody can uncover or guess it.
- *Gets compromised* when someone else knows this secret and is able to use it.
- Something that you have:
- IDs, physical key, mobile phone ...
- *Good as long as* they are not lost or damaged
- *Gets compromised* when someone can duplicate or fake them
- Something that you are or do (biometrics)
- Fingerprint, voice recognition, face recognition
- robustness depends on the precision of this measure
- *Good as long as* you act or look like the same and nobody can pretend the way you act.
- *Gets compromised* when someone can nearly act like you.
The way of storing password can also lead vulnerabilities
- In clear: really bad
- Hashed: bad
- one can use a rainbow table to look up the hashes of commonly used passwords
- Salted Hash: better and easy to manage
- on the client side, before the password is passed to the server, pad the password with a chunk of meaningless bytes.
- Encrypted (best but complex to manage)
- how to decrypt?
#pagebreak()
= System Security
== Vulnerabilities
The security of OS often compromised when
- is process is crashed
- a process can have an undesirable behavior
- legitimate user execute a malicious software
To find program vulnerabilities, we can
- find a bug yourself and investigate
- take a look at CVE alerts (Common Vulnerabilities and Exposures)
#figure(
image("./img/timeline-of-vulnerabilities.png", width: 70%),
caption: "Timeline of Vulnerabilities"
)
== Attacks
*Stack overflow*
*TOCTOU* (Time Of Check to Time Of Use)
#figure(
image("./img/toctou.png", width: 70%),
caption: "TOCTOU"
)
== Countermeasures on System Threats
*Type-safe program*
- cannot access arbitrary memory addresses
- cannot corrupt their own memory
- do not crash
For `C` program, we can set `FORTIFY_SOURCE` when compiling source code with
`GCC`, which provides buffer overflow checks for unsafe `C` libraries.
*Canaries*
- The compiler modifies every function's prologue and epilogue regions to place and check a value (canary) on the stack.
- When a buffer overflows, the canary is overwritten. The programs detects it before the function returns and an exception is raised.
*DEP/NX* (None Executable Stack)
- The program marks important structures in memory as non-executable.
- The program generates a hardware-level exception if one tries to execute those memory regions. This makes normal stack buffer overflows impossible.
*ASLR* (Address Space Layout Randomization)
- The OS randomize the location (random offset) where the standard libraries and other elements are stored in memory.
- Harder for the attacker to guess the address of a `lib-c` subroutine.
*PIC/PIE* (Position Independent Code/Executables)
- with PIC, code is compiled with relative addressing that are resolved dynamically when executed by calling a function to obtain the return value on stack.
*Sandbox*
- A tightly-controlled set of resources for untrusted programs to run in.
== Defensive Programming
Adopting good programming practices
- Modularity
- Have separate modules for separate functionalities.
- Easier to find security flows when components are independent.
- Encapsulation
- Limit the interaction between the components.
- Avoid wrong usage of the components.
- Information hiding
- Hide the implementation, but
- this does not improve security
- Check the inputs, even between components that belongs to the same application.
- Be fault tolerant by having a consistent policy to handle failure.
- Reuse known and widely used code by using design patterns and exiting libraries.
Use formal methods to generate a program
- mathematical description of the problem
- proof of correctness
- executable code or hardware design
= Web Security
A web application consists of
- front-end, application running on client' side.
- back-end, application running on server's side.
Session
- *Session ID* is an unique and unforgeable token
- stored in cookie
- bind to key/value paris data, which is stored on the server.
- User can create, modify, delete the session ID in the cookie.
- But cannot access the key/value pairs stored on the server.
Mixed-content happens when:
- an HTTPS page contains elements served with HTTP
- an HTTPS page transfers control to another HTTP page within the same domain
- credentials (authentication cookie) will be sent over HTTP.
- modern browser block or warn mixed-content.
== Backend Vulnerabilities |
|
https://github.com/sxdl/MCM-Typst-template | https://raw.githubusercontent.com/sxdl/MCM-Typst-template/main/README.md | markdown | Apache License 2.0 | # mcm-scaffold
This is a Typst template for COMAP's Mathematical Contest in MCM/ICM.
## Usage
You can use this template in the Typst web app by clicking "Start from template"
on the dashboard and searching for `mcm-scaffold`.
Alternatively, you can use the CLI to kick this project off using the command
```
typst init @preview/mcm-scaffold
```
Typst will create a new directory with all the files needed to get you started.
## Configuration
This template exports the `mcm` function with the following named arguments:
- `title`: The paper's title as content.
- `problem-chosen`: The problem your team have chosen.
- `team-control-number`: Your team control number.
- `year`: When did the competition took place.
- `summary`: The content of a brief summary of the paper. Appears at the top of the first column in boldface.
- `keywords`: Keywords of the paper.
- `magic-leading`: adjust the leading of the summary.
The function also accepts a single, positional argument for the body of the
paper.
The template will initialize your package with a sample call to the `mcm`
function in a show rule. If you want to change an existing project to use this
template, you can add a show rule like this at the top of your file:
```typ
#import "@preview/mcm-scaffold:0.1.0": *
#show: mcm.with(
title: "A Simple Example for MCM/ICM Typst Template",
problem-chosen: "ABCDEF",
team-control-number: "1111111",
year: "2025",
summary: [
#lorem(100)
#lorem(100)
#lorem(100)
#lorem(100)
],
keywords: [MCM; ICM; Mathemetical; template],
magic-leading: 0.65em,
)
// Your content goes below.
``` |
https://github.com/noahjutz/AD | https://raw.githubusercontent.com/noahjutz/AD/main/notizen/algorithmen/kgv_primes.typ | typst | #import "@preview/cetz:0.2.2": canvas, draw
#let arrow = (d, c) => canvas(length: 24pt, {
import draw: *
if d == "down" {
set-style(mark: (end: ">"))
} else {
set-style(mark: (start: ">"))
}
line((), (0, -2), name: "arrow")
content("arrow.mid", anchor: "west", padding: 4pt, c)
})
#let down = c => arrow("down", c)
#let up = c => arrow("up", c)
#table(columns: 9, inset: 4pt,
stroke: none,
$3528$, $=$, $2^3$, $dot$, $3^2$, $dot$, $5^0$, $dot$, $7^2$,
[], [], down[$dot 2$], [], up[$dot 3$], [], up[$dot 5$], [], down[$dot 7$],
$3780$, $=$, $2^2$, $dot$, $3^3$, $dot$, $5^1$, $dot$, $7^1$
)
|
|
https://github.com/TomVer99/Typst-checklist-template | https://raw.githubusercontent.com/TomVer99/Typst-checklist-template/main/README.md | markdown | MIT License | # Typst Checklist Template
![GitHub release (with filter)](https://img.shields.io/github/v/release/TomVer99/Typst-checklist-template?style=flat-square)
![GitHub Repo stars](https://img.shields.io/github/stars/TomVer99/Typst-checklist-template?style=flat-square)
![Maintenance](https://img.shields.io/maintenance/Yes/2024?style=flat-square)
This typst checklist template is a simple checklist template for typst. It has multiple styles that require no changes to the document to switch between.
<p>
<img src="./img/BN Islander-0.png" alt="Showcase" width="49%">
<img src="./img/BN Islander-1.png" alt="Showcase" width="49%">
</p>
Left: Style 0, Right: Style 1
## Options
The following options are available for this template:
- `title`: The title of the checklist. The default is `none`.
- `disclaimer`: The disclaimer of the checklist. The default is `none`.
- `style`: The style of the checklist. The default is `0`. Supported styles are `0`, `1`.
## Getting Started
To get started with this Typst checklist template, follow these steps:
1. **Clone or Download**: Start by cloning this repository to your local machine, or download the `template` ZIP from the latest release. If you choose to manually download the individual files, you will only need the `template` folder.
2. **Install Typst**: I recommend to use VSCode with the [Typst LSP Extension](https://marketplace.visualstudio.com/items?itemName=nvarner.typst-lsp). You will also need a PDF viewer in VSCode if you want to view the document live.
3. **Import the template**: Import the template into your own typst checklist.
4. **Set the available options**: Set the available options in the template file to your liking.
5. **Start writing**: Start writing your checklist.
## Helpful Links / Resources
- The [Typst Documentation](https://typst.app/docs/) is a great resource for learning how to use Typst.
- You can use sub files to split your document into multiple files. This is especially useful for large checklists.
|
https://github.com/Karolinskis/KTU-typst | https://raw.githubusercontent.com/Karolinskis/KTU-typst/main/mainPages/PageSummaryLT.typ | typst | #import "/variables.typ": *
#page(header: none)[
#set text(size: 12pt)
#AuthorName.at(1).
#ProjectTitle.
#ProjectType.
#ProjectSupervisor.join(" ").
#ProjectFaculty, Kauno technologijos universitetas.
Studijų kryptis ir sritis: #ProjectStudyFieldAndArea.
Reikšminiai žodžiai: #ProjectKeywords.
#ProjectCity, #ProjectYear.
#context counter(page).final().at(0) p.
#set align(center)
*Santrauka*
#set align(start)
#lorem(30)\
\
#lorem(40)
] |
|
https://github.com/jgm/typst-hs | https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/show-recursive-00.typ | typst | Other | // Test basic identity.
#show heading: it => it
= Heading
|
https://github.com/LDemetrios/Typst4k | https://raw.githubusercontent.com/LDemetrios/Typst4k/master/src/test/resources/suite/layout/inline/cjk.typ | typst | // Test CJK-specific features.
--- text-chinese-basic ---
// Test basic Chinese text from Wikipedia.
#set text(font: "Noto Serif CJK SC")
是美国广播公司电视剧《迷失》第3季的第22和23集,也是全剧的第71集和72集
由执行制作人戴蒙·林道夫和卡尔顿·库斯编剧,导演则是另一名执行制作人杰克·本德
节目于2007年5月23日在美国和加拿大首播,共计吸引了1400万美国观众收看
本集加上插播广告一共也持续有两个小时
--- text-cjk-latin-spacing ---
#set page(width: 50pt + 10pt, margin: (x: 5pt))
#set text(lang: "zh", font: "Noto Serif CJK SC", cjk-latin-spacing: auto)
#set par(justify: true)
中文,中12文1中,文12中文
中文,中ab文a中,文ab中文
#set text(cjk-latin-spacing: none)
中文,中12文1中,文12中文
中文,中ab文a中,文ab中文
--- cjk-punctuation-adjustment-1 ---
#set page(width: 15em)
// In the following example, the space between 》! and ? should be squeezed.
// because zh-CN follows GB style
#set text(lang: "zh", region: "CN", font: "Noto Serif CJK SC")
原来,你也玩《原神》!?
// However, in the following example, the space between 》! and ? should not be squeezed.
// because zh-TW does not follow GB style
#set text(lang: "zh", region: "TW", font: "Noto Serif CJK TC")
原來,你也玩《原神》! ?
#set text(lang: "zh", region: "CN", font: "Noto Serif CJK SC")
「真的吗?」
#set text(lang: "ja", font: "Noto Serif CJK JP")
「本当に?」
--- cjk-punctuation-adjustment-2 ---
#set text(lang: "zh", region: "CN", font: "Noto Serif CJK SC")
《书名〈章节〉》 // the space between 〉 and 》 should be squeezed
〔茸毛〕:很细的毛 // the space between 〕 and : should be squeezed
--- cjk-punctuation-adjustment-3 ---
#set page(width: 21em)
#set text(lang: "zh", region: "CN", font: "Noto Serif CJK SC")
// These examples contain extensive use of Chinese punctuation marks,
// from 《Which parentheses should be used when applying parentheses?》.
// link: https://archive.md/2bb1N
(〔中〕医、〔中〕药、技)系列评审
(长三角[长江三角洲])(GB/T 16159—2012《汉语拼音正词法基本规则》)
【爱因斯坦(<NAME>ein)】物理学家
〔(2009)民申字第1622号〕
“江南海北长相忆,浅水深山独掩扉。”([唐]刘长卿《会赦后酬主簿所问》)
参看1378页〖象形文字〗。(《现代汉语词典》修订本)
--- issue-2538-cjk-latin-spacing-before-linebreak ---
// Issue #2538
#set text(cjk-latin-spacing: auto)
abc字
abc字#linebreak()
abc字#linebreak()
母
abc字\
母
--- issue-2650-cjk-latin-spacing-meta ---
测a试
测#context [a]试
|
|
https://github.com/31core/prescription | https://raw.githubusercontent.com/31core/prescription/main/config.typ | typst | #let block_medicine(name, amount, method) = [
#name #h(1fr) #text(size: 8pt)[#amount]
#text(size: 7pt, top-edge: 1pt)[用法: #method]
]
#let medicines = state("medicines", ())
#let add_medicine(name, amount, method) = {
context medicines.update(medicines => {
medicines.push(block_medicine(name, amount, method))
medicines
})
}
//不要修改以上内容,仅修改下列 "" 中的文字
#let title = "医院名"//在这里写医院的名称
#let name = "名字"//写你的名字
#let gender = "女"//性别
#let age = "114" //年龄
#let diagnosis = "诊断" //在这里写诊断
#let department = "内分泌" //在这里写科室,激素类药写内分泌科
#let doctor = "某某某" //编一个医生名字,不要写真的,别害人家
#let amount = "514" //药品的总金额,自己算一下写上
/* add your medicines like this*/
//调用#add_medicine 函数如下,可以复制多个,以添加多种药品,也可以修改名称、用量
#add_medicine("戊酸雌二醇 补佳乐", "1 盒", "每日 1 粒 内服 慢性病")
|
|
https://github.com/jamesrswift/pixel-pipeline | https://raw.githubusercontent.com/jamesrswift/pixel-pipeline/main/src/layers/drawing/shapes/polygon.typ | typst | The Unlicense | #import "/src/primitives/lib.typ" as primitives
#let polygon(
position: primitives.position((0,0)),
vertices,
fill: none,
stroke: auto,
..args
) = ({
primitives.pipeline.assembled(tags: ("draw", "polygon"), ..args )
primitives.pipeline.positioned(
positions: arguments(..vertices.map(primitives.position), root: position),
)
primitives.pipeline.stroked(fill: fill, stroke: stroke)
},) |
https://github.com/Myriad-Dreamin/tinymist | https://raw.githubusercontent.com/Myriad-Dreamin/tinymist/main/syntaxes/textmate/tests/unit/basic/set.typ | typst | Apache License 2.0 | #set text(fill: red);
#set text(fill: red)
#set text("")
#set text(red)
#set list.item(red);
#set list.item(fill: red);
#set (text(fill: red));
#set ((text(fill: red)));
#set text("") if {}
#set text("") if []
#set text("") if ()
#set text("") if {}{}
#set text("") if [][]
#set text("") if ()()
#set text("") if {} {}
#set text("") if [] []
#set text("") if () ()
#set text("") if {};
#set text("") if [];
#set text("") if (); () |
https://github.com/topdeoo/Course-Slides | https://raw.githubusercontent.com/topdeoo/Course-Slides/master/Seminar/2024-04-09/main.typ | typst | #import "../../theme/iTalk.typ": *
#import "@preview/algo:0.3.3": algo, i, d, comment, code
// TODO fill all "TODO" with your information
#show: nenu-theme.with(
short-title: "Stochastic Search",
short-date: "2024-04-09",
short-author: "Virgil"
)
// Math operators "arg max"/"arg min" with limits set below in display style.
#let argmax = math.op("arg max", limits: true)
#let argmin = math.op("arg min", limits: true)
#title-slide(
title: "Stochastic Search in Metaheuristics",
authors: (
name: "凌典",
email: "<EMAIL>"
),
logo: image("../template/fig/nenu-logo-title.png", width: 30%),
institution: "Northeast Normal University",
date: "2024-04-09"
)
#slide(
title: "General Framework",
session: "Framework"
)[
#only(1)[
一个组合优化问题可以被抽象成如下结构:
$
min f(x) quad s.t. quad x in S
$
其中, $f$ 为目标函数,$S$ 为解空间
显然 `max` 和 `min` 可以相互替换#footnote[当限制为 `min` 时函数也可以称为 `cost` ,限制为 `max` 时称为 `fitness`]
]
#only((2, 3))[
#only((2))[
那么,一个随机搜索的抽象框架如下:
在第 $t$ 次迭代时,考虑一个总集合(或者也可以当作一个内存区域,其中存储的结构是高度自定义的)$M_t$,一个解集及其邻域的子集 $L_t$
]
#only((2, 3))[
+ $M_1$ 初始化
+ 当 $t = 1 , dots $
+ $L_t = g(M_t, z_t)$
+ $L_t^+ = { (x_i, f(x_i)) |forall x_i in L_t}$
+ $M_{t + 1} = h(M_t, L^+_t, z^(prime)_t)$
]
#only(2)[
其中 $z_t, z^(prime)_t$ 表示随机程度
]
#only(3)[
值得注意的是:当前的最优解 $x^("curr")_t$ 是由 $(M_t, L^+_t)$ 定义的,且算法的结束条件也是依赖于 $(M_t, L^+_t)$ 定义的#footnote[关于这一点我们在后面会详细解释]
]
]
]
#slide(
title: "Instance of Framework",
session: "Framework"
)[
#only(1)[我们从 `SA` 与 `GA` 两个算法来说明框架的实例化]
#grid(
columns: (1fr, 1.2fr),
column-gutter: 0.5em,
[
#only(1)[
+ $M_1$ 初始化
+ 当 $t = 1 , dots $
+ $L_t = g(M_t, z_t)$
+ $L_t^+ = { (x_i, f(x_i)) |forall x_i in L_t}$
+ $M_{t + 1} = h(M_t, L^+_t, z^(prime)_t)$
]
#only(2)[
#image("fig/SA.png", fit: "contain", width: 100%)
]
#only(3)[
#image("fig/GA.png", fit: "contain", width: 100%)
]
],
[
#only(2)[
对于 `SA` 来说:
- $M_t$ 由单个元素组成,即当前搜索点 $x$
- $L_t$ 由单个元素组成,即 $x$ 的邻域解 $y$
- 为了从 $M_t$ 中确定 $L_t$,选择 $M_t$ 中元素 $x$ 的一个随机邻居 $y$
- 若将 $M_t$ 更新为 $M_(t + 1)$,则由 SA 中使用的随机接受规则决定当前解 $y$ 是否被接受
- 如果是,$M_(t + 1)$ 包含 $y$,否则包含 $x$
]
#only(3)[
对于 `GA` 来说:
- $M_t$ 就是在 $t$ 时刻的种群
- $L_t$ 由 $k$ 个解组成
- 为了从 $M_t$ 中确定 $L_t$ ,将变异和交叉算子应用于 $M_t$ 中的解, 由此得到 $L_t$
- 为了将 $M_t$ 更新到 $M_(t + 1)$,对 $L_t$ 中包含的个体使用相应的目标函数值进行适应度比例选择,给出 $M_(t + 1)$
]
]
)
]
#slide(
title: "Instance of Framework",
session: "Framework"
)[
#only((1, 2))[回顾 “当前的最优解 $x^"curr"_t$ 是由 $(M_t, L^+_t)$ 定义的,且算法的结束条件也是依赖于 $(M_t, L^+_t)$ 定义的”]
#only(1)[
对于 `SA` 而言,显然当前的最优解一定是 $L^+_t$ 中目标函数最优的那个,其结束条件一般为温度是否降低到某个阈值,这显然依赖于迭代时解是否依概率被接受,显然依赖于 $(M_t, L^+_t)$
]
#only(2)[
关于结束条件,大多情况下 `GA` 的结束条件时检查最优解的函数是否收敛,显然依赖于 $L^+_t$
]
#only(3)[
针对 $M_t$ 在不同算法内的不同抽象,可以将元启发式进行粗略的分类:
1. Stochastic Local Search Algorithms:在 `SA`, `ILS`, `VNS` 中,$M_t$ 只包含着较小且固定数量的解集,例如当前搜索点,当前邻居
2. Population-Based Stochastic Search Algorithms:`GA` ,$M_t$ 可以看作是当前迭代次数下的种群
3. Model-Based Stochastic Search Algorithms:蚁群优化( Ant Colony Optimization,ACO ),$M_t$ 由实值参数向量组成,例如 ACO 中的信息素向量
]
]
#slide(
title: "Convergence",
session: "Algorithm"
)[
观察 $(M_t, L^+_t)$,可以发现实际上这个二元组是一个离散时间的马尔科夫链:
下一次迭代的 $(M_(t + 1), L^+_(t + 1))$ 的计算方式为
$
(h(M_t, L^+_t, z^(prime)_(t + 1)), {(x_i, f(x_i)) | forall x_i in g(M_(t + 1), z_(t + 1))})
$
更一般的, $(M_t)$ 的计算就是一个马尔科夫过程
于是,通过判断此马尔科夫过程是否存在平稳态,我们能够知道算法是否能依概率收敛,且能够大致评估需要多少次迭代才能够收敛。
]
#slide(
title: "Parameter",
session: "Optimization"
)[
#only(1)[
启发式中一个重要的问题是参数应该如何设置,在随机搜索中我们将其分为两类:
+ $g$ 中包含的为采样参数,它们控制着样本点在 $L_t$ 中的分布,例如 `GA` 中的变异率和交叉率
+ $h$ 中包含的为学习参数,它们决定了在抽样试验点上观察到的对 $M_(t + 1)$ 的影响程度,例如 `SA` 中的温度参数
]
#only((2, 3, 4, 5))[
参数应该动态变化还是保持不变
#only(3)[
有很好的实证论证动态更新效果很好,多个随机搜索算法的收敛结果#footnote[指算法在迭代过程中是否能够收敛到某个目标或最优解]是基于动态参数方案的。例如,SA 的经典收敛结果要求温度参数 $T$ 是逐渐减小的
对随机搜索算法关键参数的动态管理是实现 搜索-利用 平衡的关键点
]
#only(4)[
然而,对于动态更新更优的理论保证暂时还没有,在 `SA` 中有一个持续了很长时间的讨论:
=== 是否应该进行降温?
根据理论上的收敛结果,减小优化过程中的温度 $T$ 是否真的有优势,或者是否可以通过应用所谓的 Metropolis 算法#footnote[该算法保持一个固定的、恒定的温度 $T$]来实现相同的性能
答案是对于一些实际发生的优化问题,例如最小生成树问题,SA 优于 Metropolis
]
#only(5)[
在某些问题和算法中,例如遗传算法,将变异概率 $p_m$ 固定为常数比周期性改变 $p_m$ 收敛结果要更好。
]
]
]
#slide(
title: "Black-Box Optimization",
session: "Optimization"
)[
#only(1)[
=== 黑盒优化器
大多数随机元启发式方法的基本形式并不利用关于特定问题实例的任何信息(比如,TSP中的距离矩阵),只使用关于搜索空间的信息,例如邻域结构,以及关于所考虑的特定问题类型的信息。
那么,可以想象算法是一个反复调用返回给定解 $x$ 的适应度值的“黑盒”过程,但算法并不知道这些适应度值是如何确定的。
此时将算法称为黑盒优化器
]
#only(2)[
实际应用的随机元启发式方法的几个变体并不是黑盒优化器,因为它们除了使用黑盒类型的核心机制外,还利用了问题实例的信息。
例如在蚁群算法中使用问题特定的启发值,我们可以称这样的算法为灰盒优化器,同时将它们与数学规划(MP)的“白盒”算法区分。
一些元启发式方法,如GRASP,本质上就是“灰盒”的。灰盒优化器的一种特殊形式是元启发式方法和数学规划方法(如局部分支)之间的混合方法,被称为数学启发式算法(matheuristic algorithms)。
]
]
|
|
https://github.com/soul667/typst | https://raw.githubusercontent.com/soul667/typst/main/PPT/MATLAB/touying/slide.typ | typst | #import "utils/utils.typ"
#import "utils/states.typ"
#import "utils/pdfpc.typ"
// touying pause mark
#let pause = metadata((kind: "touying-pause"))
// touying meanwhile mark
#let meanwhile = metadata((kind: "touying-meanwhile"))
// touying slides-end mark
#let slides-end = metadata((kind: "touying-slides-end"))
// touying equation mark
#let touying-equation(block: true, numbering: none, supplement: auto, scope: (:), body) = {
metadata((
kind: "touying-equation",
block: block,
numbering: numbering,
supplement: supplement,
scope: scope,
body: {
if type(body) == function {
body
} else if type(body) == str {
body
} else if type(body) == content and body.has("text") {
body.text
} else {
panic("Unsupported type: " + str(type(body)))
}
},
))
}
// touying reducer mark
#let touying-reducer(reduce: arr => arr.sum(), cover: arr => none, ..args) = {
metadata((
kind: "touying-reducer",
reduce: reduce,
cover: cover,
kwargs: args.named(),
args: args.pos(),
))
}
// Parse a touying-equation mark for one subslide and compute how many
// subslides it needs. Splits the equation source on pause/meanwhile markers,
// line breaks and `&` alignment points, wrapping not-yet-visible pieces in a
// `cover(..)` call that is resolved by `eval` below. Returns
// (array-with-one-equation, max-repetitions).
#let _parse-touying-equation(self: none, need-cover: true, base: 1, index: 1, eqt) = {
  let result-arr = ()
  // repetitions: running pause count, starting from the caller's base.
  let repetitions = base
  let max-repetitions = repetitions
  // get cover function from self
  let cover = self.methods.cover.with(self: self)
  // get eqt body
  let it = eqt.body
  // if it is a function, then call it with self
  if type(it) == function {
    // subslide index
    self.subslide = index
    it = it(self)
  }
  assert(type(it) == str, message: "Unsupported type: " + str(type(it)))
  // Tokenize the source: meanwhile markers, then pause markers, then line
  // breaks, then alignment points — each kept as a sentinel token.
  let result = ()
  let cover-arr = ()
  let children = it.split(regex("(#meanwhile;?)|(meanwhile)")).intersperse("touying-meanwhile")
    .map(s => s.split(regex("(#pause;?)|(pause)")).intersperse("touying-pause")).flatten()
    .map(s => s.split(regex("(\\\\\\s)|(\\\\\\n)")).intersperse("\\\n")).flatten()
    .map(s => s.split(regex("&")).intersperse("&")).flatten()
  for child in children {
    if child == "touying-pause" {
      repetitions += 1
    } else if child == "touying-meanwhile" {
      // clear the cover-arr when encounter #meanwhile
      if cover-arr.len() != 0 {
        result.push("cover(" + cover-arr.sum() + ")")
        cover-arr = ()
      }
      // then reset the repetitions
      max-repetitions = calc.max(max-repetitions, repetitions)
      repetitions = 1
    } else if child == "\\\n" or child == "&" {
      // Flush pending covered text at line breaks / alignment points so the
      // cover(..) call never spans an equation row or column boundary.
      if cover-arr.len() != 0 {
        result.push("cover(" + cover-arr.sum() + ")")
        cover-arr = ()
      }
      result.push(child)
    } else {
      if repetitions <= index or not need-cover {
        result.push(child)
      } else {
        cover-arr.push(child)
      }
    }
  }
  // clear the cover-arr when end
  if cover-arr.len() != 0 {
    result.push("cover(" + cover-arr.sum() + ")")
    cover-arr = ()
  }
  // Rebuild the equation; `cover` inside the eval scope defaults to the
  // theme's cover method but can be overridden via eqt.scope.
  result-arr.push(
    math.equation(
      block: eqt.block,
      numbering: eqt.numbering,
      supplement: eqt.supplement,
      eval("$" + result.sum(default: "") + "$", scope: eqt.scope + (cover: (..args) => {
        let cover = eqt.scope.at("cover", default: cover)
        if args.pos().len() != 0 {
          cover(args.pos().first())
        }
      })),
    )
  )
  max-repetitions = calc.max(max-repetitions, repetitions)
  return (result-arr, max-repetitions)
}
// Parse a touying-reducer mark for one subslide and compute how many
// subslides it needs. Walks the reducer's children, counting pause marks and
// covering children not yet revealed at `index`, then applies the deferred
// `reduce` function to the processed children. Returns
// (array-with-one-result, max-repetitions).
#let _parse-touying-reducer(self: none, base: 1, index: 1, reducer) = {
  let result-arr = ()
  // repetitions: running pause count, starting from the caller's base.
  let repetitions = base
  let max-repetitions = repetitions
  // Unlike _parse-content, the cover function comes from the reducer itself.
  let cover = reducer.cover
  // parse the content
  let result = ()
  let cover-arr = ()
  for child in reducer.args.flatten() {
    if type(child) == content and child.func() == metadata {
      let kind = child.value.at("kind", default: none)
      if kind == "touying-pause" {
        repetitions += 1
      } else if kind == "touying-meanwhile" {
        // clear the cover-arr when encounter #meanwhile
        if cover-arr.len() != 0 {
          result.push(cover(cover-arr.sum()))
          cover-arr = ()
        }
        // then reset the repetitions
        max-repetitions = calc.max(max-repetitions, repetitions)
        repetitions = 1
      } else {
        if repetitions <= index {
          result.push(child)
        } else {
          cover-arr.push(child)
        }
      }
    } else {
      if repetitions <= index {
        result.push(child)
      } else {
        cover-arr.push(child)
      }
    }
  }
  // Flush remaining covered children; the cover function may return either a
  // single item or an array of items — splice arrays into the result.
  if cover-arr.len() != 0 {
    let r = cover(cover-arr)
    if type(r) == array {
      result += r
    } else {
      result.push(r)
    }
    cover-arr = ()
  }
  // Apply the deferred reducing function with its saved keyword arguments.
  result-arr.push(
    (reducer.reduce)(
      ..reducer.kwargs,
      result,
    )
  )
  max-repetitions = calc.max(max-repetitions, repetitions)
  return (result-arr, max-repetitions)
}
// Core subslide parser: given slide bodies, produce the content visible on
// subslide `index` and the total number of subslides required.
// - need-cover: when false, not-yet-revealed content is shown instead of
//   covered (used when a parent element is itself already covered).
// - base: pause count inherited from the parent context.
// Recurses into sequences, list/enum items, aligned/padded content and terms
// items, and delegates touying-equation/reducer marks to their own parsers.
// Returns (per-body content array, max-repetitions).
#let _parse-content(self: none, need-cover: true, base: 1, index: 1, ..bodies) = {
  let bodies = bodies.pos()
  let result-arr = ()
  // repetitions
  let repetitions = base
  let max-repetitions = repetitions
  // get cover function from self
  let cover = self.methods.cover.with(self: self)
  for it in bodies {
    // if it is a function, then call it with self
    if type(it) == function {
      // subslide index
      self.subslide = index
      it = it(self)
    }
    // parse the content
    let result = ()
    let cover-arr = ()
    let children = if utils.is-sequence(it) { it.children } else { (it,) }
    for child in children {
      if type(child) == content and child.func() == metadata {
        let kind = child.value.at("kind", default: none)
        if kind == "touying-pause" {
          repetitions += 1
        } else if kind == "touying-meanwhile" {
          // clear the cover-arr when encounter #meanwhile
          if cover-arr.len() != 0 {
            result.push(cover(cover-arr.sum()))
            cover-arr = ()
          }
          // then reset the repetitions
          max-repetitions = calc.max(max-repetitions, repetitions)
          repetitions = 1
        } else if kind == "touying-equation" {
          // handle touying-equation
          let (conts, nextrepetitions) = _parse-touying-equation(
            self: self, need-cover: repetitions <= index, base: repetitions, index: index, child.value
          )
          let cont = conts.first()
          if repetitions <= index or not need-cover {
            result.push(cont)
          } else {
            cover-arr.push(cont)
          }
          repetitions = nextrepetitions
        } else if kind == "touying-reducer" {
          // handle touying-reducer
          let (conts, nextrepetitions) = _parse-touying-reducer(
            self: self, base: repetitions, index: index, child.value
          )
          let cont = conts.first()
          if repetitions <= index or not need-cover {
            result.push(cont)
          } else {
            cover-arr.push(cont)
          }
          repetitions = nextrepetitions
        } else {
          if repetitions <= index or not need-cover {
            result.push(child)
          } else {
            cover-arr.push(child)
          }
        }
      } else if child == linebreak() or child == parbreak() {
        // clear the cover-arr when encounter linebreak or parbreak
        if cover-arr.len() != 0 {
          result.push(cover(cover-arr.sum()))
          cover-arr = ()
        }
        result.push(child)
      } else if utils.is-sequence(child) {
        // Recurse into nested sequences.
        let (conts, nextrepetitions) = _parse-content(
          self: self, need-cover: repetitions <= index, base: repetitions, index: index, child
        )
        let cont = conts.first()
        if repetitions <= index or not need-cover {
          result.push(cont)
        } else {
          cover-arr.push(cont)
        }
        repetitions = nextrepetitions
      } else if type(child) == content and child.func() in (list.item, enum.item, align) {
        // Recurse into list/enum items and aligned content, rebuilding the
        // wrapper around the parsed body.
        let (conts, nextrepetitions) = _parse-content(
          self: self, need-cover: repetitions <= index, base: repetitions, index: index, child.body
        )
        let cont = conts.first()
        if repetitions <= index or not need-cover {
          result.push(utils.reconstruct(child, cont))
        } else {
          cover-arr.push(utils.reconstruct(child, cont))
        }
        repetitions = nextrepetitions
      } else if type(child) == content and child.func() in (pad,) {
        // Recurse into padded content (pad takes named fields, hence
        // reconstruct with named: true).
        let (conts, nextrepetitions) = _parse-content(
          self: self, need-cover: repetitions <= index, base: repetitions, index: index, child.body
        )
        let cont = conts.first()
        if repetitions <= index or not need-cover {
          result.push(utils.reconstruct(named: true, child, cont))
        } else {
          cover-arr.push(utils.reconstruct(named: true, child, cont))
        }
        repetitions = nextrepetitions
      } else if type(child) == content and child.func() == terms.item {
        // handle the terms item
        let (conts, nextrepetitions) = _parse-content(
          self: self, need-cover: repetitions <= index, base: repetitions, index: index, child.description
        )
        let cont = conts.first()
        if repetitions <= index or not need-cover {
          result.push(terms.item(child.term, cont))
        } else {
          cover-arr.push(terms.item(child.term, cont))
        }
        repetitions = nextrepetitions
      } else {
        if repetitions <= index or not need-cover {
          result.push(child)
        } else {
          cover-arr.push(child)
        }
      }
    }
    // clear the cover-arr when end
    if cover-arr.len() != 0 {
      result.push(cover(cover-arr.sum()))
      cover-arr = ()
    }
    result-arr.push(result.sum(default: []))
  }
  max-repetitions = calc.max(max-repetitions, repetitions)
  return (result-arr, max-repetitions)
}
// Render one logical slide as one or more physical pages (subslides).
// - repeat: number of subslides; `auto` derives it from pause marks, `none`
//   skips parsing entirely (fast path, no dynamic content).
// - setting: user wrapper applied around each subslide's body.
// - composer: lays out multiple positional bodies (default side-by-side).
// - section/subsection/title: navigation metadata recorded in states.
// Side effects: steps slide counters and section states; emits pdfpc
// metadata markers on every page.
#let touying-slide(
  self: none,
  repeat: auto,
  setting: body => body,
  composer: utils.side-by-side,
  section: none,
  subsection: none,
  title: none,
  ..bodies,
) = {
  assert(bodies.named().len() == 0, message: "unexpected named arguments:" + repr(bodies.named().keys()))
  let setting-with-pad(body) = {
    pad(..self.padding, setting(body))
  }
  let bodies = bodies.pos()
  // Per-page prologue: the global preamble and pdfpc file header are emitted
  // only on the very first slide page; pdfpc per-page markers go everywhere.
  let page-preamble(curr-subslide) = locate(loc => {
    if loc.page() == self.first-slide-number {
      // preamble
      utils.call-or-display(self, self.preamble)
      // pdfpc slide markers
      if self.pdfpc-file {
        pdfpc.pdfpc-file(loc)
      }
    }
    [
      #metadata((t: "NewSlide")) <pdfpc>
      #metadata((t: "Idx", v: loc.page() - 1)) <pdfpc>
      #metadata((t: "Overlay", v: curr-subslide - 1)) <pdfpc>
      #metadata((t: "LogicalSlide", v: states.slide-counter.at(loc).first())) <pdfpc>
    ]
  })
  // Step counters and record section/subsection entries; called once per
  // logical slide (on its first subslide only).
  let _update-states(repetitions) = {
    states.slide-counter.step()
    if not self.appendix or self.appendix-in-outline {
      // if section is not none, then create a new section
      let section = utils.unify-section(section)
      if section != none {
        states._new-section(short-title: section.short-title, section.title)
      }
      // if subsection is not none, then create a new subsection
      let subsection = utils.unify-section(subsection)
      if subsection != none {
        states._new-subsection(short-title: subsection.short-title, subsection.title)
      }
    }
    // if appendix is false, then update the last-slide-counter and sections step
    if not self.appendix {
      states.last-slide-counter.step()
      states._sections-step(repetitions)
    }
  }
  // page header and footer
  let header = utils.call-or-display(self, self.page-args.at("header", default: none))
  let footer = utils.call-or-display(self, self.page-args.at("footer", default: none))
  // for speed up, do not parse the content if repeat is none
  if repeat == none {
    return {
      header = _update-states(1) + header
      page(..(self.page-args + (header: header, footer: footer)), setting-with-pad(
        page-preamble(1) + composer(..bodies)
      ))
    }
  }
  // for single page slide, get the repetitions
  if repeat == auto {
    let (_, repetitions) = _parse-content(
      self: self,
      base: 1,
      index: 1,
      ..bodies,
    )
    repeat = repetitions
  }
  if self.handout {
    // Handout mode: render only the final subslide (everything revealed).
    let (conts, _) = _parse-content(self: self, index: repeat, ..bodies)
    header = _update-states(1) + header
    page(..(self.page-args + (header: header, footer: footer)), setting-with-pad(
      page-preamble(1) + composer(..conts)
    ))
  } else {
    // render all the subslides
    let result = ()
    let current = 1
    for i in range(1, repeat + 1) {
      let new-header = header
      let (conts, _) = _parse-content(self: self, index: i, ..bodies)
      // update the counter in the first subslide
      if i == 1 {
        new-header = _update-states(repeat) + new-header
      }
      result.push(page(
        ..(self.page-args + (header: new-header, footer: footer)),
        setting-with-pad(page-preamble(i) + composer(..conts)),
      ))
    }
    // return the result
    result.sum()
  }
}
// Split a whole document body into slides. Headings up to `slide-level`
// start new sections/subsections (or section slides if the theme defines
// them); deeper headings become slide titles; wrapped slide functions
// (touying-wrapper) are invoked in place; a touying-slides-end mark stops
// splitting and renders the remainder verbatim.
#let touying-slides(self: none, slide-level: 1, body) = {
  // init: pending navigation metadata and the accumulating slide body.
  let (section, subsection, title, slide) = (none, none, none, ())
  let last-title = none
  let children = if utils.is-sequence(body) { body.children } else { (body,) }
  // flatten children
  children = children.map(it => {
    if utils.is-sequence(it) { it.children } else { it }
  }).flatten()
  // trim space of children
  children = utils.trim(children)
  if children.len() == 0 { return none }
  // begin
  let i = 0
  let is-end = false
  for child in children {
    i += 1
    if type(child) == content and child.func() == metadata and child.value.at("kind", default: none) == "touying-slides-end" {
      is-end = true
      break
    } else if type(child) == content and child.func() == metadata and child.value.at("kind", default: none) == "touying-wrapper" {
      // Flush any accumulated content as a slide before the wrapped call.
      slide = utils.trim(slide)
      if slide != () {
        (self.methods.slide)(self: self, section: section, subsection: subsection, ..(if last-title != none { (title: last-title) }), slide.sum())
        (section, subsection, title, slide) = (none, none, none, ())
      }
      // Registered slide kinds receive the pending navigation metadata;
      // other wrapped functions are called with their own args only.
      if child.value.at("name") in self.slides {
        (child.value.at("fn"))(section: section, subsection: subsection, ..(if last-title != none { (title: last-title) }), ..child.value.at("args"))
      } else {
        (child.value.at("fn"))(..child.value.at("args"))
      }
      (section, subsection, title, slide) = (none, none, none, ())
    } else if type(child) == content and child.func() == heading and utils.heading-depth(child) in (1, 2, 3) {
      // A heading closes the current slide if one is pending.
      slide = utils.trim(slide)
      if (utils.heading-depth(child) == 1 and section != none) or (utils.heading-depth(child) == 2 and subsection != none) or (utils.heading-depth(child) > slide-level and title != none) or slide != () {
        (self.methods.slide)(self: self, section: section, subsection: subsection, ..(if last-title != none { (title: last-title) }), slide.sum(default: []))
        (section, subsection, title, slide) = (none, none, none, ())
        if utils.heading-depth(child) <= slide-level {
          last-title = none
        }
      }
      let child-body = if child.body != [] { child.body } else { none }
      if utils.heading-depth(child) == 1 {
        if slide-level >= 1 {
          // Prefer a theme-provided section slide over plain metadata.
          if "touying-new-section-slide" in self.methods {
            (self.methods.touying-new-section-slide)(self: self, child-body)
          } else {
            section = child-body
          }
          last-title = none
        } else {
          title = child.body
          last-title = child-body
        }
      } else if utils.heading-depth(child) == 2 {
        if slide-level >= 2 {
          if "touying-new-subsection-slide" in self.methods {
            (self.methods.touying-new-subsection-slide)(self: self, child-body)
          } else {
            subsection = child-body
          }
          last-title = none
        } else {
          title = child.body
          last-title = child-body
        }
      } else {
        title = child.body
        last-title = child-body
      }
    } else {
      slide.push(child)
    }
  }
  // Flush the trailing slide, if any content or metadata is pending.
  slide = utils.trim(slide)
  if section != none or subsection != none or title != none or slide != () {
    (self.methods.slide)(self: self, section: section, subsection: subsection, ..(if last-title != none { (title: last-title) }), slide.sum(default: []))
  }
  // Render everything after the slides-end mark untouched.
  if is-end {
    children.slice(i).sum(default: none)
  }
}
// Build the touying singleton: the default theme object. Themes extend it by
// overriding fields and methods; slide functions receive it as `self`.
#let s = (
  // info interface
  info: (
    title: none,
    short-title: auto,
    subtitle: none,
    short-subtitle: auto,
    author: none,
    date: none,
    institution: none,
  ),
  // colors interface
  colors: (
    neutral: rgb("#303030"),
    neutral-light: rgb("#a0a0a0"),
    neutral-lighter: rgb("#d0d0d0"),
    neutral-lightest: rgb("#ffffff"),
    neutral-dark: rgb("#202020"),
    neutral-darker: rgb("#101010"),
    neutral-darkest: rgb("#000000"),
    primary: rgb("#303030"),
    primary-light: rgb("#a0a0a0"),
    primary-lighter: rgb("#d0d0d0"),
    primary-lightest: rgb("#ffffff"),
    primary-dark: rgb("#202020"),
    primary-darker: rgb("#101010"),
    primary-darkest: rgb("#000000"),
    secondary: rgb("#303030"),
    secondary-light: rgb("#a0a0a0"),
    secondary-lighter: rgb("#d0d0d0"),
    secondary-lightest: rgb("#ffffff"),
    secondary-dark: rgb("#202020"),
    secondary-darker: rgb("#101010"),
    secondary-darkest: rgb("#000000"),
    tertiary: rgb("#303030"),
    tertiary-light: rgb("#a0a0a0"),
    tertiary-lighter: rgb("#d0d0d0"),
    tertiary-lightest: rgb("#ffffff"),
    tertiary-dark: rgb("#202020"),
    tertiary-darker: rgb("#101010"),
    tertiary-darkest: rgb("#000000"),
  ),
  // slides mode: names of wrapped slide kinds that receive section metadata
  // in touying-slides.
  slides: ("slide",),
  // handle mode
  handout: false,
  // appendix mode
  appendix: false,
  appendix-in-outline: true,
  // enable pdfpc-file
  pdfpc-file: true,
  // first-slide page number, which will affect preamble,
  // default is 1
  first-slide-number: 1,
  // global preamble
  preamble: [],
  // page args
  page-args: (
    paper: "presentation-16-9",
    header: none,
    footer: none,
    fill: rgb("#ffffff"),
  ),
  padding: (x: 0em, y: 0em),
  // datetime format
  datetime-format: auto,
  // register the methods
  methods: (
    // info
    info: (self: none, ..args) => {
      self.info += args.named()
      self
    },
    // colors
    colors: (self: none, ..args) => {
      self.colors += args.named()
      self
    },
    // cover method
    cover: utils.wrap-method(hide),
    update-cover: (self: none, is-method: false, cover-fn) => {
      if is-method {
        self.methods.cover = cover-fn
      } else {
        self.methods.cover = utils.wrap-method(cover-fn)
      }
      self
    },
    enable-transparent-cover: (
      self: none, constructor: rgb, alpha: 85%) => {
      // it is based on the default cover method
      self.methods.cover = (self: none, body) => {
        utils.cover-with-rect(fill: utils.update-alpha(
          constructor: constructor, self.page-args.fill, alpha), body)
      }
      self
    },
    // dynamic control
    uncover: utils.uncover,
    only: utils.only,
    alternatives-match: utils.alternatives-match,
    alternatives: utils.alternatives,
    alternatives-fn: utils.alternatives-fn,
    alternatives-cases: utils.alternatives-cases,
    // alert interface
    alert: utils.wrap-method(text.with(weight: "bold")),
    // handout mode
    enable-handout-mode: (self: none) => {
      self.handout = true
      self
    },
    // disable pdfpc-file mode
    disable-pdfpc-file: (self: none) => {
      self.pdfpc-file = false
      self
    },
    // default slide
    touying-slide: touying-slide,
    slide: touying-slide,
    touying-slides: touying-slides,
    slides: touying-slides,
    // append the preamble
    append-preamble: (self: none, preamble) => {
      self.preamble += preamble
      self
    },
    // datetime format
    datetime-format: (self: none, format) => {
      self.datetime-format = format
      self
    },
    // default init
    init: (self: none, body) => {
      // default text size
      set text(size: 20pt)
      show heading.where(level: 2): set block(below: 1em)
      body
    },
    // default outline
    touying-outline: (self: none, ..args) => {
      states.touying-outline(..args)
    },
    appendix: (self: none) => {
      self.appendix = true
      self
    },
    appendix-in-outline: (self: none, value) => {
      self.appendix-in-outline = value
      self
    }
  ),
)
|
https://github.com/daskol/typst-templates | https://raw.githubusercontent.com/daskol/typst-templates/main/tmlr/tmlr.typ | typst | MIT License | #let std-bibliography = bibliography
// We prefer to use CMU Bright variant instead of Computer Modern Bright when
// ever it is possible.
#let font-family = ("CMU Serif", "Latin Modern Roman", "New Computer Modern",
"Serif")
#let font-family-sans = ("CMU Sans Serif", "Latin Modern Sans",
"New Computer Modern Sans", "Sans")
#let font-family-mono = ("Latin Modern Mono", "New Computer Modern Mono",
"Mono")
#let font = (
Large: 17pt,
footnote: 10pt,
large: 12pt,
normal: 10pt,
script: 8pt,
small: 9pt,
)
#let affl-keys = ("department", "institution", "location", "country")
// Running-head text for the page header.
// - accepted == none  -> empty header (preprint/arXiv build)
// - accepted == false -> anonymous under-review banner
// - accepted == true  -> published banner with month/year of publication
#let header(accepted, pubdate) = {
  if accepted == none {
    return ""
  }
  if not accepted {
    return "Under review as submission to TMLR"
  }
  let issue = pubdate.display("[month]/[year]")
  return "Published in Transactions on Machine Learning Research (" + issue + ")"
}
// Render one author block: bold name followed by one italic line per
// affiliation. `author.affl` may be a single affiliation key or an array of
// keys into the `affls` dictionary; missing affiliation fields are skipped.
#let make-author(author, affls) = {
  // Normalize to an array of affiliation keys.
  let author-affls = if type(author.affl) == array {
    author.affl
  } else {
    (author.affl, )
  }

  // One multi-line string per affiliation, ordered by affl-keys.
  let lines = author-affls.map(key => {
    let affl = affls.at(key)
    return affl-keys
      .map(key => affl.at(key, default: none))
      .filter(it => it != none)
      .join("\n")
  }).map(it => emph(it))

  return block(spacing: 0em, {
    set par(justify: true, leading: 0.50em) // Visually perfect.
    show par: set block(spacing: 0em)
    text(size: font.normal)[*#author.name*\ ]
    text(size: font.small)[#lines.join([\ ])]
  })
}
// Typeset an author's e-mail as a small italic `mailto:` link.
#let make-email(author) = {
  let address = author.email
  let body = {
    // Compensate difference between name and email font sizes (10pt vs 9pt).
    v(1pt)
    link("mailto:" + address, text(size: font.small, emph(address)))
  }
  return block(spacing: 0em, body)
}
// Lay out all authors in a two-column grid: author + affiliations on the
// left, e-mail on the right, one row per author.
#let make-authors(authors, affls) = {
  let cells = authors
    .map(it => (make-author(it, affls), make-email(it)))
    .join()
  return grid(
    columns: (2fr, 1fr),
    align: (left + top, right + top),
    row-gutter: 15.8pt, // Visually perfect.
    ..cells)
}
// Render the front matter: title, author block (or anonymous banner, or
// OpenReview link when published), and the centered abstract.
// `authors` is the (author-list, affiliation-dict) pair forwarded to
// make-authors; `accepted` has the same tri-state meaning as in `header`.
#let make-title(title, authors, abstract, review, accepted) = {
  // Render title.
  v(-0.03in) // Visually perfect.
  block(spacing: 0em, {
    set block(spacing: 0em)
    set par(leading: 10pt) // Empirically found.
    text(font: font-family-sans, size: font.Large, weight: "bold", title)
  })

  // Render authors if paper is accepted or not accepted or there is no
  // acceptance status (aka preprint).
  if accepted == none {
    v(31pt, weak: true) // Visually perfect.
    make-authors(..authors)
    v(-2pt) // Visually perfect.
  } else if accepted {
    v(31pt, weak: true) // Visually perfect.
    make-authors(..authors)
    v(14.9pt, weak: true) // Visually perfect.
    let label = text(font: font-family-mono, weight: "bold", emph(review))
    [*Reviewed on OpenReview:* #link(review, label)]
  } else {
    // Double-blind review: hide the author list entirely.
    v(0.3in + 0.2in - 3.5pt, weak: true)
    block(spacing: 0em, {
      [*Anonymous authors*\ ]
      [*Paper under double-blind review*]
    })
  }
  v(0.45in, weak: true) // Visually perfect.

  // Render abstract.
  block(spacing: 0em, width: 100%, {
    set text(size: font.normal)
    set par(leading: 0.51em) // Original 0.55em (or 0.45em?).

    // While all content is serif, headers and titles are sans serif.
    align(center,
      text(
        font: font-family-sans,
        size: font.large,
        weight: "bold",
        [*Abstract*]))
    v(22.2pt, weak: true)
    pad(left: 0.5in, right: 0.5in, abstract)
  })
  v(29.5pt, weak: true) // Visually perfect.
}
/**
* tmlr
*
* Args:
* title: Paper title.
* authors: Tuple of author objects and affilation dictionary.
* keywords: Publication keywords (used in PDF metadata).
* date: Creation date (used in PDF metadata).
* abstract: Paper abstract.
* bibliography: Bibliography content. If it is not specified then there is
* not reference section.
* appendix: Content to append after bibliography section.
* accepted: Valid values are `none`, `false`, and `true`. Missing value
* (`none`) is designed to prepare arxiv publication. Default is `false`.
* review: Hypertext link to review on OpenReview.
* pubdate: Date of publication (used only month and date).
*/
#let tmlr(
  title: [],
  authors: (),
  keywords: (),
  date: auto,
  abstract: none,
  bibliography: none,
  appendix: none,
  accepted: false,
  review: none,
  pubdate: none,
  body,
) = {
  // Default the publication date to the document date when it is set, and to
  // today otherwise.
  if pubdate == none {
    // FIX: was `data != none` — `data` is undefined and raised an error
    // whenever `pubdate` was omitted while `date` was set explicitly.
    pubdate = if date != auto and date != none {
      date
    } else {
      datetime.today()
    }
  }

  // Prepare authors for PDF metadata: reveal names only when the submission
  // is not anonymized (accepted or preprint).
  let author = if accepted == none or accepted {
    authors.at(0).map(it => it.name)
  } else {
    ()
  }

  set document(
    title: title,
    author: author,
    keywords: keywords,
    date: date)

  set page(
    paper: "us-letter",
    margin: (left: 1in,
      right: 8.5in - (1in + 6.5in),
      // top: 1in - 0.25in,
      // bottom: 11in - (1in + 9in + 0.25in)),
      top: 1.18in,
      bottom: 11in - (1.18in + 9in)),
    header-ascent: 46pt, // 1.5em in case of 10pt font
    header: locate(loc => block(spacing: 0em, {
      header(accepted, pubdate)
      v(3.5pt, weak: true)
      line(length: 100%, stroke: (thickness: 0.4pt))
    })),
    footer-descent: 20pt, // Visually perfect.
    footer: locate(loc => {
      let ix = counter(page).at(loc).first()
      return align(center, text(size: font.normal, [#ix]))
    }))

  // The original style requirement is to use Computer Modern Bright but we
  // just use OpenType CMU Bright font.
  set text(font: font-family, size: font.normal)
  set par(justify: true, leading: 0.52em) // TODO: Why? Visually perfect.
  show par: set block(spacing: 1.1em)

  // Configure heading appearance and numbering.
  set heading(numbering: "1.1")
  show heading: set text(font: font-family-sans)
  show heading: it => {
    // Create the heading numbering.
    let number = if it.numbering != none {
      counter(heading).display(it.numbering)
    }

    // Render sections with these names without numbering, as level 3 headings.
    let unnumbered = (
      [Broader Impact Statement],
      [Author Contributions],
      [Acknowledgments],
    )
    let level = it.level
    let prefix = [#number ]
    if unnumbered.any(name => name == it.body) {
      level = 3
      prefix = []
    }

    // TODO(@daskol): Use styles + measure to estimate ex.
    set align(left)
    if level == 1 {
      text(size: font.large, weight: "bold", {
        let ex = 10pt
        v(2.05 * ex, weak: true) // Visually perfect.
        [#prefix*#it.body*]
        v(1.80 * ex, weak: true) // Visually perfect.
      })
    } else if level == 2 {
      text(size: font.normal, weight: "bold", {
        let ex = 6.78pt
        v(2.8 * ex, weak: true) // Visually perfect.
        [#prefix*#it.body*]
        v(2.15 * ex, weak: true) // Visually perfect. Original 1ex.
      })
    } else if level == 3 {
      text(size: font.normal, weight: "bold", {
        let ex = 6.78pt
        v(2.7 * ex, weak: true) // Visually perfect.
        [#prefix*#it.body*]
        v(2.0 * ex, weak: true) // Visually perfect. Original -0.7em.
      })
    }
  }

  // Configure code blocks (listings).
  show raw: set block(spacing: 1.95em)

  // Configure footnote (almost default).
  show footnote.entry: set text(size: 8pt)
  set footnote.entry(
    separator: line(length: 2in, stroke: 0.35pt),
    clearance: 6.65pt,
    gap: 0.40em,
    indent: 12pt) // Original 12pt.

  // All captions either centered or aligned to the left.
  show figure.caption: set align(left)

  // Configure figures.
  show figure.where(kind: image): set figure.caption(position: bottom)
  set figure(gap: 16pt)

  // Configure tables.
  show figure.where(kind: table): set figure.caption(position: top)
  show figure.where(kind: table): set figure(gap: 6pt)
  set table(inset: 4pt)

  // Configure numbered lists.
  set enum(indent: 2.4em, spacing: 1.3em)
  show enum: set block(above: 2em)

  // Configure bullet lists.
  set list(indent: 2.4em, spacing: 1.3em, marker: ([•], [‣], [⁃]))
  show list: set block(above: 2em)

  // Configure math numbering and referencing: equation refs render as
  // parenthesized dark-blue links, e.g. (3).
  set math.equation(numbering: "(1)", supplement: [])
  show ref: it => {
    let eq = math.equation
    let el = it.element
    if el != none and el.func() == eq {
      let numb = numbering(
        "1",
        ..counter(eq).at(el.location())
      )
      let color = rgb(0%, 8%, 45%) // Originally `mydarkblue`. :D
      let content = link(el.location(), text(fill: color, numb))
      [(#content)]
    } else {
      it
    }
  }

  // Render title + authors + abstract.
  make-title(title, authors, abstract, review, accepted)

  // Render body as is.
  body

  // Optional references section (style shipped with the template).
  if bibliography != none {
    set std-bibliography(title: [References], style: "tmlr.csl")
    bibliography
  }

  // Optional appendix with its own letter-based heading numbering.
  if appendix != none {
    set heading(numbering: "A.1")
    counter(heading).update(0)
    appendix
  }
}
https://github.com/SWATEngineering/Docs | https://raw.githubusercontent.com/SWATEngineering/Docs/main/src/2_RTB/PianoDiProgetto/sections/PreventivoSprint/PrimaRevisione.typ | typst | MIT License | #import "../../const.typ": Re_cost, Am_cost, An_cost, Ve_cost, Pr_cost, Pt_cost
#import "../../functions.typ": glossary
#let ruoli_ore = (
Re: 40,
Am: 61,
An: 42,
Pt: 0,
Pr: 27,
Ve: 64,
)
#let costo_ruoli_ore=(
Costo_Re: Re_cost * ruoli_ore.at("Re"),
Costo_Am: Am_cost * ruoli_ore.at("Am"),
Costo_An: An_cost * ruoli_ore.at("An"),
Costo_Pt: Pt_cost * ruoli_ore.at("Pt"),
Costo_Pr: Pr_cost * ruoli_ore.at("Pr"),
Costo_Ve: Ve_cost * ruoli_ore.at("Ve"),
)
#let rimanente_ore = 570 - ruoli_ore.values().sum();
#let rimanente_soldi = 11070 - costo_ruoli_ore.values().sum();
== Prima revisione (Requirements and Technology Baseline #glossary[RTB])
=== Prospetto economico
In questa #glossary[milestone], il costo per ogni ruolo sarà come da tabella:
#table(
columns: (120pt,60pt,100pt),
align: center,
[*Ruolo*],[*Ore*],[*Costo*],
[Responsabile],[#ruoli_ore.at("Re")],[#costo_ruoli_ore.at("Costo_Re")€],
[Amministratore],[#ruoli_ore.at("Am")],[#costo_ruoli_ore.at("Costo_Am")€],
[Analista],[#ruoli_ore.at("An")],[#costo_ruoli_ore.at("Costo_An")€],
[Progettista],[#ruoli_ore.at("Pt")],[#costo_ruoli_ore.at("Costo_Pt") €],
[Programmatore],[#ruoli_ore.at("Pr")],[#costo_ruoli_ore.at("Costo_Pr") €],
[Verificatore],[#ruoli_ore.at("Ve")],[#costo_ruoli_ore.at("Costo_Ve")€],
[*Totale*],[#ruoli_ore.values().sum()],[#costo_ruoli_ore.values().sum()€],
[*Rimanente*],[#rimanente_ore],[#rimanente_soldi€]
) |
https://github.com/TypstApp-team/typst | https://raw.githubusercontent.com/TypstApp-team/typst/master/tests/typ/compiler/set.typ | typst | Apache License 2.0 | // General tests for set.
---
// Test that text is affected by instantiation-site bold.
#let x = [World]
Hello *#x*
---
// Test that lists are affected by correct indents.
#let fruit = [
- Apple
- Orange
#list(body-indent: 20pt)[Pear]
]
- Fruit
#[#set list(indent: 10pt)
#fruit]
- No more fruit
---
// Test that that block spacing and text style are respected from
// the outside, but the more specific fill is respected.
#set block(spacing: 4pt)
#set text(style: "italic", fill: eastern)
#let x = [And the forest #parbreak() lay silent!]
#text(fill: forest, x)
---
// Test that scoping works as expected.
// (The set rule inside the if-block must not leak out of it.)
#{
if true {
set text(blue)
[Blue ]
}
[Not blue]
}
---
// Test relative path resolving in layout phase.
// (The numbering callback builds a root-relative path, resolved at layout time.)
#let choice = ("monkey.svg", "rhino.png", "tiger.jpg")
#set enum(numbering: n => {
let path = "/files/" + choice.at(n - 1)
move(dy: -0.15em, image(path, width: 1em, height: 1em))
})
+ Monkey
+ Rhino
+ Tiger
---
// Test conditional set.
// (The set rule applies only to references whose target label is <unknown>.)
#show ref: it => {
set text(red) if it.target == <unknown>
"@" + str(it.target)
}
@hello from the @unknown
---
// Error: 19-24 expected boolean, found integer
#set text(red) if 1 + 2
---
// Error: 12-26 set is only allowed directly in code and content blocks
#{ let x = set text(blue) }
|
https://github.com/herbertskyper/HITsz_Proposal_report_Template | https://raw.githubusercontent.com/herbertskyper/HITsz_Proposal_report_Template/main/utils/counters.typ | typst | MIT License | #let cover_end_before_counter = counter("before_cover_end_counter")
// Marks the position right after the cover's final page; counterpart of
// cover_end_before_counter (declared above). Layout code queries these to
// decide where front-matter page numbering begins.
#let cover_end_after_counter = counter("after_cover_end_counter")
|
https://github.com/sofianedjerbi/Resume | https://raw.githubusercontent.com/sofianedjerbi/Resume/main/modules/education.typ | typst | Apache License 2.0 | #import "../brilliant-CV/template.typ": *
#cvSection("Education")
#cvEntry(
title: [Master in Mathematics],
society: [Université Grenoble Alpes],
date: [2022 - 2023],
location: [Grenoble, France],
)
#cvEntry(
title: [Bachelor in Computer Science],
society: [Université Grenoble Alpes],
date: [2019 - 2022],
location: [Grenoble, France],
)
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/keyle/0.1.1/src/keyle.typ | typst | Apache License 2.0 | #import "sym.typ": mac-key, biolinum-key
// Shared key-cap metrics used by the boxed themes below.
#let _inset = 4pt // horizontal padding inside a key cap
#let _outset = 2pt // extra cap height drawn above the content box
#let _radius = 3pt // corner rounding of a key cap
/// Renders a small gallery of key combinations with the given renderer;
/// used by the theme docs below to preview each theme.
///
/// - kbd (function): A key renderer produced by `config`.
/// -> content
#let gen-examples(kbd) = [
#kbd("Ctrl", "Alt", "A") #h(2em)
#kbd("Ctrl", "Shift", "A", compact: true) #h(2em)
#kbd("Home") #kbd("End") #kbd("Ins") #kbd("Del")
]
/// Theme function to render keys in a standard style.
///
/// #example(```typst
/// #let kbd = keyle.config(theme: keyle.themes.standard)
/// #keyle.gen-examples(kbd)
/// ```)
///
/// - sym (string): The key symbol to render.
/// -> content
#let theme-func-stardard(sym) = {
  // Light-grey rounded cap with a dark outline and a plain black label.
  let cap = rect(
    fill: rgb("#eee"),
    stroke: rgb("#555"),
    radius: _radius,
    inset: (x: _inset),
    outset: (top: _outset),
    text(fill: black, sym),
  )
  box(cap)
}
/// Theme function to render keys in a deep blue style.
///
/// #example(```typst
/// #let kbd = keyle.config(theme: keyle.themes.deep-blue)
/// #keyle.gen-examples(kbd)
/// ```)
///
/// - sym (string): The key symbol to render.
/// -> content
#let theme-func-deep-blue(sym) = {
  // Steel-blue cap with a darker outline; the label is white small caps.
  let label = smallcaps(text(fill: white, sym))
  box(rect(
    fill: rgb("#4682b4"),
    stroke: rgb("#2a6596"),
    radius: _radius,
    inset: (x: _inset),
    outset: (top: _outset),
    label,
  ))
}
/// Theme function to render keys in a type writer style.
///
/// Unlike the other boxed themes this draws a fully rounded dark cap
/// (`radius: 50%`) with no vertical outset.
///
/// #example(```typst
/// #let kbd = keyle.config(theme: keyle.themes.type-writer)
/// #keyle.gen-examples(kbd)
/// ```)
///
/// - sym (string): The key symbol to render.
/// -> content
#let theme-func-type-writer(sym) = box(
rect(inset: (x: _inset), stroke: rgb("#2b2b2b"), radius: 50%, fill: rgb("#333"), smallcaps(text(fill: white, sym))),
)
/// Theme function to render keys in a Linux Biolinum Keyboard style.
///
/// You need to have the font installed on your system.
///
/// No box is drawn here: the Linux Biolinum Keyboard font renders whole
/// key caps as glyphs, so styled text is the entire visual.
///
/// #example(```typst
/// #let kbd = keyle.config(theme: keyle.themes.biolinum, delim: keyle.biolinum-key.delim_plus)
/// #keyle.gen-examples(kbd)
/// ```)
///
/// - sym (string): The key symbol to render.
/// -> content
#let theme-func-biolinum(sym) = text(
fill: black,
font: ("Linux Biolinum Keyboard"),
size: 1.4em,
sym,
)
/// Built-in themes keyed by name; pass one of these functions as the
/// `theme` argument of `config`.
#let themes = (
standard: theme-func-stardard,
deep-blue: theme-func-deep-blue,
type-writer: theme-func-type-writer,
biolinum: theme-func-biolinum,
)
/// Builds a keyboard-rendering helper preconfigured with a theme.
///
/// The returned function accepts any number of key names plus optional
/// `compact`/`delim` overrides, defaulting to the values given here.
///
/// - theme (function): The theme function to use.
/// - compact (bool): Whether to render keys in a compact format.
/// - delim (string): The delimiter to use when rendering keys in compact format.
/// -> function
#let config(
theme: themes.standard,
compact: false,
delim: "+",
) = (
(..keys, compact: compact, delim: delim) => {
if compact {
// Compact mode: a single cap containing e.g. "Ctrl+Alt+A".
theme(keys.pos().join(delim))
} else {
// One themed cap per key, joined by the delimiter.
let caps = keys.pos().map(k => [#theme(k)])
let biolinum-delim = delim == biolinum-key.delim_plus or delim == biolinum-key.delim_minus
if biolinum-delim {
// Biolinum delimiters are key-font glyphs, so run them through the theme too.
caps.join(theme(delim))
} else {
caps.join([ #box(height: 1.2em, delim) ])
}
}
}
)
|
https://github.com/Ngan-Ngoc-Dang-Nguyen/thesis | https://raw.githubusercontent.com/Ngan-Ngoc-Dang-Nguyen/thesis/main/my-outline.typ | typst | #let my-outline-row( textSize:none,
textWeight: "regular",
insetSize: 0pt,
textColor: blue,
number: "0",
title: none,
heading_page: "0",
location: none) = {
set text(size: textSize, fill: textColor, weight: textWeight)
box(width: 1.1cm, inset: (y: insetSize), align(left, number))
h(0.1cm)
box(inset: (y: insetSize), width: 100% - 1.2cm, )[
#link(location, title)
#box(width: 1fr, repeat(text(weight: "regular")[. #h(4pt)]))
#link(location, heading_page)
]
}
// Renders the main table of contents: part banners for level-1 entries that
// open a new part, chapter/section rows via my-outline-row, then the outline
// itself starting on an odd page. appendix_state and the part_* arguments
// are state objects queried at each entry's location.
#let my-outline(appendix_state, part_state, part_location,part_change,part_counter, mainColor, textSize1:none, textSize2:none, textSize3:none, textSize4:none) = {
show outline.entry: it => {
let appendix_state = appendix_state.at(it.element.location())
// Appendix headings are numbered "A.1", normal chapters "1.1".
let numberingFormat = if appendix_state != none {"A.1"} else {"1.1"}
let counterInt = counter(heading).at(it.element.location())
let number = none
if counterInt.first() >0 {
number = numbering(numberingFormat, ..counterInt)
}
let title = it.element.body
let heading_page = it.page
if it.level == 1 {
let part_state = part_state.at(it.element.location())
let part_location = part_location.at(it.element.location())
let part_change = part_change.at(it.element.location())
let part_counter = part_counter.at(it.element.location())
// A new part starts at this chapter: draw its roman-numbered banner first.
if (part_change){
v(0.7cm, weak: true)
box(width: 1.1cm, fill: mainColor.lighten(80%), inset: 5pt, align(center, text(size: textSize1, weight: "bold", fill: mainColor.lighten(30%), numbering("I",part_counter.first()))))
h(0.1cm)
box(width: 100% - 1.2cm, fill: mainColor.lighten(60%), inset: 5pt, align(center, link(part_location,text(size: textSize1, weight: "bold", part_state))))
v(0.45cm, weak: true)
}
else{
v(0.5cm, weak: true)
}
// First appendix chapter: print an unnumbered "Appendix" row above it.
if (counterInt.first() == 1 and appendix_state != none ){
my-outline-row(insetSize: 2pt, textWeight: "bold", textSize: textSize2, textColor:mainColor, number: none, title: appendix_state, heading_page: heading_page, location: it.element.location())
v(0.5cm, weak: true)
}
my-outline-row(insetSize: 2pt, textWeight: "bold", textSize: textSize2, textColor:mainColor, number: number, title: title, heading_page: heading_page, location: it.element.location())
}
else if it.level ==2 {
my-outline-row(insetSize: 2pt, textWeight: "bold", textSize: textSize3, textColor:black, number: number, title: title, heading_page: heading_page, location: it.element.location())
} else {
my-outline-row(textWeight: "regular", textSize: textSize4, textColor:black, number: number, title: title, heading_page: heading_page, location: it.element.location())
}
}
pagebreak(to: "odd")
outline(depth: 3, indent: false)
}
// Renders a compact (9.5cm wide) per-part mini table of contents: only
// numbered, non-appendix entries whose enclosing part matches `partTitle`
// are shown; everything else is suppressed.
#let my-outline-small(partTitle, appendix_state, part_state, part_location,part_change,part_counter, mainColor, textSize1:none, textSize2:none, textSize3:none, textSize4:none) = {
show outline.entry: it => {
let appendix_state = appendix_state.at(it.element.location())
// Appendix headings would be numbered "A.1", normal chapters "1.1".
let numberingFormat = if appendix_state != none {"A.1"} else {"1.1"}
let counterInt = counter(heading).at(it.element.location())
let number = none
if counterInt.first() >0 {
number = numbering(numberingFormat, ..counterInt)
}
let title = it.element.body
let heading_page = it.page
let part_state = part_state.at(it.element.location())
// Keep only numbered, non-appendix entries belonging to this part.
if (part_state == partTitle and counterInt.first() >0 and appendix_state==none){
if it.level == 1 {
v(0.5cm, weak: true)
my-outline-row(insetSize: 1pt, textWeight: "bold", textSize: textSize2, textColor:mainColor, number: number, title: title, heading_page: heading_page, location: it.element.location())
}
else if it.level ==2 {
my-outline-row(textWeight: "regular", textSize: textSize4, textColor:black, number: number, title: text(fill: black, title), heading_page: text(fill: black, heading_page), location: it.element.location())
}
}
else{
// Collapse the vertical space an excluded entry would otherwise leave.
v(-0.65em, weak: true)
}
}
box(width: 9.5cm, outline(depth: 2, indent: false, title: none))
}
// Renders a secondary list (figures/tables) for the given target selector,
// with a custom row layout for its level-1 entries, starting on an odd page.
#let my-outline-sec(listOfFigureTitle, target, textSize) = {
show outline.entry.where(level: 1): it => {
let page-no = it.page
[
#set text(size: textSize)
// NOTE(review): indices 2 and 4 presumably pick the number and the
// caption text out of the entry body's children — confirm against the
// figure caption structure in use.
#box(width: 0.75cm, align(right, [#it.body.at("children").at(2) #h(0.2cm)]))
#link(it.element.location(), it.body.at("children").at(4))
// Dotted leader up to the page number.
#box(width: 1fr, repeat(text(weight: "regular")[. #h(4pt)]))
#link(it.element.location(), page-no)
]
}
// Start the list on an odd (right-hand) page.
pagebreak(to: "odd")
outline(title: listOfFigureTitle, target: target)
}
|
https://github.com/frectonz/the-pg-book | https://raw.githubusercontent.com/frectonz/the-pg-book/main/book/024.%20say.html.typ | typst | say.html
What You Can't Say
January 2004
Have you ever seen an old photo of yourself and
been embarrassed at the way you looked? Did we actually
dress like that? We did. And we had no idea how
silly we looked.
It's the nature of fashion to be invisible, in the
same way the movement of the earth is invisible to all
of us riding on it.

What scares me is that there are moral fashions too.
They're just as arbitrary, and just as invisible to most people.
But they're much more dangerous.
Fashion is mistaken for good design;
moral fashion is mistaken for good.
Dressing oddly gets you laughed at. Violating
moral fashions can get you fired, ostracized, imprisoned, or
even killed.If you could travel back in a time machine, one thing
would be true no matter where you went: you'd have to watch
what you said.
Opinions we consider harmless could have
gotten you in big trouble.
I've already said at least one thing that would have gotten me in big
trouble in most of Europe in the seventeenth century,
and did get Galileo in big trouble when he said
it � that the earth moves. [1]
It seems to be a constant throughout history: In every
period, people believed things that were just ridiculous,
and believed them so strongly that you would have gotten in
terrible trouble for saying otherwise.

Is our time any different?
To anyone who has read any amount of history, the answer is
almost certainly no. It would be a remarkable coincidence if ours
were the first era to get everything just right.It's tantalizing to think we believe
things that people in the future will find ridiculous.
What would someone coming back to visit us in a time machine
have to be careful not to say?
That's what I want to study here.
But
I want to do more than just shock everyone with
the heresy du jour. I want to find general
recipes for discovering what you can't say, in any era.

The Conformist Test

Let's start with a test:
Do you have any opinions that you would be reluctant to express
in front of a group of your peers?

If the answer is no,
you might want to stop and think about that. If everything
you believe is something you're supposed to believe, could
that possibly be a coincidence? Odds are it isn't. Odds are
you just think what you're told.The other alternative would be that you independently considered
every question and came up with the exact same answers that
are now considered acceptable. That seems unlikely, because
you'd also have to make the same mistakes. Mapmakers
deliberately put slight mistakes in their maps so they can
tell when someone copies them. If another map has the same
mistake, that's very convincing evidence.Like every other era in history, our moral map almost certainly
contains a few mistakes. And anyone who makes the same mistakes
probably didn't do it by accident. It would be
like someone claiming they had independently decided in
1972 that bell-bottom jeans were a good idea.If you believe everything you're supposed to now, how can
you be sure you wouldn't also have believed everything you
were supposed to if you had grown up among the plantation
owners of the pre-Civil War South, or in Germany in the 1930s � or
among the Mongols in 1200, for that matter? Odds are you
would have.Back in the era of terms like "well-adjusted," the idea
seemed to be that there was something wrong with
you if you thought things you didn't dare say out loud.
This seems backward. Almost certainly, there
is something wrong with you if you don't think things
you don't dare say out loud.

Trouble

What can't we say? One way to find these ideas is simply to look
at things people do say, and get in trouble for. [2]Of course, we're not just looking for things we can't say.
We're looking for things we can't say that are true, or at least
have enough chance of being true that the question
should remain open. But many of the
things people get in trouble for saying probably
do make it over this second, lower threshold. No one
gets in trouble for saying
that 2 + 2 is 5, or that people in Pittsburgh are ten feet tall.
Such obviously false statements might be treated as jokes, or
at worst as evidence of insanity, but they are not likely to
make anyone mad. The statements that make people mad are
the ones they worry might be believed.
I suspect the statements that make people maddest
are those they worry might be true.If Galileo had said that people in Padua were ten feet tall,
he would have been regarded as a harmless eccentric. Saying
the earth orbited the sun was another matter. The church knew
this would set people thinking.Certainly, as we look back on the past, this rule of thumb works
well. A lot of the statements people got in trouble for seem
harmless now. So it's likely that visitors from the
future would agree with at least some of the statements that
get people in trouble today. Do we have no Galileos? Not
likely.To find them,
keep track of opinions that get
people in trouble, and start asking, could this be true?
Ok, it may be heretical (or whatever modern equivalent), but
might it also be true?

Heresy

This won't get us all the answers, though. What if no one
happens to have gotten in trouble for a particular idea yet?
What if some idea would be so radioactively controversial that
no one would dare express it in public? How can we find these too?Another approach is to follow that word, heresy. In every period
of history, there seem to have been labels that got applied to
statements to shoot them down before anyone had a chance to ask
if they were true or not. "Blasphemy", "sacrilege", and "heresy"
were such
labels for a good part of western history, as in more recent times
"indecent", "improper", and "unamerican" have been. By now these
labels have lost their sting. They always do.
By now they're mostly used ironically.
But in their time,
they had real force.The word "defeatist", for example, has no particular political
connotations now.
But in Germany in 1917 it was a weapon, used by Ludendorff in
a purge of those who favored a negotiated peace.
At the start of World War II it was used
extensively by Churchill and his supporters to silence their
opponents.
In 1940, any argument against Churchill's aggressive policy was "defeatist".
Was it right or wrong? Ideally, no one got far enough to ask
that.
We have such labels today, of course, quite a lot of them,
from the all-purpose "inappropriate" to the dreaded "divisive."
In any period, it should be easy to figure out what such labels are,
simply by looking at what people call ideas they disagree
with besides untrue. When a politician says his opponent is
mistaken, that's a straightforward criticism, but when he
attacks a statement as "divisive" or "racially insensitive"
instead of arguing that it's false, we should start paying
attention.So another way to figure out which of our taboos future generations
will laugh at is to start with the
labels. Take a label � "sexist", for example � and try to think
of some ideas that would be called that. Then for each ask, might
this be true?Just start listing ideas at random? Yes, because they
won't really be random. The ideas that come to mind first
will be the most plausible ones. They'll be things you've already
noticed but didn't let yourself think.In 1989 some clever researchers tracked
the eye movements of radiologists as they scanned chest images for
signs of lung cancer. [3] They found that even when the radiologists
missed a cancerous lesion, their eyes had usually paused at the site of it.
Part of their brain knew there was something there; it just
didn't percolate all the way up into conscious knowledge.
I think many interesting heretical thoughts are already mostly
formed in our minds. If we turn off our self-censorship
temporarily, those will be the first to emerge.

Time and Space

If we could look into the future it would be obvious which
of our taboos they'd laugh at.
We can't do that, but we can do something almost as good: we can
look into the past. Another way to figure out what we're
getting wrong is to look at what used to be acceptable
and is now unthinkable.Changes between the past and the present sometimes do represent
progress. In a field like physics,
if we disagree with past generations it's because we're
right and they're wrong. But this becomes rapidly less true as
you move away from the certainty of the hard sciences. By the time
you get to social questions, many changes are just fashion.
The age of consent fluctuates like hemlines.We may imagine that we are a great deal smarter and more virtuous than
past generations, but the more history you read, the less likely
this seems. People in past times were much like us. Not heroes,
not barbarians. Whatever their ideas were, they were ideas
reasonable people could believe.So here is another source of interesting heresies. Diff present
ideas against those of various past cultures, and see what you
get. [4]
Some will be
shocking by present standards. Ok, fine; but which might also be true?You don't have to look into the past to find big differences.
In our own time, different societies have wildly varying ideas
of what's ok and what isn't.
So you can try diffing other cultures' ideas against ours as well.
(The best way to do that is to visit them.)
Any idea that's considered harmless in a significant
percentage of times and places, and yet is taboo in ours,
is a candidate for something we're mistaken
about.For example, at the high water mark of political correctness
in the early 1990s, Harvard distributed to its
faculty and staff a brochure saying, among other things, that it
was inappropriate to compliment a colleague or student's
clothes. No more "nice shirt."
I think this principle is rare among the world's cultures, past or present.
There are probably more where it's considered especially
polite to compliment someone's clothing than where it's considered
improper.
Odds are this is, in a mild form, an example of one of
the taboos a visitor from the future would
have to be careful to avoid if he happened to set his time machine for
Cambridge, Massachusetts, 1992. [5]

Prigs

Of course, if they have time machines in the future they'll
probably have a separate reference manual just for Cambridge.
This has always been a fussy place, a town of i dotters and
t crossers, where you're liable to get both your grammar and
your ideas corrected in the same conversation. And that
suggests another way to find taboos. Look for prigs,
and see what's inside their heads.

Kids' heads are repositories of all our taboos.
It seems fitting to us that kids' ideas should be bright and clean.
The picture we give them of the world is
not merely simplified, to suit their developing minds,
but sanitized as well, to suit our
ideas of what kids ought to think. [6]You can see this on a small scale in the matter of
dirty words. A lot of my friends are starting to have children
now, and they're all trying
not to use words like
"fuck" and "shit" within baby's hearing, lest baby start using
these words too.
But these
words are part of the language, and adults use them all the
time. So parents are giving their kids an inaccurate idea of
the language by not using
them. Why do they do this? Because they don't think it's
fitting that kids should use the whole language. We like
children to seem innocent. [7]Most adults, likewise, deliberately give kids a misleading
view of the world.
One of the most obvious
examples is Santa Claus. We think it's cute for little kids to
believe in Santa Claus. I myself think it's cute for little
kids to believe in Santa Claus. But one wonders, do we tell
them this stuff for their sake, or for ours?I'm not arguing for or against this idea here. It is probably
inevitable that parents should want to dress up their kids'
minds in cute little baby outfits. I'll probably do it myself.
The important thing for our purposes is that, as a result,
a well brought-up teenage kid's brain is a more
or less complete collection of all our taboos � and in mint
condition, because they're untainted by experience.
Whatever we think that will later turn out to be ridiculous,
it's almost certainly inside that head.

How do we get at these ideas? By the following thought experiment.
Imagine a kind of latter-day Conrad character
who has worked for a time as a mercenary in Africa, for a time
as a doctor in Nepal, for a time as the manager of a
nightclub in Miami. The specifics don't matter � just
someone who has
seen a lot. Now imagine comparing what's inside this guy's head
with what's inside the head
of a well-behaved sixteen year old girl from
the suburbs. What does he think that
would shock her?
He knows the world; she knows, or at least embodies, present
taboos. Subtract one from the other, and the result is what
we can't say.
Mechanism

I can think of one more way to figure out what we can't
say: to look at how taboos are created. How do moral
fashions arise, and why are they adopted?
If we can understand this mechanism, we
may be able to see it at work in our own time.Moral fashions don't seem to be created the way ordinary
fashions are. Ordinary fashions seem to arise by accident when
everyone imitates the whim of some influential person.
The fashion for broad-toed shoes in
late fifteenth century Europe began because Charles VIII of
France had six toes on one foot. The fashion for the
name Gary began when the actor Gary Cooper adopted the name
of a tough mill town in Indiana. Moral fashions more often
seem to be created deliberately. When there's something we
can't say, it's often because some group doesn't want us to.The prohibition will be strongest when the group is nervous.
The irony of Galileo's situation was that he got in trouble
for repeating Copernicus's ideas. Copernicus himself didn't.
In fact, Copernicus was a canon of a cathedral, and dedicated his
book to the pope. But by Galileo's time the church was in
the throes of the Counter-Reformation and was much more
worried about unorthodox ideas.To launch a taboo, a group has to be poised halfway between
weakness and power. A confident group doesn't need taboos
to protect it. It's not considered improper to
make disparaging remarks about Americans, or the English.
And yet a group has to be powerful enough to enforce a
taboo. Coprophiles, as of this writing, don't seem to be
numerous or energetic enough to have had their
interests promoted to a lifestyle.I suspect the biggest source of moral taboos will turn out to
be power struggles in which one side only barely has
the upper hand. That's where you'll find a group
powerful enough to enforce taboos, but weak enough to need them.Most struggles, whatever they're really about, will be cast
as struggles between competing ideas.
The English Reformation was at bottom a struggle for wealth and power,
but it ended up being
cast as a struggle to preserve the souls
of Englishmen from the corrupting influence of Rome.
It's easier to get people to fight for an idea.
And whichever side wins, their
ideas will also be considered to have triumphed, as if God
wanted to signal his agreement by selecting that side as the victor.

We often like to think of World War II as a triumph
of freedom over totalitarianism. We conveniently forget that
the Soviet Union was also one of the winners.I'm not saying that struggles are never about ideas,
just that they will always be made to seem to be about
ideas, whether they are or not. And just as there is nothing
so unfashionable as the last, discarded fashion, there is
nothing so wrong as the principles of the most recently
defeated opponent.
Representational art is only now
recovering from the approval of both Hitler and Stalin. [8]Although moral fashions tend to arise from different sources
than fashions in clothing, the mechanism of their adoption seems
much the same. The early adopters will be driven by ambition:
self-consciously cool people who want to distinguish themselves
from the common herd. As the fashion becomes established they'll
be joined by a second, much larger group, driven by fear. [9] This
second group adopt the fashion not because they want to stand
out but because they are afraid of standing out.So if you want to figure out what we can't say, look at the
machinery of fashion and try to predict what it would make
unsayable. What groups are powerful but nervous, and what
ideas would they like to suppress? What ideas were tarnished by
association when they ended up on the losing side of a recent
struggle? If a self-consciously cool person wanted to differentiate
himself from preceding fashions (e.g. from his parents),
which of their ideas would he tend to reject?
What are conventional-minded people afraid of saying?This technique won't find us all the things we can't say.
I can think of some that aren't the result of
any recent struggle. Many of our taboos are rooted
deep in the past. But this approach, combined with the
preceding four, will turn up a good number of unthinkable
ideas.WhySome would ask, why would one want to do this? Why deliberately
go poking around among nasty, disreputable ideas? Why look
under rocks?I do it, first of all, for the same reason I did look under
rocks as a kid: plain curiosity. And I'm especially curious about
anything that's forbidden. Let me see and decide for myself.Second, I do it because I don't like the idea of being mistaken.
If, like other eras, we believe things that will later seem ridiculous,
I want to know what they are so that I, at least, can avoid
believing them.Third, I do it because it's good for the brain. To do good work
you need a brain that can go anywhere. And you especially need a
brain that's in the habit of going where it's not supposed to.Great work tends to grow out of ideas
that others have overlooked, and no idea is so overlooked as one that's
unthinkable.
Natural selection, for example.
It's so simple. Why didn't anyone think of it before? Well,
that is all too obvious. Darwin himself was careful to tiptoe
around the implications of his theory. He wanted to spend his
time thinking about biology, not arguing with people who accused
him of being an atheist.In the sciences, especially, it's a great advantage to be able to
question assumptions.
The m.o. of scientists, or at least of the
good ones, is precisely that: look for places where
conventional wisdom is broken, and then try to pry apart the
cracks and see what's underneath. That's where new theories come
from.A good scientist, in other words, does not merely ignore
conventional wisdom, but makes a special effort to break it.
Scientists go looking for trouble.
This should be the m.o. of any scholar, but
scientists seem much more willing to look under rocks. [10]Why? It could
be that the scientists are simply smarter; most physicists could,
if necessary, make it through a PhD program in French literature,
but few professors of French literature could make it through
a PhD program in physics. Or it could be because it's clearer
in the sciences whether theories are true or false, and this
makes scientists bolder. (Or it could be that, because it's
clearer in the sciences whether theories are true or false, you
have to be smart to get jobs as a scientist, rather than just a
good politician.)Whatever the reason, there seems a clear correlation between
intelligence and willingness to consider shocking ideas.
This isn't just because smart people actively work to find holes in
conventional thinking. I think conventions also have
less hold over them to start with.
You can see that in the
way they dress.It's not only in the sciences that heresy pays off.
In any competitive field, you can
win big by seeing things that others daren't.
And in every
field there are probably heresies few dare utter. Within
the US car industry there is a lot of hand-wringing now
about declining market share.
Yet the cause is so obvious that any observant outsider could
explain it in a second: they make bad cars. And they have for
so long that by now the US car brands are antibrands — something
you'd buy a car despite, not because of. Cadillac stopped
being the Cadillac of cars in about 1970. And yet I suspect
no one dares say this. [11] Otherwise these companies would have
tried to fix the problem.Training yourself to think unthinkable thoughts has advantages
beyond the thoughts themselves. It's like stretching.
When you stretch before running, you put your body into positions
much more extreme
than any it will assume during the run.
If you can think things
so outside the box that they'd make people's hair stand on end,
you'll have no trouble with the small trips outside the box that
people call innovative.Pensieri StrettiWhen you find something you can't say, what do you do with it?
My advice is, don't say it. Or at least, pick your battles.Suppose in the future there is a movement to ban
the color yellow. Proposals to paint anything yellow are
denounced as "yellowist", as is anyone suspected of liking the
color. People who like orange are tolerated but viewed with
suspicion. Suppose you realize there is nothing
wrong with yellow. If you go around saying this, you'll be
denounced as a yellowist too, and you'll find yourself having a
lot of arguments with anti-yellowists.
If your aim in life is to rehabilitate the color yellow, that may
be what you want.
But if you're mostly interested in
other questions, being labelled as a yellowist will just be
a distraction. Argue with idiots, and you become an idiot.The most important thing is to be able to think what you
want, not to say what you want. And if you feel you have to
say everything you think, it may inhibit you from thinking
improper thoughts. I think it's better to follow the opposite
policy. Draw a sharp line between your thoughts and your
speech. Inside your head, anything is allowed.
Within my head I make a point of encouraging the most outrageous
thoughts I can imagine.
But, as in
a secret society, nothing that happens within the building
should be told to outsiders. The first rule of Fight
Club is, you do not talk about Fight Club.When Milton was going to visit Italy in the 1630s,
Sir <NAME>, who had been ambassador to Venice, told him
his motto should be
"i pensieri stretti & il viso sciolto." Closed thoughts
and an open face. Smile at everyone, and don't tell them
what you're thinking. This was wise advice.
Milton was an argumentative fellow, and the Inquisition
was a bit restive at that time. But I think the difference
between Milton's situation and ours is only a matter of
degree.
Every era has its heresies, and if you don't get imprisoned for them you
will at least get in enough trouble that it becomes a complete
distraction.I admit it seems cowardly to keep quiet.
When I read about the harassment to which
the Scientologists subject their critics [12], or that pro-Israel groups
are "compiling dossiers" on those who speak out against Israeli
human rights abuses [13], or about people being sued for
violating the DMCA [14], part of me wants
to say, "All right, you bastards, bring it on."
The problem is, there are so many things you can't say.
If you said them all you'd
have no time left for your real work.
You'd have to turn into Noam Chomsky. [15]The trouble with keeping your thoughts secret, though,
is that you lose the advantages of discussion. Talking
about an idea leads to more ideas.
So the optimal plan, if you can manage it,
is to have a few trusted
friends you can speak openly to. This is not just a
way to develop ideas; it's also a good
rule of thumb for choosing friends. The people
you can say heretical things to without getting jumped on
are also the most interesting to know.Viso Sciolto?I don't think we need
the viso sciolto so much as the pensieri stretti.
Perhaps the best policy is to make it plain that you don't
agree with whatever zealotry is current in your time, but
not to be too specific about what you disagree with. Zealots
will try to draw you out, but you don't have to answer them.
If they try to force you to treat a question on their
terms by asking "are you with us or against us?" you can
always just answer "neither".Better still, answer "I haven't decided."
That's what <NAME>
did when a group tried to put
him in this position. Explaining himself later, he said
"I don't do litmus tests." [16]
A lot of the
questions people get hot about are actually quite complicated.
There is no prize for getting the answer quickly.If the anti-yellowists seem to be getting out of hand and
you want to fight back, there are ways
to do it without getting yourself accused of being a
yellowist. Like skirmishers in
an ancient army, you want to avoid directly engaging the
main body of the enemy's troops. Better to harass them
with arrows from a distance.One way to do this is to ratchet the debate up one level of
abstraction.
If you argue against censorship in general, you can avoid being
accused of whatever heresy is contained
in the book or film that someone is trying to censor.
You can attack labels with meta-labels: labels that refer
to the use of labels to prevent discussion.
The spread of the term "political correctness" meant the beginning of
the end of political correctness, because it enabled one to
attack the phenomenon as a whole without being accused of any
of the specific heresies it sought to suppress.Another way to counterattack is with metaphor. <NAME>
undermined the House Un-American Activities Committee
by writing a play, "The Crucible," about the Salem witch trials.
He never referred directly to the committee and so gave them
no way to reply.
What could HUAC do, defend the Salem witch trials? And yet
Miller's metaphor stuck so well that to this day the activities
of the committee are often described as a "witch-hunt."Best of all, probably, is humor. Zealots, whatever their
cause, invariably lack a sense of humor.
They can't reply in kind to jokes.
They're as unhappy on the territory of
humor as a mounted knight on a skating rink.
Victorian prudishness, for example, seems to have been defeated
mainly by treating it as a joke. Likewise its reincarnation as
political correctness.
"I am glad that I
managed to write 'The Crucible,'" <NAME> wrote,
"but looking back I have often wished I'd
had the temperament to do an absurd comedy, which is what the
situation deserved." [17]ABQA Dutch friend says
I should use Holland as an example of a tolerant society.
It's true they have a long tradition of
comparative open-mindedness. For centuries the low countries were the place
to go to say things you couldn't say anywhere else,
and this helped to make the region a center of scholarship and industry
(which have been closely tied for longer than most people realize).
Descartes, though claimed by the French, did much of his thinking in
Holland.And yet, I wonder. The Dutch seem to live their lives up to their
necks in rules and regulations. There's so much you can't do there;
is there really nothing
you can't say?Certainly the fact that they value open-mindedness is no guarantee.
Who thinks they're not open-minded? Our hypothetical prim miss from
the suburbs thinks she's open-minded. Hasn't she been
taught to be? Ask anyone, and they'll say the same thing: they're
pretty open-minded, though they draw the line at things that are really
wrong. (Some tribes
may avoid "wrong" as
judgemental, and may instead use a more neutral sounding euphemism
like "negative" or "destructive".)When people are bad at math, they know it, because they get the
wrong answers on tests. But when people are bad at open-mindedness
they don't know it. In fact they tend to think the opposite.
Remember, it's the nature of fashion to be invisible. It wouldn't
work otherwise. Fashion doesn't
seem like fashion to someone in the grip of it. It just seems like
the right thing to do. It's only by looking from a distance that
we see oscillations in people's idea of the right thing to do, and
can identify them as fashions.Time gives us such distance for free. Indeed, the arrival of new
fashions makes old fashions easy to see, because they
seem so ridiculous by contrast. From one end of a pendulum's
swing, the other end seems especially far away.To see fashion in your own time, though, requires a conscious effort.
Without time to give you distance, you have to create distance yourself.
Instead of being part of the mob, stand
as far away from it as you can and watch what it's
doing. And pay especially close attention whenever an idea is being
suppressed. Web filters for children and employees often ban
sites containing pornography, violence, and hate speech. What
counts as pornography and violence? And what, exactly, is
"hate speech?" This sounds like a phrase out of 1984.Labels like that are probably the biggest external clue.
If a statement is false,
that's the worst thing you can say about it. You don't
need to say that it's heretical. And if it isn't false, it
shouldn't be suppressed. So when you see statements being
attacked as x-ist or y-ic (substitute your current values of
x and y), whether in 1630 or 2030, that's a sure sign that
something is wrong. When you hear such labels being used,
ask why.Especially if you hear yourself using them. It's not just
the mob you need to learn to watch from a distance. You need to be
able to watch your own thoughts from a distance. That's not
a radical idea, by the way; it's the main difference between
children and adults. When a child gets angry because he's
tired, he doesn't know what's happening. An adult can
distance himself enough from the
situation to say "never mind, I'm just tired." I don't
see why one couldn't, by a similar process, learn to
recognize and discount the effects of moral fashions.You have to take that extra step if you want to think clearly.
But it's harder, because now you're working against social customs
instead of with them. Everyone encourages you to grow up to the
point where you can discount your own bad moods. Few encourage
you to continue to the point where you can discount society's bad
moods.How can you see the wave, when you're the water? Always be
questioning. That's the only defence. What can't you say? And why?NotesThanks to <NAME>, <NAME>, <NAME>,
<NAME>, <NAME> and <NAME> for reading drafts of this
essay, and to <NAME>, <NAME>, <NAME> and <NAME>
for conversations about heresy.
Needless to say they bear no blame for opinions
expressed in it, and especially for opinions not
expressed in it.Re: What You Can't SayLabelsJapanese TranslationFrench TranslationGerman TranslationDutch TranslationRomanian TranslationHebrew TranslationTurkish TranslationChinese TranslationButtonsA Civic Duty to AnnoyThe Perils of ObedienceAliens Cause Global WarmingHays CodeStratagem 32Conspiracy TheoriesMark Twain: Corn-pone OpinionsA Blacklist for "Excuse Makers"What You Can't Say Will Hurt You
|
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-1B100.typ | typst | Apache License 2.0 | #let data = (
("HENTAIGANA LETTER RE-3", "Lo", 0),
("HENTAIGANA LETTER RE-4", "Lo", 0),
("HENTAIGANA LETTER RO-1", "Lo", 0),
("HENTAIGANA LETTER RO-2", "Lo", 0),
("HENTAIGANA LETTER RO-3", "Lo", 0),
("HENTAIGANA LETTER RO-4", "Lo", 0),
("HENTAIGANA LETTER RO-5", "Lo", 0),
("HENTAIGANA LETTER RO-6", "Lo", 0),
("HENTAIGANA LETTER WA-1", "Lo", 0),
("HENTAIGANA LETTER WA-2", "Lo", 0),
("HENTAIGANA LETTER WA-3", "Lo", 0),
("HENTAIGANA LETTER WA-4", "Lo", 0),
("HENTAIGANA LETTER WA-5", "Lo", 0),
("HENTAIGANA LETTER WI-1", "Lo", 0),
("HENTAIGANA LETTER WI-2", "Lo", 0),
("HENTAIGANA LETTER WI-3", "Lo", 0),
("HENTAIGANA LETTER WI-4", "Lo", 0),
("HENTAIGANA LETTER WI-5", "Lo", 0),
("HENTAIGANA LETTER WE-1", "Lo", 0),
("HENTAIGANA LETTER WE-2", "Lo", 0),
("HENTAIGANA LETTER WE-3", "Lo", 0),
("HENTAIGANA LETTER WE-4", "Lo", 0),
("HENTAIGANA LETTER WO-1", "Lo", 0),
("HENTAIGANA LETTER WO-2", "Lo", 0),
("HENTAIGANA LETTER WO-3", "Lo", 0),
("HENTAIGANA LETTER WO-4", "Lo", 0),
("HENTAIGANA LETTER WO-5", "Lo", 0),
("HENTAIGANA LETTER WO-6", "Lo", 0),
("HENTAIGANA LETTER WO-7", "Lo", 0),
("HENTAIGANA LETTER N-MU-MO-1", "Lo", 0),
("HENTAIGANA LETTER N-MU-MO-2", "Lo", 0),
("HIRAGANA LETTER ARCHAIC WU", "Lo", 0),
("KATAKANA LETTER ARCHAIC YI", "Lo", 0),
("KATAKANA LETTER ARCHAIC YE", "Lo", 0),
("KATAKANA LETTER ARCHAIC WU", "Lo", 0),
)
|
https://github.com/WinstonMDP/math | https://raw.githubusercontent.com/WinstonMDP/math/main/knowledge/limits.typ | typst | #import "../cfg.typ": *
#show: cfg
= Limits
$lim_(n -> oo) x_n :=
a: all(epsilon > 0) ex(N in NN) all(n > N): abs(x_n - a) < epsilon$
A sequence is convergent $:=$ it has a limit.
A sequence is fundamental $:=
all(epsilon > 0) ex(N) all(n\, m > N): abs(x_n - x_m) < epsilon$.
*The Cauchy criterion:* A sequence is fundamental $<->$ it converges.
*Bernoulli's principle:*
$all(x >= -1): cases(1 + x n <= (1 + x)^n "if" x = 0, 1 + x n < (1 + x)^n)$
Euler's number $:= e := lim_(n -> oo) (1 + 1/n)^n$
$x_n -> oo := all(c in RR) ex(N in NN) all(n > N): c < abs(x_n)$
$liminf_(k -> oo) x_k := lim_(n -> oo) inf_(n <= k) x_k$
A partial limit of a sequence $:=$ a limit of its subsequence
A function $f: RR supset.eq E -> RR$ converges to $A$ with $x$ tending to a limit
point $a :=
lim_(x -> a) f(x) = A :=
all(epsilon > 0) ex(delta > 0) all(x in E):
0 < abs(x - a) < delta -> abs(f(x) - A) < epsilon$
// Typesets the punctured-neighborhood symbol: the operator `U` with a
// small open circle attached on top and the set `E` as a bottom-right
// subscript, e.g. a deleted neighborhood U°_E(a) in the limit definitions.
#let punc(U, E) = math.attach(
  math.limits(U),
  t: math.circle.stroked.tiny,
  br: E,
)
$lim_(x -> a) f(x) = A <->
all(V_RR (A)) ex(punc(U, E)(a)): f[punc(U, E)(a)] subset.eq V_RR (A)$
$lim_(x -> a) f(x) = A <->
all({x_n} subset.eq E without a):
lim_(n -> oo) x_n = a -> lim_(n -> oo) f(x_n) = A$
$cal(B) subset.eq op(cal(P)) X$ is a base in a set $X :=$
+ $all(B in cal(B)): B != emptyset$
+ $all(B_1\, B_2 in cal(B)) ex(B in cal(B)): B subset.eq B_1 sect B_2$
A number $A in RR$ is a limit of a function $f: X -> RR$ on a base $cal(B)$ in
$X :=
lim_cal(B) = A :=
all(V(A)) ex(B in cal(B)): f[B] subset.eq V(A) $
An oscillation of a function $f: X -> RR$ on a set $E subset.eq X :=
omega(f, E) :=
sup_(x_1, x_2 in E) abs(f(x_1) - f(x_2))$
$ex(lim_cal(B) f) <->
all(epsilon > 0) ex(B in cal(B)): omega(f, B) < epsilon$
$ex(lim_cal(B)_Y g) ->
(all(B_Y in cal(B)_Y) ex(B_X in cal(B)_X): f[B_X] subset.eq B_Y) ->
lim_cal(B)_X g compose f = lim_cal(B)_Y g$
$lim_(x -> oo) (1 + 1/x)^x = e$
A function is monotone $:=$ it's nondecreasing or nonincreasing
$inf E, sup E$ are limit points $->$
(a monotone function has a limit $<->$ it's bounded)
A property between functions holds finally on a base $cal(B) :=
ex(B in cal(B)):$ in which property holds
A function $f$ is infinitesimal on a base $cal(B) := lim_cal(B) f = 0$
A function $f$ is infinitesimal compared to a function $g$ on a base $cal(B) :=
f =_cal(B) o(g) :=
ex("a inifnitesimal function" alpha): f(x) = alpha(x) g(x)$ holds finally on
$cal(B)$
A function $f$ is infinitesimal function of a higher order compared to a
infinitesimal function $g$ on a base $cal(B) :=
f =_cal(B) o(g)$
$f =_cal(B) O(g) :=
ex("finally bounded on" cal(B) "function"):
f(x) = beta(x) g(x)$ holds on finally on $cal(B)$
$f, g$ has the same order on a basis $cal(B) :=
f =_cal(B) O(g) and g =_cal(B) O(f)$
$f tilde g := ex(alpha): lim_cal(B) alpha(x) = 1 and f(x) = alpha(x) g(x)$
$sin x tilde
ln(1 + x) tilde
e^x - 1 tilde
((1 + x)^alpha - 1)/alpha tilde
x$
with a base $x -> 0$
$all(alpha > 0): log_a x =_(x -> +oo) o(x^alpha)$
$all(a > 1): x^alpha =_(x -> +oo) o(a^x)$
|
|
https://github.com/Dherse/ugent-templates | https://raw.githubusercontent.com/Dherse/ugent-templates/main/masterproef/parts/appendix.typ | typst | MIT License | #import "../ugent-template.typ": *
#show: ugent-appendix
= First appendix
#ufigure(
caption: [ An overview of the hierarchy of the programmable PIC. Source: <NAME>, et al.],
outline: [ Hierarchy of programmable PICs. ],
image("../assets/programmable-pic-hierarchy.png")
) |
https://github.com/Jollywatt/typst-fletcher | https://raw.githubusercontent.com/Jollywatt/typst-fletcher/master/tests/template.typ | typst | MIT License | #set page(width: auto, height: auto, margin: 1em)
#import "/src/exports.typ" as fletcher: diagram, node, edge
|
https://github.com/RaphGL/ElectronicsFromBasics | https://raw.githubusercontent.com/RaphGL/ElectronicsFromBasics/main/DC/chap1/4_voltage_and_current.typ | typst | Other | #import "../../core/core.typ"
=== Voltage and current
As was previously mentioned, we need more than just a continuous path (circuit) before a continuous flow of electrons will occur: we also need some means to push these electrons around the circuit. Just like marbles in a tube or water in a pipe, it takes some kind of influencing force to initiate flow. With electrons, this force is the same force at work in static electricity: the force produced by an imbalance of electric charge.
If we take the examples of wax and wool which have been rubbed together, we find that the surplus of electrons in the wax (negative charge) and the deficit of electrons in the wool (positive charge) creates an imbalance of charge between them. This imbalance manifests itself as an attractive force between the two objects:
#image("static/4-wax-wool-attraction.png.png")
If a conductive wire is placed between the charged wax and wool, electrons will flow through it, as some of the excess electrons in the wax rush through the wire to get back to the wool, filling the deficiency of electrons there:
#image("static/4-wax-wool-electron-flow.png")
The imbalance of electrons between the atoms in the wax and the atoms in the wool creates a force between the two materials. With no path for electrons to flow from the wax to the wool, all this force can do is attract the two objects together. Now that a conductor bridges the insulating gap, however, the force will provoke electrons to flow in a uniform direction through the wire, if only momentarily, until the charge in that area neutralizes and the force between the wax and wool diminishes.
The electric charge formed between these two materials by rubbing them together serves to store a certain amount of energy. This energy is not unlike the energy stored in a high reservoir of water that has been pumped from a lower-level pond:
#image("static/4-pond-reservoir.png")
The influence of gravity on the water in the reservoir creates a force that attempts to move the water down to the lower level again. If a suitable pipe is run from the reservoir back to the pond, water will flow under the influence of gravity down from the reservoir, through the pipe:
#image("static/4-pond-reservoir-2.png")
It takes energy to pump that water from the low-level pond to the high-level reservoir, and the movement of water through the piping back down to its original level constitutes a releasing of energy stored from previous pumping.
If the water is pumped to an even higher level, it will take even more energy to do so, thus more energy will be stored, and more energy released if the water is allowed to flow through a pipe back down again:
#image("static/4-pond-reservoir-3.png")
Electrons are not much different. If we rub wax and wool together, we "pump" electrons away from their normal "levels," creating a condition where a force exists between the wax and wool, as the electrons seek to re-establish their former positions (and balance within their respective atoms). The force attracting electrons back to their original positions around the positive nuclei of their atoms is analogous to the force gravity exerts on water in the reservoir, trying to draw it down to its former level.
Just as the pumping of water to a higher level results in energy being stored, "pumping" electrons to create an electric charge imbalance results in a certain amount of energy being stored in that imbalance. And, just as providing a way for water to flow back down from the heights of the reservoir results in a release of that stored energy, providing a way for electrons to flow back to their original "levels" results in a release of stored energy.
When the electrons are poised in that static condition (just like water sitting still, high in a reservoir), the energy stored there is called potential energy, because it has the possibility (potential) of release that has not been fully realized yet. When you scuff your rubber-soled shoes against a fabric carpet on a dry day, you create an imbalance of electric charge between yourself and the carpet. The action of scuffing your feet stores energy in the form of an imbalance of electrons forced from their original locations. This charge (static electricity) is stationary, and you won't realize that energy is being stored at all. However, once you place your hand against a metal doorknob (with lots of electron mobility to neutralize your electric charge), that stored energy will be released in the form of a sudden flow of electrons through your hand, and you will perceive it as an electric shock!
This potential energy, stored in the form of an electric charge imbalance and capable of provoking electrons to flow through a conductor, can be expressed as a term called voltage, which technically is a measure of potential energy per unit charge of electrons, or something a physicist would call specific potential energy. Defined in the context of static electricity, voltage is the measure of work required to move a unit charge from one location to another, against the force which tries to keep electric charges balanced. In the context of electrical power sources, voltage is the amount of potential energy available (work to be done) per unit charge, to move electrons through a conductor.
Because voltage is an expression of potential energy, representing the possibility or potential for energy release as the electrons move from one "level" to another, it is always referenced between two points. Consider the water reservoir analogy:
#image("static/4-reservoir-drop.png")
Because of the difference in the height of the drop, there's potential for much more energy to be released from the reservoir through the piping to location 2 than to location 1. The principle can be intuitively understood in dropping a rock: which results in a more violent impact, a rock dropped from a height of one foot, or the same rock dropped from a height of one mile? Obviously, the drop of greater height results in greater energy released (a more violent impact). We cannot assess the amount of stored energy in a water reservoir simply by measuring the volume of water any more than we can predict the severity of a falling rock's impact simply from knowing the weight of the rock: in both cases we must also consider how far these masses will drop from their initial height. The amount of energy released by allowing a mass to drop is relative to the distance between its starting and ending points. Likewise, the potential energy available for moving electrons from one point to another is relative to those two points. Therefore, voltage is always expressed as a quantity between two points. Interestingly enough, the analogy of a mass potentially "dropping" from one height to another is such an apt model that voltage between two points is sometimes called a voltage drop.
Voltage can be generated by means other than rubbing certain types of materials against each other. Chemical reactions, radiant energy, and the influence of magnetism on conductors are a few ways in which voltage may be produced. Respective examples of these three sources of voltage are batteries, solar cells, and generators (such as the "alternator" unit under the hood of your automobile). For now, we won't go into detail as to how each of these voltage sources works -- more important is that we understand how voltage sources can be applied to create electron flow in a circuit.
Let's take the symbol for a chemical battery and build a circuit step by step:
#image("static/4-battery.png")
Any source of voltage, including batteries, have two points for electrical contact. In this case, we have point 1 and point 2 in the above diagram. The horizontal lines of varying length indicate that this is a battery, and they further indicate the direction which this battery's voltage will try to push electrons through a circuit. The fact that the horizontal lines in the battery symbol appear separated (and thus unable to serve as a path for electrons to move) is no cause for concern: in real life, those horizontal lines represent metallic plates immersed in a liquid or semi-solid material that not only conducts electrons, but also generates the voltage to push them along by interacting with the plates.
Notice the little "+" and "-" signs to the immediate left of the battery symbol. The negative (-) end of the battery is always the end with the shortest dash, and the positive (+) end of the battery is always the end with the longest dash. Since we have decided to call electrons "negatively" charged (thanks, Ben!), the negative end of a battery is that end which tries to push electrons out of it. Likewise, the positive end is that end which tries to attract electrons.
With the "+" and "-" ends of the battery not connected to anything, there will be voltage between those two points, but there will be no flow of electrons through the battery, because there is no continuous path for the electrons to move.
#image("static/4-water-analogy.png")
The same principle holds true for the water reservoir and pump analogy: without a return pipe back to the pond, stored energy in the reservoir cannot be released in the form of water flow. Once the reservoir is completely filled up, no flow can occur, no matter how much pressure the pump may generate. There needs to be a complete path (circuit) for water to flow from the pond, to the reservoir, and back to the pond in order for continuous flow to occur.
We can provide such a path for the battery by connecting a piece of wire from one end of the battery to the other. Forming a circuit with a loop of wire, we will initiate a continuous flow of electrons in a clockwise direction:
#image("static/4-water-analogy-2.png")
So long as the battery continues to produce voltage and the continuity of the electrical path isn't broken, electrons will continue to flow in the circuit. Following the metaphor of water moving through a pipe, this continuous, uniform flow of electrons through the circuit is called a _current_. So long as the voltage source keeps "pushing" in the same direction, the electron flow will continue to move in the same direction in the circuit. This single-direction flow of electrons is called a _Direct Current_, or DC. In the second volume of this book series, electric circuits are explored where the direction of current switches back and forth: _Alternating Current_, or AC. But for now, we'll just concern ourselves with DC circuits.
Because electric current is composed of individual electrons flowing in unison through a conductor by moving along and pushing on the electrons ahead, just like marbles through a tube or water through a pipe, the amount of flow throughout a single circuit will be the same at any point. If we were to monitor a cross-section of the wire in a single circuit, counting the electrons flowing by, we would notice the exact same quantity per unit of time as in any other part of the circuit, regardless of conductor length or conductor diameter.
If we break the circuit's continuity at any point, the electric current will cease in the entire loop, and the full voltage produced by the battery will be manifested across the break, between the wire ends that used to be connected:
#image("static/4-broken-continuity.png")
Notice the "+" and "-" signs drawn at the ends of the break in the circuit, and how they correspond to the "+" and "-" signs next to the battery's terminals. These markers indicate the direction that the voltage attempts to push electron flow, that potential direction commonly referred to as polarity. Remember that voltage is always relative between two points. Because of this fact, the polarity of a voltage drop is also relative between two points: whether a point in a circuit gets labeled with a "+" or a "-" depends on the other point to which it is referenced. Take a look at the following circuit, where each corner of the loop is marked with a number for reference:
#image("static/4-broken-continuity-2.png")
With the circuit's continuity broken between points 2 and 3, the polarity of the voltage dropped between points 2 and 3 is "-" for point 2 and "+" for point 3. The battery's polarity (1 "-" and 4 "+") is trying to push electrons through the loop clockwise from 1 to 2 to 3 to 4 and back to 1 again.
Now let's see what happens if we connect points 2 and 3 back together again, but place a break in the circuit between points 3 and 4:
#image("static/4-broken-continuity-3.png")
With the break between 3 and 4, the polarity of the voltage drop between those two points is "+" for 4 and "-" for 3. Take special note of the fact that point 3's "sign" is opposite of that in the first example, where the break was between points 2 and 3 (where point 3 was labeled "+"). It is impossible for us to say that point 3 in this circuit will always be either "+" or "-", because polarity, like voltage itself, is not specific to a single point, but is always relative between two points!
#core.review[
- Electrons can be motivated to flow through a conductor by the same force manifested in static electricity.
- Voltage is the measure of specific potential energy (potential energy per unit charge) between two locations. In layman's terms, it is the measure of "push" available to motivate electrons.
- Voltage, as an expression of potential energy, is always relative between two locations, or points. Sometimes it is called a voltage "drop."
- When a voltage source is connected to a circuit, the voltage will cause a uniform flow of electrons through that circuit called a current.
- In a single (one loop) circuit, the amount of current at any point is the same as the amount of current at any other point.
- If a circuit containing a voltage source is broken, the full voltage of that source will appear across the points of the break.
- The +/- orientation of a voltage drop is called the polarity. It is also relative between two points.
]
|
https://github.com/Tiggax/zakljucna_naloga | https://raw.githubusercontent.com/Tiggax/zakljucna_naloga/main/main.typ | typst | #import "@preview/sunny-famnit:0.2.0": project
#import "/src/additional.typ" as ab
// Load document metadata and rename the ASCII toml key "kljucne_besede"
// to the accented "ključne_besede" expected by the template.
#let meta = toml("metadata.toml")
#let res = meta.insert("ključne_besede", meta.kljucne_besede)
#let res = meta.remove("kljucne_besede")
// - - - - - Add to template - - - - -
// Outline entries get a dotted fill; level-2 headings render in upper case.
#set outline(fill: repeat[.#h(8pt)], indent: 2em)
#show heading.where(level: 2): it => upper(text(weight: "regular", it))
// - - - - - - - - - - - - - - - - - -
// Apply the sunny-famnit thesis template, forwarding all metadata fields
// plus abstracts, abbreviations (kratice), attachments (priloge),
// acknowledgements (zahvala) and the bibliography.
#show: project.with(
  date: datetime(day: 1, month: 5, year: 2024),
  ..meta,
  izvleček: ab.izvleček,
  abstract: ab.abstract,
  kratice: meta.kratice,
  priloge: ("Links and description of Git repositories containing the app and the workflow": ab.git), // you can add attachments as a dict of a title and content like `"name": [content],`
  zahvala: ab.zahvala,
  bib_file: bibliography(
    "references.bib",
    style: "ieee",
    title: [References],
  ),
)
// Checklist rendering for `- [ ]` / `- [x]` list items.
#import "@preview/cheq:0.1.0": checklist
#show: checklist.with(fill: luma(95%), stroke: navy)
// - - - - - Add to template - - - - -
// Global tweaks: page break before level-1 headings, extra heading spacing,
// no hyphenation, numbered display equations.
#show heading.where(level: 1):it => {pagebreak(weak: true);it}
#show heading: set block(spacing: 2em)
#set text(hyphenate: false)
#set math.equation(numbering: "(1)")
// - - - - - - - - - - - - - - - - - -
#include "/src/sec/1uvod.typ"
= Methods
#include "/src/sec/3metode.typ"
= Results and discussion
#include "/src/sec/4rezultati.typ"
= Conclusion
#include "/src/sec/5summary.typ"
= Daljši povzetek v slovenskem jeziku
#include "/src/sec/6zaključek.typ" |
|
https://github.com/lxl66566/my-college-files | https://raw.githubusercontent.com/lxl66566/my-college-files/main/信息科学与工程学院/嵌入式系统/实验/报告/template.typ | typst | The Unlicense | #let 字号 = (
  // Chinese typographic size presets: traditional size names mapped to
  // points (e.g. 小四 "small four" = 12pt, the template's body size).
  初号: 42pt,
  小初: 36pt,
  一号: 26pt,
  小一: 24pt,
  二号: 22pt,
  小二: 18pt,
  三号: 16pt,
  小三: 15pt,
  四号: 14pt,
  中四: 13pt,
  小四: 12pt,
  五号: 10.5pt,
  小五: 9pt,
  六号: 7.5pt,
  小六: 6.5pt,
  七号: 5.5pt,
  小七: 5pt,
)
// Font family stacks: Latin face first, then the matching CJK fallback.
// 代码 ("code") prefers Fira Code for monospaced listings.
#let 字体 = (
  仿宋: ("Times New Roman", "FangSong"),
  宋体: ("Times New Roman", "SimSun"),
  黑体: ("Times New Roman", "SimHei"),
  楷体: ("Times New Roman", "KaiTi"),
  代码: ("Fira Code", "Times New Roman", "SimSun"),
)
// Draw `body` inside a rounded, stroked box using the code font.
// If `title` is given, it is shown as a shaded tab in the top-right
// corner of the frame.
#let frame(title: none, body) = {
  let stroke = black + 1pt
  let radius = 5pt
  let font = (font: 字体.代码, size: 10pt)
  // Title tab: light-grey fill, rounded on two opposite corners.
  // `below: -1.5em` pulls the following content block up so the tab
  // visually sits on the frame's edge.
  let name = block(
    breakable: false,
    fill: color.linear-rgb(0, 0, 0, 10),
    stroke: stroke,
    inset: 0.5em,
    below: -1.5em,
    radius: (top-right: radius, bottom-left: radius),
    title,
  )
  // Use the code font both for plain text and for raw blocks inside the frame.
  set text(..font)
  show raw: set text(..font)
  box(stroke: stroke, radius: radius)[
    #if title != none {
      align(top + right, name)
    }
    #block(
      width: 100%,
      inset: (rest: 0.5em),
      body,
    )
  ]
}
// Read a source file from disk and render it inside a titled frame,
// syntax-highlighted for the given language.
#let include_code_file(file_path, name, lang) = {
  let listing = raw(read(file_path), lang: lang)
  frame(title: name, listing)
}
// Report template: configures page, CJK body fonts, heading styles and
// figure captions, then renders `body`.
//
// Parameters:
//   title:   document title (stored in document metadata).
//   authors: author names, stored in document metadata.
//   body:    the report content.
#let project(
  title: "",
  authors: (),
  body
) = {
  set document(author: authors, title: title)
  set page(numbering: "1", number-align: center, margin: 0.7in)
  // Body text: justified, first line indented by 2 characters.
  set text(font: 字体.宋体, size: 字号.小四, lang: "zh")
  set par(justify: true,first-line-indent: 2em)
  // Append a tiny empty paragraph after every heading — presumably so the
  // first paragraph after a heading keeps its first-line indent (a common
  // CJK workaround); TODO confirm.
  show heading: it => {
    it
    par()[#text(size:0.5em)[#h(0.0em)]]
  }
  // Headings: bold CJK face; level 1 starts a new page and is centred.
  show heading: set text(font: 字体.黑体)
  set heading(numbering: "1.1")
  show heading: it => {
    if it.level == 1 {
      pagebreak(weak: true)
      align(center)[#text(font: 字体.黑体, size: 字号.小二, it)]
    }
    else if it.level == 2 {
      text(font: 字体.黑体, size: 字号.四号, it)
    }
    else if it.level == 3 {
      text(font: 字体.黑体, size: 字号.小四, it)
    }
  }
  // Figures: centred. Image figures place the caption below the image;
  // figures of kind "code" place the caption above the listing.
  show figure: it => [
    #set align(center)
    #if not it.has("kind") {
      it
    } else if it.kind == image {
      it.body
      [
        #set text(font: 字体.宋体, size: 字号.五号, weight: "extrabold")
        #h(1em)
        #it.caption
      ]
    } else if it.kind == "code" {
      [
        #set text(font: 字体.宋体, size: 字号.五号, weight: "bold")
        #h(1em)
        #it.caption
      ]
      it.body
    }
  ]
  body
}
|
https://github.com/EGmux/PCOM-2023.2 | https://raw.githubusercontent.com/EGmux/PCOM-2023.2/main/lista2/lista2q9.typ | typst | === A relação sinal-ruído de quantização SNR em um sistema PCM pode ser definida como a razão da potência média do sinal pela petência média do ruído de quantização. Para um sinal de modulação senidal de escala cheia de amplitude A, mostre que
#math.equation(block: true, $ "SNR" = (S/N_q) = 3/2L^2 $)
*ou*
#math.equation(block: true, $ (S/N_q)_"dB" = 1,76 + 20 log L $)
\
*em que L é a quantidade de níveis de quantização*.
_Assumimos log na base 10 nesse caso_
\
\
Primeiro é identificar como achar o ruído de sinal e o erro.
lembremos da seguinte expressão
#math.equation(block: true, $ |e_"max"| = V_"pp"/(2L) $)
no caso $V_"pp"$ é o sinal, mas agora precisamos encontrar outra expressão
vamos partir do fato que o erro máximo pode ser expresso em função de _q_
#math.equation(block: true, $e_"max" = q/2$)
lembremos que a variância total pode ser computada como
#math.equation(block: true, $ integral^infinity_(-infinity) e^2 p(e) "de" $)
expandido a equação
#math.equation(block: true, $ integral^(q/2) _(-q/2) e^2(1/q)"de" $)
note que podemos tirar o termo em função de _q_
#math.equation(
block: true, $ 1/q integral^(q/2)_(-q/2)e^2"de" &= && \
&= (1/q)(e^3/3)bar.v^(q/2)_(-q/2) && \
&= (1/(3q)((q/2)^3 - (-q/2)^3)) && \
&= (1/(3q))(q^3/8 + q^3/8) && \
&= 1/(3q)(q^3/4) && \
&= q^2/12$,
)
e pronto achamos a relação desejada envolvendo $sigma^2$ e _q_
agora é encontrar o sinal, isto é, a potência média do sinal senoidal, que é
dada por $V_"p"^2/2$; como $V_"p" = V_"pp"/2 = (L q)/2$, a potência do sinal é
#math.equation(block: true, $ S = V_"p"^2/2 = ((L q)/2)^2/2 = (L q)^2/8 $)
e o ruído já foi computado previamente, logo
#math.equation(block: true, $ "SNR" &= ((L q)^2/8)/(q^2/12) &&\
&= 3/2 L^2$)
// TODO: fazer depois
|
|
https://github.com/soul667/typst | https://raw.githubusercontent.com/soul667/typst/main/PPT/typst-slides-fudan/themes/polylux/book/src/themes/your-own.md | markdown | # Build your own theme
Again, there is no right or wrong when it comes to how a polylux theme works.
If you consider building a theme that you would like to contribute to the
package ([which you are cordially invited to do!](https://github.com/andreasKroepelin/polylux/pulls)),
we kindly ask you to follow the convention presented before.
In any case, it is probably a good idea to take that as an orientation.
To demonstrate how one would go about defining a custom theme, let us create one
together!
How about we make one for science slams?
If you have ever been to one, you might have noticed that the presenters there
love sparse dark-mode slides with huge font sizes.
## Imports
Depending on whether this is a theme for yourself or supposed to be part of
polylux, you do one of two things:
For yourself, you simply import polylux as always:
```typ
#import "@preview/polylux:0.2.0": *
```
A theme that is shipped with polylux doesn't have to do that, and it shouldn't!
Otherwise circular imports can occur.
Instead, you depend on the two files `logic.typ` and `helpers.typ`.
As your theme file `science-slam.typ` will be inside the `themes` directory, the
imports will be:
```typ
#import "../logic.typ"
#import "../helpers.typ"
```
Additionally, you have to make polylux know about your theme which you do by
adding
```typ
#import "science-slam.typ"
```
to `themes/themes.typ`.
## The initialisation function
With that out of the way, we start with the function that sets the scene for
everything else to come.
By convention, we call it `science-slam-theme` and it can accept some keyword
arguments along a single content argument.
The keyword arguments are for configuration options, the content is for the rest
of the document (read [here](https://typst.app/docs/tutorial/making-a-template/)
if you are unfamiliar with this kind of function, this feature is rather unique
to Typst).
```typ
{{#include science-slam.typ:init}}
```
As you can see, we have two configuration options:
One for the aspect ratio (as is convention) and one to determine the background
colour — the more serious you are, the darker your background colour is, of course.
After we have set the `page` parameters accordingly, we also define the text to
be huge, bright and in a sans serif font.
Using it looks like this:
```typ
{{#include science-slam.typ:use-init}}
```
## Title slide
Next up, let us define a cool title slide.
The only thing you have to keep in mind when defining your own slide functions
is that **you need to put the content you produce into the `#polylux-slide`
function in the end**.
It might look as if it works without that as well but it actually breaks when you
use `#uncover` or similar polylux features.
If you build a theme as part of polylux and you have followed the import
instructions from above, you will qualify the function as `logic.polylux-slide`.
Our title slide here is very simple, it just makes very sure to let the audience
know what the topic is and who is speaking:
```typ
{{#include science-slam.typ:title-slide}}
```
You can use it like this:
```typ
{{#include science-slam.typ:use-title-slide}}
```
Note that the user does not actually provide any content themselves to this function.
That is a common thing for title slides because their structure is so well-defined
that a theme can produce all the content by just asking for a few pieces of
information (like the title etc.).
## Regular slides
The principle is the same as with the title slide.
Define a function, create some content, pass it to `polylux-slide`.
By convention, you should name the function for regular slides `slide` because
it will be used most often.
Here you will typically accept arbitrary content as a positional parameter that
will make up the main content of the slide.
For example:
```typ
{{#include science-slam.typ:slide}}
```
And you can use it like this:
```typ
{{#include science-slam.typ:use-slide}}
```
In case you wondered, this is how the theme and the slides we just put together
look like:
![science-slam](science-slam.png)
## Any number of further variants
Be creative!
There are no limits but your own to the slide functions your theme can contain
once you grasped the simple structure.
For "serious" themes (other than this demo) you will probably want to think
about adding headers, footers, slide numbers etc.
Why not look into the source code of existing themes to get some inspiration?
The next page also lists some small tools that polylux provides to make common
tasks simpler when creating a slide.
|
|
https://github.com/polarkac/MTG-Stories | https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/046_Streets%20of%20New%20Capenna.typ | typst | #import "@local/mtgset:0.1.0": conf
#show: doc => conf("Streets of New Capenna", doc)
#include "./046 - Streets of New Capenna/001_Episode 1: Homecoming.typ"
#include "./046 - Streets of New Capenna/002_The Contract Breaker.typ"
#include "./046 - Streets of New Capenna/003_Episode 2: Dirty Laundry.typ"
#include "./046 - Streets of New Capenna/004_What You Expect to See.typ"
#include "./046 - Streets of New Capenna/005_The Side of Freedom.typ"
#include "./046 - Streets of New Capenna/006_Episode 3: Tests.typ"
#include "./046 - Streets of New Capenna/007_Episode 4: The Font.typ"
#include "./046 - Streets of New Capenna/008_The Family Man.typ"
#include "./046 - Streets of New Capenna/009_Episode 5: Hymn of the Angels.typ"
#include "./046 - Streets of New Capenna/010_Alley Cat Blues.typ"
#include "./046 - Streets of New Capenna/011_A Garden of Flesh.typ"
|
|
https://github.com/antonWetzel/prettypst | https://raw.githubusercontent.com/antonWetzel/prettypst/master/readme.md | markdown | MIT License | # Prettypst
## Formatter for Typst!
| Before | Default Style | OTBS Style |
| :-------------------------------------: | :----------------------------------: | :----------------------------: |
| ![Before formatting](images/before.png) | ![Default style](images/default.png) | ![OTBS style](images/otbs.png) |
## Install
cargo install --git=https://github.com/antonWetzel/prettypst.git --locked
## Usage / Features
- Prettypst might change your work; use it only if you can recover a previous version.
- Insert or remove trailing comma to change between single line and multiline
- Any feature requests are welcome
### Terminal
prettypst [--help] | Print help
prettypst <file> | Format file inplace
prettypst --use-std-in ... | Use terminal as input
prettypst ... -o <output-file> | Change output file
prettypst ... --use-std-out | Use terminal as output
prettypst --style=[default | otbs] ... | Change the used style
prettypst --use-configuration ... | Load a configuration from "prettypst.toml"
prettypst --file-location=<path> ... | Change file location to search for configuration (use with --use-std-in)
### VSCodium or VSCode
- install custom-local-formatters
- use prettypst for typst
- ```json
"customLocalFormatters.formatters": [
{
"command": "prettypst --use-std-in --use-std-out --file-location=${file}",
"languages": [
"typst",
],
}
],
```
- change command to use wanted style or configuration
- `--style=otbs`
- `--use-configuration`
- `prettypst.toml` must be in the same folder as the formatted file or a parent folder
### Helix
- Add `prettypst` to your `languages.toml`
- ```toml
[[language]]
name = "typst"
# ...
formatter = { command = "prettypst", args = ["--use-std-in", "--use-std-out"] }
```
- change command to use wanted style or configuration
- `--style=otbs`
- `--use-configuration`
- `prettypst.toml` must be in the current working directory or a parent folder
## Settings
- create configuration with `prettypst [--style=otbs] --save-configuration`
```gdscript
indentation = 0 | 1 | ... # use 0 for tabs
seperate-label = false | true # insert space before a label
final-newline = false | true # insert a final newline at the end
[preserve-newline]
content = false | true # preserve single newlines in markup
math = false | true # preserve single newline in equations
[block]
long-block-style = "seperate" | "compact" # '[' and ']' on the same or seperate lines as the content
[term]
space-before = false | true # insert a space before the ':' in terms
space-after = false | true # insert a space after the ':' in terms
[named-argument]
space-before = false | true # insert a space before the ':' in named arguments
space-after = false | true # insert a space after the ':' in named arguments
[dictionary-entry]
space-before = false | true # insert a space before the ':' in dictionary entries
space-after = false | true # insert a space after the ':' in dictionary entries
[import-statement]
space-before = false | true # insert a space before the ':' in import statements
space-after = false | true # insert a space after the ':' in import statements
[comma]
space-before = false | true # insert a space before the ',' in arguments
space-after = false | true # insert a space after the ',' in arguments
[columns]
comma = "end-of-content" | "end-of-cell" # how to align commas in tables
[heading]
blank-lines-before = 0 | 1 | ... # blank lines before a heading
blank-lines-after = 0 | 1 | ... # blank lines after a heading
[columns-commands]
grid = "columns" # format the "grid", "gridx", ... command with
gridx = "columns" # columns specified by the named argument "columns"
table = "columns"
tablex = "columns"
```
|
https://github.com/Champitoad/typst-slides | https://raw.githubusercontent.com/Champitoad/typst-slides/main/svg-emoji/lib.typ | typst | Creative Commons Zero v1.0 Universal | #let noto = (
  // Noto emoji font bundle: `dict` maps emoji text to an SVG path,
  // `regex` matches every emoji the dictionary covers.
  dict: json("noto.json"),
  regex: read("noto.regex"),
)
// GitHub shortcode table (shortcode → replacement), loaded from JSON.
// NOTE(review): module-level binding; `setup-github` below re-reads the
// same file instead of using this — confirm which one is intended.
#let github = json("./github.json")
#let emoji-image(svg, alt: "", height: 1em) = {
style(styles => {
let h = measure([X], styles).height
box(
align(horizon, image(svg, format: "svg", height: height, alt: alt)),
height: h,
outset: (y: (height - h) / 2),
)
})
}
// Install a show rule that replaces every emoji matched by `font.regex`
// with its SVG image from `font.dict`, sized to `height`, then renders
// `body`. `font` must provide `dict` (text → svg path) and `regex` fields.
//
// Fix: the body previously hard-coded `noto`, silently ignoring the
// `font` parameter; it now uses the font that was actually passed in.
// Behaviour with the default `font: noto` is unchanged.
#let setup-emoji(font: noto, height: 1em, body) = {
  show regex(font.regex): it => {
    emoji-image(
      alt: it.text,
      height: height,
      font.dict.at(it.text)
    )
  }
  // [with svg emoji font]
  body
}
// do not work
// NOTE(review): marked broken by the author; the show rule strips the
// surrounding ':' from each matched shortcode and looks it up in
// github.json — presumably the regex or the JSON keys do not line up
// with that slicing; verify against github.regex / github.json.
#let setup-github(body) = {
  let re = read("github.regex")
  let data = json("./github.json")
  show regex(re): it => {
    // e.g. ":smile:" → "smile" (drop leading and trailing colon).
    let t = it.text
    let n = t.len()
    data.at(t.slice(1, n - 1))
  }
  // [with github]
  body
}
|
https://github.com/thanhdxuan/dacn-report | https://raw.githubusercontent.com/thanhdxuan/dacn-report/master/Lab02/contents/01-bia.typ | typst | #let m = yaml("/metadata.yml")
// Cover page; all displayed fields come from /metadata.yml (bound to `m`).
#set align(center)
// University block, forced to upper case.
#[
  #show: upper
  #set par(leading: 1.2em)
  // NOTE(review): "<NAME>" looks like a redaction placeholder — restore
  // the real city name before building.
  Đại học Quốc gia Thành phố <NAME> \
  Trường Đại học Bách Khoa \
  *Khoa Khoa học và Kỹ thuật Máy tính*
]
#v(2fr)
#align(center, image("/components/logo.png", height: 5cm))
#v(2fr)
// Course name and code from metadata ("môn học": tên / mã).
#[
  #set text(size: 15pt)
  #set align(left)
  *#m.at("môn học").at("tên") - #m.at("môn học").at("mã")*
]
#v(.5fr)
// Report title, boxed between two horizontal rules.
#block(width: 100%, inset: (y: 2em), stroke: (y: 1pt))[
  #set text(weight: "bold", size: 14pt)
  #align(left)[Báo cáo]
  #set par(leading: 1em)
  #set text(size: 18pt)
  #upper(m.at("tiêu đề"))
]
#v(1fr)
// Two-column block: supervising lecturers and participating students,
// both iterated from metadata lists.
#grid(
  columns: (1fr, 1fr),
  rows: (4em, auto),
  column-gutter: .5cm,
  align(right, [_Giảng viên hướng dẫn_:]), align(left, for s in m.at("giảng viên") [
    #v(1em, weak: true)
    #s.at("tên")
    #v(1em, weak: true)
  ]),
  align(right, [_Sinh viên thực hiện_:]) , align(left, for s in m.at("sinh viên") [
    #v(1em, weak: true)
    #s.at("mssv") - #s.at("tên")
  ]),
)
#v(1fr)
// Place and month/year of submission (today's date).
TP. Hồ Chí Minh, #datetime.today().display("[month]/[year]")
|
|
https://github.com/zenor0/simple-neat-typst-cv | https://raw.githubusercontent.com/zenor0/simple-neat-typst-cv/master/cv/utils/blocks.typ | typst | MIT License |
// Section heading: a small code-style prefix/postfix around a bold title,
// underlined by a thin, semi-transparent rule.
#let main_head(title, prefix: [\/\/], postfix: []) = {
  let lead = text(size: 12pt)[#prefix]
  let tail = text(font: "Cascadia Code")[#postfix]
  text(weight: "black", size: 16pt)[#lead #title #tail]
  v(-8pt)
  line(length: 100%, stroke: (thickness: 0.5pt, paint: rgb("#0000006F")))
}
// Width of the left (time) column shared by all row helpers below.
#let time_width = 3cm
// Two-column row: bold right-aligned title beside left-aligned content.
#let info_block(title, content) = {
  let cells = (
    text(weight: "black", size: 10pt)[#title],
    text(size: 10pt)[#content],
  )
  grid(
    columns: (time_width, 5fr),
    gutter: 2em,
    row-gutter: 1em,
    align: (right, left),
    ..cells,
  )
}
// Header row of an award entry: time, bold title and a right-aligned label.
#let award_info(time: none, title: none, label: none) = {
  let time-cell = text(size: 8pt)[#time]
  let title-cell = text(weight: "black", size: 12pt)[#title]
  let label-cell = text(weight: "bold", size: 9pt)[#label]
  grid(
    columns: (time_width, 5fr, 1fr),
    gutter: 2em,
    row-gutter: 1em,
    align: (right + horizon, left + horizon, right),
    time-cell,
    title-cell,
    label-cell,
  )
}
// Detail row aligned under an award title: empty time column, small text.
#let award_detail(content) = grid(
  columns: (time_width, 5fr),
  gutter: 2em,
  row-gutter: 1em,
  align: (right, left),
  text(size: 8pt)[],
  text(size: 8pt)[#content],
)
// Full award entry: one `award_info` row per item in `dict`, followed by
// the shared detail text rendered via `award_detail`.
#let award_block(dict: ((time: none, title: none, label: none),), content) = {
  set list(indent: -0.8em)
  set par(leading: 1em)
  for entry in dict {
    award_info(..entry)
  }
  award_detail(content)
}
|
https://github.com/antonWetzel/Masterarbeit | https://raw.githubusercontent.com/antonWetzel/Masterarbeit/main/arbeit/stand_der_technik.typ | typst | #import "setup.typ": *
= Stand der Technik <stand_der_technik>
== Punktdaten
Die Erfassung von Punktwolken kann durch verschiedene Lidar-Scanverfahren erfolgen. Aufnahmen vom Boden oder aus der Luft bieten dabei verschiedene Vor- und Nachteile @scantech. Bei einem Scan vom Boden aus wird eine kleinere Fläche mit hoher Genauigkeit abgetastet, wodurch einzelne Bäume genau analysiert werden können @terrestriallidar. Aus der Luft werden größere Flächen mit gleichbleibender Genauigkeit erfasst, wodurch größere Waldgebiete aufgenommen werden können. Dafür ist die Punktanzahl pro Baum geringer, was eine Analyse der einzelnen Bäume erschwert @forestscan.
Alternativ zu Lidar kann auch DAP#footnote[digital aerial photogrammetry] als Datenquelle verwendet werden @dap_als_comp. Dabei wird von einer Drohne oder einem Flugzeug mehrere Bilder vom Waldgebiet gemacht, mit denen die Höheninformationen bestimmt werden. Im Vergleich zu Lidar ist die Datengewinnung günstiger, dafür enthalten die DAP-Daten nur Informationen über die Baumkronen, weil nur diese von der Drohne oder dem Flugzeug sichtbar sind @dap.
Lidar-Daten können im LAS Dateiformat abgespeichert werden @las. Bei diesem werden die Messpunkte als Liste mit den bekannten Punkteigenschaften gespeichert. Je nach Messtechnologie können die erfassten Daten bei unterschiedlichen Punktwolken variieren, aber die dreidimensionale Position der Punkte ist immer gegeben. Aufgrund der großen Datenmengen werden LAS Dateien häufig im komprimierten LASzip Format gespeichert @laz. Die Kompression ist verlustfrei und ermöglicht eine Kompressionsrate zwischen #number(5) und #number(15) je nach Eingabedaten.
LAStools ist eine Sammlung von Software für die allgemeine Verarbeitung von LAS Dateien @lastools. Dazu gehört die Umwandlung in andere Dateiformate, Analyse der Daten und Visualisierung der Punkte. Durch den allgemeinen Fokus ist die Software nicht für die Verarbeitung von Wäldern ausgelegt, wodurch Funktionalitäten wie die Berechnungen von Baumeigenschaften mit zugehöriger Visualisierung nicht gegeben sind.
== Analyse
Die Punktwolke kann für die Analyse vom abgetasteten Gebiet verwendet werden. Für agrarisch verwendete Nutzflächen kann der momentane Stand von den Pflanzen bestimmt werden und das weitere Vorgehen kann daran angepasst werden @lidar_agri.
Mit der Punktwolke von einem Baum können charakteristische Informationen abgeschätzt werden. Aus der Verteilung der Punkte kann die Höhe vom Stamm, der Krone und vom ganzen Baum berechnet werden @forestscan. Ein weiterer relevanter Messwert ist der Durchmesser vom Stamm bei #number(1.3, unit: [m]) über dem Boden @pang. Aus den Messwerten können Eigenschaften wie das Alter vom Baum oder die aufgenommene Menge von Kohlenstoffdioxid, welche schwer zu messen sind, abgeschätzt werden.
Mit Punktwolken kann auch die Baumspezies bestimmt werden. Dafür kann die Verteilung der Punkte zugehörig zur Baumkrone benutzt werden @tree_ident. Mit hochauflösenden Daten kann ein Profil von der Rinde bestimmt werden, welches für die Einordnung verwendet wird @tree_bark. Zusätzlich zu den Punktwolken können auch hyperspektrale Bilder vom Gebiet als eine weitere Datenquelle dienen, wodurch die Klassifikation verbessert werden kann @tree_ident_spectral @tree_ident_spectral_2.
Für Waldgebiete wird die Punktwolke automatisch oder manuell in Segmente unterteilt @treeseg @pang, welche wie die einzelnen Bäume weiter analysiert werden können. Aus den kombinierten Daten von den einzelnen Bäumen kann eine Forstinventur berechnet werden @forest_inventory. Wenn zeitlich versetzte Datensätze vom gleichen Waldgebiet existieren, kann daraus auch die Entwicklung von den Bäumen abgeschätzt werden @forest_inventory_change.
== Rekonstruktion
Aus den Punktwolken können die ursprünglich abgetasteten Objekte rekonstruiert werden. Dazu gehören Objekte wie Gebäude und Straßen, aber auch Bäume @urban_recon.
Für die Rekonstruktion von Bäumen kann vom Boden aus zuerst der Stamm, dann die Äste und final die Blätter bestimmt werden @synthetic_trees. Für die Berechnung der Baumstruktur können auch neuronale Ansätze verwendet werden @neural_decomp. Mit einer Rekonstruktion vom Baum kann das Holzvolumen geschätzt werden @simple_tree. Dafür werden der Stamm und die Äste mit Zylindern approximiert, mit denen das totale Holzvolumen berechnet wird.
Für die Analyse von einzelnen Bäumen können mehrere Fotografien als alternative Datenquelle verwendet werden @from_images. Für die Fotos werden die Tiefeninformationen geschätzt, womit eine dreidimensionale Rekonstruktion ermöglicht wird. Das Verfahren ist nur für die Rekonstruktion von einzelnen Bäumen geeignet, dafür sind Farbinformationen vorhanden, womit realistische Modelle erstellt werden können.
|
|
https://github.com/cmoog/tex2typst | https://raw.githubusercontent.com/cmoog/tex2typst/master/example.typ | typst | MIT License | frac(123, 34) sin (123) space pi space f (omega) = (frac(alpha, beta))
|
https://github.com/darioglasl/Arbeiten-Vorlage-Typst | https://raw.githubusercontent.com/darioglasl/Arbeiten-Vorlage-Typst/main/glossar.typ | typst | / Glossar: Eine Ansammlung an Begriffen, die in erklärt werden müssen. |
|
https://github.com/ludwig-austermann/typst-din-5008-letter | https://raw.githubusercontent.com/ludwig-austermann/typst-din-5008-letter/main/documentation.md | markdown | MIT License | # Reference
## Letter options
| Option | Type | Deutsch | English |
|---|---|---|---|
| `title` | `content` | Betreff | Subject of letter |
| `address-zone` | `content` | Empfängeradresse /-zone | recipient address /-zone |
| `return-information` | `content` | Rücksendehinweis | return information |
| `information-field` | `content` | Informationsblock | information block |
| `reference-signs` | `array of content pairs` | Bezugszeichen | reference signs |
| `attachments` | `array of content` | Anhänge | attachments |
| `ps` | `content` | PS | PS |
| `signature` | `content` | Bild von Unterschrift | picture of signature |
| `name` | `str` | eigener Name | own name |
| `date` | `content` | Datum | Date |
| `wordings` | `dict` / `str` / `auto` | Phrasen, siehe unten | phrases, see below |
| `styling-options` | `dict` | Styling, siehe unten | Styling, see below |
| `debug-options` | `dict` | Debug Optionen, siehe unten | debug options, see below |
| `block-hooks` | `dict` | siehe unten | see below |
| `labels` | `dict` | siehe unten | see below |
| `extra-options` | `dict` | extra Argumente für eigene hooks | extra arguments for custom hooks |
## Labels
Labels can be defined with the `labels` argument of the main `letter` function. Additionally, `name` and `date` are predefined. Hence, one can use `@name` in the text and _blocks_.
## Subsubjects
You can use subsubjects (Teilbetreff) and subsubsubjects and so on with the typst heading function `= subsubject`, `== subsubsubject`.
## Styling options
| Option | Type | Deutsch | English |
|---|---|---|---|
| `theme-color` | `color` | Themenfarbe | theme color |
| `text-params` | `dict` | Schriftartsoptionen | Font options |
| `page-margin-right` | dim | Rechter Rand | right margin |
| `folding-mark` | `bool` | Falzmarken? | folding marks? |
| `hole-mark` | `bool` | Lochmarke? | hole mark? |
| `form` | `str` | Typ A / B | type A / B |
| `handsigned` | `bool` | Unterschrift? | Signed? |
| `attachment-right` | `bool` | Anhang rechts? | attachment set right? |
| `background` | `content` | Hintergrundinhalt | background content |
| `foreground` | `content` | Vordergrundinhalt | foreground content |
| `head-par-leading` | `length` | Zeilenabstand Briefkopf | linespacing letter head |
| `address-field-seperator-spacing` | `length` | Abstand zwischen Rücksendezone und Adresszone | distance between return address and address zone |
| `pagenumber-content-spacing` | `length` | Abstand zwischen Seitenzahl und Briefinhalt | distance between pagenumber and letter content |
| `pagenumber-footer-spacing` | `length` | Abstand zwischen Seitenzahl und Fußblock | distance between pagenumber and footer |
| `pagenumber-footer-spacing` | `length` | Abstand vom Fußblock zum unterem Rand | distance of footer to bottom page border |
In examples directory is `template_letter.*` which maps all fields of this class to a pdf.
## Wording / Phrasing options
There are two possibilities to access wordings:
- using the `load-wordings(lang, wordings-file: "wordings.toml")` function, where you specify the wordings entry by `lang`, for instance `de-formal`, and the corresponding file
- giving the `wordings` argument in `letter` function with
- `auto`, then `<lang>-formal` is taken with `load-wordings`
- `x: str`, then `x` is taken with `load-wordings`
- `x: dict`, then `x` is taken directly
In the `wordings.toml` file, the following fields are defined.
| Option | Type | Deutsch | English |
|---|---|---|---|
| `salutation` | `str` | Grußformel / Anrede | greeting |
| `closing` | `str` | Grußformel am Ende | closing |
| `attachments` | `str` | Anhang | attachment |
## Hooks
Hooks enable you to change the behaviour of various blocks in the letter. Such, many parts of this letter class can be modified to one likings. We denote with `Ö`, that the function has also the arguments `styling: (:), extras: (:)`, and by `Ä`, if we have additionally `wordings: (:)`.
| Option | Type | Deutsch | English |
|---|---|---|---|
| `subject` | `(content: content, Ö) -> content` | Betreff | Subject |
| `subsubject` | `(content: content, level: int, Ö) -> content` | Teilbetreff | Subsubject |
| `reference-sign` | `(heading: content, content: content, Ö) -> content` | Bezugszeichen | Reference signs |
| `salutation` | `(Ä) -> content` | Grußformel | Greeting |
| `closing` | `(signature: content / none, Ä) -> content` | Grußformel am Ende | Closing |
| `pagenumber` | `(Ä) -> content` | Seitenzahl | Pagenumber |
| `letter-head` | `content` | Kopfzeile des Briefkopfes | Header of letter head |
| `attachments` | `(items: array of content, Ä) -> content` | Anhänge | Attachments |
| `postscriptum` | `(content: content, Ä) -> content` | PS | PS |
| `header` | `(title: content, Ä) -> content` | Kopfzeile | Header |
| `footer` | `content` | Fußzeile | Footer |
## Debug options
| Option | Type | Deutsch | English |
|---|---|---|---|
| `show-block-frames` | `bool` | Makiert Blöcke | marks blocks |
| `show-address-field-calculation` | `bool` | Zeigt Adressfeldberechnungen | shows address field calculation |
# Envelope
There are various envelope formats: C6, DL, C6/C5, C5A, C5B, C4A, C4B.
The mechanism of the envelope class is similar to the letter one, but a lot simpler. The envelope function can be accessed in `lib/envelope` or in `letter` by the `envelope` variable. Take a look at the example.
## Envelope options
| Option | Type | Deutsch | English |
|---|---|---|---|
| `envelope-format` | `string` | Format des Briefumschlag | Format of the envelope |
| `sender-zone` | `content` | Absenderzone | sender zone |
| `frank-zone` | `content` | Frankierzone | frank zone |
| `read-zone` | `content` | Lesezone | read zone |
| `encoding-zone` | `content` | Codierzone | (en?)coding zone |
| `styling-options` | `dict` | Styling, siehe unten | Styling, see below |
| `debug` | `bool` | Debugmodus | debug mode |
## Styling options
| Option | Type | Deutsch | English |
|---|---|---|---|
| `theme-color` | `color` | Themenfarbe | theme color |
| `text-params` | `dict` | Schriftartsoptionen | Font options |
| `margin` | `length` | Randabstand | margin |
| `background` | `content` | Hintergrundinhalt | background content |
| `foreground` | `content` | Vordergrundinhalt | foreground content | |
https://github.com/schweller/cv | https://raw.githubusercontent.com/schweller/cv/main/resume.typ | typst | #import "@preview/modern-cv:0.2.0": *
#show: resume.with(
author: (
firstname: "Inacio",
lastname: "Schweller",
email: "<EMAIL>",
phone: "x",
github: "schweller",
linkedin: "ischweller",
address: "x",
positions: (
"Director of Engineering",
"VP of Engineering",
"Senior Engineering Manager"
)
),
date: datetime.today().display(),
)
#set text(
font: "Source Sans 3",
)
= About me
#coverletter-content[
  #strong("15 years experienced") and #strong("detail-oriented") Software Engineering Manager, with a passion for #strong("delivering effective") and #strong("performant solutions"). Applies a multidisciplinary background and global experience to deal with all the complexity of human interaction, building and facilitating teams to perform at their best and scaling technology to support business continuity. \ Owns a track record of developing an eCommerce platform for a top-tier US company, a robust FinTech platform for a Berlin scale-up, a frontend publishing platform for the biggest Brazilian news portal, and fleet management for remote control and insights over vehicles.
]
= Technical Skill Set
#resume-skill-item("Languages", (strong("Go"), strong("TypeScript"), strong("bash"), "Lua", "Python"))
#resume-skill-item("Client-side", (strong("React"), strong("React Native"), strong("JavaScript"), "GraphQL", "Cypress", "Playwright"))
#resume-skill-item("Server-side", (strong("Go"), strong("PostgreSQL"), strong("Kubernetes"), strong("Docker"), "Redis", "Istio", "GraphQL", ))
#resume-skill-item("Cloud & Tooling", (strong("AWS"), strong("GCP"), strong("Doc"), "GitHub Actions", "TeamCity", "Bazel"))
#v(12pt)
= Career Highlights
#resume-entry(
title: "Senior Software Engineering Manager",
location: "Berlin, Germany",
date: "August 2023 - Current",
description: "Klang Games GmbH"
)
#resume-item[
- Leading three teams around: Platform Engineering, Customer Facing and Developer Experience efforts
- Adopted delivery metrics for the IDP (Internal Developer Platform), reducing 70% of the time needed to test our products in development
- Responsible for Technical product management of our customer facing interfaces and for the IDP
]
#resume-entry(
title: "Director of Software Engineering",
location: "Berlin, Germany",
date: "July 2022 - July 2023",
description: "Glassnode"
)
#resume-item[
- Spearheaded a financial services chapter within the organization, creating core functions, hiring funnels, and the initial architecture
- Designed a hiring process and funnel resulting in a 10-day cycle for DevOps, InfraOps, and Application security functions
- Created data flow diagrams, and overall policies and reviewed IT compliance documents
- Supported the existing Internal Developer Platform team in implementing DORA metrics
- Open-sourced within the company an internal library for repository metric collection, mainly utilized by Product Engineering teams
- Designed in conjunction with middle-management the Career Level and Ladder for the Engineering organization
- Identified improvement areas inside the teams with surveys, prepared a development plan fitting the business and people needs
- Supported as a hiring manager in several key positions, such as Head of Design and Chief Marketing Officer.
]
#resume-entry(
title: "Senior Software Engineering Manager",
location: "Berlin, Germany",
date: "October 2021 - July 2022",
description: "Nuri"
)
#resume-item[
- Scaled a business domain from 7 individual contributors to 25 over 10 months, resourcing and staffing three extra cross-functional squads
- Hired across multiple functions: Software Engineers, DevOps / SRE, QA engineers, and Engineering Managers
- Defined and followed up on engineering operations metrics to better support the business domain delivery and productivity
- Delivered two critical projects for the organization, the rebranding and a groundbreaking financial savings product
- Built from the scratch onboarding experience for new joiners impacting the time positively to the first contribution
- Helped the team transition from Scrum to an Agile-Kanban setup, facilitating meetings and acting as a decision maker with the Product chapter
]
#resume-entry(
title: "Software Engineering Team Lead",
location: "Berlin, Germany",
date: "April 2021 - October 2021",
description: "Nuri"
)
#resume-item[
- Promoted to Senior Engineering Manager in three months
]
#resume-entry(
title: "Senior Software Engineer",
location: "San Mateo, California, USA",
date: "September 2017 - June 2019",
description: "Fanatics, Inc."
)
#resume-item[
- Led small squads using short sprint cycles which greatly improved iterations on technical and business features.
- Built cutting edge NodeJS service application for an e-commerce platform which contributed to organization Y-o-Y Revenue growth in 2018
- Replaced an old UI framework with new one built with React and Redux and which enabled a small team to support 600+ sites with one codebase.
- Helped the organization to move from on-prem infrastructure solution to AWS which led to effective reducing cost of the new application
- Remodeled frontend bundling pipeline that reduced build times by 75% on local development and 40% on CI.
- Replaced orchestration layer with a GraphQL application which improved code reusability across teams and repositories
- Built a deploy and automation pipeline model of service and frontend application that was reused across teams.
- Adapted existing QA handbook, enabling the team to start on automated Integration testing.
- Interviewed engineers on-site and with phone screens.
]
#v(12pt)
= Languages
#resume-item[
- English | Fluent
- Portuguese | Fluent
- German | B1
] |
|
https://github.com/polarkac/MTG-Stories | https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/043_Innistrad%3A%20Midnight%20Hunt.typ | typst | #import "@local/mtgset:0.1.0": conf
#show: doc => conf("Innistrad: Midnight Hunt", doc)
#include "./043 - Innistrad: Midnight Hunt/001_Episode 1: The Witch of the Woods.typ"
#include "./043 - Innistrad: Midnight Hunt/002_Tangles.typ"
#include "./043 - Innistrad: Midnight Hunt/003_Episode 2: The Motives of the Wolf.typ"
#include "./043 - Innistrad: Midnight Hunt/004_Sisters.typ"
#include "./043 - Innistrad: Midnight Hunt/005_Episode 3: The Fall of the House of Betzold.typ"
#include "./043 - Innistrad: Midnight Hunt/006_His Eyes, All of Them.typ"
#include "./043 - Innistrad: Midnight Hunt/007_Episode 4: Harvesttide.typ"
#include "./043 - Innistrad: Midnight Hunt/008_The Dance of Undeath.typ"
#include "./043 - Innistrad: Midnight Hunt/009_Episode 5: Night Closes 'Round.typ"
#include "./043 - Innistrad: Midnight Hunt/010_The Dusk Reborn.typ"
|
|
https://github.com/kdog3682/mathematical | https://raw.githubusercontent.com/kdog3682/mathematical/main/0.1.0/src/meta-content-layouts/extended.typ | typst | #import "@local/typkit:0.1.0": *
// Extended meta-content header block: coloured title (plus optional
// subtitle or topic), a class/season badge, a right-aligned topic pill,
// stacked speaker/skill/requirement lists, and an italicised templated
// description, separated by dash rules.
// Falls back to the simple layout when `meta` carries no description.
// `meta` fields read: description, subtitle, topic, title, skills,
// speakers, requirements, class. Helper names (`empty`,
// `do-simple-meta-content`, `oxford`, `templater`, `markup`, `typkit`,
// `ink`, `h2`-`h4`, `strokes`, `bullet`, `dashbreak`, `color-match`,
// `split`) come from the typkit import at the top of this file.
#let extended-meta-content(meta) = {
    let description = meta.at("description", default: none)
    if empty(description) {
        return do-simple-meta-content(meta)
    }
    let subtitle = meta.at("subtitle", default: none)
    let topic = meta.at("topic", default: none)
    let title = meta.title
    let skills = meta.skills
    let speakers = meta.speakers
    let requirements = meta.requirements
    // First names only, for the templated description text.
    let get-name(s) = {
        return split(s, " ").at(0)
    }
    let students = speakers.map(get-name)
    let scope = (
        students: oxford(students)
    )
    // Fill the template placeholders in the description, then parse the
    // result as markup. (Named `filled` rather than `raw` so the Typst
    // built-in `raw` is not shadowed.)
    let filled = templater(meta.description, scope)
    let description = markup(filled, typkit)
    let season = "Summer 2024"
    // Header table: title block on the left, class/season badge on the right.
    let title-content = {
        let a = {
            ink.blue(h2(title))
            v(-10pt)
            if subtitle != none {
                text(subtitle, font: "Inconsolata")
            }
            else if topic != none {
                h4(topic)
            }
        }
        let b = box(inset: 5pt, radius: 5pt, stroke: strokes.soft, {
            h3(meta.class)
            v(-10pt)
            ink.blue(season)
            v(3pt)
        })
        table(a, b, columns: (auto, 1fr), align: (left, right), stroke: none)
    }
    // Labelled bullet lists, stacked vertically for the left-hand column.
    let colon-content = {
        let a = bullet(speakers, label: "speakers")
        let b = bullet(skills, label: "skills")
        let c = bullet(requirements, label: "requirements")
        stack(a, b, c, spacing: 20pt)
    }
    title-content
    dashbreak(style: "spacebar")
    // Topic pill, placed flush right against the rule above; `measure`
    // needs `context` to resolve the pill's rendered width.
    let pill = text(color-match(topic), size: 0.8em)
    context {
        let measurement = measure(pill).width
        let loc = (
            dy: -5pt,
            dx: 100% - measurement,
        )
        place(pill, ..loc)
    }
    table(
        columns: (auto, 1fr),
        stroke: none,
        column-gutter: 15pt,
        colon-content,
        table.cell(
            align: center + horizon,
            box(emph(text(description, size: 0.95em)), width: 65%)
        )
    )
    v(5pt)
    dashbreak(style: "topbar")
}
|
|
https://github.com/Vikingu-del/Resume | https://raw.githubusercontent.com/Vikingu-del/Resume/main/DeutchResume/main.typ | typst | MIT License | #import "template.typ": *
#set page(
margin: (
left: 5mm,
right: 5mm,
top: 5mm,
bottom: 5mm
),
)
#set text(font: "Mulish")
#show: project.with(
theme: rgb("#0F83C0"),
name: "<NAME>",
title: "Softwareentwickler", // Translated Title
contact: (
contact(
text: "+49 176 361 713 25"
),
contact(
text: "<EMAIL>",
link: "mailto:<EMAIL>"
),
contact(
text: "GitHub.com/Vikingu-del",
link: "https://github.com/Vikingu-del"
),
contact(
      text: "Portfolio",
link: "https://vikingu-del.github.io/MyPortofolio/"
),
),
main: (
section(
title: "Berufserfahrung", // Translated Section Title
content: (
subSection(
title: "SHARP GROUP LTD",
titleEnd: "Zeitraum", // Translated Subtitle
subTitle: "Webentwicklungspraktikant", // Translated Subtitle
subTitleEnd: "(November 2022 — Dezember 2022)",
content:
[- Benutzeroberflächen mit modernen JavaScript-Frameworks, HTML5 und CSS3 entwickelt.
- Inhaltserstellungstools und digitale Medien verwendet, um Websites zu gestalten.
- Auf Kundenanfragen reagiert und technische Unterstützung am Telefon und persönlich bereitgestellt.]
),
subSection(
title: "ISA NET",
titleEnd: "Zeitraum",
subTitle: "IT Help Desk Support",
subTitleEnd: "(August 2021 — Oktober 2021)",
content:
[- Diagnose und Behebung von Hardware-, Software- und Netzwerkproblemen.
- Server und Systeme gewartet, um die Netzwerke während der Spitzenzeiten betriebsbereit zu halten.
- Benutzerkonten erstellt und Berechtigungen zugewiesen.
- Auf Kundenanfragen reagiert und technische Unterstützung am Telefon und persönlich bereitgestellt.]
),
),
),
section(
title: "Projekte", // Translated Section Title
content: (
subSection(
title: "Webserv",
content: "Einen Webserver von Grund auf in C++ erstellt, der den HTTP/1.1-Standards entspricht, mehrere Clientverbindungen verwaltet und die ordnungsgemäße Anfrageverarbeitung, die Reaktionsbehandlung und das Fehler-Management sicherstellt."
),
subSection(
title: "Inception",
content: "Mehrere Docker-Container bereitgestellt, die mit Docker Compose orchestriert werden, um ein Multi-Service-System zu simulieren. Dieses Projekt zeigt die Beherrschung der Containerisierung und der Dienstisolierung für Entwicklungs- und Produktionsumgebungen."
),
subSection(
title: "Minishell",
content: "Eine funktionale Shell in C implementiert, die in der Lage ist, einfache Befehle zu parsen und auszuführen, Prozesse zu verwalten, Umgebungsvariablen zu behandeln und grundlegendes Fehlermanagement anzubieten."
),
subSection(
title: "Cub3D",
content: "Eine 3D-Spiel-Engine in C mit der Raycasting-Technik entwickelt, ähnlich den Mechaniken früher FPS-Spiele wie Wolfenstein 3D. Das Projekt umfasste die Handhabung der Spielerbewegung, das Rendern von Texturen und die Implementierung von Kollisionserkennung, um ein immersives 3D-Erlebnis in einer 2D-Welt zu bieten."
),
subSection(
title: "Push_swap",
content: "Einen Algorithmus zum Sortieren von Daten auf einem Stapel unter Verwendung einer begrenzten Anzahl von Operationen erstellt. Das Projekt umfasste die Optimierung von Sortieralgorithmen hinsichtlich Effizienz in Bezug auf Zeit- und Speicherkomplexität und die Implementierung verschiedener Strategien, um die kürzestmögliche Abfolge von Operationen zu erreichen."
),
),
),
),
sidebar: (
section(
title: "Fähigkeiten", // Translated Section Title
content: (
subSection(
title: "Programmiersprachen", // Translated Subtitle
content: (
"C",
"C++",
"CSS",
"HTML5",
"JavaScript",
"Python",
"BashScript",
"Typst (CV ist in Typst geschrieben)"
).join(" • "),
),
subSection(
title: "Technologien", // Translated Subtitle
content: (
"NodeJS",
"VueJS",
"Git",
"SQL",
"Linux",
"RHEL",
"Arduino",
"React",
"Microsoft Office",
).join(" • "),
),
subSection(
title: "Konzepte", // Translated Subtitle
content: (
"Objektorientierte Programmierung",
"Unix-basierte Systemprogrammierung",
"Speicherverwaltung",
"Nebenläufigkeit und Multithreading",
"Netzwerkprogrammierung",
"Algorithmusoptimierung",
"Datenstrukturen",
"Versionskontrolle (Git)",
"Debugging und Profiling",
"Shell-Scripting"
).join(" • ")
),
subSection(
title: "Sprachen", // Translated Subtitle
content: (
"Englisch - Fließend",
"Italienisch - Mittelstufe",
"Spanisch - Anfänger",
"Deutsch - Anfänger",
).join(" • ")
),
),
),
section(
title: "Bildung", // Translated Section Title
content: (
subSection(
title: [
#set par(justify: false)
Polytechnic University of Tirana
],
subTitle: "BSc in Elektronikingenieurwesen", // Translated Subtitle
content: [
Absolvent: August 2022\
],
),
subSection(
title: [
#set par(justify: false)
42 Wolfsburg
],
subTitle: "Software Engineering",
content: [
"Projektbasierter Lehrplan mit Peer-to-Peer-Lernansatz, fokussiert auf praktische Problemlösungen und Programmierung."
],
),
),
),
section(
title: "Hobbys", // Translated Section Title
content: (
subSection(
title: "Sport",
content: (
"MMA (Mixed Martial Arts), ehemaliger Balkanchampion",
"Tischtennis",
"Schach",
"Wandern",
).join(" • "),
),
),
),
),
)
|
https://github.com/tiankaima/typst-notes | https://raw.githubusercontent.com/tiankaima/typst-notes/master/eda4a9-math_analysis_mid_2023/main.typ | typst | #set text(
font: "Source Han Serif SC",
size: 10pt,
)
= 2023-2024 数学分析(B1)期中
1. 用极限的定义证明: 若$limits(lim)_(n->infinity) a_n = limits(lim)_(n->infinity) b_n = c$, 则$limits(lim)_(n->infinity) max {a_n,b_n} = c$.
2. 计算下列各题
1. $limits(lim)_(n->infinity) ((n+1) / (n-1))^n$
2. $limits(lim)_(n->infinity) ((n+1)^k-n^k), 0<k<1$
3. $limits(lim)_(n->infinity) (root(4,1+x+x^2)-1) / (tan 2x)$
4. $limits(lim)_(n->infinity) (cos x - e^(-1 / 2 x^2)) / (sin^4 x)$
5. $limits(lim)_(x->a)((sin x)/(sin a))^(1/(x-a))$ ($a!=k pi, k in ZZ$)
6. 求$f(x)=ln(cos x)$带 Peano 余项的 4 阶 Maclaurin 公式.
3. 计算下列各题
1. 求由参数方程 $cases(x=t cos t, y=t sin t) space.quad (0<=t<=pi)$ 确定的曲线 $y = y(x)$在$(0,pi/2)$处的切线方程.
2. 函数 $f(x) = cases(x^2+x+1 space.quad x>=0, a sin x + b space.quad x<0)$. 请问:当$a,b$分别满足什么条件时, $f(x)$在$(-infinity,+infinity)$上连续和可导?并在可导时, 求$f(x)$在$x=0$处的微分.
4. 设 $f(x) = sin 2x - x, space.quad x in [-pi/2,pi/2]$
1. 求$f(x)$的最值.
2. 求曲线$y=f(x)$的拐点.
5. 设$f(x)$在$[a,b]$上连续, $(a,b)$上可微, 且
$
f(a)dot.c f(b) > 0, space.quad f(a) dot.c f((a+b) / 2) < 0
$
求证:$exists xi in (a,b)$, 使得$f'(xi)=f(xi)$.
6. 设$f(x)$在有界闭区间$[a,b]$上有定义且满足以下两个条件:
- $f(x) in [a,b], forall x in [a,b]$
- $abs(f(x) - f(y)) <= k abs(x-y) space.quad forall x,y in [a,b]$, 其中$0<k<1$为常数
称满足$f(c) = c$的实数$c$为$f(x)$的不动点.
证明:
- $f(x)$ 在 $[a,b]$上有唯一的不动点$x^*$.
- 对$forall x_1 in [a,b]$, 归纳定义数列$x_(n+1) = f(x_n) space.quad (n=1,2,...)$, 则${x_n}$收敛于$f(x)$的不动点$x^*$.
7. 设函数$f(x)$在$[0,1]$上二阶可导, 且$f(0) = f(1)$, $abs(f^('') (x))<=2 space.quad (forall x in [0,1])$. 求证:$abs(f^' (x))<=1$. |
|
https://github.com/Myriad-Dreamin/typst.ts | https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/layout/grid-4_02.typ | typst | Apache License 2.0 |
#import "/contrib/templates/std-tests/preset.typ": *
#show: test-page
// Test that all three kinds of rows use the correct bases.
#set page(height: 4cm, margin: 0cm)
#grid(
rows: (1cm, 1fr, 1fr, auto),
rect(height: 50%, width: 100%, fill: conifer),
rect(height: 50%, width: 100%, fill: forest),
rect(height: 50%, width: 100%, fill: conifer),
rect(height: 25%, width: 100%, fill: forest),
)
|
https://github.com/0x1B05/nju_os | https://raw.githubusercontent.com/0x1B05/nju_os/main/lecture_notes/main.typ | typst | #import "template.typ": *
#show: template.with(
title: [OS - JYY Lecture Notes],
short_title: "Lecture Notes",
description: [
JYY OS-2024 课程笔记
],
date: datetime(year: 2023, month: 09, day: 22),
authors: (
(
name: "0x1B05",
github: "https://github.com/0x1B05",
homepage: "https://github.com/0x1B05", // 个人主页
affiliations: "1",
),
),
affiliations: (
(id: "1", name: "NUFE"),
),
paper_size: "a4",
text_font: "Linux Libertine",
sc_font: "Noto Sans CJK SC",
code_font: "DejaVu Sans Mono",
// 主题色
accent: orange,
// 封面背景图片
cover_image: "./figures/Pine_Tree.jpg", // 图片路径或 none
// 正文背景颜色
// background_color: "#FAF9DE" // HEX 颜色或 none
)
#include "content/01_操作系统概述.typ"
#include "content/02_应用视角的操作系统.typ"
#include "content/03_硬件视角的操作系统.typ"
#include "content/04_Python建模操作系统.typ"
#include "content/05_多处理器编程:从入门到放弃.typ"
#include "content/06_并发控制基础.typ"
#include "content/07_并发控制-互斥.typ"
#include "content/09_并发控制-同步1.typ"
#include "content/10_并发控制-同步2.typ"
#include "content/11_真实世界的并发编程.typ"
#include "content/12_真实世界的并发Bug.typ"
#include "content/13_并发Bug的应对.typ"
#include "content/14_多处理器系统与中断机制.typ"
#include "content/15_操作系统上的进程.typ"
#include "content/16_Linux操作系统.typ"
#include "content/17_Linux进程的地址空间.typ"
#include "content/18_操作系统实验生存指南.typ"
#include "content/19_系统调用和UNIX-Shell.typ"
#include "content/21_可执行文件和加载.typ"
#include "content/27_设备驱动程序与文件系统.typ"
#include "content/28_FAT和UNIX文件系统.typ"
#include "content/29_持久数据的可靠性.typ"
|
|
https://github.com/TypstApp-team/typst | https://raw.githubusercontent.com/TypstApp-team/typst/master/tests/typ/bugs/smartquotes-in-outline.typ | typst | Apache License 2.0 | #set page(width: 15em)
#outline()
= "This" "is" "a" "test"
|
https://github.com/lyzynec/orr-go-brr | https://raw.githubusercontent.com/lyzynec/orr-go-brr/main/08/main.typ | typst | #import "../lib.typ": *
#knowledge[
#question(name: [Give the problem statement for the indirect approach to
continuous--time optimal control. Explain the essence of one or two
algorithms that are based on the indirect approach (for example, iteration
over the control, shooting, multiple shooting, collocation)])[
The indirect approach calls for removing the control from the problem
using algebraic operations.
The problem statement thus is
$
min_(bold(x), bold(u)) [ phi.alt(bold(x)(t_f))
&+ integral_(t_i)^(t_f) L(bold(x)(t), bold(u)(t), t) upright(d) t ]\
$
    subject to
#align(center)[#grid(columns: 2,
row-gutter: 10pt, column-gutter: 10pt, align: left,
[system dynamics], $dot(bold(x))(t) - bold(f)(bold(x), bold(u), t)
= bold(0)$,
        [path constraints],
$bold(h)(bold(x)(t), bold(u)(t), t) <= bold(0)$,
[initial value],
$bold(x)(0) - bold(x)_0 = bold(0)$,
[terminal constraints],
$bold(r)(bold(x)(t_f)) <= bold(0)$,
)]
#part(name: [Indirect single shooting])[
        With an initial guess of $bold(lambda)_0$ we can calculate the
        state--costate trajectory of the entire system; we can then find the
        optimal $bold(lambda)_0$ by Newton's method.
It creates a map
$
bold(lambda)_0 |-> bold(lambda)(t_f, bold(lambda)_0, bold(x)_0)
$
        This map is very nonlinear and thus hard to solve using
        Newton's method without an accurate initial guess of $bold(lambda)_0$.
]
    #part(name: [Indirect Multiple Shooting])[
        In multiple shooting, we divide the time interval into $N$ parts.
        This allows us to make arbitrarily short intervals, in which the
linear approach of shooting method will be increasingly accurate.
This performs well even without good initial guess $bold(lambda)_0$.
        But, same as single shooting, it still is essentially unusable for
strongly unstable systems.
]
#part(name: [Indirect Collocation])[
It does the obvious thing and divide the states and costates
in time into polynomials, the solvers work through magic fuelled
by caffeine addiction of its developers.
]
]
#question(name: [Give the problem statement for the direct approach to
continuous--time optimal control. Explain the essence of one or two
algorithms from this group such as direct single shooting, direct multiple
shooting, direct collocation. ])[
        This approach "discretizes" the problem into a finite-dimensional
        nonlinear program (NLP), and utilizes numerical NLP methods.
The problem statement thus is
$
min_(bold(x), bold(u)) [ phi.alt(bold(x)(t_f))
&+ integral_(t_i)^(t_f) L(bold(x)(t), bold(u)(t), t) upright(d) t ]\
$
    subject to
#align(center)[#grid(columns: 2,
row-gutter: 10pt, column-gutter: 10pt, align: left,
[system dynamics], $dot(bold(x))(t) - bold(f)(bold(x), bold(u), t)
= bold(0)$,
        [path constraints],
$bold(h)(bold(x)(t), bold(u)(t), t) <= bold(0)$,
[initial value],
$bold(x)(0) - bold(x)_0 = bold(0)$,
[terminal constraints],
$bold(r)(bold(x)(t_f)) <= bold(0)$,
)]
#part(name: [Direct single shooting])[
This method parametrizes the control $bold(u)(t)$ using piecewise
        constant functions (such as polynomials). Then we somehow wiggle the
        control signal and watch as the system responds.
This thing is also nonlinear as hell, and without good initial guess
of nearly every value it will not work.
]
#part(name: [Direct multiple shooting method])[
You split the time interval and approximate using some polynomials
        the state and the control using _some_ initial states.
        Now you have many solved problems that do not fit together —
        congratulations, you have made your problem worse.
        Luckily there are some methods that solve this issue, but we can't be
really bothered to learn them. Just use CasADi.
]
]
]
#skills[
#question(name: [Solve a continuous-time optimal control problem for a
nonlinear system using a direct method (direct collocation).])[]
]
|
|
https://github.com/StandingPadAnimations/papers | https://raw.githubusercontent.com/StandingPadAnimations/papers/main/README.md | markdown | This is a collection of all of my papers that I've written with https://github.com/typst/typst. Each folder has the source files for a single paper as well as their built PDFs.
To build a paper, open a terminal and go into the source folder of the paper in question, then type:
```sh
typst compile <name_of_paper>.typ
```
The output will be saved as `<name_of_paper>.pdf`.
|
|
https://github.com/lkndl/typst-bioinfo-thesis | https://raw.githubusercontent.com/lkndl/typst-bioinfo-thesis/main/modules/front-matter.typ | typst | #import "styles.typ": *
#import "footers.typ": *
#import "@preview/outrageous:0.1.0"
// Build the centred author/supervisor/advisor/date table for the title page.
// `args` is the template's option dictionary; fields read here are `lang`
// ("en" or "de"), `author`, `supervisors` (array), `advisors` (array) and
// `date` (a datetime). For any other `lang` value nothing is emitted.
#let submission-info-table(args) = {
  set align(center + bottom)
  set par(leading: .5em)
  set text(size: 14pt)
  // Shared two-column, borderless table layout for both language variants.
  let opts = (
    columns: 2,
    //inset: 0pt,
    align: left + top,
    column-gutter: 5pt,
    row-gutter: 5pt,
    stroke: none,
  )
  if args.lang == "en" [
    #table(
      ..opts,
      [Author:], [#args.author],
      // Pluralise the label when more than one name is listed.
      [Supervisor] + if args.supervisors.len() > 1 [s:] else [:], args.supervisors.join([ \ ]),
      [Advisor] + if args.advisors.len() > 1 [s:] else [:], args.advisors.join([ \ ]),
      [Submitted:], [#args.date.display("[day] [month repr:short] [year]")]
    )
  ] else if args.lang == "de" [
    #table(
      ..opts,
      [Verfasser:], [#args.author],
      [Themensteller:], args.supervisors.join([ \ ]),
      // NOTE(review): the German variant separates advisors with #parbreak()
      // while the English one uses a line break (\) — confirm this is intended.
      [Betreuer:], args.advisors.join([ #parbreak() ]),
      [Abgabedatum:], [#args.date.display("[day].[month].[year]")]
    )
  ]
}
// Render the cover page. `args.flavour` names a sibling module
// "<flavour>-covers.typ" which must export a `cover-page(args)` function.
#let make-cover(args) = {
  // define a page numbering, but don't show it
  set page(numbering: "a", footer: [])
  // load the correct module for this flavour
  import str(args.flavour + "-covers.typ") : *
  cover-page(args)
}
// Render the title page: reset the page counter to 1, start on a fresh
// odd (right-hand) page, and delegate to the flavour module's
// `title-page`, passing the pre-built submission-info table along.
#let make-title(args) = {
  counter(page).update(1)
  pagebreak(weak: true, to: "odd")
  // load the correct module for this flavour
  import str(args.flavour + "-covers.typ") : *
  title-page(args, submission-info-table(args))
}
// Declaration-of-authorship page with a date/signature line, in English or
// German depending on `args.lang`. The heading is unnumbered, kept out of
// the outline, and bookmarked in the PDF.
#let declare-page(args) = {
  pagebreak(weak: true, to: "odd")
  set heading(numbering: none, outlined: false, bookmarked: true, level: 1)
  if args.lang == "en" [
    = Declaration of Authorship
    I confirm that this #box([#args.degree's thesis]) is my own work and I have documented all sources and material used.
    #v(2cm)#h(2cm)Date#h(1fr)#args.author#h(2cm)
  ] else if args.lang == "de" [
    = Eigenständigkeitserklärung
    // NOTE(review): this branch reads `args.Degree` (capitalised) while the
    // English branch reads `args.degree` — presumably both fields exist on
    // the args dictionary; confirm against the template's defaults.
    Ich versichere, dass ich diese #box([#args.Degree's Thesis]) selbständig verfasst und nur die angegebenen Quellen und Hilfsmittel verwendet habe.
    #v(2cm)#h(2cm)Datum#h(1fr)#args.author#h(2cm)
  ]
}
// Acknowledgements page on a fresh odd (right-hand) page. An explicit
// `title` wins; otherwise a language-dependent default heading is used
// ("Acknowledgements" for "en", "Danksagung" otherwise). The heading is
// unnumbered, excluded from the outline, and bookmarked in the PDF.
#let acknowledgements(lang, content, title: none) = {
  pagebreak(weak: true, to: "odd")
  set heading(numbering: none, outlined: false, bookmarked: true, level: 1)
  // Resolve which heading to show before emitting anything.
  let chosen-heading = if title != none [
    = #title
  ] else if lang == "en" [
    = Acknowledgements
  ] else [
    = Danksagung
  ]
  chosen-heading
  content
}
// Bilingual abstract spread: the German "Zusammenfassung" first, then the
// English "Abstract" on the following page. Headings are unnumbered,
// skipped by the outline, but bookmarked in the PDF.
#let abstract(german, english) = {
  // Abstracts start on a left-hand (even) page.
  pagebreak(weak: true, to: "even")
  set heading(numbering: none, outlined: false, bookmarked: true, level: 1)
  [
    = Zusammenfassung
    #german
    #pagebreak()
    = Abstract
    #english
  ]
}
// Typeset the table of contents, starting on an even (left-hand) page.
// With `simple: true` entries are styled via the `outrageous` preset;
// otherwise a hand-rolled layout with per-level show rules is used.
// Relies on `quantum`, `page-num-width` and `typst-repeat` from the
// styles import, and drops <toc>/<toc-end> labels so other modules
// (e.g. the footers) can detect which pages belong to the TOC.
#let table-of-contents(lang, simple: true) = {
  pagebreak(weak: false, to: "even")
  set heading(numbering: none, outlined: false, bookmarked: true, level: 1)
  let toc-title = if lang == "en" [Contents] else [Inhaltsverzeichnis]
  if simple {
    // Preset styling: bold level-1 entries with extra space above, dotted
    // leader fill for deeper levels.
    show outline.entry: outrageous.show-entry.with(
      ..outrageous.presets.outrageous-toc,
      font-weight: ("bold", auto),
      vspace: (32pt, none),
      fill-right-pad: 5pt,
      fill: (none, align(right, typst-repeat(justify: false, box(width: quantum, "."))))
    )
    [#outline(title: toc-title, indent: auto) <toc> #metadata("toc-end") <toc-end>
    ]
    return
  } else {
    // more complicated table-of-contents
    // Pull (numbering, heading body, location) out of an outline entry.
    let extractor(it) = {
      let loc = it.element.location()
      let title = it.element.body
      let number = none
      if it.element.numbering != none {
        number = numbering(it.element.numbering, ..counter(heading).at(loc))
      }
      (number, title, loc)
    }
    // Lay out one TOC line: indent, optional number, title with a leader
    // fill, and a right-aligned page number of fixed width.
    let layoutor(number, title, loc, page, indent: 0pt, fill: none) = {
      if fill == none {
        // Default leader: a dotted line, mirrored so the dots align right.
        fill = box(
          width: 1fr, baseline: -1pt,
          align(right, scale(x: -100%, line(
            length: 100%, stroke: (dash: ("dot", quantum)))))
        )
      }
      box(width: 100%, stack(dir: ltr,
        h(indent),
        number,
        box(width: 100% - page-num-width - indent - if number == none {0pt} else {number.width}, [
          #link(loc, title)#fill
        ]),
        align(bottom + right, box(width: page-num-width, link(loc, page)))
      ))
    }
    show outline.entry.where(
      level: 1
    ): it => {
      let (number, title, loc) = extractor(it)
      // Most level 1 headings have a numbering; so display that one in a separately aligned "column" to the left, reserving 1.4em here. For example the bibliography shows up in the TOC, but does not get a numbering.
      number = if number == none {none} else [#box(width: 1.4em, link(loc, number))]
      v(6pt) // free space above
      strong( // bold
        layoutor(number, title, loc, it.page, //str(counter(page).at(loc).first()),//display(loc.page-numbering()),
          indent: 0pt, fill: h(1fr)))
    }
    show outline.entry.where(
      level: 2
    ): it => {
      let (number, title, loc) = extractor(it)
      number = [#box(width: 2em, link(loc, number))]
      layoutor(number, title, loc, it.page, indent: 5pt)
    }
    show outline.entry.where(
      level: 3
    ): it => {
      let (number, title, loc) = extractor(it)
      number = none // hide numbering
      layoutor(number, title, loc, it.page, indent: 2em + 5pt) // more ident, but actually not
    }
    [#outline(title: toc-title) <toc> #metadata("toc-end") <toc-end>]
  }
}
|
|
https://github.com/RiccardoTonioloDev/TypUrNotes | https://raw.githubusercontent.com/RiccardoTonioloDev/TypUrNotes/main/README.md | markdown | MIT License | # TypUrNotes
A Typst template for both effective and aesthetic note taking.
# Installation
To use the template you just have to install the fonts you can find inside of `tun_template/fonts/`.
After you installed the fonts, just download the `tun_template` folder, move it into your project folder, and follow the _Usage_ instructions.
# Usage
You can find a `template_demo.typ` file, that uses all the implemented features.
TLDR: you just have to `#import "tun_template/tun.typ": *` at the beginning of your document, and after that you have to set the configurations of the template like what's below.
_Setting configurations:_
```typst
#show: config.with(
myAuthor: "<NAME>",
myTitle: "Template creation in Typst",
myLang: "en",
pages_numbering: "1",
creation_day: "01",
creation_month: "01",
creation_year: "1970",
associated_with: "University Typography Course",
use_glossary: true,
use_bibliography: true,
digital: true,
)
```
Explanation of the settings:
- `myAuthor`: sets the author name of the document;
- `myTitle`: sets the title of the document;
- `myLang`: sets the language of the document;
- `pages_numbering`: sets the style of the numbering used in the footer of pages;
- `creation_day`: sets the day of creation of the document;
- `creation_month`: sets the month of creation of the document;
- `creation_year`: sets the year of creation of the document;
- `associated_with`: sets the article, conference, lecture, context that the document is associated with;
- `use_glossary`: if true, adds the glossary page;
- The package `glossarium` was used to create the glossary page, that you can set in `tun_template/appendix/glossary.typ` (see the documentation of `glossarium` [here](https://typst.app/universe/package/glossarium/)).
- `use_bibliography`: if true, adds the bibliography page;
- You can modify the bibliography sources directly interacting with `tun_template/appendix/bibliography.yml` (see how to manage it [here](https://typst.app/docs/reference/model/bibliography/)).
- `digital`: if true uses less eye straining colors for a better digital reading experience. To print the document it's highly suggested to set this parameter to false, in order to have a white background and save ink.
|
https://github.com/crystalsolenoid/typst-resume-template | https://raw.githubusercontent.com/crystalsolenoid/typst-resume-template/main/src/resume.typ | typst | #let accent = gray
// Two-column header row shared by every entry type: `a` (as a level-3
// heading) above `b` on the left, right-aligned `c` above `d` on the right.
#let quad(a, b, c, d) = {
  grid(
    columns: (1fr, auto),
    [
      === #a\
      #b
    ],
    align(right)[
      #c\
      #d
    ]
  )
}
// Render `details` as a bulleted list; passing `none` (the default used
// by the entry helpers) renders nothing at all.
#let bullets(details) = {
  if details == none { return }
  for detail in details [
    - #detail
  ]
}
// One contact item for the header: a small icon loaded from `icons/`
// followed by a clickable label (`text-url`) pointing at `url`.
// NOTE(review): the `title` parameter is accepted but never used here.
#let header-link(title: none, url: none, text-url: none, icon: none) = [
  #set align(horizon)
  #set text(8pt)
  #box(baseline: 20%)[
    #set align(center)
    // #image("icons/link-2-svgrepo-com.svg", height: 1em)
    #image("icons/" + icon, height: 1em)
  ] #link(url)[#text-url]
]
// Centred row of contact items. Each element of `links` is a dictionary
// of arguments forwarded to `header-link`.
#let header-links(links) = {
  v(1em)
  set align(center)
  // 6pt of padding on either side keeps neighbouring items apart.
  links.map(entry => h(6pt) + box[#header-link(..entry)] + h(6pt)).join()
}
// One project entry: title (with an optional inline link beside it),
// description, technology and date laid out via `quad`, followed by a
// bullet list of `details`.
#let project(
  title: none,
  url: none,
  text-url: none,
  date: none,
  description: none,
  technology: none,
  details: (),
) = {
  quad([
    #title
    #if url != none [
      // #h(8pt)
      #set text(accent)
      // #link(url)[#text-url]
      // terrible hack:
      // NOTE(review): the fixed 200pt / -2.05em / -4.26em offsets below
      // visually place the link next to the title; they will break if the
      // font, text size or page margins change — confirm before reusing.
      #move(dx: 200pt, dy: -2.05em)[
        #link(url)[#text-url]
        #v(-4.26em)
      ]
    ]
  ], description,
  technology, date)
  bullets(details)
}
// One education entry: degree/school header row via `quad`, plus an
// optional bullet list. `show-detail: false` suppresses the bullets for a
// compact variant without removing the data from the call site.
#let education(
  title: none,
  organization: none,
  location: none,
  show-detail: true,
  dates: none,
  details: none) = {
  quad(title, organization, location, dates)
  if show-detail {
    bullets(details)
  }
}
// One work-experience entry. The first `quad` cell shows
// "department, organization" when a department is given; when it is
// `none` the `if` yields nothing and only the organization remains.
#let position(
  department: none,
  organization: none,
  title: none,
  location: none,
  dates: none,
  show-detail: true,
  details: none) = {
  quad(
    if department != none [#department, ] + organization,
    title,
    location,
    dates
  )
  if show-detail {
    bullets(details)
  }
}
// Document-level wrapper, applied with `#show: resume.with(name: ...)`.
// Sets PDF metadata, US-letter page geometry, the base font size, and the
// heading styles, then renders the wrapped document `doc`.
#let resume(
  name: none,
  doc,
) = {
  set document(
    title: name + "'s Résumé",
    author: name,
    date: auto,
  )
  set page(
    "us-letter",
    margin: (x: 0.5in, y: 0.5in),
  )
  set text(10pt)
  // Level-1 headings: centred section banners.
  show heading.where(
    level: 1
  ): it => [
    #set align(center)
    #set pad(bottom: 1em)
    #it
  ]
  // Level-2 headings: heading text on the left, an accent-coloured rule
  // filling the rest of the line.
  show heading.where(
    level: 2
  ): it => [
    #pad(y: 0.4em)[
    #grid(columns: (auto, auto), gutter: 4pt,
      [
        #it
      ],
      align(right + bottom)[
        #line(length: 100%, stroke: accent)
      ]
    )]
  ]
  doc
}