mirror of https://github.com/typst/typst
synced 2025-08-25 20:24:14 +08:00

Compare commits: 46d57b00b5 ... 64d0a564bf

4 commits:
64d0a564bf
4a638f41cd
f9897479d2
bd41fb9427
@@ -5,9 +5,9 @@ use typst::diag::{bail, HintedStrResult, StrResult, Warned};
 use typst::engine::Sink;
 use typst::foundations::{Content, IntoValue, LocatableSelector, Scope};
 use typst::layout::PagedDocument;
-use typst::syntax::Span;
+use typst::syntax::{Span, SyntaxMode};
 use typst::World;
-use typst_eval::{eval_string, EvalMode};
+use typst_eval::eval_string;
 
 use crate::args::{QueryCommand, SerializationFormat};
 use crate::compile::print_diagnostics;

@@ -63,7 +63,7 @@ fn retrieve(
 Sink::new().track_mut(),
 &command.selector,
 Span::detached(),
-EvalMode::Code,
+SyntaxMode::Code,
 Scope::default(),
 )
 .map_err(|errors| {
@@ -18,7 +18,6 @@ pub use self::call::{eval_closure, CapturesVisitor};
 pub use self::flow::FlowEvent;
 pub use self::import::import;
 pub use self::vm::Vm;
-pub use typst_library::routines::EvalMode;
 
 use self::access::*;
 use self::binding::*;

@@ -32,7 +31,7 @@ use typst_library::introspection::Introspector;
 use typst_library::math::EquationElem;
 use typst_library::routines::Routines;
 use typst_library::World;
-use typst_syntax::{ast, parse, parse_code, parse_math, Source, Span};
+use typst_syntax::{ast, parse, parse_code, parse_math, Source, Span, SyntaxMode};
 
 /// Evaluate a source file and return the resulting module.
 #[comemo::memoize]

@@ -104,13 +103,13 @@ pub fn eval_string(
 sink: TrackedMut<Sink>,
 string: &str,
 span: Span,
-mode: EvalMode,
+mode: SyntaxMode,
 scope: Scope,
 ) -> SourceResult<Value> {
 let mut root = match mode {
-EvalMode::Code => parse_code(string),
-EvalMode::Markup => parse(string),
-EvalMode::Math => parse_math(string),
+SyntaxMode::Code => parse_code(string),
+SyntaxMode::Markup => parse(string),
+SyntaxMode::Math => parse_math(string),
 };
 
 root.synthesize(span);
@@ -141,11 +140,11 @@ pub fn eval_string(
 
 // Evaluate the code.
 let output = match mode {
-EvalMode::Code => root.cast::<ast::Code>().unwrap().eval(&mut vm)?,
-EvalMode::Markup => {
+SyntaxMode::Code => root.cast::<ast::Code>().unwrap().eval(&mut vm)?,
+SyntaxMode::Markup => {
 Value::Content(root.cast::<ast::Markup>().unwrap().eval(&mut vm)?)
 }
-EvalMode::Math => Value::Content(
+SyntaxMode::Math => Value::Content(
 EquationElem::new(root.cast::<ast::Math>().unwrap().eval(&mut vm)?)
 .with_block(false)
 .pack()
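For orientation, a minimal sketch (not part of the diff) of how a SyntaxMode value selects a parser entry point, mirroring the match in eval_string above; parse, parse_code, and parse_math are the typst_syntax functions that appear later in this comparison, while the helper name is illustrative:

    use typst_syntax::{parse, parse_code, parse_math, SyntaxMode, SyntaxNode};

    /// Hypothetical helper: pick the parser entry point for a syntax mode.
    fn parse_with_mode(text: &str, mode: SyntaxMode) -> SyntaxNode {
        match mode {
            SyntaxMode::Markup => parse(text),
            SyntaxMode::Math => parse_math(text),
            SyntaxMode::Code => parse_code(text),
        }
    }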
@@ -9,7 +9,7 @@ use std::ops::Add;
 
 use ecow::eco_format;
 use smallvec::SmallVec;
-use typst_syntax::{Span, Spanned};
+use typst_syntax::{Span, Spanned, SyntaxMode};
 use unicode_math_class::MathClass;
 
 use crate::diag::{At, HintedStrResult, HintedString, SourceResult, StrResult};
@@ -459,6 +459,21 @@ impl FromValue for Never {
 }
 }
 
+cast! {
+SyntaxMode,
+self => IntoValue::into_value(match self {
+SyntaxMode::Markup => "markup",
+SyntaxMode::Math => "math",
+SyntaxMode::Code => "code",
+}),
+/// Evaluate as markup, as in a Typst file.
+"markup" => SyntaxMode::Markup,
+/// Evaluate as math, as in an equation.
+"math" => SyntaxMode::Math,
+/// Evaluate as code, as after a hash.
+"code" => SyntaxMode::Code,
+}
+
 cast! {
 MathClass,
 self => IntoValue::into_value(match self {
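A hedged sketch (not from the diff, import paths assumed) of what the cast! block above provides: SyntaxMode now converts to and from Typst values, so the strings "markup", "math", and "code" used by functions such as eval round-trip through the value system.

    use typst_library::foundations::{IntoValue, Value};
    use typst_syntax::SyntaxMode;

    fn mode_as_value(mode: SyntaxMode) -> Value {
        // Produces a string value ("markup" | "math" | "code") via the
        // IntoValue impl generated by the cast! block above.
        mode.into_value()
    }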
@@ -69,6 +69,7 @@ pub use self::ty::*;
 pub use self::value::*;
 pub use self::version::*;
 pub use typst_macros::{scope, ty};
+use typst_syntax::SyntaxMode;
 
 #[rustfmt::skip]
 #[doc(hidden)]

@@ -83,7 +84,6 @@ use typst_syntax::Spanned;
 
 use crate::diag::{bail, SourceResult, StrResult};
 use crate::engine::Engine;
-use crate::routines::EvalMode;
 use crate::{Feature, Features};
 
 /// Hook up all `foundations` definitions.

@@ -273,8 +273,8 @@ pub fn eval(
 /// #eval("1_2^3", mode: "math")
 /// ```
 #[named]
-#[default(EvalMode::Code)]
-mode: EvalMode,
+#[default(SyntaxMode::Code)]
+mode: SyntaxMode,
 /// A scope of definitions that are made available.
 ///
 /// ```example
@@ -16,7 +16,7 @@ use hayagriva::{
 };
 use indexmap::IndexMap;
 use smallvec::{smallvec, SmallVec};
-use typst_syntax::{Span, Spanned};
+use typst_syntax::{Span, Spanned, SyntaxMode};
 use typst_utils::{Get, ManuallyHash, NonZeroExt, PicoStr};
 
 use crate::diag::{

@@ -39,7 +39,7 @@ use crate::model::{
 CitationForm, CiteGroup, Destination, FootnoteElem, HeadingElem, LinkElem, ParElem,
 Url,
 };
-use crate::routines::{EvalMode, Routines};
+use crate::routines::Routines;
 use crate::text::{
 FontStyle, Lang, LocalName, Region, Smallcaps, SubElem, SuperElem, TextElem,
 WeightDelta,

@@ -1024,7 +1024,7 @@ impl ElemRenderer<'_> {
 Sink::new().track_mut(),
 math,
 self.span,
-EvalMode::Math,
+SyntaxMode::Math,
 Scope::new(),
 )
 .map(Value::display)
@@ -59,7 +59,7 @@ pub struct EmbedElem {
 // We can't distinguish between the two at the moment.
 #[required]
 #[parse(
-match args.find::<Bytes>()? {
+match args.eat::<Bytes>()? {
 Some(data) => data,
 None => engine.world.file(id).at(span)?,
 }
@@ -4,7 +4,7 @@ use std::hash::{Hash, Hasher};
 use std::num::NonZeroUsize;
 
 use comemo::{Tracked, TrackedMut};
-use typst_syntax::Span;
+use typst_syntax::{Span, SyntaxMode};
 use typst_utils::LazyHash;
 
 use crate::diag::SourceResult;

@@ -58,7 +58,7 @@ routines! {
 sink: TrackedMut<Sink>,
 string: &str,
 span: Span,
-mode: EvalMode,
+mode: SyntaxMode,
 scope: Scope,
 ) -> SourceResult<Value>
 
@@ -312,17 +312,6 @@ routines! {
 ) -> SourceResult<Fragment>
 }
 
-/// In which mode to evaluate a string.
-#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
-pub enum EvalMode {
-/// Evaluate as code, as after a hash.
-Code,
-/// Evaluate as markup, like in a Typst file.
-Markup,
-/// Evaluate as math, as in an equation.
-Math,
-}
-
 /// Defines what kind of realization we are performing.
 pub enum RealizationKind<'a> {
 /// This the root realization for layout. Requires a mutable reference
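For downstream code, the net effect of this hunk together with the typst-syntax changes further down is a one-line import migration; a hedged sketch (the function name here is illustrative):

    // Before: the mode enum lived next to the routines.
    // use typst_library::routines::EvalMode;
    // After: the same three variants are provided by the syntax crate.
    use typst_syntax::SyntaxMode;

    fn describe(mode: SyntaxMode) -> &'static str {
        match mode {
            SyntaxMode::Markup => "markup",
            SyntaxMode::Math => "math",
            SyntaxMode::Code => "code",
        }
    }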
@@ -14,13 +14,14 @@ macro_rules! translation {
 };
 }
 
-const TRANSLATIONS: [(&str, &str); 40] = [
+const TRANSLATIONS: &[(&str, &str)] = &[
 translation!("ar"),
 translation!("bg"),
 translation!("ca"),
 translation!("cs"),
 translation!("da"),
 translation!("de"),
+translation!("el"),
 translation!("en"),
 translation!("es"),
 translation!("et"),
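A short aside on the type change above (the rationale is assumed, not stated in the diff): switching from a fixed-size array to a borrowed slice removes the hard-coded element count, so registering a new language such as "el" no longer requires touching the type.

    // With an array, the length is part of the type and must be kept in sync:
    const OLD_STYLE: [(&str, &str); 2] = [("de", "..."), ("en", "...")];
    // With a slice, entries can be added or removed freely:
    const NEW_STYLE: &[(&str, &str)] = &[("de", "..."), ("el", "..."), ("en", "...")];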
@@ -28,7 +29,6 @@ const TRANSLATIONS: [(&str, &str); 40] = [
 translation!("fi"),
 translation!("fr"),
 translation!("gl"),
-translation!("el"),
 translation!("he"),
 translation!("hu"),
 translation!("id"),

@@ -41,8 +41,8 @@ const TRANSLATIONS: [(&str, &str); 40] = [
 translation!("nl"),
 translation!("nn"),
 translation!("pl"),
-translation!("pt-PT"),
 translation!("pt"),
+translation!("pt-PT"),
 translation!("ro"),
 translation!("ru"),
 translation!("sl"),

@@ -53,8 +53,8 @@ const TRANSLATIONS: [(&str, &str); 40] = [
 translation!("tr"),
 translation!("uk"),
 translation!("vi"),
-translation!("zh-TW"),
 translation!("zh"),
+translation!("zh-TW"),
 ];
 
 /// An identifier for a natural language.
@@ -312,14 +312,74 @@ fn lang_str(lang: Lang, region: Option<Region>) -> EcoString {
 
 #[cfg(test)]
 mod tests {
+use std::collections::HashSet;
+use std::path::PathBuf;
+
 use typst_utils::option_eq;
 
 use super::*;
 
+fn translation_files_iter() -> impl Iterator<Item = PathBuf> {
+std::fs::read_dir("translations")
+.unwrap()
+.map(|e| e.unwrap().path())
+.filter(|e| e.is_file() && e.extension().is_some_and(|e| e == "txt"))
+}
+
 #[test]
 fn test_region_option_eq() {
 let region = Some(Region([b'U', b'S']));
 assert!(option_eq(region, "US"));
 assert!(!option_eq(region, "AB"));
 }
+
+#[test]
+fn test_all_translations_included() {
+let defined_keys =
+HashSet::<&str>::from_iter(TRANSLATIONS.iter().map(|(lang, _)| *lang));
+let mut checked = 0;
+for file in translation_files_iter() {
+assert!(
+defined_keys.contains(
+file.file_stem()
+.expect("translation file should have basename")
+.to_str()
+.expect("translation file name should be utf-8 encoded")
+),
+"translation from {:?} should be registered in TRANSLATIONS in {}",
+file.file_name().unwrap(),
+file!(),
+);
+checked += 1;
+}
+assert_eq!(TRANSLATIONS.len(), checked);
+}
+
+#[test]
+fn test_all_translation_files_formatted() {
+for file in translation_files_iter() {
+let content = std::fs::read_to_string(&file)
+.expect("translation file should be in utf-8 encoding");
+let filename = file.file_name().unwrap();
+assert!(
+content.ends_with('\n'),
+"translation file {filename:?} should end with linebreak",
+);
+for line in content.lines() {
+assert_eq!(
+line.trim(),
+line,
+"line {line:?} in {filename:?} should not have extra whitespaces"
+);
+}
+}
+}
+
+#[test]
+fn test_translations_sorted() {
+assert!(
+TRANSLATIONS.is_sorted_by_key(|(lang, _)| lang),
+"TRANSLATIONS should be sorted"
+);
+}
 }
The remaining translation-file hunks each touch only the last line of the file; the visible text is unchanged (end-of-file and whitespace fixes of the kind the new tests above enforce).

@@ -5,4 +5,4 @@ bibliography = المراجع
 heading = الفصل
 outline = المحتويات
 raw = قائمة
-page = صفحة
+page = صفحة

@@ -5,4 +5,4 @@ bibliography = Библиография
 heading = Раздел
 outline = Съдържание
 raw = Приложение
-page = стр.
+page = стр.

@@ -5,4 +5,4 @@ bibliography = Bibliografia
 heading = Secció
 outline = Índex
 raw = Llistat
-page = pàgina
+page = pàgina

@@ -5,4 +5,4 @@ bibliography = Bibliografie
 heading = Kapitola
 outline = Obsah
 raw = Výpis
-page = strana
+page = strana

@@ -5,4 +5,4 @@ bibliography = Bibliografi
 heading = Afsnit
 outline = Indhold
 raw = Liste
-page = side
+page = side

@@ -5,4 +5,4 @@ bibliography = Bibliographie
 heading = Abschnitt
 outline = Inhaltsverzeichnis
 raw = Listing
-page = Seite
+page = Seite

@@ -4,4 +4,4 @@ equation = Εξίσωση
 bibliography = Βιβλιογραφία
 heading = Κεφάλαιο
 outline = Περιεχόμενα
-raw = Παράθεση
+raw = Παράθεση

@@ -5,4 +5,4 @@ bibliography = Bibliography
 heading = Section
 outline = Contents
 raw = Listing
-page = page
+page = page

@@ -5,4 +5,4 @@ bibliography = Bibliografía
 heading = Sección
 outline = Índice
 raw = Listado
-page = página
+page = página

@@ -5,4 +5,4 @@ bibliography = Viited
 heading = Peatükk
 outline = Sisukord
 raw = List
-page = lk.
+page = lk.

@@ -5,4 +5,4 @@ bibliography = Viitteet
 heading = Osio
 outline = Sisällys
 raw = Esimerkki
-page = sivu
+page = sivu

@@ -5,4 +5,4 @@ bibliography = Bibliographie
 heading = Chapitre
 outline = Table des matières
 raw = Liste
-page = page
+page = page

@@ -5,4 +5,4 @@ bibliography = Bibliografía
 heading = Sección
 outline = Índice
 raw = Listado
-page = páxina
+page = páxina

@@ -5,4 +5,4 @@ bibliography = רשימת מקורות
 heading = חלק
 outline = תוכן עניינים
 raw = קטע מקור
-page = עמוד
+page = עמוד

@@ -4,5 +4,5 @@ equation = Egyenlet
 bibliography = Irodalomjegyzék
 heading = Fejezet
 outline = Tartalomjegyzék
 # raw =
-page = oldal
+page = oldal

@@ -5,4 +5,4 @@ bibliography = Heimildaskrá
 heading = Kafli
 outline = Efnisyfirlit
 raw = Sýnishorn
-page = blaðsíða
+page = blaðsíða

@@ -5,4 +5,4 @@ bibliography = Bibliografia
 heading = Sezione
 outline = Indice
 raw = Codice
-page = pag.
+page = pag.

@@ -5,4 +5,4 @@ bibliography = 参考文献
 heading = 節
 outline = 目次
 raw = リスト
-page = ページ
+page = ページ

@@ -5,4 +5,4 @@ bibliography = Conspectus librorum
 heading = Caput
 outline = Index capitum
 raw = Exemplum
-page = charta
+page = charta

@@ -5,4 +5,4 @@ bibliography = Bibliografi
 heading = Kapittel
 outline = Innhold
 raw = Utskrift
-page = side
+page = side

@@ -5,4 +5,4 @@ bibliography = Bibliografie
 heading = Hoofdstuk
 outline = Inhoudsopgave
 raw = Listing
-page = pagina
+page = pagina

@@ -5,4 +5,4 @@ bibliography = Bibliografi
 heading = Kapittel
 outline = Innhald
 raw = Utskrift
-page = side
+page = side

@@ -5,4 +5,4 @@ bibliography = Bibliografia
 heading = Sekcja
 outline = Spis treści
 raw = Program
-page = strona
+page = strona

@@ -1,8 +1,8 @@
 # figure =
 # table =
 # equation =
 # bibliography =
 heading = Secção
 outline = Índice
 # raw =
-page = página
+page = página

@@ -5,4 +5,4 @@ bibliography = Bibliografia
 heading = Seção
 outline = Sumário
 raw = Listagem
-page = página
+page = página

@@ -6,4 +6,4 @@ heading = Secțiunea
 outline = Cuprins
 # may be wrong
 raw = Listă
-page = pagina
+page = pagina

@@ -5,4 +5,4 @@ bibliography = Библиография
 heading = Раздел
 outline = Содержание
 raw = Листинг
-page = с.
+page = с.

@@ -5,4 +5,4 @@ bibliography = Literatura
 heading = Poglavje
 outline = Kazalo
 raw = Program
-page = stran
+page = stran

@@ -5,4 +5,4 @@ bibliography = Bibliografi
 heading = Kapitull
 outline = Përmbajtja
 raw = List
-page = faqe
+page = faqe

@@ -5,4 +5,4 @@ bibliography = Литература
 heading = Поглавље
 outline = Садржај
 raw = Програм
-page = страна
+page = страна

@@ -5,4 +5,4 @@ bibliography = Bibliografi
 heading = Kapitel
 outline = Innehåll
 raw = Listing
-page = sida
+page = sida

@@ -5,4 +5,4 @@ bibliography = Bibliograpiya
 heading = Seksyon
 outline = Talaan ng mga Nilalaman
 raw = Listahan
-# page =
+# page =

@@ -5,4 +5,4 @@ bibliography = Kaynakça
 heading = Bölüm
 outline = İçindekiler
 raw = Liste
-page = sayfa
+page = sayfa

@@ -5,4 +5,4 @@ bibliography = Бібліографія
 heading = Розділ
 outline = Зміст
 raw = Лістинг
-page = c.
+page = c.

@@ -6,4 +6,4 @@ heading = Phần
 outline = Mục lục
 # may be wrong
 raw = Chương trình
-page = trang
+page = trang

@@ -1,8 +1,8 @@
 figure = 圖
 # table =
 equation = 式
 bibliography = 書目
 heading = 小節
 outline = 目錄
 raw = 程式
-# page =
+# page =

@@ -5,4 +5,4 @@ bibliography = 参考文献
 heading = 小节
 outline = 目录
 raw = 代码
-# page =
+# page =
@@ -4,7 +4,7 @@ use unicode_script::{Script, UnicodeScript};
 use unicode_segmentation::UnicodeSegmentation;
 use unscanny::Scanner;
 
-use crate::{SyntaxError, SyntaxKind, SyntaxNode};
+use crate::{SyntaxError, SyntaxKind, SyntaxMode, SyntaxNode};
 
 /// An iterator over a source code string which returns tokens.
 #[derive(Clone)]

@@ -13,28 +13,17 @@ pub(super) struct Lexer<'s> {
 s: Scanner<'s>,
 /// The mode the lexer is in. This determines which kinds of tokens it
 /// produces.
-mode: LexMode,
+mode: SyntaxMode,
 /// Whether the last token contained a newline.
 newline: bool,
 /// An error for the last token.
 error: Option<SyntaxError>,
 }
 
-/// What kind of tokens to emit.
-#[derive(Debug, Copy, Clone, Eq, PartialEq)]
-pub(super) enum LexMode {
-/// Text and markup.
-Markup,
-/// Math atoms, operators, etc.
-Math,
-/// Keywords, literals and operators.
-Code,
-}
-
 impl<'s> Lexer<'s> {
 /// Create a new lexer with the given mode and a prefix to offset column
 /// calculations.
-pub fn new(text: &'s str, mode: LexMode) -> Self {
+pub fn new(text: &'s str, mode: SyntaxMode) -> Self {
 Self {
 s: Scanner::new(text),
 mode,

@@ -44,12 +33,12 @@ impl<'s> Lexer<'s> {
 }
 
 /// Get the current lexing mode.
-pub fn mode(&self) -> LexMode {
+pub fn mode(&self) -> SyntaxMode {
 self.mode
 }
 
 /// Change the lexing mode.
-pub fn set_mode(&mut self, mode: LexMode) {
+pub fn set_mode(&mut self, mode: SyntaxMode) {
 self.mode = mode;
 }
 

@@ -92,7 +81,7 @@ impl Lexer<'_> {
 }
 }
 
-/// Shared methods with all [`LexMode`].
+/// Shared methods with all [`SyntaxMode`].
 impl Lexer<'_> {
 /// Return the next token in our text. Returns both the [`SyntaxNode`]
 /// and the raw [`SyntaxKind`] to make it more ergonomic to check the kind

@@ -114,14 +103,14 @@ impl Lexer<'_> {
 );
 kind
 }
-Some('`') if self.mode != LexMode::Math => return self.raw(),
+Some('`') if self.mode != SyntaxMode::Math => return self.raw(),
 Some(c) => match self.mode {
-LexMode::Markup => self.markup(start, c),
-LexMode::Math => match self.math(start, c) {
+SyntaxMode::Markup => self.markup(start, c),
+SyntaxMode::Math => match self.math(start, c) {
 (kind, None) => kind,
 (kind, Some(node)) => return (kind, node),
 },
-LexMode::Code => self.code(start, c),
+SyntaxMode::Code => self.code(start, c),
 },
 
 None => SyntaxKind::End,

@@ -145,7 +134,7 @@ impl Lexer<'_> {
 };
 
 self.newline = newlines > 0;
-if self.mode == LexMode::Markup && newlines >= 2 {
+if self.mode == SyntaxMode::Markup && newlines >= 2 {
 SyntaxKind::Parbreak
 } else {
 SyntaxKind::Space

@@ -965,9 +954,9 @@ impl ScannerExt for Scanner<'_> {
 
 /// Whether a character will become a [`SyntaxKind::Space`] token.
 #[inline]
-fn is_space(character: char, mode: LexMode) -> bool {
+fn is_space(character: char, mode: SyntaxMode) -> bool {
 match mode {
-LexMode::Markup => matches!(character, ' ' | '\t') || is_newline(character),
+SyntaxMode::Markup => matches!(character, ' ' | '\t') || is_newline(character),
 _ => character.is_whitespace(),
 }
 }
@@ -30,5 +30,16 @@ pub use self::path::VirtualPath;
 pub use self::source::Source;
 pub use self::span::{Span, Spanned};
 
-use self::lexer::{LexMode, Lexer};
+use self::lexer::Lexer;
 use self::parser::{reparse_block, reparse_markup};
+
+/// The syntax mode of a portion of Typst code.
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
+pub enum SyntaxMode {
+/// Text and markup, as in the top level.
+Markup,
+/// Math atoms, operators, etc., as in equations.
+Math,
+/// Keywords, literals and operators, as after hashes.
+Code,
+}
@@ -7,12 +7,12 @@ use typst_utils::default_math_class;
 use unicode_math_class::MathClass;
 
 use crate::set::{syntax_set, SyntaxSet};
-use crate::{ast, set, LexMode, Lexer, SyntaxError, SyntaxKind, SyntaxNode};
+use crate::{ast, set, Lexer, SyntaxError, SyntaxKind, SyntaxMode, SyntaxNode};
 
 /// Parses a source file as top-level markup.
 pub fn parse(text: &str) -> SyntaxNode {
 let _scope = typst_timing::TimingScope::new("parse");
-let mut p = Parser::new(text, 0, LexMode::Markup);
+let mut p = Parser::new(text, 0, SyntaxMode::Markup);
 markup_exprs(&mut p, true, syntax_set!(End));
 p.finish_into(SyntaxKind::Markup)
 }

@@ -20,7 +20,7 @@ pub fn parse(text: &str) -> SyntaxNode {
 /// Parses top-level code.
 pub fn parse_code(text: &str) -> SyntaxNode {
 let _scope = typst_timing::TimingScope::new("parse code");
-let mut p = Parser::new(text, 0, LexMode::Code);
+let mut p = Parser::new(text, 0, SyntaxMode::Code);
 code_exprs(&mut p, syntax_set!(End));
 p.finish_into(SyntaxKind::Code)
 }

@@ -28,7 +28,7 @@ pub fn parse_code(text: &str) -> SyntaxNode {
 /// Parses top-level math.
 pub fn parse_math(text: &str) -> SyntaxNode {
 let _scope = typst_timing::TimingScope::new("parse math");
-let mut p = Parser::new(text, 0, LexMode::Math);
+let mut p = Parser::new(text, 0, SyntaxMode::Math);
 math_exprs(&mut p, syntax_set!(End));
 p.finish_into(SyntaxKind::Math)
 }

@@ -63,7 +63,7 @@ pub(super) fn reparse_markup(
 nesting: &mut usize,
 top_level: bool,
 ) -> Option<Vec<SyntaxNode>> {
-let mut p = Parser::new(text, range.start, LexMode::Markup);
+let mut p = Parser::new(text, range.start, SyntaxMode::Markup);
 *at_start |= p.had_newline();
 while !p.end() && p.current_start() < range.end {
 // If not top-level and at a new RightBracket, stop the reparse.

@@ -205,7 +205,7 @@ fn reference(p: &mut Parser) {
 /// Parses a mathematical equation: `$x$`, `$ x^2 $`.
 fn equation(p: &mut Parser) {
 let m = p.marker();
-p.enter_modes(LexMode::Math, AtNewline::Continue, |p| {
+p.enter_modes(SyntaxMode::Math, AtNewline::Continue, |p| {
 p.assert(SyntaxKind::Dollar);
 math(p, syntax_set!(Dollar, End));
 p.expect_closing_delimiter(m, SyntaxKind::Dollar);

@@ -615,7 +615,7 @@ fn code_exprs(p: &mut Parser, stop_set: SyntaxSet) {
 
 /// Parses an atomic code expression embedded in markup or math.
 fn embedded_code_expr(p: &mut Parser) {
-p.enter_modes(LexMode::Code, AtNewline::Stop, |p| {
+p.enter_modes(SyntaxMode::Code, AtNewline::Stop, |p| {
 p.assert(SyntaxKind::Hash);
 if p.had_trivia() || p.end() {
 p.expected("expression");

@@ -777,7 +777,7 @@ fn code_primary(p: &mut Parser, atomic: bool) {
 
 /// Reparses a full content or code block.
 pub(super) fn reparse_block(text: &str, range: Range<usize>) -> Option<SyntaxNode> {
-let mut p = Parser::new(text, range.start, LexMode::Code);
+let mut p = Parser::new(text, range.start, SyntaxMode::Code);
 assert!(p.at(SyntaxKind::LeftBracket) || p.at(SyntaxKind::LeftBrace));
 block(&mut p);
 (p.balanced && p.prev_end() == range.end)

@@ -796,7 +796,7 @@ fn block(p: &mut Parser) {
 /// Parses a code block: `{ let x = 1; x + 2 }`.
 fn code_block(p: &mut Parser) {
 let m = p.marker();
-p.enter_modes(LexMode::Code, AtNewline::Continue, |p| {
+p.enter_modes(SyntaxMode::Code, AtNewline::Continue, |p| {
 p.assert(SyntaxKind::LeftBrace);
 code(p, syntax_set!(RightBrace, RightBracket, RightParen, End));
 p.expect_closing_delimiter(m, SyntaxKind::RightBrace);

@@ -807,7 +807,7 @@ fn code_block(p: &mut Parser) {
 /// Parses a content block: `[*Hi* there!]`.
 fn content_block(p: &mut Parser) {
 let m = p.marker();
-p.enter_modes(LexMode::Markup, AtNewline::Continue, |p| {
+p.enter_modes(SyntaxMode::Markup, AtNewline::Continue, |p| {
 p.assert(SyntaxKind::LeftBracket);
 markup(p, true, true, syntax_set!(RightBracket, End));
 p.expect_closing_delimiter(m, SyntaxKind::RightBracket);

@@ -1516,10 +1516,10 @@ fn pattern_leaf<'s>(
 /// ### Modes
 ///
 /// The parser manages the transitions between the three modes of Typst through
-/// [lexer modes](`LexMode`) and [newline modes](`AtNewline`).
+/// [syntax modes](`SyntaxMode`) and [newline modes](`AtNewline`).
 ///
-/// The lexer modes map to the three Typst modes and are stored in the lexer,
-/// changing which`SyntaxKind`s it will generate.
+/// The syntax modes map to the three Typst modes and are stored in the lexer,
+/// changing which `SyntaxKind`s it will generate.
 ///
 /// The newline mode is used to determine whether a newline should end the
 /// current expression. If so, the parser temporarily changes `token`'s kind to

@@ -1529,7 +1529,7 @@ struct Parser<'s> {
 /// The source text shared with the lexer.
 text: &'s str,
 /// A lexer over the source text with multiple modes. Defines the boundaries
-/// of tokens and determines their [`SyntaxKind`]. Contains the [`LexMode`]
+/// of tokens and determines their [`SyntaxKind`]. Contains the [`SyntaxMode`]
 /// defining our current Typst mode.
 lexer: Lexer<'s>,
 /// The newline mode: whether to insert a temporary end at newlines.

@@ -1612,7 +1612,7 @@ impl AtNewline {
 AtNewline::RequireColumn(min_col) => {
 // When the column is `None`, the newline doesn't start a
 // column, and we continue parsing. This may happen on the
-// boundary of lexer modes, since we only report a column in
+// boundary of syntax modes, since we only report a column in
 // Markup.
 column.is_some_and(|column| column <= min_col)
 }

@@ -1643,8 +1643,8 @@ impl IndexMut<Marker> for Parser<'_> {
 
 /// Creating/Consuming the parser and getting info about the current token.
 impl<'s> Parser<'s> {
-/// Create a new parser starting from the given text offset and lexer mode.
-fn new(text: &'s str, offset: usize, mode: LexMode) -> Self {
+/// Create a new parser starting from the given text offset and syntax mode.
+fn new(text: &'s str, offset: usize, mode: SyntaxMode) -> Self {
 let mut lexer = Lexer::new(text, mode);
 lexer.jump(offset);
 let nl_mode = AtNewline::Continue;

@@ -1825,13 +1825,13 @@ impl<'s> Parser<'s> {
 self.nodes.insert(from, SyntaxNode::inner(kind, children));
 }
 
-/// Parse within the [`LexMode`] for subsequent tokens (does not change the
+/// Parse within the [`SyntaxMode`] for subsequent tokens (does not change the
 /// current token). This may re-lex the final token on exit.
 ///
 /// This function effectively repurposes the call stack as a stack of modes.
 fn enter_modes(
 &mut self,
-mode: LexMode,
+mode: SyntaxMode,
 stop: AtNewline,
 func: impl FnOnce(&mut Parser<'s>),
 ) {

@@ -1891,7 +1891,8 @@ impl<'s> Parser<'s> {
 }
 
 let newline = if had_newline {
-let column = (lexer.mode() == LexMode::Markup).then(|| lexer.column(start));
+let column =
+(lexer.mode() == SyntaxMode::Markup).then(|| lexer.column(start));
 let newline = Newline { column, parbreak };
 if nl_mode.stop_at(newline, kind) {
 // Insert a temporary `SyntaxKind::End` to halt the parser.

@@ -1938,7 +1939,7 @@ struct Checkpoint {
 #[derive(Clone)]
 struct PartialState {
 cursor: usize,
-lex_mode: LexMode,
+lex_mode: SyntaxMode,
 token: Token,
 }
 
@@ -72,7 +72,7 @@ impl PicoStr {
 pub const fn constant(string: &'static str) -> PicoStr {
 match PicoStr::try_constant(string) {
 Ok(value) => value,
-Err(err) => panic!("{}", err.message()),
+Err(err) => failed_to_compile_time_intern(err, string),
 }
 }
 

@@ -190,15 +190,9 @@ mod bitcode {
 impl EncodingError {
 pub const fn message(&self) -> &'static str {
 match self {
-Self::TooLong => {
-"the maximum auto-internible string length is 12. \
-you can add an exception to typst-utils/src/pico.rs \
-to intern longer strings."
-}
+Self::TooLong => "the maximum auto-internible string length is 12",
 Self::BadChar => {
-"can only auto-intern the chars 'a'-'z', '1'-'4', and '-'. \
-you can add an exception to typst-utils/src/pico.rs \
-to intern other strings."
+"can only auto-intern the chars 'a'-'z', '1'-'4', and '-'"
 }
 }
 }

@@ -356,6 +350,39 @@ impl Hash for ResolvedPicoStr {
 }
 }
 
+/// The error when a string could not be interned at compile time. Because the
+/// normal formatting machinery is not available at compile time, just producing
+/// the message is a bit involved ...
+#[track_caller]
+const fn failed_to_compile_time_intern(
+error: bitcode::EncodingError,
+string: &'static str,
+) -> ! {
+const CAPACITY: usize = 512;
+const fn push((buf, i): &mut ([u8; CAPACITY], usize), s: &str) {
+let mut k = 0;
+while k < s.len() && *i < buf.len() {
+buf[*i] = s.as_bytes()[k];
+k += 1;
+*i += 1;
+}
+}
+
+let mut dest = ([0; CAPACITY], 0);
+push(&mut dest, "failed to compile-time intern string \"");
+push(&mut dest, string);
+push(&mut dest, "\". ");
+push(&mut dest, error.message());
+push(&mut dest, ". you can add an exception to ");
+push(&mut dest, file!());
+push(&mut dest, " to intern longer strings.");
+
+let (slice, _) = dest.0.split_at(dest.1);
+let Ok(message) = std::str::from_utf8(slice) else { panic!() };
+
+panic!("{}", message);
+}
+
 #[cfg(test)]
 mod tests {
 use super::*;
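A hedged usage sketch of the interning path touched above (the constant name below is illustrative): PicoStr::constant interns a short string at compile time, and a string that breaks the length or character rules now fails with the message assembled by failed_to_compile_time_intern instead of a terse panic.

    use typst_utils::PicoStr;

    // Compiles: seven lowercase ASCII letters fit the auto-internible alphabet.
    const HEADING: PicoStr = PicoStr::constant("heading");

    // Would fail to compile with the new, descriptive message:
    // const TOO_LONG: PicoStr = PicoStr::constant("a-much-too-long-string");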
@@ -28,3 +28,7 @@
 mime-type: "text/plain",
 description: "A test file",
 )
+
+--- pdf-embed-invalid-data ---
+// Error: 38-45 expected bytes, found string
+#pdf.embed("/assets/text/hello.txt", "hello")