Rename token modes ✏

Laurenz 2021-01-10 20:48:32 +01:00
parent 515905d78d
commit 3c7d249ae4
5 changed files with 179 additions and 180 deletions


@@ -152,7 +152,7 @@ fn unicode_escape(p: &mut Parser, token: TokenUnicodeEscape) -> String {
 /// Parse a block expression.
 fn block_expr(p: &mut Parser) -> Option<Expr> {
-    p.push_mode(TokenMode::Header);
+    p.push_mode(TokenMode::Code);
     p.start_group(Group::Brace);
     let expr = expr(p);
     while !p.eof() {
@@ -173,7 +173,7 @@ fn paren_call(p: &mut Parser, name: Spanned<Ident>) -> ExprCall {
 /// Parse a bracketed function call.
 fn bracket_call(p: &mut Parser) -> ExprCall {
-    p.push_mode(TokenMode::Header);
+    p.push_mode(TokenMode::Code);
     p.start_group(Group::Bracket);
     // One header is guaranteed, but there may be more (through chaining).
@@ -228,7 +228,7 @@ fn bracket_subheader(p: &mut Parser) -> ExprCall {
 /// Parse the body of a bracketed function call.
 fn bracket_body(p: &mut Parser) -> Tree {
-    p.push_mode(TokenMode::Body);
+    p.push_mode(TokenMode::Markup);
     p.start_group(Group::Bracket);
     let tree = tree(p);
     p.pop_mode();
@@ -348,7 +348,7 @@ fn value(p: &mut Parser) -> Option<Expr> {
 // Parse a content value: `{...}`.
 fn content(p: &mut Parser) -> Tree {
-    p.push_mode(TokenMode::Body);
+    p.push_mode(TokenMode::Markup);
     p.start_group(Group::Brace);
     let tree = tree(p);
     p.pop_mode();


@@ -16,7 +16,7 @@ pub struct Parser<'s> {
     peeked: Option<Token<'s>>,
     /// The start position of the peeked token.
     next_start: Pos,
-    /// The end position of the last (non-whitespace if in header) token.
+    /// The end position of the last (non-whitespace if in code mode) token.
     last_end: Pos,
     /// The stack of modes we were in.
     modes: Vec<TokenMode>,
@@ -29,7 +29,7 @@ pub struct Parser<'s> {
 impl<'s> Parser<'s> {
     /// Create a new parser for the source string.
     pub fn new(src: &'s str) -> Self {
-        let mut tokens = Tokens::new(src, TokenMode::Body);
+        let mut tokens = Tokens::new(src, TokenMode::Markup);
         let next = tokens.next();
         Self {
             tokens,
@@ -151,7 +151,7 @@ impl<'s> Parser<'s> {
     }

     /// Execute `f` and return the result alongside the span of everything `f`
-    /// ate. Excludes leading and trailing whitespace in header mode.
+    /// ate. Excludes leading and trailing whitespace in code mode.
     pub fn span<T, F>(&mut self, f: F) -> Spanned<T>
     where
         F: FnOnce(&mut Self) -> T,
@@ -243,7 +243,7 @@ impl<'s> Parser<'s> {
     /// The position at which the last token ended.
     ///
-    /// Refers to the end of the last _non-whitespace_ token in header mode.
+    /// Refers to the end of the last _non-whitespace_ token in code mode.
     pub fn last_end(&self) -> Pos {
         self.last_end
     }
@@ -266,8 +266,8 @@ impl<'s> Parser<'s> {
         self.next = self.tokens.next();
         match self.tokens.mode() {
-            TokenMode::Body => {}
-            TokenMode::Header => {
+            TokenMode::Markup => {}
+            TokenMode::Code => {
                 while matches!(
                     self.next,
                     Some(Token::Space(_)) |


@@ -179,11 +179,11 @@ macro_rules! Call {
 #[test]
 fn test_parse_comments() {
-    // In body.
+    // In markup.
     t!("a// you\nb" Text("a"), Space, Text("b"));
     t!("* // \n /*\n\n*/*" Strong, Space, Space, Strong);
-    // In header.
+    // In code.
     t!("[v /*12pt*/]" Call!("v"));
     t!("[v //\n]" Call!("v"));
     t!("[v 12, /*\n*/ size: 14]" Call!("v", Args![Int(12), "size" => Int(14)]));


@@ -4,8 +4,6 @@ use super::{is_newline, Scanner};
 use crate::geom::{AngularUnit, LengthUnit};
 use crate::syntax::*;
-use TokenMode::*;

 /// An iterator over the tokens of a string of source code.
 #[derive(Clone)]
 pub struct Tokens<'s> {
@@ -13,13 +11,13 @@ pub struct Tokens<'s> {
     mode: TokenMode,
 }

-/// Whether to tokenize in header mode which yields expression, comma and
-/// similar tokens or in body mode which yields text and star, underscore,
-/// backtick tokens.
+/// What kind of tokens to emit.
 #[derive(Debug, Copy, Clone, Eq, PartialEq)]
 pub enum TokenMode {
-    Header,
-    Body,
+    /// Text and markup.
+    Markup,
+    /// Blocks and expressions.
+    Code,
 }

 impl<'s> Tokens<'s> {
@@ -81,7 +79,7 @@ impl<'s> Iterator for Tokens<'s> {
         }

         Some(match self.mode {
-            Body => match c {
+            TokenMode::Markup => match c {
                 // Markup.
                 '*' => Token::Star,
                 '_' => Token::Underscore,
@@ -95,7 +93,7 @@ impl<'s> Iterator for Tokens<'s> {
                 _ => self.text(start),
             },

-            Header => match c {
+            TokenMode::Code => match c {
                 // Parens.
                 '(' => Token::LeftParen,
                 ')' => Token::RightParen,
@@ -446,6 +444,7 @@ mod tests {
     use Option::None;
     use Token::{Ident, *};
+    use TokenMode::{Code, Markup};

     fn Raw(text: &str, backticks: usize, terminated: bool) -> Token {
         Token::Raw(TokenRaw { text, backticks, terminated })
@@ -491,33 +490,33 @@ mod tests {
         (' ', None, "\r", Space(1)),
         (' ', None, "\r\n", Space(1)),
         // Letter suffixes.
-        ('a', Some(Body), "hello", Text("hello")),
-        ('a', Some(Body), "💚", Text("💚")),
-        ('a', Some(Header), "if", If),
-        ('a', Some(Header), "val", Ident("val")),
-        ('a', Some(Header), "α", Ident("α")),
-        ('a', Some(Header), "_", Ident("_")),
+        ('a', Some(Markup), "hello", Text("hello")),
+        ('a', Some(Markup), "💚", Text("💚")),
+        ('a', Some(Code), "if", If),
+        ('a', Some(Code), "val", Ident("val")),
+        ('a', Some(Code), "α", Ident("α")),
+        ('a', Some(Code), "_", Ident("_")),
         // Number suffixes.
-        ('1', Some(Header), "2", Int(2)),
-        ('1', Some(Header), ".2", Float(0.2)),
+        ('1', Some(Code), "2", Int(2)),
+        ('1', Some(Code), ".2", Float(0.2)),
         // Symbol suffixes.
         ('/', None, "[", LeftBracket),
         ('/', None, "//", LineComment("")),
         ('/', None, "/**/", BlockComment("")),
-        ('/', Some(Body), "*", Star),
-        ('/', Some(Body), "_", Underscore),
-        ('/', Some(Body), r"\\", Text(r"\")),
-        ('/', Some(Body), "#let", Let),
-        ('/', Some(Header), "(", LeftParen),
-        ('/', Some(Header), ":", Colon),
-        ('/', Some(Header), "+=", PlusEq),
-        ('/', Some(Header), "#123", Hex("123")),
+        ('/', Some(Markup), "*", Star),
+        ('/', Some(Markup), "_", Underscore),
+        ('/', Some(Markup), r"\\", Text(r"\")),
+        ('/', Some(Markup), "#let", Let),
+        ('/', Some(Code), "(", LeftParen),
+        ('/', Some(Code), ":", Colon),
+        ('/', Some(Code), "+=", PlusEq),
+        ('/', Some(Code), "#123", Hex("123")),
     ];

     macro_rules! t {
         (Both $($tts:tt)*) => {
-            t!(Body $($tts)*);
-            t!(Header $($tts)*);
+            t!(Markup $($tts)*);
+            t!(Code $($tts)*);
         };
         ($mode:ident $([$blocks:literal])?: $src:expr => $($token:expr),*) => {{
             // Test without suffix.
@@ -545,69 +544,69 @@ mod tests {
     #[test]
     fn test_tokenize_brackets() {
-        // Test body.
-        t!(Body: "[" => LeftBracket);
-        t!(Body: "]" => RightBracket);
-        t!(Body: "{" => LeftBrace);
-        t!(Body: "}" => RightBrace);
-        t!(Body[" /"]: "(" => Text("("));
-        t!(Body[" /"]: ")" => Text(")"));
+        // Test in markup.
+        t!(Markup: "[" => LeftBracket);
+        t!(Markup: "]" => RightBracket);
+        t!(Markup: "{" => LeftBrace);
+        t!(Markup: "}" => RightBrace);
+        t!(Markup[" /"]: "(" => Text("("));
+        t!(Markup[" /"]: ")" => Text(")"));

-        // Test header.
-        t!(Header: "[" => LeftBracket);
-        t!(Header: "]" => RightBracket);
-        t!(Header: "{" => LeftBrace);
-        t!(Header: "}" => RightBrace);
-        t!(Header: "(" => LeftParen);
-        t!(Header: ")" => RightParen);
+        // Test in code.
+        t!(Code: "[" => LeftBracket);
+        t!(Code: "]" => RightBracket);
+        t!(Code: "{" => LeftBrace);
+        t!(Code: "}" => RightBrace);
+        t!(Code: "(" => LeftParen);
+        t!(Code: ")" => RightParen);
     }

     #[test]
     fn test_tokenize_body_symbols() {
         // Test markup tokens.
-        t!(Body[" a1"]: "*" => Star);
-        t!(Body: "_" => Underscore);
-        t!(Body["a1/"]: "# " => Hash, Space(0));
-        t!(Body: "~" => Tilde);
-        t!(Body[" "]: r"\" => Backslash);
+        t!(Markup[" a1"]: "*" => Star);
+        t!(Markup: "_" => Underscore);
+        t!(Markup["a1/"]: "# " => Hash, Space(0));
+        t!(Markup: "~" => Tilde);
+        t!(Markup[" "]: r"\" => Backslash);
     }

     #[test]
     fn test_tokenize_header_symbols() {
         // Test all symbols.
-        t!(Header: "," => Comma);
-        t!(Header: ":" => Colon);
-        t!(Header: "|" => Pipe);
-        t!(Header: "+" => Plus);
-        t!(Header: "-" => Hyph);
-        t!(Header[" a1"]: "*" => Star);
-        t!(Header[" a1"]: "/" => Slash);
-        t!(Header: "=" => Eq);
-        t!(Header: "==" => EqEq);
-        t!(Header: "!=" => BangEq);
-        t!(Header: "<" => Lt);
-        t!(Header: "<=" => LtEq);
-        t!(Header: ">" => Gt);
-        t!(Header: ">=" => GtEq);
-        t!(Header: "+=" => PlusEq);
-        t!(Header: "-=" => HyphEq);
-        t!(Header: "*=" => StarEq);
-        t!(Header: "/=" => SlashEq);
-        t!(Header: "?" => Question);
-        t!(Header: ".." => Dots);
-        t!(Header: "=>" => Arrow);
+        t!(Code: "," => Comma);
+        t!(Code: ":" => Colon);
+        t!(Code: "|" => Pipe);
+        t!(Code: "+" => Plus);
+        t!(Code: "-" => Hyph);
+        t!(Code[" a1"]: "*" => Star);
+        t!(Code[" a1"]: "/" => Slash);
+        t!(Code: "=" => Eq);
+        t!(Code: "==" => EqEq);
+        t!(Code: "!=" => BangEq);
+        t!(Code: "<" => Lt);
+        t!(Code: "<=" => LtEq);
+        t!(Code: ">" => Gt);
+        t!(Code: ">=" => GtEq);
+        t!(Code: "+=" => PlusEq);
+        t!(Code: "-=" => HyphEq);
+        t!(Code: "*=" => StarEq);
+        t!(Code: "/=" => SlashEq);
+        t!(Code: "?" => Question);
+        t!(Code: ".." => Dots);
+        t!(Code: "=>" => Arrow);

         // Test combinations.
-        t!(Header: "|=>" => Pipe, Arrow);
-        t!(Header: "<=>" => LtEq, Gt);
-        t!(Header[" a/"]: "..." => Dots, Invalid("."));
+        t!(Code: "|=>" => Pipe, Arrow);
+        t!(Code: "<=>" => LtEq, Gt);
+        t!(Code[" a/"]: "..." => Dots, Invalid("."));

         // Test hyphen as symbol vs part of identifier.
-        t!(Header[" /"]: "-1" => Hyph, Int(1));
-        t!(Header[" /"]: "-a" => Hyph, Ident("a"));
-        t!(Header[" /"]: "--1" => Hyph, Hyph, Int(1));
-        t!(Header[" /"]: "--_a" => Hyph, Hyph, Ident("_a"));
-        t!(Header[" /"]: "a-b" => Ident("a-b"));
+        t!(Code[" /"]: "-1" => Hyph, Int(1));
+        t!(Code[" /"]: "-a" => Hyph, Ident("a"));
+        t!(Code[" /"]: "--1" => Hyph, Hyph, Int(1));
+        t!(Code[" /"]: "--_a" => Hyph, Hyph, Ident("_a"));
+        t!(Code[" /"]: "a-b" => Ident("a-b"));
     }

     #[test]
@@ -625,13 +624,13 @@ mod tests {
         ];

         for &(s, t) in &both {
-            t!(Header[" "]: s => t);
-            t!(Body[" "]: format!("#{}", s) => t);
-            t!(Body[" "]: format!("#{0}#{0}", s) => t, t);
-            t!(Body[" /"]: format!("# {}", s) => Hash, Space(0), Text(s));
+            t!(Code[" "]: s => t);
+            t!(Markup[" "]: format!("#{}", s) => t);
+            t!(Markup[" "]: format!("#{0}#{0}", s) => t, t);
+            t!(Markup[" /"]: format!("# {}", s) => Hash, Space(0), Text(s));
         }

-        let header = [
+        let code = [
             ("not", Not),
             ("and", And),
             ("or", Or),
@@ -640,103 +639,103 @@ mod tests {
             ("true", Bool(true)),
         ];

-        for &(s, t) in &header {
-            t!(Header[" "]: s => t);
-            t!(Body[" /"]: s => Text(s));
+        for &(s, t) in &code {
+            t!(Code[" "]: s => t);
+            t!(Markup[" /"]: s => Text(s));
         }

         // Test invalid case.
-        t!(Header[" /"]: "None" => Ident("None"));
-        t!(Header[" /"]: "True" => Ident("True"));
+        t!(Code[" /"]: "None" => Ident("None"));
+        t!(Code[" /"]: "True" => Ident("True"));

         // Test word that contains keyword.
-        t!(Body[" "]: "#letter" => Invalid("#letter"));
-        t!(Header[" /"]: "falser" => Ident("falser"));
+        t!(Markup[" "]: "#letter" => Invalid("#letter"));
+        t!(Code[" /"]: "falser" => Ident("falser"));
     }

     #[test]
     fn test_tokenize_text() {
         // Test basic text.
-        t!(Body[" /"]: "hello" => Text("hello"));
-        t!(Body[" /"]: "hello-world" => Text("hello-world"));
+        t!(Markup[" /"]: "hello" => Text("hello"));
+        t!(Markup[" /"]: "hello-world" => Text("hello-world"));

-        // Test header symbols in text.
-        t!(Body[" /"]: "a():\"b" => Text("a():\"b"));
-        t!(Body[" /"]: ":,=|/+-" => Text(":,=|/+-"));
+        // Test code symbols in text.
+        t!(Markup[" /"]: "a():\"b" => Text("a():\"b"));
+        t!(Markup[" /"]: ":,=|/+-" => Text(":,=|/+-"));

         // Test text ends.
-        t!(Body[""]: "hello " => Text("hello"), Space(0));
-        t!(Body[""]: "hello~" => Text("hello"), Tilde);
+        t!(Markup[""]: "hello " => Text("hello"), Space(0));
+        t!(Markup[""]: "hello~" => Text("hello"), Tilde);
     }

     #[test]
     fn test_tokenize_raw_blocks() {
         // Test basic raw block.
-        t!(Body: "`raw`" => Raw("raw", 1, true));
-        t!(Body[""]: "`]" => Raw("]", 1, false));
+        t!(Markup: "`raw`" => Raw("raw", 1, true));
+        t!(Markup[""]: "`]" => Raw("]", 1, false));

         // Test special symbols in raw block.
-        t!(Body: "`[func]`" => Raw("[func]", 1, true));
-        t!(Body[""]: r"`\`` " => Raw(r"\", 1, true), Raw(" ", 1, false));
+        t!(Markup: "`[func]`" => Raw("[func]", 1, true));
+        t!(Markup[""]: r"`\`` " => Raw(r"\", 1, true), Raw(" ", 1, false));

         // Test more backticks.
-        t!(Body: "````🚀````" => Raw("🚀", 4, true));
-        t!(Body[""]: "````👩‍🚀``noend" => Raw("👩‍🚀``noend", 4, false));
-        t!(Body[""]: "````raw``````new" => Raw("raw", 4, true), Raw("new", 2, false));
+        t!(Markup: "````🚀````" => Raw("🚀", 4, true));
+        t!(Markup[""]: "````👩‍🚀``noend" => Raw("👩‍🚀``noend", 4, false));
+        t!(Markup[""]: "````raw``````new" => Raw("raw", 4, true), Raw("new", 2, false));

         // Test separated closing backticks.
-        t!(Body: "```not `y`e`t```" => Raw("not `y`e`t", 3, true));
+        t!(Markup: "```not `y`e`t```" => Raw("not `y`e`t", 3, true));
     }

     #[test]
     fn test_tokenize_math_formulas() {
         // Test basic formula.
-        t!(Body: "$x$" => Math("x", true, true));
-        t!(Body: "$$x + y$$" => Math("x + y", false, true));
+        t!(Markup: "$x$" => Math("x", true, true));
+        t!(Markup: "$$x + y$$" => Math("x + y", false, true));

         // Test unterminated.
-        t!(Body[""]: "$$x" => Math("x", false, false));
-        t!(Body[""]: "$$x$\n$" => Math("x$\n$", false, false));
+        t!(Markup[""]: "$$x" => Math("x", false, false));
+        t!(Markup[""]: "$$x$\n$" => Math("x$\n$", false, false));

         // Test escape sequences.
-        t!(Body: r"$$\\\$$$" => Math(r"\\\$", false, true));
-        t!(Body[""]: r"$$ $\\$" => Math(r" $\\$", false, false));
+        t!(Markup: r"$$\\\$$$" => Math(r"\\\$", false, true));
+        t!(Markup[""]: r"$$ $\\$" => Math(r" $\\$", false, false));
     }

     #[test]
     fn test_tokenize_escape_sequences() {
         // Test escapable symbols.
-        t!(Body: r"\\" => Text(r"\"));
-        t!(Body: r"\/" => Text("/"));
-        t!(Body: r"\[" => Text("["));
-        t!(Body: r"\]" => Text("]"));
-        t!(Body: r"\{" => Text("{"));
-        t!(Body: r"\}" => Text("}"));
-        t!(Body: r"\*" => Text("*"));
-        t!(Body: r"\_" => Text("_"));
-        t!(Body: r"\#" => Text("#"));
-        t!(Body: r"\~" => Text("~"));
-        t!(Body: r"\`" => Text("`"));
+        t!(Markup: r"\\" => Text(r"\"));
+        t!(Markup: r"\/" => Text("/"));
+        t!(Markup: r"\[" => Text("["));
+        t!(Markup: r"\]" => Text("]"));
+        t!(Markup: r"\{" => Text("{"));
+        t!(Markup: r"\}" => Text("}"));
+        t!(Markup: r"\*" => Text("*"));
+        t!(Markup: r"\_" => Text("_"));
+        t!(Markup: r"\#" => Text("#"));
+        t!(Markup: r"\~" => Text("~"));
+        t!(Markup: r"\`" => Text("`"));

         // Test unescapable symbols.
-        t!(Body[" /"]: r"\a" => Text(r"\"), Text("a"));
-        t!(Body[" /"]: r"\u" => Text(r"\"), Text("u"));
-        t!(Body[" /"]: r"\1" => Text(r"\"), Text("1"));
-        t!(Body[" /"]: r"\:" => Text(r"\"), Text(":"));
-        t!(Body[" /"]: r"\=" => Text(r"\"), Text("="));
-        t!(Body[" /"]: r#"\""# => Text(r"\"), Text("\""));
+        t!(Markup[" /"]: r"\a" => Text(r"\"), Text("a"));
+        t!(Markup[" /"]: r"\u" => Text(r"\"), Text("u"));
+        t!(Markup[" /"]: r"\1" => Text(r"\"), Text("1"));
+        t!(Markup[" /"]: r"\:" => Text(r"\"), Text(":"));
+        t!(Markup[" /"]: r"\=" => Text(r"\"), Text("="));
+        t!(Markup[" /"]: r#"\""# => Text(r"\"), Text("\""));

         // Test basic unicode escapes.
-        t!(Body: r"\u{}" => UnicodeEscape("", true));
-        t!(Body: r"\u{2603}" => UnicodeEscape("2603", true));
-        t!(Body: r"\u{P}" => UnicodeEscape("P", true));
+        t!(Markup: r"\u{}" => UnicodeEscape("", true));
+        t!(Markup: r"\u{2603}" => UnicodeEscape("2603", true));
+        t!(Markup: r"\u{P}" => UnicodeEscape("P", true));

         // Test unclosed unicode escapes.
-        t!(Body[" /"]: r"\u{" => UnicodeEscape("", false));
-        t!(Body[" /"]: r"\u{1" => UnicodeEscape("1", false));
-        t!(Body[" /"]: r"\u{26A4" => UnicodeEscape("26A4", false));
-        t!(Body[" /"]: r"\u{1Q3P" => UnicodeEscape("1Q3P", false));
-        t!(Body: r"\u{1🏕}" => UnicodeEscape("1", false), Text("🏕"), RightBrace);
+        t!(Markup[" /"]: r"\u{" => UnicodeEscape("", false));
+        t!(Markup[" /"]: r"\u{1" => UnicodeEscape("1", false));
+        t!(Markup[" /"]: r"\u{26A4" => UnicodeEscape("26A4", false));
+        t!(Markup[" /"]: r"\u{1Q3P" => UnicodeEscape("1Q3P", false));
+        t!(Markup: r"\u{1🏕}" => UnicodeEscape("1", false), Text("🏕"), RightBrace);
     }

     #[test]
@@ -763,18 +762,18 @@ mod tests {
     #[test]
     fn test_tokenize_idents() {
         // Test valid identifiers.
-        t!(Header[" /"]: "x" => Ident("x"));
-        t!(Header[" /"]: "value" => Ident("value"));
-        t!(Header[" /"]: "__main__" => Ident("__main__"));
-        t!(Header[" /"]: "_snake_case" => Ident("_snake_case"));
+        t!(Code[" /"]: "x" => Ident("x"));
+        t!(Code[" /"]: "value" => Ident("value"));
+        t!(Code[" /"]: "__main__" => Ident("__main__"));
+        t!(Code[" /"]: "_snake_case" => Ident("_snake_case"));

         // Test non-ascii.
-        t!(Header[" /"]: "α" => Ident("α"));
-        t!(Header[" /"]: "ម្តាយ" => Ident("ម្តាយ"));
+        t!(Code[" /"]: "α" => Ident("α"));
+        t!(Code[" /"]: "ម្តាយ" => Ident("ម្តាយ"));

         // Test hyphen parsed as identifier.
-        t!(Header[" /"]: "kebab-case" => Ident("kebab-case"));
-        t!(Header[" /"]: "one-10" => Ident("one-10"));
+        t!(Code[" /"]: "kebab-case" => Ident("kebab-case"));
+        t!(Code[" /"]: "one-10" => Ident("one-10"));
     }

     #[test]
@@ -798,22 +797,22 @@ mod tests {
         // Test integers.
         for &(s, v) in &ints {
-            t!(Header[" /"]: s => Int(v));
+            t!(Code[" /"]: s => Int(v));
         }

         // Test floats.
         for &(s, v) in &floats {
-            t!(Header[" /"]: s => Float(v));
+            t!(Code[" /"]: s => Float(v));
         }

         // Test attached numbers.
-        t!(Header[" /"]: ".2.3" => Float(0.2), Float(0.3));
-        t!(Header[" /"]: "1.2.3" => Float(1.2), Float(0.3));
-        t!(Header[" /"]: "1e-2+3" => Float(0.01), Plus, Int(3));
+        t!(Code[" /"]: ".2.3" => Float(0.2), Float(0.3));
+        t!(Code[" /"]: "1.2.3" => Float(1.2), Float(0.3));
+        t!(Code[" /"]: "1e-2+3" => Float(0.01), Plus, Int(3));

         // Test float from too large integer.
         let large = i64::MAX as f64 + 1.0;
-        t!(Header[" /"]: large.to_string() => Float(large));
+        t!(Code[" /"]: large.to_string() => Float(large));

         // Combined integers and floats.
         let nums = ints.iter().map(|&(k, v)| (k, v as f64)).chain(floats.iter().copied());
@@ -831,7 +830,7 @@ mod tests {
         // Numeric types.
         for &(suffix, build) in &suffixes {
             for (s, v) in nums.clone() {
-                t!(Header[" /"]: format!("{}{}", s, suffix) => build(v));
+                t!(Code[" /"]: format!("{}{}", s, suffix) => build(v));
             }
         }
     }
@@ -839,26 +838,26 @@ mod tests {
     #[test]
     fn test_tokenize_hex() {
         // Test basic hex expressions.
-        t!(Header[" /"]: "#6ae6dd" => Hex("6ae6dd"));
-        t!(Header[" /"]: "#8A083c" => Hex("8A083c"));
+        t!(Code[" /"]: "#6ae6dd" => Hex("6ae6dd"));
+        t!(Code[" /"]: "#8A083c" => Hex("8A083c"));

         // Test with non-hex letters.
-        t!(Header[" /"]: "#PQ" => Hex("PQ"));
+        t!(Code[" /"]: "#PQ" => Hex("PQ"));
     }

     #[test]
     fn test_tokenize_strings() {
         // Test basic strings.
-        t!(Header: "\"hi\"" => Str("hi", true));
-        t!(Header: "\"hi\nthere\"" => Str("hi\nthere", true));
-        t!(Header: "\"🌎\"" => Str("🌎", true));
+        t!(Code: "\"hi\"" => Str("hi", true));
+        t!(Code: "\"hi\nthere\"" => Str("hi\nthere", true));
+        t!(Code: "\"🌎\"" => Str("🌎", true));

         // Test unterminated.
-        t!(Header[""]: "\"hi" => Str("hi", false));
+        t!(Code[""]: "\"hi" => Str("hi", false));

         // Test escaped quote.
-        t!(Header: r#""a\"bc""# => Str(r#"a\"bc"#, true));
-        t!(Header[""]: r#""\""# => Str(r#"\""#, false));
+        t!(Code: r#""a\"bc""# => Str(r#"a\"bc"#, true));
+        t!(Code[""]: r#""\""# => Str(r#"\""#, false));
     }

     #[test]
@@ -907,19 +906,19 @@ mod tests {
         t!(Both: "/**/*/" => BlockComment(""), Token::Invalid("*/"));

         // Test invalid expressions.
-        t!(Header: r"\" => Invalid(r"\"));
-        t!(Header: "🌓" => Invalid("🌓"));
-        t!(Header: r"\:" => Invalid(r"\"), Colon);
-        t!(Header: "meal⌚" => Ident("meal"), Invalid("⌚"));
-        t!(Header[" /"]: r"\a" => Invalid(r"\"), Ident("a"));
+        t!(Code: r"\" => Invalid(r"\"));
+        t!(Code: "🌓" => Invalid("🌓"));
+        t!(Code: r"\:" => Invalid(r"\"), Colon);
+        t!(Code: "meal⌚" => Ident("meal"), Invalid("⌚"));
+        t!(Code[" /"]: r"\a" => Invalid(r"\"), Ident("a"));

         // Test invalid number suffixes.
-        t!(Header[" /"]: "1foo" => Invalid("1foo"));
-        t!(Header: "1p%" => Invalid("1p"), Invalid("%"));
-        t!(Header: "1%%" => Percent(1.0), Invalid("%"));
+        t!(Code[" /"]: "1foo" => Invalid("1foo"));
+        t!(Code: "1p%" => Invalid("1p"), Invalid("%"));
+        t!(Code: "1%%" => Percent(1.0), Invalid("%"));

         // Test invalid keyword.
-        t!(Body[" /"]: "#-" => Hash, Text("-"));
-        t!(Body[" "]: "#do" => Invalid("#do"))
+        t!(Markup[" /"]: "#-" => Hash, Text("-"));
+        t!(Markup[" "]: "#do" => Invalid("#do"))
     }
 }


@@ -109,7 +109,7 @@ pub fn pretty_bracket_call(call: &ExprCall, p: &mut Printer, chained: bool) {
     // Function name.
     p.push_str(&call.name.v);

-    // Find out whether this can be written as body or chain.
+    // Find out whether this can be written with a body or as a chain.
     //
     // Example: Transforms "[v {Hi}]" => "[v][Hi]".
     if let [head @ .., Argument::Pos(Spanned { v: Expr::Content(content), .. })] =