path: root/support/texlab/src/syntax/bibtex/lexer.rs
author     Norbert Preining <norbert@preining.info>   2022-05-26 03:01:06 +0000
committer  Norbert Preining <norbert@preining.info>   2022-05-26 03:01:06 +0000
commit     02d941fa9c9895bb08a84ac9afe3559abd1ba8ad (patch)
tree       481d1d368c6d295a779b590d23bcd5397bcba628 /support/texlab/src/syntax/bibtex/lexer.rs
parent     f01a37f8311f33e32441d25bdadcda9dcdbd165d (diff)
CTAN sync 202205260301
Diffstat (limited to 'support/texlab/src/syntax/bibtex/lexer.rs')
-rw-r--r--  support/texlab/src/syntax/bibtex/lexer.rs | 202
1 file changed, 150 insertions(+), 52 deletions(-)
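The change below replaces texlab's single LogosToken enum, whose variants were turned into SyntaxKind values with an unsafe std::mem::transmute, with four mode-specific Logos token enums (RootToken, BodyToken, ValueToken, ContentToken), each converted to SyntaxKind through a safe From impl. A minimal sketch of that conversion pattern, using hypothetical enum names that stand in for the ones in the diff:

    // Illustrative only: a token enum mapped to a syntax-kind enum via a
    // safe From impl instead of a discriminant transmute. Token and Kind
    // are hypothetical stand-ins for the enums in the diff below.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum Token {
        Whitespace,
        Comma,
    }

    // The screaming-case variants mirror the SyntaxKind style in the diff.
    #[allow(non_camel_case_types)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum Kind {
        WHITESPACE,
        COMMA,
    }

    impl From<Token> for Kind {
        fn from(token: Token) -> Self {
            match token {
                Token::Whitespace => Kind::WHITESPACE,
                Token::Comma => Kind::COMMA,
            }
        }
    }

Unlike the transmute, the From impl is checked by the compiler and does not depend on the two enums keeping their discriminants in lockstep.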
diff --git a/support/texlab/src/syntax/bibtex/lexer.rs b/support/texlab/src/syntax/bibtex/lexer.rs
index dff111d645..3ea8e9f8d2 100644
--- a/support/texlab/src/syntax/bibtex/lexer.rs
+++ b/support/texlab/src/syntax/bibtex/lexer.rs
@@ -1,82 +1,180 @@
use logos::Logos;
-use super::kind::SyntaxKind;
+use super::SyntaxKind::{self, *};
-#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, PartialOrd, Ord, Logos)]
-#[allow(non_camel_case_types)]
-#[repr(u16)]
-enum LogosToken {
- #[regex(r"\s+")]
- WHITESPACE = 2,
-
- #[regex(r"@[Pp][Rr][Ee][Aa][Mm][Bb][Ll][Ee]")]
- PREAMBLE_TYPE,
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Hash, Logos)]
+pub enum RootToken {
+ #[token(r"@preamble", ignore(ascii_case))]
+ Preamble,
- #[regex(r"@[Ss][Tt][Rr][Ii][Nn][Gg]")]
- STRING_TYPE,
+ #[token(r"@string", ignore(ascii_case))]
+ String,
- #[regex(r"@[Cc][Oo][Mm][Mm][Ee][Nn][Tt]")]
- COMMENT_TYPE,
+ #[token(r"@comment", ignore(ascii_case))]
+ Comment,
- #[regex(r"@[!\$\&\*\+\-\./:;<>\?@\[\]\\\^_`\|\~a-zA-Z][!\$\&\*\+\-\./:;<>\?@\[\]\\\^_`\|\~a-zA-Z0-9]*|@")]
- ENTRY_TYPE,
+ #[regex(r"@[a-zA-Z]*")]
+ Entry,
- #[regex(r#"[^\s\{\}\(\),#"=\\]+"#)]
+ #[regex(r"[^@]+")]
#[error]
- WORD,
+ Junk,
+}
- #[token("{")]
- L_CURLY,
+impl From<RootToken> for SyntaxKind {
+ fn from(token: RootToken) -> Self {
+ match token {
+ RootToken::Preamble | RootToken::String | RootToken::Comment | RootToken::Entry => TYPE,
+ RootToken::Junk => JUNK,
+ }
+ }
+}
- #[token("}")]
- R_CURLY,
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Hash, Logos)]
+pub enum BodyToken {
+ #[regex(r"\s+")]
+ Whitespace,
+ #[token("{")]
#[token("(")]
- L_PAREN,
+ LDelim,
+ #[token("}")]
#[token(")")]
- R_PAREN,
+ RDelim,
#[token(",")]
- COMMA,
+ Comma,
- #[token("#")]
- HASH,
+ #[token("=")]
+ Eq,
- #[token("\"")]
- QUOTE,
+ #[regex(r"[^\s\(\)\{\}@,=]+")]
+ Name,
- #[token("=")]
- EQUALITY_SIGN,
+ #[error]
+ Error,
+}
- #[regex(r"\\([^\r\n]|[@a-zA-Z:_]+\*?)?")]
- COMMAND_NAME,
+impl From<BodyToken> for SyntaxKind {
+ fn from(token: BodyToken) -> Self {
+ match token {
+ BodyToken::Whitespace => WHITESPACE,
+ BodyToken::LDelim => L_DELIM,
+ BodyToken::RDelim => R_DELIM,
+ BodyToken::Comma => COMMA,
+ BodyToken::Eq => EQ,
+ BodyToken::Name => NAME,
+ BodyToken::Error => unreachable!(),
+ }
+ }
}
-#[derive(Debug, PartialEq, Eq, Clone)]
-pub struct Lexer<'a> {
- tokens: Vec<(SyntaxKind, &'a str)>,
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Hash, Logos)]
+pub enum ValueToken {
+ #[regex(r"\s+")]
+ Whitespace,
+
+ #[token("#")]
+ Pound,
+
+ #[token(",")]
+ Comma,
+
+ #[token("{")]
+ LCurly,
+
+ #[token("}")]
+ RCurly,
+
+ #[token("\"")]
+ Quote,
+
+ #[regex(r"\d+", priority = 2)]
+ Integer,
+
+ #[regex(r#"[^\s"\{\},]+"#)]
+ #[error]
+ Name,
}
-impl<'a> Lexer<'a> {
- pub fn new(text: &'a str) -> Self {
- let mut tokens = Vec::new();
- let mut lexer = LogosToken::lexer(text);
- while let Some(kind) = lexer.next() {
- tokens.push((
- unsafe { std::mem::transmute::<LogosToken, SyntaxKind>(kind) },
- lexer.slice(),
- ));
+impl From<ValueToken> for SyntaxKind {
+ fn from(token: ValueToken) -> Self {
+ match token {
+ ValueToken::Whitespace => WHITESPACE,
+ ValueToken::Pound => POUND,
+ ValueToken::Comma => COMMA,
+ ValueToken::LCurly => L_CURLY,
+ ValueToken::RCurly => R_CURLY,
+ ValueToken::Quote => QUOTE,
+ ValueToken::Integer => INTEGER,
+ ValueToken::Name => NAME,
}
- tokens.reverse();
- Self { tokens }
}
+}
- pub fn peek(&self) -> Option<SyntaxKind> {
- self.tokens.last().map(|(kind, _)| *kind)
- }
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Hash, Logos)]
+pub enum ContentToken {
+ #[regex(r"\s+")]
+ Whitespace,
+
+ #[token(",")]
+ Comma,
- pub fn consume(&mut self) -> Option<(SyntaxKind, &'a str)> {
- self.tokens.pop()
+ #[token("{")]
+ LCurly,
+
+ #[token("}")]
+ RCurly,
+
+ #[token("\"")]
+ Quote,
+
+ #[regex(r"\d+", priority = 2)]
+ Integer,
+
+ #[token(r#"~"#)]
+ Nbsp,
+
+ #[token(r#"\`"#)]
+ #[token(r#"\'"#)]
+ #[token(r#"\^"#)]
+ #[token(r#"\""#)]
+ #[token(r#"\H"#)]
+ #[token(r#"\~"#)]
+ #[token(r#"\c"#)]
+ #[token(r#"\k"#)]
+ #[token(r#"\="#)]
+ #[token(r#"\b"#)]
+ #[token(r#"\."#)]
+ #[token(r#"\d"#)]
+ #[token(r#"\r"#)]
+ #[token(r#"\u"#)]
+ #[token(r#"\v"#)]
+ #[token(r#"\t"#)]
+ AccentName,
+
+ #[regex(r"\\([^\r\n]|[@a-zA-Z:_]+\*?)?")]
+ CommandName,
+
+ #[regex(r#"[^\s"\{\}\\~,]+"#)]
+ #[error]
+ Word,
+}
+
+impl From<ContentToken> for SyntaxKind {
+ fn from(token: ContentToken) -> Self {
+ match token {
+ ContentToken::Whitespace => WHITESPACE,
+ ContentToken::Comma => COMMA,
+ ContentToken::LCurly => L_CURLY,
+ ContentToken::RCurly => R_CURLY,
+ ContentToken::Quote => QUOTE,
+ ContentToken::Integer => INTEGER,
+ ContentToken::Nbsp => NBSP,
+ ContentToken::AccentName => ACCENT_NAME,
+ ContentToken::CommandName => COMMAND_NAME,
+ ContentToken::Word => WORD,
+ }
}
}
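For context, here is a self-contained sketch of how mode-specific Logos token enums like the ones above can be driven. The RootToken and BodyToken definitions are abridged copies of the enums in the diff; the driver in main is an assumption about how such lexers might be wired together, not texlab's actual parser, and it assumes the logos 0.12 API (Logos::lexer, Lexer::slice, and Lexer::morph for switching token types over the same input):

    use logos::Logos;

    #[derive(Debug, PartialEq, Eq, Clone, Copy, Logos)]
    enum RootToken {
        #[regex(r"@[a-zA-Z]*")]
        Entry,

        // Anything that does not start an entry is junk at the top level.
        #[regex(r"[^@]+")]
        #[error]
        Junk,
    }

    #[derive(Debug, PartialEq, Eq, Clone, Copy, Logos)]
    enum BodyToken {
        #[regex(r"\s+")]
        Whitespace,

        #[token("{")]
        #[token("(")]
        LDelim,

        #[token("}")]
        #[token(")")]
        RDelim,

        #[token(",")]
        Comma,

        #[token("=")]
        Eq,

        #[regex(r"[^\s\(\)\{\}@,=]+")]
        Name,

        #[error]
        Error,
    }

    fn main() {
        let mut root = RootToken::lexer("@article{foo, title = {Bar}}");
        while let Some(token) = root.next() {
            println!("root: {:?} {:?}", token, root.slice());
            if token == RootToken::Entry {
                // Keep the current position in the input, but continue
                // lexing with the body-level token set.
                let mut body = root.morph::<BodyToken>();
                while let Some(token) = body.next() {
                    println!("body: {:?} {:?}", token, body.slice());
                }
                return;
            }
        }
    }

The sketch only shows the mode-switching idea: the top-level lexer recognizes entry boundaries, and the body-level lexer takes over from the same input position without re-scanning.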