| //! Low-level Rust lexer. |
| //! |
//! The idea with `rustc_lexer` is to make a reusable library
//! by separating out pure lexing from rustc-specific concerns, like spans,
//! error reporting, and interning. So, `rustc_lexer` operates directly on `&str`,
//! produces simple tokens that pair a type tag with the length of the token's
//! text, and does not report errors, instead storing them as flags on the token.
//!
//! Tokens produced by this lexer are not yet ready for parsing Rust syntax.
//! For that see [`rustc_parse::lexer`], which converts this basic token stream
//! into wide tokens used by the actual parser.
| //! |
| //! The purpose of this crate is to convert raw sources into a labeled sequence |
| //! of well-known token types, so building an actual Rust token stream will |
| //! be easier. |
| //! |
//! The main entity of this crate is the [`TokenKind`] enum, which represents
//! common lexeme types.
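//!
//! For example, splitting an input into tokens looks like this (a minimal
//! usage sketch, assuming the crate is available as `rustc_lexer`):
//!
//! ```
//! use rustc_lexer::{tokenize, TokenKind};
//!
//! // "let x" lexes as an identifier, a whitespace run, and another identifier;
//! // at this stage, keywords such as `let` are still plain identifiers.
//! let kinds: Vec<TokenKind> = tokenize("let x").map(|token| token.kind).collect();
//! assert_eq!(kinds, vec![TokenKind::Ident, TokenKind::Whitespace, TokenKind::Ident]);
//! ```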
| //! |
| //! [`rustc_parse::lexer`]: ../rustc_parse/lexer/index.html |
| // We want to be able to build this crate with a stable compiler, so no |
| // `#![feature]` attributes should be added. |
| |
| mod cursor; |
| pub mod unescape; |
| |
| #[cfg(test)] |
| mod tests; |
| |
| use self::LiteralKind::*; |
| use self::TokenKind::*; |
| use crate::cursor::{Cursor, EOF_CHAR}; |
| use std::convert::TryFrom; |
| |
/// Parsed token.
/// It doesn't contain information about the data that has been parsed,
/// only the type of the token and its length.
| #[derive(Debug)] |
| pub struct Token { |
| pub kind: TokenKind, |
| pub len: usize, |
| } |
| |
| impl Token { |
| fn new(kind: TokenKind, len: usize) -> Token { |
| Token { kind, len } |
| } |
| } |
| |
| /// Enum representing common lexeme types. |
| // perf note: Changing all `usize` to `u32` doesn't change performance. See #77629 |
| #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] |
| pub enum TokenKind { |
| // Multi-char tokens: |
| /// "// comment" |
| LineComment { doc_style: Option<DocStyle> }, |
| /// `/* block comment */` |
| /// |
    /// Block comments can be nested, so a sequence like `/* /* */`
    /// will not be considered terminated and will result in a parsing error.
| BlockComment { doc_style: Option<DocStyle>, terminated: bool }, |
| /// Any whitespace characters sequence. |
| Whitespace, |
| /// "ident" or "continue" |
| /// At this step keywords are also considered identifiers. |
| Ident, |
| /// Like the above, but containing invalid unicode codepoints. |
| InvalidIdent, |
| /// "r#ident" |
| RawIdent, |
| /// An unknown prefix like `foo#`, `foo'`, `foo"`. Note that only the |
| /// prefix (`foo`) is included in the token, not the separator (which is |
| /// lexed as its own distinct token). In Rust 2021 and later, reserved |
| /// prefixes are reported as errors; in earlier editions, they result in a |
| /// (allowed by default) lint, and are treated as regular identifier |
| /// tokens. |
| UnknownPrefix, |
| /// "12_u8", "1.0e-40", "b"123"". See `LiteralKind` for more details. |
| Literal { kind: LiteralKind, suffix_start: usize }, |
| /// "'a" |
| Lifetime { starts_with_number: bool }, |
| |
| // One-char tokens: |
| /// ";" |
| Semi, |
| /// "," |
| Comma, |
| /// "." |
| Dot, |
| /// "(" |
| OpenParen, |
| /// ")" |
| CloseParen, |
| /// "{" |
| OpenBrace, |
| /// "}" |
| CloseBrace, |
| /// "[" |
| OpenBracket, |
| /// "]" |
| CloseBracket, |
| /// "@" |
| At, |
| /// "#" |
| Pound, |
| /// "~" |
| Tilde, |
| /// "?" |
| Question, |
| /// ":" |
| Colon, |
| /// "$" |
| Dollar, |
| /// "=" |
| Eq, |
| /// "!" |
| Bang, |
| /// "<" |
| Lt, |
| /// ">" |
| Gt, |
| /// "-" |
| Minus, |
| /// "&" |
| And, |
| /// "|" |
| Or, |
| /// "+" |
| Plus, |
| /// "*" |
| Star, |
| /// "/" |
| Slash, |
| /// "^" |
| Caret, |
| /// "%" |
| Percent, |
| |
| /// Unknown token, not expected by the lexer, e.g. "№" |
| Unknown, |
| } |
| |
| #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] |
| pub enum DocStyle { |
| Outer, |
| Inner, |
| } |
| |
| #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] |
| pub enum LiteralKind { |
| /// "12_u8", "0o100", "0b120i99" |
| Int { base: Base, empty_int: bool }, |
| /// "12.34f32", "0b100.100" |
| Float { base: Base, empty_exponent: bool }, |
| /// "'a'", "'\\'", "'''", "';" |
| Char { terminated: bool }, |
| /// "b'a'", "b'\\'", "b'''", "b';" |
| Byte { terminated: bool }, |
| /// ""abc"", ""abc" |
| Str { terminated: bool }, |
| /// "b"abc"", "b"abc" |
| ByteStr { terminated: bool }, |
| /// "r"abc"", "r#"abc"#", "r####"ab"###"c"####", "r#"a" |
| RawStr { n_hashes: u16, err: Option<RawStrError> }, |
| /// "br"abc"", "br#"abc"#", "br####"ab"###"c"####", "br#"a" |
| RawByteStr { n_hashes: u16, err: Option<RawStrError> }, |
| } |
| |
/// Error produced when validating a raw string. Represents cases like:
/// - `r##~"abcde"##`: `InvalidStarter`
/// - `r###"abcde"##`: `NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11) }`
/// - Too many `#`s (>65535): `TooManyDelimiters`
| // perf note: It doesn't matter that this makes `Token` 36 bytes bigger. See #77629 |
| #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] |
| pub enum RawStrError { |
    /// Non `#` characters exist between `r` and `"`, e.g. `r#~"..`
    InvalidStarter { bad_char: char },
    /// The string was never terminated. `possible_terminator_offset` is the number
    /// of characters after `r` or `br` where the author may have intended to
    /// terminate it.
    NoTerminator { expected: usize, found: usize, possible_terminator_offset: Option<usize> },
| /// More than 65535 `#`s exist. |
| TooManyDelimiters { found: usize }, |
| } |
| |
| /// Base of numeric literal encoding according to its prefix. |
| #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] |
| pub enum Base { |
| /// Literal starts with "0b". |
| Binary, |
| /// Literal starts with "0o". |
| Octal, |
| /// Literal starts with "0x". |
| Hexadecimal, |
| /// Literal doesn't contain a prefix. |
| Decimal, |
| } |
| |
/// `rustc` allows files to have a shebang, e.g. "#!/usr/bin/rustrun",
/// but a shebang isn't part of Rust syntax.
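///
/// A small illustration (the exact lengths below follow from the rule above):
///
/// ```
/// use rustc_lexer::strip_shebang;
///
/// // The shebang line is `#!` plus `/bin/bash`: 11 bytes to strip.
/// assert_eq!(strip_shebang("#!/bin/bash\nfn main() {}"), Some(11));
/// // `#![...]` is an inner attribute, not a shebang, so nothing is stripped.
/// assert_eq!(strip_shebang("#![allow(unused)]"), None);
/// ```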
| pub fn strip_shebang(input: &str) -> Option<usize> { |
| // Shebang must start with `#!` literally, without any preceding whitespace. |
| // For simplicity we consider any line starting with `#!` a shebang, |
| // regardless of restrictions put on shebangs by specific platforms. |
| if let Some(input_tail) = input.strip_prefix("#!") { |
| // Ok, this is a shebang but if the next non-whitespace token is `[`, |
| // then it may be valid Rust code, so consider it Rust code. |
| let next_non_whitespace_token = tokenize(input_tail).map(|tok| tok.kind).find(|tok| { |
| !matches!( |
| tok, |
| TokenKind::Whitespace |
| | TokenKind::LineComment { doc_style: None } |
| | TokenKind::BlockComment { doc_style: None, .. } |
| ) |
| }); |
| if next_non_whitespace_token != Some(TokenKind::OpenBracket) { |
| // No other choice than to consider this a shebang. |
| return Some(2 + input_tail.lines().next().unwrap_or_default().len()); |
| } |
| } |
| None |
| } |
| |
| /// Parses the first token from the provided input string. |
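///
/// A brief sketch of the contract (the input must be non-empty):
///
/// ```
/// use rustc_lexer::{first_token, TokenKind};
///
/// let token = first_token("foo bar");
/// // Only "foo" is consumed; a caller advances by `token.len` to continue.
/// assert_eq!(token.len, 3);
/// assert_eq!(token.kind, TokenKind::Ident);
/// ```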
| pub fn first_token(input: &str) -> Token { |
| debug_assert!(!input.is_empty()); |
| Cursor::new(input).advance_token() |
| } |
| |
| /// Creates an iterator that produces tokens from the input string. |
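///
/// Tokens store only their length, so a caller that needs the token text must
/// track offsets itself; a minimal sketch:
///
/// ```
/// use rustc_lexer::tokenize;
///
/// let input = "let x = 7;";
/// let mut offset = 0;
/// for token in tokenize(input) {
///     // Recover the token's text by slicing the original input.
///     let text = &input[offset..offset + token.len];
///     offset += token.len;
///     println!("{:?} {:?}", token.kind, text);
/// }
/// ```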
| pub fn tokenize(mut input: &str) -> impl Iterator<Item = Token> + '_ { |
| std::iter::from_fn(move || { |
| if input.is_empty() { |
| return None; |
| } |
| let token = first_token(input); |
| input = &input[token.len..]; |
| Some(token) |
| }) |
| } |
| |
/// True if `c` is considered whitespace according to the Rust language definition.
/// See [Rust language reference](https://doc.rust-lang.org/reference/whitespace.html)
/// for definitions of these classes.
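///
/// Note that `Pattern_White_Space` differs from "looks blank"; for example:
///
/// ```
/// use rustc_lexer::is_whitespace;
///
/// assert!(is_whitespace('\u{2028}')); // LINE SEPARATOR
/// assert!(!is_whitespace('\u{00A0}')); // NO-BREAK SPACE
/// ```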
| pub fn is_whitespace(c: char) -> bool { |
| // This is Pattern_White_Space. |
| // |
| // Note that this set is stable (ie, it doesn't change with different |
| // Unicode versions), so it's ok to just hard-code the values. |
| |
| matches!( |
| c, |
| // Usual ASCII suspects |
| '\u{0009}' // \t |
| | '\u{000A}' // \n |
| | '\u{000B}' // vertical tab |
| | '\u{000C}' // form feed |
| | '\u{000D}' // \r |
| | '\u{0020}' // space |
| |
| // NEXT LINE from latin1 |
| | '\u{0085}' |
| |
| // Bidi markers |
| | '\u{200E}' // LEFT-TO-RIGHT MARK |
| | '\u{200F}' // RIGHT-TO-LEFT MARK |
| |
| // Dedicated whitespace characters from Unicode |
| | '\u{2028}' // LINE SEPARATOR |
| | '\u{2029}' // PARAGRAPH SEPARATOR |
| ) |
| } |
| |
| /// True if `c` is valid as a first character of an identifier. |
| /// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for |
/// a formal definition of a valid identifier name.
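///
/// For example:
///
/// ```
/// use rustc_lexer::is_id_start;
///
/// assert!(is_id_start('_'));
/// assert!(!is_id_start('1')); // digits may continue, but not start, an identifier
/// ```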
| pub fn is_id_start(c: char) -> bool { |
| // This is XID_Start OR '_' (which formally is not a XID_Start). |
| c == '_' || unicode_xid::UnicodeXID::is_xid_start(c) |
| } |
| |
| /// True if `c` is valid as a non-first character of an identifier. |
| /// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for |
/// a formal definition of a valid identifier name.
| pub fn is_id_continue(c: char) -> bool { |
| unicode_xid::UnicodeXID::is_xid_continue(c) |
| } |
| |
/// True if the passed string is lexically an identifier.
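///
/// A few illustrative cases:
///
/// ```
/// use rustc_lexer::is_ident;
///
/// assert!(is_ident("_foo1"));
/// assert!(!is_ident("1foo")); // must not start with a digit
/// assert!(!is_ident("")); // the empty string is not an identifier
/// ```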
| pub fn is_ident(string: &str) -> bool { |
| let mut chars = string.chars(); |
| if let Some(start) = chars.next() { |
| is_id_start(start) && chars.all(is_id_continue) |
| } else { |
| false |
| } |
| } |
| |
| impl Cursor<'_> { |
| /// Parses a token from the input string. |
| fn advance_token(&mut self) -> Token { |
| let first_char = self.bump().unwrap(); |
| let token_kind = match first_char { |
| // Slash, comment or block comment. |
| '/' => match self.first() { |
| '/' => self.line_comment(), |
| '*' => self.block_comment(), |
| _ => Slash, |
| }, |
| |
| // Whitespace sequence. |
| c if is_whitespace(c) => self.whitespace(), |
| |
| // Raw identifier, raw string literal or identifier. |
| 'r' => match (self.first(), self.second()) { |
| ('#', c1) if is_id_start(c1) => self.raw_ident(), |
| ('#', _) | ('"', _) => { |
| let (n_hashes, err) = self.raw_double_quoted_string(1); |
| let suffix_start = self.len_consumed(); |
| if err.is_none() { |
| self.eat_literal_suffix(); |
| } |
| let kind = RawStr { n_hashes, err }; |
| Literal { kind, suffix_start } |
| } |
| _ => self.ident_or_unknown_prefix(), |
| }, |
| |
| // Byte literal, byte string literal, raw byte string literal or identifier. |
| 'b' => match (self.first(), self.second()) { |
| ('\'', _) => { |
| self.bump(); |
| let terminated = self.single_quoted_string(); |
| let suffix_start = self.len_consumed(); |
| if terminated { |
| self.eat_literal_suffix(); |
| } |
| let kind = Byte { terminated }; |
| Literal { kind, suffix_start } |
| } |
| ('"', _) => { |
| self.bump(); |
| let terminated = self.double_quoted_string(); |
| let suffix_start = self.len_consumed(); |
| if terminated { |
| self.eat_literal_suffix(); |
| } |
| let kind = ByteStr { terminated }; |
| Literal { kind, suffix_start } |
| } |
| ('r', '"') | ('r', '#') => { |
| self.bump(); |
| let (n_hashes, err) = self.raw_double_quoted_string(2); |
| let suffix_start = self.len_consumed(); |
| if err.is_none() { |
| self.eat_literal_suffix(); |
| } |
| let kind = RawByteStr { n_hashes, err }; |
| Literal { kind, suffix_start } |
| } |
| _ => self.ident_or_unknown_prefix(), |
| }, |
| |
            // Identifier (this should be checked after other variants that can
            // start as identifiers).
| c if is_id_start(c) => self.ident_or_unknown_prefix(), |
| |
| // Numeric literal. |
| c @ '0'..='9' => { |
| let literal_kind = self.number(c); |
| let suffix_start = self.len_consumed(); |
| self.eat_literal_suffix(); |
| TokenKind::Literal { kind: literal_kind, suffix_start } |
| } |
| |
| // One-symbol tokens. |
| ';' => Semi, |
| ',' => Comma, |
| '.' => Dot, |
| '(' => OpenParen, |
| ')' => CloseParen, |
| '{' => OpenBrace, |
| '}' => CloseBrace, |
| '[' => OpenBracket, |
| ']' => CloseBracket, |
| '@' => At, |
| '#' => Pound, |
| '~' => Tilde, |
| '?' => Question, |
| ':' => Colon, |
| '$' => Dollar, |
| '=' => Eq, |
| '!' => Bang, |
| '<' => Lt, |
| '>' => Gt, |
| '-' => Minus, |
| '&' => And, |
| '|' => Or, |
| '+' => Plus, |
| '*' => Star, |
| '^' => Caret, |
| '%' => Percent, |
| |
| // Lifetime or character literal. |
| '\'' => self.lifetime_or_char(), |
| |
| // String literal. |
| '"' => { |
| let terminated = self.double_quoted_string(); |
| let suffix_start = self.len_consumed(); |
| if terminated { |
| self.eat_literal_suffix(); |
| } |
| let kind = Str { terminated }; |
| Literal { kind, suffix_start } |
| } |
| // Identifier starting with an emoji. Only lexed for graceful error recovery. |
| c if !c.is_ascii() && unic_emoji_char::is_emoji(c) => { |
| self.fake_ident_or_unknown_prefix() |
| } |
| _ => Unknown, |
| }; |
| Token::new(token_kind, self.len_consumed()) |
| } |
| |
| fn line_comment(&mut self) -> TokenKind { |
| debug_assert!(self.prev() == '/' && self.first() == '/'); |
| self.bump(); |
| |
| let doc_style = match self.first() { |
| // `//!` is an inner line doc comment. |
| '!' => Some(DocStyle::Inner), |
| // `////` (more than 3 slashes) is not considered a doc comment. |
| '/' if self.second() != '/' => Some(DocStyle::Outer), |
| _ => None, |
| }; |
| |
| self.eat_while(|c| c != '\n'); |
| LineComment { doc_style } |
| } |
| |
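    // Nesting sketch: `/* /* */ */` closes both levels and is terminated, while
    // `/* /* */` leaves `depth == 1` and is reported as unterminated.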
| fn block_comment(&mut self) -> TokenKind { |
| debug_assert!(self.prev() == '/' && self.first() == '*'); |
| self.bump(); |
| |
| let doc_style = match self.first() { |
| // `/*!` is an inner block doc comment. |
| '!' => Some(DocStyle::Inner), |
| // `/***` (more than 2 stars) is not considered a doc comment. |
| // `/**/` is not considered a doc comment. |
| '*' if !matches!(self.second(), '*' | '/') => Some(DocStyle::Outer), |
| _ => None, |
| }; |
| |
| let mut depth = 1usize; |
| while let Some(c) = self.bump() { |
| match c { |
| '/' if self.first() == '*' => { |
| self.bump(); |
| depth += 1; |
| } |
| '*' if self.first() == '/' => { |
| self.bump(); |
| depth -= 1; |
| if depth == 0 { |
| // This block comment is closed, so for a construction like "/* */ */" |
| // there will be a successfully parsed block comment "/* */" |
| // and " */" will be processed separately. |
| break; |
| } |
| } |
| _ => (), |
| } |
| } |
| |
| BlockComment { doc_style, terminated: depth == 0 } |
| } |
| |
| fn whitespace(&mut self) -> TokenKind { |
| debug_assert!(is_whitespace(self.prev())); |
| self.eat_while(is_whitespace); |
| Whitespace |
| } |
| |
| fn raw_ident(&mut self) -> TokenKind { |
| debug_assert!(self.prev() == 'r' && self.first() == '#' && is_id_start(self.second())); |
| // Eat "#" symbol. |
| self.bump(); |
| // Eat the identifier part of RawIdent. |
| self.eat_identifier(); |
| RawIdent |
| } |
| |
| fn ident_or_unknown_prefix(&mut self) -> TokenKind { |
| debug_assert!(is_id_start(self.prev())); |
        // The start is already eaten, eat the rest of the identifier.
| self.eat_while(is_id_continue); |
| // Known prefixes must have been handled earlier. So if |
| // we see a prefix here, it is definitely an unknown prefix. |
| match self.first() { |
| '#' | '"' | '\'' => UnknownPrefix, |
| c if !c.is_ascii() && unic_emoji_char::is_emoji(c) => { |
| self.fake_ident_or_unknown_prefix() |
| } |
| _ => Ident, |
| } |
| } |
| |
| fn fake_ident_or_unknown_prefix(&mut self) -> TokenKind { |
        // The start is already eaten, eat the rest of the identifier.
| self.eat_while(|c| { |
| unicode_xid::UnicodeXID::is_xid_continue(c) |
| || (!c.is_ascii() && unic_emoji_char::is_emoji(c)) |
| || c == '\u{200d}' |
| }); |
| // Known prefixes must have been handled earlier. So if |
| // we see a prefix here, it is definitely an unknown prefix. |
| match self.first() { |
| '#' | '"' | '\'' => UnknownPrefix, |
| _ => InvalidIdent, |
| } |
| } |
| |
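    // A sketch of outcomes, per the logic below:
    //   "0x1F" -> Int { base: Hexadecimal, empty_int: false }
    //   "0b"   -> Int { base: Binary, empty_int: true }
    //   "1e"   -> Float { base: Decimal, empty_exponent: true }
    //   "0.5"  -> Float { base: Decimal, empty_exponent: false }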
| fn number(&mut self, first_digit: char) -> LiteralKind { |
| debug_assert!('0' <= self.prev() && self.prev() <= '9'); |
| let mut base = Base::Decimal; |
| if first_digit == '0' { |
| // Attempt to parse encoding base. |
| let has_digits = match self.first() { |
| 'b' => { |
| base = Base::Binary; |
| self.bump(); |
| self.eat_decimal_digits() |
| } |
| 'o' => { |
| base = Base::Octal; |
| self.bump(); |
| self.eat_decimal_digits() |
| } |
| 'x' => { |
| base = Base::Hexadecimal; |
| self.bump(); |
| self.eat_hexadecimal_digits() |
| } |
| // Not a base prefix. |
| '0'..='9' | '_' | '.' | 'e' | 'E' => { |
| self.eat_decimal_digits(); |
| true |
| } |
| // Just a 0. |
| _ => return Int { base, empty_int: false }, |
| }; |
| // Base prefix was provided, but there were no digits |
| // after it, e.g. "0x". |
| if !has_digits { |
| return Int { base, empty_int: true }; |
| } |
| } else { |
| // No base prefix, parse number in the usual way. |
| self.eat_decimal_digits(); |
| }; |
| |
| match self.first() { |
| // Don't be greedy if this is actually an |
| // integer literal followed by field/method access or a range pattern |
| // (`0..2` and `12.foo()`) |
| '.' if self.second() != '.' && !is_id_start(self.second()) => { |
                // There might be a fractional part after the `.`, and if there
                // is, it must start with a digit.
| self.bump(); |
| let mut empty_exponent = false; |
| if self.first().is_digit(10) { |
| self.eat_decimal_digits(); |
| match self.first() { |
| 'e' | 'E' => { |
| self.bump(); |
| empty_exponent = !self.eat_float_exponent(); |
| } |
| _ => (), |
| } |
| } |
| Float { base, empty_exponent } |
| } |
| 'e' | 'E' => { |
| self.bump(); |
| let empty_exponent = !self.eat_float_exponent(); |
| Float { base, empty_exponent } |
| } |
| _ => Int { base, empty_int: false }, |
| } |
| } |
| |
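    // Disambiguation sketch: `'a'` and `'ab'` lex as char literals (the latter
    // invalid, but still terminated), while `'a` and `'1` lex as lifetimes,
    // with `starts_with_number` set for `'1`.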
| fn lifetime_or_char(&mut self) -> TokenKind { |
| debug_assert!(self.prev() == '\''); |
| |
| let can_be_a_lifetime = if self.second() == '\'' { |
| // It's surely not a lifetime. |
| false |
| } else { |
| // If the first symbol is valid for identifier, it can be a lifetime. |
| // Also check if it's a number for a better error reporting (so '0 will |
| // be reported as invalid lifetime and not as unterminated char literal). |
| is_id_start(self.first()) || self.first().is_digit(10) |
| }; |
| |
| if !can_be_a_lifetime { |
| let terminated = self.single_quoted_string(); |
| let suffix_start = self.len_consumed(); |
| if terminated { |
| self.eat_literal_suffix(); |
| } |
| let kind = Char { terminated }; |
| return Literal { kind, suffix_start }; |
| } |
| |
| // Either a lifetime or a character literal with |
| // length greater than 1. |
| |
| let starts_with_number = self.first().is_digit(10); |
| |
| // Skip the literal contents. |
| // First symbol can be a number (which isn't a valid identifier start), |
| // so skip it without any checks. |
| self.bump(); |
| self.eat_while(is_id_continue); |
| |
| // Check if after skipping literal contents we've met a closing |
| // single quote (which means that user attempted to create a |
| // string with single quotes). |
| if self.first() == '\'' { |
| self.bump(); |
| let kind = Char { terminated: true }; |
| Literal { kind, suffix_start: self.len_consumed() } |
| } else { |
| Lifetime { starts_with_number } |
| } |
| } |
| |
| fn single_quoted_string(&mut self) -> bool { |
| debug_assert!(self.prev() == '\''); |
| // Check if it's a one-symbol literal. |
| if self.second() == '\'' && self.first() != '\\' { |
| self.bump(); |
| self.bump(); |
| return true; |
| } |
| |
| // Literal has more than one symbol. |
| |
| // Parse until either quotes are terminated or error is detected. |
| loop { |
| match self.first() { |
| // Quotes are terminated, finish parsing. |
| '\'' => { |
| self.bump(); |
| return true; |
| } |
                // Probably the beginning of a comment, which we don't want to
                // include in the error report.
| '/' => break, |
| // Newline without following '\'' means unclosed quote, stop parsing. |
| '\n' if self.second() != '\'' => break, |
| // End of file, stop parsing. |
| EOF_CHAR if self.is_eof() => break, |
                // An escape consumes the backslash and the following character, so bump twice.
| '\\' => { |
| self.bump(); |
| self.bump(); |
| } |
| // Skip the character. |
| _ => { |
| self.bump(); |
| } |
| } |
| } |
| // String was not terminated. |
| false |
| } |
| |
    /// Eats a double-quoted string and returns `true`
    /// if the string is terminated.
| fn double_quoted_string(&mut self) -> bool { |
| debug_assert!(self.prev() == '"'); |
| while let Some(c) = self.bump() { |
| match c { |
| '"' => { |
| return true; |
| } |
| '\\' if self.first() == '\\' || self.first() == '"' => { |
| // Bump again to skip escaped character. |
| self.bump(); |
| } |
| _ => (), |
| } |
| } |
| // End of file reached. |
| false |
| } |
| |
    /// Eats a raw double-quoted string and returns `n_hashes` and any error encountered.
| fn raw_double_quoted_string(&mut self, prefix_len: usize) -> (u16, Option<RawStrError>) { |
| // Wrap the actual function to handle the error with too many hashes. |
| // This way, it eats the whole raw string. |
| let (n_hashes, err) = self.raw_string_unvalidated(prefix_len); |
| // Only up to 65535 `#`s are allowed in raw strings |
| match u16::try_from(n_hashes) { |
| Ok(num) => (num, err), |
| // We lie about the number of hashes here :P |
| Err(_) => (0, Some(RawStrError::TooManyDelimiters { found: n_hashes })), |
| } |
| } |
| |
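    // Hash-counting sketch: `r###"abc"###` yields `(3, None)`, while `r#~"..."`
    // returns early with `InvalidStarter { bad_char: '~' }`.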
| fn raw_string_unvalidated(&mut self, prefix_len: usize) -> (usize, Option<RawStrError>) { |
| debug_assert!(self.prev() == 'r'); |
| let start_pos = self.len_consumed(); |
| let mut possible_terminator_offset = None; |
| let mut max_hashes = 0; |
| |
| // Count opening '#' symbols. |
| let mut eaten = 0; |
| while self.first() == '#' { |
| eaten += 1; |
| self.bump(); |
| } |
| let n_start_hashes = eaten; |
| |
        // Check that the string starts with a double quote.
| match self.bump() { |
| Some('"') => (), |
| c => { |
| let c = c.unwrap_or(EOF_CHAR); |
| return (n_start_hashes, Some(RawStrError::InvalidStarter { bad_char: c })); |
| } |
| } |
| |
        // Skip the string contents; on each `"` met, check whether the `#`s that
        // follow terminate the raw string.
| loop { |
| self.eat_while(|c| c != '"'); |
| |
| if self.is_eof() { |
| return ( |
| n_start_hashes, |
| Some(RawStrError::NoTerminator { |
| expected: n_start_hashes, |
| found: max_hashes, |
| possible_terminator_offset, |
| }), |
| ); |
| } |
| |
| // Eat closing double quote. |
| self.bump(); |
| |
| // Check that amount of closing '#' symbols |
| // is equal to the amount of opening ones. |
| // Note that this will not consume extra trailing `#` characters: |
| // `r###"abcde"####` is lexed as a `RawStr { n_hashes: 3 }` |
| // followed by a `#` token. |
| let mut n_end_hashes = 0; |
| while self.first() == '#' && n_end_hashes < n_start_hashes { |
| n_end_hashes += 1; |
| self.bump(); |
| } |
| |
| if n_end_hashes == n_start_hashes { |
| return (n_start_hashes, None); |
| } else if n_end_hashes > max_hashes { |
| // Keep track of possible terminators to give a hint about |
| // where there might be a missing terminator |
| possible_terminator_offset = |
| Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len); |
| max_hashes = n_end_hashes; |
| } |
| } |
| } |
| |
| fn eat_decimal_digits(&mut self) -> bool { |
| let mut has_digits = false; |
| loop { |
| match self.first() { |
| '_' => { |
| self.bump(); |
| } |
| '0'..='9' => { |
| has_digits = true; |
| self.bump(); |
| } |
| _ => break, |
| } |
| } |
| has_digits |
| } |
| |
| fn eat_hexadecimal_digits(&mut self) -> bool { |
| let mut has_digits = false; |
| loop { |
| match self.first() { |
| '_' => { |
| self.bump(); |
| } |
| '0'..='9' | 'a'..='f' | 'A'..='F' => { |
| has_digits = true; |
| self.bump(); |
| } |
| _ => break, |
| } |
| } |
| has_digits |
| } |
| |
    /// Eats the float exponent. Returns `true` if at least one digit was consumed,
    /// and `false` otherwise.
| fn eat_float_exponent(&mut self) -> bool { |
| debug_assert!(self.prev() == 'e' || self.prev() == 'E'); |
| if self.first() == '-' || self.first() == '+' { |
| self.bump(); |
| } |
| self.eat_decimal_digits() |
| } |
| |
| // Eats the suffix of the literal, e.g. "_u8". |
| fn eat_literal_suffix(&mut self) { |
| self.eat_identifier(); |
| } |
| |
| // Eats the identifier. |
| fn eat_identifier(&mut self) { |
| if !is_id_start(self.first()) { |
| return; |
| } |
| self.bump(); |
| |
| self.eat_while(is_id_continue); |
| } |
| |
| /// Eats symbols while predicate returns true or until the end of file is reached. |
| fn eat_while(&mut self, mut predicate: impl FnMut(char) -> bool) { |
| while predicate(self.first()) && !self.is_eof() { |
| self.bump(); |
| } |
| } |
| } |