diff options
author | Titus Wormer <tituswormer@gmail.com> | 2022-08-11 13:45:24 +0200 |
---|---|---|
committer | Titus Wormer <tituswormer@gmail.com> | 2022-08-11 13:45:24 +0200 |
commit | 6eb2f644057f371841fe25330a57ee185f91c7af (patch) | |
tree | 7b4d02586339d1a7f82104b4473d9ac243b3abf9 /src/content/text.rs | |
parent | 2d35cbfceace81a217cd0fbdae7a8777c7a6465e (diff) | |
download | markdown-rs-6eb2f644057f371841fe25330a57ee185f91c7af.tar.gz markdown-rs-6eb2f644057f371841fe25330a57ee185f91c7af.tar.bz2 markdown-rs-6eb2f644057f371841fe25330a57ee185f91c7af.zip |
Refactor to move some code to `state.rs`
Diffstat (limited to 'src/content/text.rs')
-rw-r--r-- | src/content/text.rs | 73 |
1 file changed, 35 insertions(+), 38 deletions(-)
diff --git a/src/content/text.rs b/src/content/text.rs index 4e93779..1b3890e 100644 --- a/src/content/text.rs +++ b/src/content/text.rs @@ -21,7 +21,8 @@ //! > [whitespace][crate::construct::partial_whitespace]. use crate::construct::partial_whitespace::resolve_whitespace; -use crate::tokenizer::{State, StateName, Tokenizer}; +use crate::state::{Name, State}; +use crate::tokenizer::Tokenizer; const MARKERS: [u8; 9] = [ b'!', // `label_start_image` @@ -39,7 +40,7 @@ const MARKERS: [u8; 9] = [ pub fn start(tokenizer: &mut Tokenizer) -> State { tokenizer.register_resolver("whitespace".to_string(), Box::new(resolve)); tokenizer.tokenize_state.markers = &MARKERS; - State::Retry(StateName::TextBefore) + State::Retry(Name::TextBefore) } /// Before text. @@ -47,75 +48,71 @@ pub fn before(tokenizer: &mut Tokenizer) -> State { match tokenizer.current { None => State::Ok, Some(b'!') => tokenizer.attempt( - StateName::LabelStartImageStart, - State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeData), + Name::LabelStartImageStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeData), ), Some(b'&') => tokenizer.attempt( - StateName::CharacterReferenceStart, - State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeData), + Name::CharacterReferenceStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeData), ), Some(b'*' | b'_') => tokenizer.attempt( - StateName::AttentionStart, - State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeData), + Name::AttentionStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeData), ), // `autolink`, `html_text` (order does not matter) Some(b'<') => tokenizer.attempt( - StateName::AutolinkStart, - State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeHtml), + Name::AutolinkStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeHtml), ), Some(b'[') => tokenizer.attempt( - StateName::LabelStartLinkStart, - 
State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeData), + Name::LabelStartLinkStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeData), ), Some(b'\\') => tokenizer.attempt( - StateName::CharacterEscapeStart, - State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeHardBreakEscape), + Name::CharacterEscapeStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeHardBreakEscape), ), Some(b']') => tokenizer.attempt( - StateName::LabelEndStart, - State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeData), + Name::LabelEndStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeData), ), Some(b'`') => tokenizer.attempt( - StateName::CodeTextStart, - State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeData), + Name::CodeTextStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeData), ), - _ => State::Retry(StateName::TextBeforeData), + _ => State::Retry(Name::TextBeforeData), } } /// At `<`, which wasn’t an autolink: before HTML? pub fn before_html(tokenizer: &mut Tokenizer) -> State { tokenizer.attempt( - StateName::HtmlTextStart, - State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeData), + Name::HtmlTextStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeData), ) } /// At `\`, which wasn’t a character escape: before a hard break? pub fn before_hard_break_escape(tokenizer: &mut Tokenizer) -> State { tokenizer.attempt( - StateName::HardBreakEscapeStart, - State::Next(StateName::TextBefore), - State::Next(StateName::TextBeforeData), + Name::HardBreakEscapeStart, + State::Next(Name::TextBefore), + State::Next(Name::TextBeforeData), ) } /// At data. 
pub fn before_data(tokenizer: &mut Tokenizer) -> State { - tokenizer.attempt( - StateName::DataStart, - State::Next(StateName::TextBefore), - State::Nok, - ) + tokenizer.attempt(Name::DataStart, State::Next(Name::TextBefore), State::Nok) } /// Resolve whitespace. |