author    Titus Wormer <tituswormer@gmail.com>  2022-08-10 09:16:36 +0200
committer Titus Wormer <tituswormer@gmail.com>  2022-08-10 09:16:41 +0200
commit    8162222295d71ea7fd9270c7b3b9497b91db3f1f (patch)
tree      77a42dab6775450836ba0ca1b5d0d2360e506d52 /src/construct
parent    ed2e62f99ca9cf594c677e47df9d954309b43294 (diff)
Rename `State::Fn` to `State::Next`
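
This commit is a mechanical rename across the constructs: every `State::Fn(StateName::X)` return becomes `State::Next(StateName::X)`, and the local `state_name` bindings that feed `attempt`/`check` are shortened to `name`. As a minimal sketch of what the renamed variant expresses, assuming the rough shape of the tokenizer types (the real definitions live in `src/tokenizer.rs`; only the variants visible in this diff are shown):

```rust
/// Names of state functions (sketch; the real enum has one variant
/// per state function, e.g. `AttentionInside`).
enum StateName {
    AttentionInside,
}

/// Result of a state function (sketch).
enum State {
    /// The state succeeded.
    Ok,
    /// The state failed.
    Nok,
    /// Continue in the named state at the next byte; this variant was
    /// previously spelled `State::Fn` and is now `State::Next`.
    Next(StateName),
}
```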
Diffstat (limited to 'src/construct')
-rw-r--r--  src/construct/attention.rs                     |   2
-rw-r--r--  src/construct/autolink.rs                      |  20
-rw-r--r--  src/construct/blank_line.rs                    |   8
-rw-r--r--  src/construct/block_quote.rs                   |  16
-rw-r--r--  src/construct/character_escape.rs              |   2
-rw-r--r--  src/construct/character_reference.rs           |   8
-rw-r--r--  src/construct/code_fenced.rs                   |  64
-rw-r--r--  src/construct/code_indented.rs                 |  30
-rw-r--r--  src/construct/code_text.rs                     |   8
-rw-r--r--  src/construct/definition.rs                    |  52
-rw-r--r--  src/construct/hard_break_escape.rs             |   2
-rw-r--r--  src/construct/heading_atx.rs                   |  30
-rw-r--r--  src/construct/heading_setext.rs                |  16
-rw-r--r--  src/construct/html_flow.rs                     | 100
-rw-r--r--  src/construct/html_text.rs                     |  89
-rw-r--r--  src/construct/label_end.rs                     |  50
-rw-r--r--  src/construct/label_start_image.rs             |   2
-rw-r--r--  src/construct/list.rs                          |  40
-rw-r--r--  src/construct/paragraph.rs                     |   2
-rw-r--r--  src/construct/partial_bom.rs                   |   2
-rw-r--r--  src/construct/partial_data.rs                  |   6
-rw-r--r--  src/construct/partial_destination.rs           |  18
-rw-r--r--  src/construct/partial_label.rs                 |  14
-rw-r--r--  src/construct/partial_non_lazy_continuation.rs |   2
-rw-r--r--  src/construct/partial_space_or_tab.rs          |  20
-rw-r--r--  src/construct/partial_title.rs                 |  14
-rw-r--r--  src/construct/thematic_break.rs                |  16
27 files changed, 309 insertions(+), 324 deletions(-)
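
Most hunks take one of two shapes: a bare `State::Fn(...)` at the end of a match arm becomes `State::Next(...)`, or the ok/nok continuations passed to `tokenizer.attempt` and `tokenizer.check` are rewrapped. A representative sketch of the second shape, adapted from the `blank_line.rs` hunk below (the tokenizer imports match the diff; the `space_or_tab` import path is an assumption):

```rust
use crate::construct::partial_space_or_tab::space_or_tab;
use crate::tokenizer::{State, StateName, Tokenizer};

/// Start of a blank line: try to eat optional whitespace, then land in
/// `BlankLineAfter` whether the whitespace attempt succeeds or fails.
pub fn start(tokenizer: &mut Tokenizer) -> State {
    let name = space_or_tab(tokenizer);
    tokenizer.attempt(
        name,
        State::Next(StateName::BlankLineAfter),
        State::Next(StateName::BlankLineAfter),
    )
}
```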
diff --git a/src/construct/attention.rs b/src/construct/attention.rs
index 5a98a89..ff33f97 100644
--- a/src/construct/attention.rs
+++ b/src/construct/attention.rs
@@ -136,7 +136,7 @@ pub fn inside(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'*' | b'_') if tokenizer.current.unwrap() == tokenizer.tokenize_state.marker => {
tokenizer.consume();
- State::Fn(StateName::AttentionInside)
+ State::Next(StateName::AttentionInside)
}
_ => {
tokenizer.exit(Token::AttentionSequence);
diff --git a/src/construct/autolink.rs b/src/construct/autolink.rs
index 15bfac1..f1b92d9 100644
--- a/src/construct/autolink.rs
+++ b/src/construct/autolink.rs
@@ -121,7 +121,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.consume();
tokenizer.exit(Token::AutolinkMarker);
tokenizer.enter(Token::AutolinkProtocol);
- State::Fn(StateName::AutolinkOpen)
+ State::Next(StateName::AutolinkOpen)
}
_ => State::Nok,
}
@@ -140,7 +140,7 @@ pub fn open(tokenizer: &mut Tokenizer) -> State {
// ASCII alphabetic.
Some(b'A'..=b'Z' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::AutolinkSchemeOrEmailAtext)
+ State::Next(StateName::AutolinkSchemeOrEmailAtext)
}
_ => email_atext(tokenizer),
}
@@ -179,7 +179,7 @@ pub fn scheme_inside_or_email_atext(tokenizer: &mut Tokenizer) -> State {
Some(b':') => {
tokenizer.consume();
tokenizer.tokenize_state.size = 0;
- State::Fn(StateName::AutolinkUrlInside)
+ State::Next(StateName::AutolinkUrlInside)
}
// ASCII alphanumeric and `+`, `-`, and `.`.
Some(b'+' | b'-' | b'.' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
@@ -187,7 +187,7 @@ pub fn scheme_inside_or_email_atext(tokenizer: &mut Tokenizer) -> State {
{
tokenizer.tokenize_state.size += 1;
tokenizer.consume();
- State::Fn(StateName::AutolinkSchemeInsideOrEmailAtext)
+ State::Next(StateName::AutolinkSchemeInsideOrEmailAtext)
}
_ => {
tokenizer.tokenize_state.size = 0;
@@ -212,7 +212,7 @@ pub fn url_inside(tokenizer: &mut Tokenizer) -> State {
None | Some(b'\0'..=0x1F | b' ' | b'<' | 0x7F) => State::Nok,
Some(_) => {
tokenizer.consume();
- State::Fn(StateName::AutolinkUrlInside)
+ State::Next(StateName::AutolinkUrlInside)
}
}
}
@@ -227,7 +227,7 @@ pub fn email_atext(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'@') => {
tokenizer.consume();
- State::Fn(StateName::AutolinkEmailAtSignOrDot)
+ State::Next(StateName::AutolinkEmailAtSignOrDot)
}
// ASCII atext.
//
@@ -250,7 +250,7 @@ pub fn email_atext(tokenizer: &mut Tokenizer) -> State {
b'#'..=b'\'' | b'*' | b'+' | b'-'..=b'9' | b'=' | b'?' | b'A'..=b'Z' | b'^'..=b'~',
) => {
tokenizer.consume();
- State::Fn(StateName::AutolinkEmailAtext)
+ State::Next(StateName::AutolinkEmailAtext)
}
_ => State::Nok,
}
@@ -281,7 +281,7 @@ pub fn email_label(tokenizer: &mut Tokenizer) -> State {
Some(b'.') => {
tokenizer.tokenize_state.size = 0;
tokenizer.consume();
- State::Fn(StateName::AutolinkEmailAtSignOrDot)
+ State::Next(StateName::AutolinkEmailAtSignOrDot)
}
Some(b'>') => {
tokenizer.tokenize_state.size = 0;
@@ -310,14 +310,14 @@ pub fn email_value(tokenizer: &mut Tokenizer) -> State {
Some(b'-' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
if tokenizer.tokenize_state.size < AUTOLINK_DOMAIN_SIZE_MAX =>
{
- let state_name = if matches!(tokenizer.current, Some(b'-')) {
+ let name = if matches!(tokenizer.current, Some(b'-')) {
StateName::AutolinkEmailValue
} else {
StateName::AutolinkEmailLabel
};
tokenizer.tokenize_state.size += 1;
tokenizer.consume();
- State::Fn(state_name)
+ State::Next(name)
}
_ => {
tokenizer.tokenize_state.size = 0;
diff --git a/src/construct/blank_line.rs b/src/construct/blank_line.rs
index 22dfdc0..d7d4817 100644
--- a/src/construct/blank_line.rs
+++ b/src/construct/blank_line.rs
@@ -46,11 +46,11 @@ use crate::tokenizer::{State, StateName, Tokenizer};
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::BlankLineAfter),
- State::Fn(StateName::BlankLineAfter),
+ name,
+ State::Next(StateName::BlankLineAfter),
+ State::Next(StateName::BlankLineAfter),
)
}
diff --git a/src/construct/block_quote.rs b/src/construct/block_quote.rs
index cec3dce..0e02be9 100644
--- a/src/construct/block_quote.rs
+++ b/src/construct/block_quote.rs
@@ -46,7 +46,7 @@ use crate::tokenizer::{State, StateName, Tokenizer};
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
if tokenizer.parse_state.constructs.block_quote {
- let state_name = space_or_tab_min_max(
+ let name = space_or_tab_min_max(
tokenizer,
0,
if tokenizer.parse_state.constructs.code_indented {
@@ -55,11 +55,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
usize::MAX
},
);
- tokenizer.attempt(
- state_name,
- State::Fn(StateName::BlockQuoteBefore),
- State::Nok,
- )
+ tokenizer.attempt(name, State::Next(StateName::BlockQuoteBefore), State::Nok)
} else {
State::Nok
}
@@ -89,7 +85,7 @@ pub fn before(tokenizer: &mut Tokenizer) -> State {
/// ^
/// ```
pub fn cont_start(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab_min_max(
+ let name = space_or_tab_min_max(
tokenizer,
0,
if tokenizer.parse_state.constructs.code_indented {
@@ -99,8 +95,8 @@ pub fn cont_start(tokenizer: &mut Tokenizer) -> State {
},
);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::BlockQuoteContBefore),
+ name,
+ State::Next(StateName::BlockQuoteContBefore),
State::Nok,
)
}
@@ -119,7 +115,7 @@ pub fn cont_before(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::BlockQuoteMarker);
tokenizer.consume();
tokenizer.exit(Token::BlockQuoteMarker);
- State::Fn(StateName::BlockQuoteContAfter)
+ State::Next(StateName::BlockQuoteContAfter)
}
_ => State::Nok,
}
diff --git a/src/construct/character_escape.rs b/src/construct/character_escape.rs
index de09f17..52b2873 100644
--- a/src/construct/character_escape.rs
+++ b/src/construct/character_escape.rs
@@ -49,7 +49,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::CharacterEscapeMarker);
tokenizer.consume();
tokenizer.exit(Token::CharacterEscapeMarker);
- State::Fn(StateName::CharacterEscapeInside)
+ State::Next(StateName::CharacterEscapeInside)
}
_ => State::Nok,
}
diff --git a/src/construct/character_reference.rs b/src/construct/character_reference.rs
index ba05fab..5a0e15c 100644
--- a/src/construct/character_reference.rs
+++ b/src/construct/character_reference.rs
@@ -86,7 +86,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::CharacterReferenceMarker);
tokenizer.consume();
tokenizer.exit(Token::CharacterReferenceMarker);
- State::Fn(StateName::CharacterReferenceOpen)
+ State::Next(StateName::CharacterReferenceOpen)
}
_ => State::Nok,
}
@@ -109,7 +109,7 @@ pub fn open(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::CharacterReferenceMarkerNumeric);
tokenizer.consume();
tokenizer.exit(Token::CharacterReferenceMarkerNumeric);
- State::Fn(StateName::CharacterReferenceNumeric)
+ State::Next(StateName::CharacterReferenceNumeric)
} else {
tokenizer.tokenize_state.marker = b'&';
tokenizer.enter(Token::CharacterReferenceValue);
@@ -134,7 +134,7 @@ pub fn numeric(tokenizer: &mut Tokenizer) -> State {
tokenizer.exit(Token::CharacterReferenceMarkerHexadecimal);
tokenizer.enter(Token::CharacterReferenceValue);
tokenizer.tokenize_state.marker = b'x';
- State::Fn(StateName::CharacterReferenceValue)
+ State::Next(StateName::CharacterReferenceValue)
} else {
tokenizer.enter(Token::CharacterReferenceValue);
tokenizer.tokenize_state.marker = b'#';
@@ -202,7 +202,7 @@ pub fn value(tokenizer: &mut Tokenizer) -> State {
if tokenizer.tokenize_state.size < max && test(&byte) {
tokenizer.tokenize_state.size += 1;
tokenizer.consume();
- return State::Fn(StateName::CharacterReferenceValue);
+ return State::Next(StateName::CharacterReferenceValue);
}
}
diff --git a/src/construct/code_fenced.rs b/src/construct/code_fenced.rs
index 91fd8e4..17c7566 100644
--- a/src/construct/code_fenced.rs
+++ b/src/construct/code_fenced.rs
@@ -119,7 +119,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
if tokenizer.parse_state.constructs.code_fenced {
tokenizer.enter(Token::CodeFenced);
tokenizer.enter(Token::CodeFencedFence);
- let state_name = space_or_tab_min_max(
+ let name = space_or_tab_min_max(
tokenizer,
0,
if tokenizer.parse_state.constructs.code_indented {
@@ -129,8 +129,8 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
},
);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::CodeFencedBeforeSequenceOpen),
+ name,
+ State::Next(StateName::CodeFencedBeforeSequenceOpen),
State::Nok,
)
} else {
@@ -183,15 +183,15 @@ pub fn sequence_open(tokenizer: &mut Tokenizer) -> State {
Some(b'`' | b'~') if tokenizer.current.unwrap() == tokenizer.tokenize_state.marker => {
tokenizer.tokenize_state.size += 1;
tokenizer.consume();
- State::Fn(StateName::CodeFencedSequenceOpen)
+ State::Next(StateName::CodeFencedSequenceOpen)
}
_ if tokenizer.tokenize_state.size >= CODE_FENCED_SEQUENCE_SIZE_MIN => {
tokenizer.exit(Token::CodeFencedFenceSequence);
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::CodeFencedInfoBefore),
- State::Fn(StateName::CodeFencedInfoBefore),
+ name,
+ State::Next(StateName::CodeFencedInfoBefore),
+ State::Next(StateName::CodeFencedInfoBefore),
)
}
_ => {
@@ -248,11 +248,11 @@ pub fn info(tokenizer: &mut Tokenizer) -> State {
Some(b'\t' | b' ') => {
tokenizer.exit(Token::Data);
tokenizer.exit(Token::CodeFencedFenceInfo);
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::CodeFencedMetaBefore),
- State::Fn(StateName::CodeFencedMetaBefore),
+ name,
+ State::Next(StateName::CodeFencedMetaBefore),
+ State::Next(StateName::CodeFencedMetaBefore),
)
}
Some(b'`') if tokenizer.tokenize_state.marker == b'`' => {
@@ -264,7 +264,7 @@ pub fn info(tokenizer: &mut Tokenizer) -> State {
}
Some(_) => {
tokenizer.consume();
- State::Fn(StateName::CodeFencedInfo)
+ State::Next(StateName::CodeFencedInfo)
}
}
}
@@ -320,7 +320,7 @@ pub fn meta(tokenizer: &mut Tokenizer) -> State {
}
_ => {
tokenizer.consume();
- State::Fn(StateName::CodeFencedMeta)
+ State::Next(StateName::CodeFencedMeta)
}
}
}
@@ -337,8 +337,8 @@ pub fn meta(tokenizer: &mut Tokenizer) -> State {
pub fn at_break(tokenizer: &mut Tokenizer) -> State {
tokenizer.check(
StateName::NonLazyContinuationStart,
- State::Fn(StateName::CodeFencedAtNonLazyBreak),
- State::Fn(StateName::CodeFencedAfter),
+ State::Next(StateName::CodeFencedAtNonLazyBreak),
+ State::Next(StateName::CodeFencedAfter),
)
}
@@ -354,8 +354,8 @@ pub fn at_break(tokenizer: &mut Tokenizer) -> State {
pub fn at_non_lazy_break(tokenizer: &mut Tokenizer) -> State {
tokenizer.attempt(
StateName::CodeFencedCloseBefore,
- State::Fn(StateName::CodeFencedAfter),
- State::Fn(StateName::CodeFencedContentBefore),
+ State::Next(StateName::CodeFencedAfter),
+ State::Next(StateName::CodeFencedContentBefore),
)
}
@@ -373,7 +373,7 @@ pub fn close_before(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LineEnding);
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::CodeFencedCloseStart)
+ State::Next(StateName::CodeFencedCloseStart)
}
_ => unreachable!("expected eol"),
}
@@ -389,7 +389,7 @@ pub fn close_before(tokenizer: &mut Tokenizer) -> State {
/// ```
pub fn close_start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::CodeFencedFence);
- let state_name = space_or_tab_min_max(
+ let name = space_or_tab_min_max(
tokenizer,
0,
if tokenizer.parse_state.constructs.code_indented {
@@ -399,8 +399,8 @@ pub fn close_start(tokenizer: &mut Tokenizer) -> State {
},
);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::CodeFencedBeforeSequenceClose),
+ name,
+ State::Next(StateName::CodeFencedBeforeSequenceClose),
State::Nok,
)
}
@@ -436,18 +436,18 @@ pub fn sequence_close(tokenizer: &mut Tokenizer) -> State {
Some(b'`' | b'~') if tokenizer.current.unwrap() == tokenizer.tokenize_state.marker => {
tokenizer.tokenize_state.size_other += 1;
tokenizer.consume();
- State::Fn(StateName::CodeFencedSequenceClose)
+ State::Next(StateName::CodeFencedSequenceClose)
}
_ if tokenizer.tokenize_state.size_other >= CODE_FENCED_SEQUENCE_SIZE_MIN
&& tokenizer.tokenize_state.size_other >= tokenizer.tokenize_state.size =>
{
tokenizer.tokenize_state.size_other = 0;
tokenizer.exit(Token::CodeFencedFenceSequence);
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::CodeFencedAfterSequenceClose),
- State::Fn(StateName::CodeFencedAfterSequenceClose),
+ name,
+ State::Next(StateName::CodeFencedAfterSequenceClose),
+ State::Next(StateName::CodeFencedAfterSequenceClose),
)
}
_ => {
@@ -487,7 +487,7 @@ pub fn content_before(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LineEnding);
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::CodeFencedContentStart)
+ State::Next(StateName::CodeFencedContentStart)
}
/// Before code content, definitely not before a closing fence.
///
@@ -498,10 +498,10 @@ pub fn content_before(tokenizer: &mut Tokenizer) -> State {
/// | ~~~
/// ```
pub fn content_start(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab_min_max(tokenizer, 0, tokenizer.tokenize_state.prefix);
+ let name = space_or_tab_min_max(tokenizer, 0, tokenizer.tokenize_state.prefix);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::CodeFencedBeforeContentChunk),
+ name,
+ State::Next(StateName::CodeFencedBeforeContentChunk),
State::Nok,
)
}
@@ -540,7 +540,7 @@ pub fn content_chunk(tokenizer: &mut Tokenizer) -> State {
}
_ => {
tokenizer.consume();
- State::Fn(StateName::CodeFencedContentChunk)
+ State::Next(StateName::CodeFencedContentChunk)
}
}
}
diff --git a/src/construct/code_indented.rs b/src/construct/code_indented.rs
index 0906e5f..de7683d 100644
--- a/src/construct/code_indented.rs
+++ b/src/construct/code_indented.rs
@@ -64,10 +64,10 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
// Do not interrupt paragraphs.
if !tokenizer.interrupt && tokenizer.parse_state.constructs.code_indented {
tokenizer.enter(Token::CodeIndented);
- let state_name = space_or_tab_min_max(tokenizer, TAB_SIZE, TAB_SIZE);
+ let name = space_or_tab_min_max(tokenizer, TAB_SIZE, TAB_SIZE);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::CodeIndentedAtBreak),
+ name,
+ State::Next(StateName::CodeIndentedAtBreak),
State::Nok,
)
} else {
@@ -86,8 +86,8 @@ pub fn at_break(tokenizer: &mut Tokenizer) -> State {
None => after(tokenizer),
Some(b'\n') => tokenizer.attempt(
StateName::CodeIndentedFurtherStart,
- State::Fn(StateName::CodeIndentedAtBreak),
- State::Fn(StateName::CodeIndentedAfter),
+ State::Next(StateName::CodeIndentedAtBreak),
+ State::Next(StateName::CodeIndentedAfter),
),
_ => {
tokenizer.enter(Token::CodeFlowChunk);
@@ -110,7 +110,7 @@ pub fn inside(tokenizer: &mut Tokenizer) -> State {
}
_ => {
tokenizer.consume();
- State::Fn(StateName::CodeIndentedInside)
+ State::Next(StateName::CodeIndentedInside)
}
}
}
@@ -141,14 +141,14 @@ pub fn further_start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LineEnding);
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::CodeIndentedFurtherStart)
+ State::Next(StateName::CodeIndentedFurtherStart)
}
_ if !tokenizer.lazy => {
- let state_name = space_or_tab_min_max(tokenizer, TAB_SIZE, TAB_SIZE);
+ let name = space_or_tab_min_max(tokenizer, TAB_SIZE, TAB_SIZE);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::CodeIndentedFurtherEnd),
- State::Fn(StateName::CodeIndentedFurtherBegin),
+ name,
+ State::Next(StateName::CodeIndentedFurtherEnd),
+ State::Next(StateName::CodeIndentedFurtherBegin),
)
}
_ => State::Nok,
@@ -174,11 +174,11 @@ pub fn further_end(_tokenizer: &mut Tokenizer) -> State {
/// ^
/// ```
pub fn further_begin(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::CodeIndentedFurtherAfter),
- State::Fn(StateName::CodeIndentedFurtherAfter),
+ name,
+ State::Next(StateName::CodeIndentedFurtherAfter),
+ State::Next(StateName::CodeIndentedFurtherAfter),
)
}
diff --git a/src/construct/code_text.rs b/src/construct/code_text.rs
index 5bdefbb..729abe5 100644
--- a/src/construct/code_text.rs
+++ b/src/construct/code_text.rs
@@ -121,7 +121,7 @@ pub fn sequence_open(tokenizer: &mut Tokenizer) -> State {
if let Some(b'`') = tokenizer.current {
tokenizer.tokenize_state.size += 1;
tokenizer.consume();
- State::Fn(StateName::CodeTextSequenceOpen)
+ State::Next(StateName::CodeTextSequenceOpen)
} else {
tokenizer.exit(Token::CodeTextSequence);
between(tokenizer)
@@ -144,7 +144,7 @@ pub fn between(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LineEnding);
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::CodeTextBetween)
+ State::Next(StateName::CodeTextBetween)
}
Some(b'`') => {
tokenizer.enter(Token::CodeTextSequence);
@@ -171,7 +171,7 @@ pub fn data(tokenizer: &mut Tokenizer) -> State {
}
_ => {
tokenizer.consume();
- State::Fn(StateName::CodeTextData)
+ State::Next(StateName::CodeTextData)
}
}
}
@@ -187,7 +187,7 @@ pub fn sequence_close(tokenizer: &mut Tokenizer) -> State {
Some(b'`') => {
tokenizer.tokenize_state.size_other += 1;
tokenizer.consume();
- State::Fn(StateName::CodeTextSequenceClose)
+ State::Next(StateName::CodeTextSequenceClose)
}
_ => {
if tokenizer.tokenize_state.size == tokenizer.tokenize_state.size_other {
diff --git a/src/construct/definition.rs b/src/construct/definition.rs
index 350992b..62d0f3b 100644
--- a/src/construct/definition.rs
+++ b/src/construct/definition.rs
@@ -119,11 +119,11 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
if possible && tokenizer.parse_state.constructs.definition {
tokenizer.enter(Token::Definition);
// Note: arbitrary whitespace allowed even if code (indented) is on.
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::DefinitionBefore),
- State::Fn(StateName::DefinitionBefore),
+ name,
+ State::Next(StateName::DefinitionBefore),
+ State::Next(StateName::DefinitionBefore),
)
} else {
State::Nok
@@ -144,7 +144,7 @@ pub fn before(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.token_3 = Token::DefinitionLabelString;
tokenizer.attempt(
StateName::LabelStart,
- State::Fn(StateName::DefinitionLabelAfter),
+ State::Next(StateName::DefinitionLabelAfter),
State::Nok,
)
}
@@ -168,7 +168,7 @@ pub fn label_after(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::DefinitionMarker);
tokenizer.consume();
tokenizer.exit(Token::DefinitionMarker);
- State::Fn(StateName::DefinitionMarkerAfter)
+ State::Next(StateName::DefinitionMarkerAfter)
}
_ => State::Nok,
}
@@ -176,11 +176,11 @@ pub fn label_after(tokenizer: &mut Tokenizer) -> State {
/// To do.
pub fn marker_after(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab_eol(tokenizer);
+ let name = space_or_tab_eol(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::DefinitionDestinationBefore),
- State::Fn(StateName::DefinitionDestinationBefore),
+ name,
+ State::Next(StateName::DefinitionDestinationBefore),
+ State::Next(StateName::DefinitionDestinationBefore),
)
}
@@ -199,8 +199,8 @@ pub fn destination_before(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.size_other = usize::MAX;
tokenizer.attempt(
StateName::DestinationStart,
- State::Fn(StateName::DefinitionDestinationAfter),
- State::Fn(StateName::DefinitionDestinationMissing),
+ State::Next(StateName::DefinitionDestinationAfter),
+ State::Next(StateName::DefinitionDestinationMissing),
)
}
@@ -219,8 +219,8 @@ pub fn destination_after(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.size_other = 0;
tokenizer.attempt(
StateName::DefinitionTitleBefore,
- State::Fn(StateName::DefinitionAfter),
- State::Fn(StateName::DefinitionAfter),
+ State::Next(StateName::DefinitionAfter),
+ State::Next(StateName::DefinitionAfter),
)
}
@@ -244,11 +244,11 @@ pub fn destination_missing(tokenizer: &mut Tokenizer) -> State {
/// ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::DefinitionAfterWhitespace),
- State::Fn(StateName::DefinitionAfterWhitespace),
+ name,
+ State::Next(StateName::DefinitionAfterWhitespace),
+ State::Next(StateName::DefinitionAfterWhitespace),
)
}
@@ -281,10 +281,10 @@ pub fn after_whitespace(tokenizer: &mut Tokenizer) -> State {
/// ^
/// ```
pub fn title_before(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab_eol(tokenizer);
+ let name = space_or_tab_eol(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::DefinitionTitleBeforeMarker),
+ name,
+ State::Next(StateName::DefinitionTitleBeforeMarker),
State::Nok,
)
}
@@ -302,7 +302,7 @@ pub fn title_before_marker(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.token_3 = Token::DefinitionTitleString;
tokenizer.attempt(
StateName::TitleStart,
- State::Fn(StateName::DefinitionTitleAfter),
+ State::Next(StateName::DefinitionTitleAfter),
State::Nok,
)
}
@@ -317,11 +317,11 @@ pub fn title_after(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.token_1 = Token::Data;
tokenizer.tokenize_state.token_2 = Token::Data;
tokenizer.tokenize_state.token_3 = Token::Data;
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::DefinitionTitleAfterOptionalWhitespace),
- State::Fn(StateName::DefinitionTitleAfterOptionalWhitespace),
+ name,
+ State::Next(StateName::DefinitionTitleAfterOptionalWhitespace),
+ State::Next(StateName::DefinitionTitleAfterOptionalWhitespace),
)
}
diff --git a/src/construct/hard_break_escape.rs b/src/construct/hard_break_escape.rs
index 47b7e94..fc2cbdf 100644
--- a/src/construct/hard_break_escape.rs
+++ b/src/construct/hard_break_escape.rs
@@ -54,7 +54,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
Some(b'\\') if tokenizer.parse_state.constructs.hard_break_escape => {
tokenizer.enter(Token::HardBreakEscape);
tokenizer.consume();
- State::Fn(StateName::HardBreakEscapeAfter)
+ State::Next(StateName::HardBreakEscapeAfter)
}
_ => State::Nok,
}
diff --git a/src/construct/heading_atx.rs b/src/construct/heading_atx.rs
index e5bc3bd..41fad49 100644
--- a/src/construct/heading_atx.rs
+++ b/src/construct/heading_atx.rs
@@ -68,7 +68,7 @@ use crate::tokenizer::{ContentType, Event, EventType, State, StateName, Tokenize
pub fn start(tokenizer: &mut Tokenizer) -> State {
if tokenizer.parse_state.constructs.heading_atx {
tokenizer.enter(Token::HeadingAtx);
- let state_name = space_or_tab_min_max(
+ let name = space_or_tab_min_max(
tokenizer,
0,
if tokenizer.parse_state.constructs.code_indented {
@@ -77,11 +77,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
usize::MAX
},
);
- tokenizer.attempt(
- state_name,
- State::Fn(StateName::HeadingAtxBefore),
- State::Nok,
- )
+ tokenizer.attempt(name, State::Next(StateName::HeadingAtxBefore), State::Nok)
} else {
State::Nok
}
@@ -118,17 +114,13 @@ pub fn sequence_open(tokenizer: &mut Tokenizer) -> State {
Some(b'#') if tokenizer.tokenize_state.size < HEADING_ATX_OPENING_FENCE_SIZE_MAX => {
tokenizer.tokenize_state.size += 1;
tokenizer.consume();
- State::Fn(StateName::HeadingAtxSequenceOpen)
+ State::Next(StateName::HeadingAtxSequenceOpen)
}
_ if tokenizer.tokenize_state.size > 0 => {
tokenizer.tokenize_state.size = 0;
tokenizer.exit(Token::HeadingAtxSequence);
- let state_name = space_or_tab(tokenizer);
- tokenizer.attempt(
- state_name,
- State::Fn(StateName::HeadingAtxAtBreak),
- State::Nok,
- )
+ let name = space_or_tab(tokenizer);
+ tokenizer.attempt(name, State::Next(StateName::HeadingAtxAtBreak), State::Nok)
}
_ => {
tokenizer.tokenize_state.size = 0;
@@ -153,12 +145,8 @@ pub fn at_break(tokenizer: &mut Tokenizer) -> State {
State::Ok
}
Some(b'\t' | b' ') => {
- let state_name = space_or_tab(tokenizer);
- tokenizer.attempt(
- state_name,
- State::Fn(StateName::HeadingAtxAtBreak),
- State::Nok,
- )
+ let name = space_or_tab(tokenizer);
+ tokenizer.attempt(name, State::Next(StateName::HeadingAtxAtBreak), State::Nok)
}
Some(b'#') => {
tokenizer.enter(Token::HeadingAtxSequence);
@@ -182,7 +170,7 @@ pub fn at_break(tokenizer: &mut Tokenizer) -> State {
pub fn sequence_further(tokenizer: &mut Tokenizer) -> State {
if let Some(b'#') = tokenizer.current {
tokenizer.consume();
- State::Fn(StateName::HeadingAtxSequenceFurther)
+ State::Next(StateName::HeadingAtxSequenceFurther)
} else {
tokenizer.exit(Token::HeadingAtxSequence);
at_break(tokenizer)
@@ -204,7 +192,7 @@ pub fn data(tokenizer: &mut Tokenizer) -> State {
}
_ => {
tokenizer.consume();
- State::Fn(StateName::HeadingAtxData)
+ State::Next(StateName::HeadingAtxData)
}
}
}
diff --git a/src/construct/heading_setext.rs b/src/construct/heading_setext.rs
index a8c707a..4541a87 100644
--- a/src/construct/heading_setext.rs
+++ b/src/construct/heading_setext.rs
@@ -83,7 +83,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
.token_type
== Token::Paragraph)
{
- let state_name = space_or_tab_min_max(
+ let name = space_or_tab_min_max(
tokenizer,
0,
if tokenizer.parse_state.constructs.code_indented {
@@ -94,8 +94,8 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::HeadingSetextBefore),
+ name,
+ State::Next(StateName::HeadingSetextBefore),
State::Nok,
)
} else {
@@ -132,16 +132,16 @@ pub fn inside(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'-' | b'=') if tokenizer.current.unwrap() == tokenizer.tokenize_state.marker => {
tokenizer.consume();
- State::Fn(StateName::HeadingSetextInside)
+ State::Next(StateName::HeadingSetextInside)
}
_ => {
tokenizer.tokenize_state.marker = 0;
tokenizer.exit(Token::HeadingSetextUnderline);
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::HeadingSetextAfter),
- State::Fn(StateName::HeadingSetextAfter),
+ name,
+ State::Next(StateName::HeadingSetextAfter),
+ State::Next(StateName::HeadingSetextAfter),
)
}
}
diff --git a/src/construct/html_flow.rs b/src/construct/html_flow.rs
index 128fd2e..298bcaf 100644
--- a/src/construct/html_flow.rs
+++ b/src/construct/html_flow.rs
@@ -132,7 +132,7 @@ const COMPLETE: u8 = 7;
pub fn start(tokenizer: &mut Tokenizer) -> State {
if tokenizer.parse_state.constructs.html_flow {
tokenizer.enter(Token::HtmlFlow);
- let state_name = space_or_tab_with_options(
+ let name = space_or_tab_with_options(
tokenizer,
SpaceOrTabOptions {
kind: Token::HtmlFlowData,
@@ -147,7 +147,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
},
);
- tokenizer.attempt(state_name, State::Fn(StateName::HtmlFlowBefore), State::Nok)
+ tokenizer.attempt(name, State::Next(StateName::HtmlFlowBefore), State::Nok)
} else {
State::Nok
}
@@ -163,7 +163,7 @@ pub fn before(tokenizer: &mut Tokenizer) -> State {
if Some(b'<') == tokenizer.current {
tokenizer.enter(Token::HtmlFlowData);
tokenizer.consume();
- State::Fn(StateName::HtmlFlowOpen)
+ State::Next(StateName::HtmlFlowOpen)
} else {
State::Nok
}
@@ -183,13 +183,13 @@ pub fn open(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'!') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowDeclarationOpen)
+ State::Next(StateName::HtmlFlowDeclarationOpen)
}
Some(b'/') => {
tokenizer.consume();
tokenizer.tokenize_state.seen = true;
tokenizer.tokenize_state.start = tokenizer.point.index;
- State::Fn(StateName::HtmlFlowTagCloseStart)
+ State::Next(StateName::HtmlFlowTagCloseStart)
}
Some(b'?') => {
tokenizer.tokenize_state.marker = INSTRUCTION;
@@ -198,7 +198,7 @@ pub fn open(tokenizer: &mut Tokenizer) -> State {
tokenizer.concrete = true;
// While we’re in an instruction instead of a declaration, we’re on a `?`
// right now, so we do need to search for `>`, similar to declarations.
- State::Fn(StateName::HtmlFlowContinuationDeclarationInside)
+ State::Next(StateName::HtmlFlowContinuationDeclarationInside)
}
// ASCII alphabetical.
Some(b'A'..=b'Z' | b'a'..=b'z') => {
@@ -224,19 +224,19 @@ pub fn declaration_open(tokenizer: &mut Tokenizer) -> State {
Some(b'-') => {
tokenizer.consume();
tokenizer.tokenize_state.marker = COMMENT;
- State::Fn(StateName::HtmlFlowCommentOpenInside)
+ State::Next(StateName::HtmlFlowCommentOpenInside)
}
Some(b'A'..=b'Z' | b'a'..=b'z') => {
tokenizer.consume();
tokenizer.tokenize_state.marker = DECLARATION;
// Do not form containers.
tokenizer.concrete = true;
- State::Fn(StateName::HtmlFlowContinuationDeclarationInside)
+ State::Next(StateName::HtmlFlowContinuationDeclarationInside)
}
Some(b'[') => {
tokenizer.consume();
tokenizer.tokenize_state.marker = CDATA;
- State::Fn(StateName::HtmlFlowCdataOpenInside)
+ State::Next(StateName::HtmlFlowCdataOpenInside)
}
_ => State::Nok,
}
@@ -253,7 +253,7 @@ pub fn comment_open_inside(tokenizer: &mut Tokenizer) -> State {
tokenizer.consume();
// Do not form containers.
tokenizer.concrete = true;
- State::Fn(StateName::HtmlFlowContinuationDeclarationInside)
+ State::Next(StateName::HtmlFlowContinuationDeclarationInside)
} else {
tokenizer.tokenize_state.marker = 0;
State::Nok
@@ -275,9 +275,9 @@ pub fn cdata_open_inside(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.size = 0;
// Do not form containers.
tokenizer.concrete = true;
- State::Fn(StateName::HtmlFlowContinuation)
+ State::Next(StateName::HtmlFlowContinuation)
} else {
- State::Fn(StateName::HtmlFlowCdataOpenInside)
+ State::Next(StateName::HtmlFlowCdataOpenInside)
}
} else {
tokenizer.tokenize_state.marker = 0;
@@ -295,7 +295,7 @@ pub fn cdata_open_inside(tokenizer: &mut Tokenizer) -> State {
pub fn tag_close_start(tokenizer: &mut Tokenizer) -> State {
if let Some(b'A'..=b'Z' | b'a'..=b'z') = tokenizer.current {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowTagName)
+ State::Next(StateName::HtmlFlowTagName)
} else {
tokenizer.tokenize_state.seen = false;
tokenizer.tokenize_state.start = 0;
@@ -340,7 +340,7 @@ pub fn tag_name(tokenizer: &mut Tokenizer) -> State {
if slash {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowBasicSelfClosing)
+ State::Next(StateName::HtmlFlowBasicSelfClosing)
} else {
// Do not form containers.
tokenizer.concrete = true;
@@ -363,7 +363,7 @@ pub fn tag_name(tokenizer: &mut Tokenizer) -> State {
// ASCII alphanumerical and `-`.
Some(b'-' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowTagName)
+ State::Next(StateName::HtmlFlowTagName)
}
Some(_) => {
tokenizer.tokenize_state.seen = false;
@@ -383,7 +383,7 @@ pub fn basic_self_closing(tokenizer: &mut Tokenizer) -> State {
tokenizer.consume();
// Do not form containers.
tokenizer.concrete = true;
- State::Fn(StateName::HtmlFlowContinuation)
+ State::Next(StateName::HtmlFlowContinuation)
} else {
tokenizer.tokenize_state.marker = 0;
State::Nok
@@ -400,7 +400,7 @@ pub fn complete_closing_tag_after(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'\t' | b' ') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteClosingTagAfter)
+ State::Next(StateName::HtmlFlowCompleteClosingTagAfter)
}
_ => complete_end(tokenizer),
}
@@ -429,16 +429,16 @@ pub fn complete_attribute_name_before(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'\t' | b' ') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeNameBefore)
+ State::Next(StateName::HtmlFlowCompleteAttributeNameBefore)
}
Some(b'/') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteEnd)
+ State::Next(StateName::HtmlFlowCompleteEnd)
}
// ASCII alphanumerical and `:` and `_`.
Some(b'0'..=b'9' | b':' | b'A'..=b'Z' | b'_' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeName)
+ State::Next(StateName::HtmlFlowCompleteAttributeName)
}
_ => complete_end(tokenizer),
}
@@ -459,7 +459,7 @@ pub fn complete_attribute_name(tokenizer: &mut Tokenizer) -> State {
// ASCII alphanumerical and `-`, `.`, `:`, and `_`.
Some(b'-' | b'.' | b'0'..=b'9' | b':' | b'A'..=b'Z' | b'_' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeName)
+ State::Next(StateName::HtmlFlowCompleteAttributeName)
}
_ => complete_attribute_name_after(tokenizer),
}
@@ -478,11 +478,11 @@ pub fn complete_attribute_name_after(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'\t' | b' ') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeNameAfter)
+ State::Next(StateName::HtmlFlowCompleteAttributeNameAfter)
}
Some(b'=') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeValueBefore)
+ State::Next(StateName::HtmlFlowCompleteAttributeValueBefore)
}
_ => complete_attribute_name_before(tokenizer),
}
@@ -505,12 +505,12 @@ pub fn complete_attribute_value_before(tokenizer: &mut Tokenizer) -> State {
}
Some(b'\t' | b' ') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeValueBefore)
+ State::Next(StateName::HtmlFlowCompleteAttributeValueBefore)
}
Some(b'"' | b'\'') => {
tokenizer.tokenize_state.marker_other = tokenizer.current.unwrap();
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeValueQuoted)
+ State::Next(StateName::HtmlFlowCompleteAttributeValueQuoted)
}
_ => complete_attribute_value_unquoted(tokenizer),
}
@@ -536,11 +536,11 @@ pub fn complete_attribute_value_quoted(tokenizer: &mut Tokenizer) -> State {
{
tokenizer.tokenize_state.marker_other = 0;
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeValueQuotedAfter)
+ State::Next(StateName::HtmlFlowCompleteAttributeValueQuotedAfter)
}
_ => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeValueQuoted)
+ State::Next(StateName::HtmlFlowCompleteAttributeValueQuoted)
}
}
}
@@ -558,7 +558,7 @@ pub fn complete_attribute_value_unquoted(tokenizer: &mut Tokenizer) -> State {
}
Some(_) => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAttributeValueUnquoted)
+ State::Next(StateName::HtmlFlowCompleteAttributeValueUnquoted)
}
}
}
@@ -588,7 +588,7 @@ pub fn complete_attribute_value_quoted_after(tokenizer: &mut Tokenizer) -> State
pub fn complete_end(tokenizer: &mut Tokenizer) -> State {
if let Some(b'>') = tokenizer.current {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAfter)
+ State::Next(StateName::HtmlFlowCompleteAfter)
} else {
tokenizer.tokenize_state.marker = 0;
State::Nok
@@ -610,7 +610,7 @@ pub fn complete_after(tokenizer: &mut Tokenizer) -> State {
}
Some(b'\t' | b' ') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowCompleteAfter)
+ State::Next(StateName::HtmlFlowCompleteAfter)
}
Some(_) => {
tokenizer.tokenize_state.marker = 0;
@@ -634,8 +634,8 @@ pub fn continuation(tokenizer: &mut Tokenizer) -> State {
tokenizer.exit(Token::HtmlFlowData);
tokenizer.check(
StateName::HtmlFlowBlankLineBefore,
- State::Fn(StateName::HtmlFlowContinuationAfter),
- State::Fn(StateName::HtmlFlowContinuationStart),
+ State::Next(StateName::HtmlFlowContinuationAfter),
+ State::Next(StateName::HtmlFlowContinuationStart),
)
}
// Note: important that this is after the basic/complete case.
@@ -645,27 +645,27 @@ pub fn continuation(tokenizer: &mut Tokenizer) -> State {
}
Some(b'-') if tokenizer.tokenize_state.marker == COMMENT => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationCommentInside)
+ State::Next(StateName::HtmlFlowContinuationCommentInside)
}
Some(b'<') if tokenizer.tokenize_state.marker == RAW => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationRawTagOpen)
+ State::Next(StateName::HtmlFlowContinuationRawTagOpen)
}
Some(b'>') if tokenizer.tokenize_state.marker == DECLARATION => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationClose)
+ State::Next(StateName::HtmlFlowContinuationClose)
}
Some(b'?') if tokenizer.tokenize_state.marker == INSTRUCTION => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationDeclarationInside)
+ State::Next(StateName::HtmlFlowContinuationDeclarationInside)
}
Some(b']') if tokenizer.tokenize_state.marker == CDATA => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationCdataInside)
+ State::Next(StateName::HtmlFlowContinuationCdataInside)
}
_ => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuation)
+ State::Next(StateName::HtmlFlowContinuation)
}
}
}
@@ -680,8 +680,8 @@ pub fn continuation(tokenizer: &mut Tokenizer) -> State {
pub fn continuation_start(tokenizer: &mut Tokenizer) -> State {
tokenizer.check(
StateName::NonLazyContinuationStart,
- State::Fn(StateName::HtmlFlowContinuationStartNonLazy),
- State::Fn(StateName::HtmlFlowContinuationAfter),
+ State::Next(StateName::HtmlFlowContinuationStartNonLazy),
+ State::Next(StateName::HtmlFlowContinuationAfter),
)
}
@@ -698,7 +698,7 @@ pub fn continuation_start_non_lazy(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LineEnding);
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::HtmlFlowContinuationBefore)
+ State::Next(StateName::HtmlFlowContinuationBefore)
}
_ => unreachable!("expected eol"),
}
@@ -731,7 +731,7 @@ pub fn continuation_comment_inside(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'-') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationDeclarationInside)
+ State::Next(StateName::HtmlFlowContinuationDeclarationInside)
}
_ => continuation(tokenizer),
}
@@ -748,7 +748,7 @@ pub fn continuation_raw_tag_open(tokenizer: &mut Tokenizer) -> State {
Some(b'/') => {
tokenizer.consume();
tokenizer.tokenize_state.start = tokenizer.point.index;
- State::Fn(StateName::HtmlFlowContinuationRawEndTag)
+ State::Next(StateName::HtmlFlowContinuationRawEndTag)
}
_ => continuation(tokenizer),
}
@@ -775,7 +775,7 @@ pub fn continuation_raw_end_tag(tokenizer: &mut Tokenizer) -> State {
if HTML_RAW_NAMES.contains(&name.as_str()) {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationClose)
+ State::Next(StateName::HtmlFlowContinuationClose)
} else {
continuation(tokenizer)
}
@@ -784,7 +784,7 @@ pub fn continuation_raw_end_tag(tokenizer: &mut Tokenizer) -> State {
if tokenizer.point.index - tokenizer.tokenize_state.start < HTML_RAW_SIZE_MAX =>
{
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationRawEndTag)
+ State::Next(StateName::HtmlFlowContinuationRawEndTag)
}
_ => {
tokenizer.tokenize_state.start = 0;
@@ -803,7 +803,7 @@ pub fn continuation_cdata_inside(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b']') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationDeclarationInside)
+ State::Next(StateName::HtmlFlowContinuationDeclarationInside)
}
_ => continuation(tokenizer),
}
@@ -827,11 +827,11 @@ pub fn continuation_declaration_inside(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'>') => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationClose)
+ State::Next(StateName::HtmlFlowContinuationClose)
}
Some(b'-') if tokenizer.tokenize_state.marker == COMMENT => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationDeclarationInside)
+ State::Next(StateName::HtmlFlowContinuationDeclarationInside)
}
_ => continuation(tokenizer),
}
@@ -851,7 +851,7 @@ pub fn continuation_close(tokenizer: &mut Tokenizer) -> State {
}
_ => {
tokenizer.consume();
- State::Fn(StateName::HtmlFlowContinuationClose)
+ State::Next(StateName::HtmlFlowContinuationClose)
}
}
}
@@ -883,5 +883,5 @@ pub fn blank_line_before(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LineEnding);
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::BlankLineStart)
+ State::Next(StateName::BlankLineStart)
}
diff --git a/src/construct/html_text.rs b/src/construct/html_text.rs
index c1dfaca..38d0476 100644
--- a/src/construct/html_text.rs
+++ b/src/construct/html_text.rs
@@ -70,7 +70,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::HtmlText);
tokenizer.enter(Token::HtmlTextData);
tokenizer.consume();
- State::Fn(StateName::HtmlTextOpen)
+ State::Next(StateName::HtmlTextOpen)
} else {
State::Nok
}
@@ -90,20 +90,20 @@ pub fn open(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'!') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextDeclarationOpen)
+ State::Next(StateName::HtmlTextDeclarationOpen)
}
Some(b'/') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagCloseStart)
+ State::Next(StateName::HtmlTextTagCloseStart)
}
Some(b'?') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextInstruction)
+ State::Next(StateName::HtmlTextInstruction)
}
// ASCII alphabetical.
Some(b'A'..=b'Z' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpen)
+ State::Next(StateName::HtmlTextTagOpen)
}
_ => State::Nok,
}
@@ -123,16 +123,16 @@ pub fn declaration_open(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'-') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextCommentOpenInside)
+ State::Next(StateName::HtmlTextCommentOpenInside)
}
// ASCII alphabetical.
Some(b'A'..=b'Z' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextDeclaration)
+ State::Next(StateName::HtmlTextDeclaration)
}
Some(b'[') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextCdataOpenInside)
+ State::Next(StateName::HtmlTextCdataOpenInside)
}
_ => State::Nok,
}
@@ -148,7 +148,7 @@ pub fn comment_open_inside(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'-') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextCommentStart)
+ State::Next(StateName::HtmlTextCommentStart)
}
_ => State::Nok,
}
@@ -172,7 +172,7 @@ pub fn comment_start(tokenizer: &mut Tokenizer) -> State {
Some(b'>') => State::Nok,
Some(b'-') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextCommentStartDash)
+ State::Next(StateName::HtmlTextCommentStartDash)
}
_ => comment(tokenizer),
}
@@ -213,11 +213,11 @@ pub fn comment(tokenizer: &mut Tokenizer) -> State {
}
Some(b'-') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextCommentClose)
+ State::Next(StateName::HtmlTextCommentClose)
}
_ => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextComment)
+ State::Next(StateName::HtmlTextComment)
}
}
}
@@ -232,7 +232,7 @@ pub fn comment_close(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'-') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextEnd)
+ State::Next(StateName::HtmlTextEnd)
}
_ => comment(tokenizer),
}
@@ -251,9 +251,9 @@ pub fn cdata_open_inside(tokenizer: &mut Tokenizer) -> State {
if tokenizer.tokenize_state.size == HTML_CDATA_PREFIX.len() {
tokenizer.tokenize_state.size = 0;
- State::Fn(StateName::HtmlTextCdata)
+ State::Next(StateName::HtmlTextCdata)
} else {
- State::Fn(StateName::HtmlTextCdataOpenInside)
+ State::Next(StateName::HtmlTextCdataOpenInside)
}
} else {
State::Nok
@@ -275,11 +275,11 @@ pub fn cdata(tokenizer: &mut Tokenizer) -> State {
}
Some(b']') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextCdataClose)
+ State::Next(StateName::HtmlTextCdataClose)
}
_ => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextCdata)
+ State::Next(StateName::HtmlTextCdata)
}
}
}
@@ -294,7 +294,7 @@ pub fn cdata_close(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b']') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextCdataEnd)
+ State::Next(StateName::HtmlTextCdataEnd)
}
_ => cdata(tokenizer),
}
@@ -329,7 +329,7 @@ pub fn declaration(tokenizer: &mut Tokenizer) -> State {
}
_ => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextDeclaration)
+ State::Next(StateName::HtmlTextDeclaration)
}
}
}
@@ -349,11 +349,11 @@ pub fn instruction(tokenizer: &mut Tokenizer) -> State {
}
Some(b'?') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextInstructionClose)
+ State::Next(StateName::HtmlTextInstructionClose)
}
_ => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextInstruction)
+ State::Next(StateName::HtmlTextInstruction)
}
}
}
@@ -382,7 +382,7 @@ pub fn tag_close_start(tokenizer: &mut Tokenizer) -> State {
// ASCII alphabetical.
Some(b'A'..=b'Z' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagClose)
+ State::Next(StateName::HtmlTextTagClose)
}
_ => State::Nok,
}
@@ -399,7 +399,7 @@ pub fn tag_close(tokenizer: &mut Tokenizer) -> State {
// ASCII alphanumerical and `-`.
Some(b'-' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagClose)
+ State::Next(StateName::HtmlTextTagClose)
}
_ => tag_close_between(tokenizer),
}
@@ -419,7 +419,7 @@ pub fn tag_close_between(tokenizer: &mut Tokenizer) -> State {
}
Some(b'\t' | b' ') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagCloseBetween)
+ State::Next(StateName::HtmlTextTagCloseBetween)
}
_ => end(tokenizer),
}
@@ -436,7 +436,7 @@ pub fn tag_open(tokenizer: &mut Tokenizer) -> State {
// ASCII alphanumerical and `-`.
Some(b'-' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpen)
+ State::Next(StateName::HtmlTextTagOpen)
}
Some(b'\t' | b'\n' | b' ' | b'/' | b'>') => tag_open_between(tokenizer),
_ => State::Nok,
@@ -457,16 +457,16 @@ pub fn tag_open_between(tokenizer: &mut Tokenizer) -> State {
}
Some(b'\t' | b' ') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenBetween)
+ State::Next(StateName::HtmlTextTagOpenBetween)
}
Some(b'/') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextEnd)
+ State::Next(StateName::HtmlTextEnd)
}
// ASCII alphabetical and `:` and `_`.
Some(b':' | b'A'..=b'Z' | b'_' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeName)
+ State::Next(StateName::HtmlTextTagOpenAttributeName)
}
_ => end(tokenizer),
}
@@ -483,7 +483,7 @@ pub fn tag_open_attribute_name(tokenizer: &mut Tokenizer) -> State {
// ASCII alphabetical and `-`, `.`, `:`, and `_`.
Some(b'-' | b'.' | b'0'..=b'9' | b':' | b'A'..=b'Z' | b'_' | b'a'..=b'z') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeName)
+ State::Next(StateName::HtmlTextTagOpenAttributeName)
}
_ => tag_open_attribute_name_after(tokenizer),
}
@@ -505,11 +505,11 @@ pub fn tag_open_attribute_name_after(tokenizer: &mut Tokenizer) -> State {
}
Some(b'\t' | b' ') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeNameAfter)
+ State::Next(StateName::HtmlTextTagOpenAttributeNameAfter)
}
Some(b'=') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeValueBefore)
+ State::Next(StateName::HtmlTextTagOpenAttributeValueBefore)
}
_ => tag_open_between(tokenizer),
}
@@ -532,16 +532,16 @@ pub fn tag_open_attribute_value_before(tokenizer: &mut Tokenizer) -> State {
}
Some(b'\t' | b' ') => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeValueBefore)
+ State::Next(StateName::HtmlTextTagOpenAttributeValueBefore)
}
Some(b'"' | b'\'') => {
tokenizer.tokenize_state.marker = tokenizer.current.unwrap();
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeValueQuoted)
+ State::Next(StateName::HtmlTextTagOpenAttributeValueQuoted)
}
Some(_) => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeValueUnquoted)
+ State::Next(StateName::HtmlTextTagOpenAttributeValueUnquoted)
}
}
}
@@ -566,11 +566,11 @@ pub fn tag_open_attribute_value_quoted(tokenizer: &mut Tokenizer) -> State {
Some(b'"' | b'\'') if tokenizer.current.unwrap() == tokenizer.tokenize_state.marker => {
tokenizer.tokenize_state.marker = 0;
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeValueQuotedAfter)
+ State::Next(StateName::HtmlTextTagOpenAttributeValueQuotedAfter)
}
_ => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeValueQuoted)
+ State::Next(StateName::HtmlTextTagOpenAttributeValueQuoted)
}
}
}
@@ -587,7 +587,7 @@ pub fn tag_open_attribute_value_unquoted(tokenizer: &mut Tokenizer) -> State {
Some(b'\t' | b'\n' | b' ' | b'/' | b'>') => tag_open_between(tokenizer),
Some(_) => {
tokenizer.consume();
- State::Fn(StateName::HtmlTextTagOpenAttributeValueUnquoted)
+ State::Next(StateName::HtmlTextTagOpenAttributeValueUnquoted)
}
}
}
@@ -641,7 +641,7 @@ pub fn line_ending_before(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LineEnding);
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::HtmlTextLineEndingAfter)
+ State::Next(StateName::HtmlTextLineEndingAfter)
}
_ => unreachable!("expected eol"),
}
@@ -658,11 +658,11 @@ pub fn line_ending_before(tokenizer: &mut Tokenizer) -> State {
/// ^
/// ```
pub fn line_ending_after(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::HtmlTextLineEndingAfterPrefix),
- State::Fn(StateName::HtmlTextLineEndingAfterPrefix),
+ name,
+ State::Next(StateName::HtmlTextLineEndingAfterPrefix),
+ State::Next(StateName::HtmlTextLineEndingAfterPrefix),
)
}
@@ -677,8 +677,9 @@ pub fn line_ending_after(tokenizer: &mut Tokenizer) -> State {
/// ^
/// ```
pub fn line_ending_after_prefix(tokenizer: &mut Tokenizer) -> State {
- let state_name = tokenizer.tokenize_state.return_state.take().unwrap();
- let func = state_name.to_func();
+ let name = tokenizer.tokenize_state.return_state.take().unwrap();
+ // To do: should use `State::Retry` when it exists.
+ let func = name.to_func();
tokenizer.enter(Token::HtmlTextData);
func(tokenizer)
}
diff --git a/src/construct/label_end.rs b/src/construct/label_end.rs
index 47ded36..0404b6e 100644
--- a/src/construct/label_end.rs
+++ b/src/construct/label_end.rs
@@ -201,7 +201,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.consume();
tokenizer.exit(Token::LabelMarker);
tokenizer.exit(Token::LabelEnd);
- return State::Fn(StateName::LabelEndAfter);
+ return State::Next(StateName::LabelEndAfter);
}
}
@@ -239,8 +239,8 @@ pub fn after(tokenizer: &mut Tokenizer) -> State {
// Resource (`[asd](fgh)`)?
Some(b'(') => tokenizer.attempt(
StateName::LabelEndResourceStart,
- State::Fn(StateName::LabelEndOk),
- State::Fn(if defined {
+ State::Next(StateName::LabelEndOk),
+ State::Next(if defined {
StateName::LabelEndOk
} else {
StateName::LabelEndNok
@@ -249,8 +249,8 @@ pub fn after(tokenizer: &mut Tokenizer) -> State {
// Full (`[asd][fgh]`) or collapsed (`[asd][]`) reference?
Some(b'[') => tokenizer.attempt(
StateName::LabelEndReferenceFull,
- State::Fn(StateName::LabelEndOk),
- State::Fn(if defined {
+ State::Next(StateName::LabelEndOk),
+ State::Next(if defined {
StateName::LabelEndReferenceNotFull
} else {
StateName::LabelEndNok
@@ -278,8 +278,8 @@ pub fn after(tokenizer: &mut Tokenizer) -> State {
pub fn reference_not_full(tokenizer: &mut Tokenizer) -> State {
tokenizer.attempt(
StateName::LabelEndReferenceCollapsed,
- State::Fn(StateName::LabelEndOk),
- State::Fn(StateName::LabelEndNok),
+ State::Next(StateName::LabelEndOk),
+ State::Next(StateName::LabelEndNok),
)
}
@@ -362,7 +362,7 @@ pub fn resource_start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::ResourceMarker);
tokenizer.consume();
tokenizer.exit(Token::ResourceMarker);
- State::Fn(StateName::LabelEndResourceBefore)
+ State::Next(StateName::LabelEndResourceBefore)
}
_ => unreachable!("expected `(`"),
}
@@ -375,11 +375,11 @@ pub fn resource_start(tokenizer: &mut Tokenizer) -> State {
/// ^
/// ```
pub fn resource_before(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab_eol(tokenizer);
+ let name = space_or_tab_eol(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::LabelEndResourceOpen),
- State::Fn(StateName::LabelEndResourceOpen),
+ name,
+ State::Next(StateName::LabelEndResourceOpen),
+ State::Next(StateName::LabelEndResourceOpen),
)
}
@@ -402,8 +402,8 @@ pub fn resource_open(tokenizer: &mut Tokenizer) -> State {
tokenizer.attempt(
StateName::DestinationStart,
- State::Fn(StateName::LabelEndResourceDestinationAfter),
- State::Fn(StateName::LabelEndResourceDestinationMissing),
+ State::Next(StateName::LabelEndResourceDestinationAfter),
+ State::Next(StateName::LabelEndResourceDestinationMissing),
)
}
}
@@ -421,11 +421,11 @@ pub fn resource_destination_after(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.token_4 = Token::Data;
tokenizer.tokenize_state.token_5 = Token::Data;
tokenizer.tokenize_state.size_other = 0;
- let state_name = space_or_tab_eol(tokenizer);
+ let name = space_or_tab_eol(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::LabelEndResourceBetween),
- State::Fn(StateName::LabelEndResourceEnd),
+ name,
+ State::Next(StateName::LabelEndResourceBetween),
+ State::Next(StateName::LabelEndResourceEnd),
)
}
@@ -454,7 +454,7 @@ pub fn resource_between(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.token_3 = Token::ResourceTitleString;
tokenizer.attempt(
StateName::TitleStart,
- State::Fn(StateName::LabelEndResourceTitleAfter),
+ State::Next(StateName::LabelEndResourceTitleAfter),
State::Nok,
)
}
@@ -472,11 +472,11 @@ pub fn resource_title_after(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.token_1 = Token::Data;
tokenizer.tokenize_state.token_2 = Token::Data;
tokenizer.tokenize_state.token_3 = Token::Data;
- let state_name = space_or_tab_eol(tokenizer);
+ let name = space_or_tab_eol(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::LabelEndResourceEnd),
- State::Fn(StateName::LabelEndResourceEnd),
+ name,
+ State::Next(StateName::LabelEndResourceEnd),
+ State::Next(StateName::LabelEndResourceEnd),
)
}
@@ -513,7 +513,7 @@ pub fn reference_full(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.token_3 = Token::ReferenceString;
tokenizer.attempt(
StateName::LabelStart,
- State::Fn(StateName::LabelEndReferenceFullAfter),
+ State::Next(StateName::LabelEndReferenceFullAfter),
State::Nok,
)
}
@@ -572,7 +572,7 @@ pub fn reference_collapsed(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::ReferenceMarker);
tokenizer.consume();
tokenizer.exit(Token::ReferenceMarker);
- State::Fn(StateName::LabelEndReferenceCollapsedOpen)
+ State::Next(StateName::LabelEndReferenceCollapsedOpen)
}
_ => State::Nok,
}
diff --git a/src/construct/label_start_image.rs b/src/construct/label_start_image.rs
index 4fcf8c2..1730fc3 100644
--- a/src/construct/label_start_image.rs
+++ b/src/construct/label_start_image.rs
@@ -45,7 +45,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LabelImageMarker);
tokenizer.consume();
tokenizer.exit(Token::LabelImageMarker);
- State::Fn(StateName::LabelStartImageOpen)
+ State::Next(StateName::LabelStartImageOpen)
}
_ => State::Nok,
}
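
The hunk above shows the recurring enter/consume/exit shape: a marker byte is wrapped in a pair of token events before the state hands off via `State::Next`. A toy illustration, with a hypothetical `Event` type in place of markdown-rs's real token and event machinery:

#[derive(Debug, PartialEq)]
enum Event {
    Enter(&'static str),
    Exit(&'static str),
    Byte(u8),
}

struct Tokenizer<'a> {
    bytes: &'a [u8],
    index: usize,
    events: Vec<Event>,
}

impl<'a> Tokenizer<'a> {
    fn enter(&mut self, token: &'static str) {
        self.events.push(Event::Enter(token));
    }
    fn exit(&mut self, token: &'static str) {
        self.events.push(Event::Exit(token));
    }
    fn consume(&mut self) {
        self.events.push(Event::Byte(self.bytes[self.index]));
        self.index += 1;
    }
}

fn main() {
    let mut tokenizer = Tokenizer { bytes: b"![", index: 0, events: Vec::new() };
    // Wrap the `!` marker in an enter/exit pair, then hand off.
    tokenizer.enter("LabelImageMarker");
    tokenizer.consume();
    tokenizer.exit("LabelImageMarker");
    assert_eq!(tokenizer.index, 1);
    assert_eq!(
        tokenizer.events,
        vec![
            Event::Enter("LabelImageMarker"),
            Event::Byte(b'!'),
            Event::Exit("LabelImageMarker"),
        ]
    );
}
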
diff --git a/src/construct/list.rs b/src/construct/list.rs
index 9e4b105..1aec032 100644
--- a/src/construct/list.rs
+++ b/src/construct/list.rs
@@ -62,7 +62,7 @@ use crate::util::{
pub fn start(tokenizer: &mut Tokenizer) -> State {
if tokenizer.parse_state.constructs.list {
tokenizer.enter(Token::ListItem);
- let state_name = space_or_tab_min_max(
+ let name = space_or_tab_min_max(
tokenizer,
0,
if tokenizer.parse_state.constructs.code_indented {
@@ -71,7 +71,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
usize::MAX
},
);
- tokenizer.attempt(state_name, State::Fn(StateName::ListBefore), State::Nok)
+ tokenizer.attempt(name, State::Next(StateName::ListBefore), State::Nok)
} else {
State::Nok
}
@@ -88,8 +88,8 @@ pub fn before(tokenizer: &mut Tokenizer) -> State {
// Unordered.
Some(b'*' | b'-') => tokenizer.check(
StateName::ThematicBreakStart,
- State::Fn(StateName::ListNok),
- State::Fn(StateName::ListBeforeUnordered),
+ State::Next(StateName::ListNok),
+ State::Next(StateName::ListBeforeUnordered),
),
Some(b'+') => before_unordered(tokenizer),
// Ordered.
@@ -139,7 +139,7 @@ pub fn value(tokenizer: &mut Tokenizer) -> State {
Some(b'0'..=b'9') if tokenizer.tokenize_state.size + 1 < LIST_ITEM_VALUE_SIZE_MAX => {
tokenizer.tokenize_state.size += 1;
tokenizer.consume();
- State::Fn(StateName::ListValue)
+ State::Next(StateName::ListValue)
}
_ => {
tokenizer.tokenize_state.size = 0;
@@ -160,7 +160,7 @@ pub fn marker(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::ListItemMarker);
tokenizer.consume();
tokenizer.exit(Token::ListItemMarker);
- State::Fn(StateName::ListMarkerAfter)
+ State::Next(StateName::ListMarkerAfter)
}

/// After a list item marker.
@@ -175,8 +175,8 @@ pub fn marker_after(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.size = 1;
tokenizer.check(
StateName::BlankLineStart,
- State::Fn(StateName::ListAfter),
- State::Fn(StateName::ListMarkerAfterFilled),
+ State::Next(StateName::ListAfter),
+ State::Next(StateName::ListMarkerAfterFilled),
)
}
@@ -192,8 +192,8 @@ pub fn marker_after_filled(tokenizer: &mut Tokenizer) -> State {
// Attempt to parse up to the largest allowed indent, `nok` if there is more whitespace.
tokenizer.attempt(
StateName::ListWhitespace,
- State::Fn(StateName::ListAfter),
- State::Fn(StateName::ListPrefixOther),
+ State::Next(StateName::ListAfter),
+ State::Next(StateName::ListPrefixOther),
)
}
@@ -204,10 +204,10 @@ pub fn marker_after_filled(tokenizer: &mut Tokenizer) -> State {
/// ^
/// ```
pub fn whitespace(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab_min_max(tokenizer, 1, TAB_SIZE);
+ let name = space_or_tab_min_max(tokenizer, 1, TAB_SIZE);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::ListWhitespaceAfter),
+ name,
+ State::Next(StateName::ListWhitespaceAfter),
State::Nok,
)
}
@@ -238,7 +238,7 @@ pub fn prefix_other(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::SpaceOrTab);
tokenizer.consume();
tokenizer.exit(Token::SpaceOrTab);
- State::Fn(StateName::ListAfter)
+ State::Next(StateName::ListAfter)
}
_ => State::Nok,
}
@@ -295,8 +295,8 @@ pub fn after(tokenizer: &mut Tokenizer) -> State {
pub fn cont_start(tokenizer: &mut Tokenizer) -> State {
tokenizer.check(
StateName::BlankLineStart,
- State::Fn(StateName::ListContBlank),
- State::Fn(StateName::ListContFilled),
+ State::Next(StateName::ListContBlank),
+ State::Next(StateName::ListContFilled),
)
}
@@ -315,9 +315,9 @@ pub fn cont_blank(tokenizer: &mut Tokenizer) -> State {
if container.blank_initial {
State::Nok
} else {
- let state_name = space_or_tab_min_max(tokenizer, 0, size);
+ let name = space_or_tab_min_max(tokenizer, 0, size);
// Consume, optionally, at most `size`.
- tokenizer.attempt(state_name, State::Fn(StateName::ListOk), State::Nok)
+ tokenizer.attempt(name, State::Next(StateName::ListOk), State::Nok)
}
}
@@ -335,8 +335,8 @@ pub fn cont_filled(tokenizer: &mut Tokenizer) -> State {
    container.blank_initial = false;

    // Consume exactly `size`.
- let state_name = space_or_tab_min_max(tokenizer, size, size);
- tokenizer.attempt(state_name, State::Fn(StateName::ListOk), State::Nok)
+ let name = space_or_tab_min_max(tokenizer, size, size);
+ tokenizer.attempt(name, State::Next(StateName::ListOk), State::Nok)
}

/// A state fn to yield [`State::Ok`].
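
list.rs uses `tokenizer.check` (for `ThematicBreakStart` and `BlankLineStart`) where the other files use `attempt`: a check is pure lookahead, so the input is rewound even when the sub-construct matches, and only the chosen continuation differs. A sketch under the same kind of toy types as above (hypothetical, not the real API):

#[derive(Clone, Copy, Debug, PartialEq)]
enum StateName {
    ThematicBreak,
    ListNok,
    ListBeforeUnordered,
}

#[derive(Debug, PartialEq)]
enum State {
    Next(StateName),
    Ok,
    Nok,
}

struct Tokenizer<'a> {
    bytes: &'a [u8],
    index: usize,
}

impl<'a> Tokenizer<'a> {
    // A check never consumes, even on success; it only picks a branch.
    fn check(&mut self, name: StateName, ok: State, nok: State) -> State {
        let saved = self.index;
        let matched = self.run(name);
        self.index = saved;
        if matched { ok } else { nok }
    }

    fn run(&mut self, name: StateName) -> bool {
        match name {
            // Toy rule: three or more `*` form a thematic break.
            StateName::ThematicBreak => {
                let mut count = 0;
                while self.bytes.get(self.index) == Some(&b'*') {
                    self.index += 1;
                    count += 1;
                }
                count >= 3
            }
            _ => unreachable!("only sub-constructs run here"),
        }
    }
}

fn main() {
    let mut tokenizer = Tokenizer { bytes: b"* item", index: 0 };
    // A lone `*` is not a thematic break, so take the list branch.
    let state = tokenizer.check(
        StateName::ThematicBreak,
        State::Next(StateName::ListNok),
        State::Next(StateName::ListBeforeUnordered),
    );
    assert_eq!(state, State::Next(StateName::ListBeforeUnordered));
    assert_eq!(tokenizer.index, 0); // the lookahead consumed nothing
}
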
diff --git a/src/construct/paragraph.rs b/src/construct/paragraph.rs
index de750f4..cb3d85e 100644
--- a/src/construct/paragraph.rs
+++ b/src/construct/paragraph.rs
@@ -71,7 +71,7 @@ pub fn inside(tokenizer: &mut Tokenizer) -> State {
}
_ => {
tokenizer.consume();
- State::Fn(StateName::ParagraphInside)
+ State::Next(StateName::ParagraphInside)
}
}
}
diff --git a/src/construct/partial_bom.rs b/src/construct/partial_bom.rs
index b32b7f9..b2f6e1f 100644
--- a/src/construct/partial_bom.rs
+++ b/src/construct/partial_bom.rs
@@ -45,7 +45,7 @@ pub fn inside(tokenizer: &mut Tokenizer) -> State {
tokenizer.tokenize_state.size = 0;
State::Ok
} else {
- State::Fn(StateName::BomInside)
+ State::Next(StateName::BomInside)
}
} else {
tokenizer.tokenize_state.size = 0;
diff --git a/src/construct/partial_data.rs b/src/construct/partial_data.rs
index 1cb5e61..5450ff2 100644
--- a/src/construct/partial_data.rs
+++ b/src/construct/partial_data.rs
@@ -21,7 +21,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
Some(byte) if tokenizer.tokenize_state.stop.contains(&byte) => {
tokenizer.enter(Token::Data);
tokenizer.consume();
- State::Fn(StateName::DataInside)
+ State::Next(StateName::DataInside)
}
_ => at_break(tokenizer),
}
@@ -40,7 +40,7 @@ pub fn at_break(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LineEnding);
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::DataAtBreak)
+ State::Next(StateName::DataAtBreak)
}
Some(byte) if tokenizer.tokenize_state.stop.contains(&byte) => {
tokenizer.register_resolver_before("data".to_string(), Box::new(resolve_data));
@@ -71,7 +71,7 @@ pub fn inside(tokenizer: &mut Tokenizer) -> State {
at_break(tokenizer)
} else {
tokenizer.consume();
- State::Fn(StateName::DataInside)
+ State::Next(StateName::DataInside)
}
}
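
partial_data.rs shows why the variant is a name at all: `inside` returns `State::Next(StateName::DataInside)` instead of calling itself, and a flat driver loop dispatches names until the construct settles on `Ok` or `Nok`, so no recursion or boxed closures are needed. A self-contained toy model (hypothetical simplified types):

#[derive(Clone, Copy)]
enum StateName {
    DataStart,
    DataInside,
}

enum State {
    Next(StateName),
    Ok,
    Nok,
}

struct Tokenizer<'a> {
    bytes: &'a [u8],
    index: usize,
    stop: &'a [u8],
}

// Resolve a name to its state function, mirroring how a `StateName`
// stands in for a `fn(&mut Tokenizer) -> State`.
fn call(t: &mut Tokenizer, name: StateName) -> State {
    match name {
        StateName::DataStart => start(t),
        StateName::DataInside => inside(t),
    }
}

fn start(t: &mut Tokenizer) -> State {
    match t.bytes.get(t.index) {
        Some(_) => State::Next(StateName::DataInside),
        None => State::Nok,
    }
}

fn inside(t: &mut Tokenizer) -> State {
    match t.bytes.get(t.index) {
        None => State::Ok,
        Some(byte) if t.stop.contains(byte) => State::Ok,
        Some(_) => {
            t.index += 1; // consume one data byte
            State::Next(StateName::DataInside)
        }
    }
}

fn main() {
    let mut tokenizer = Tokenizer { bytes: b"abc<rest", index: 0, stop: b"<" };
    let mut state = State::Next(StateName::DataStart);
    // The trampoline: run until the construct settles.
    while let State::Next(name) = state {
        state = call(&mut tokenizer, name);
    }
    assert!(matches!(state, State::Ok));
    assert_eq!(tokenizer.index, 3); // stopped at `<`
}
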
diff --git a/src/construct/partial_destination.rs b/src/construct/partial_destination.rs
index e8818a0..e4cfdc3 100644
--- a/src/construct/partial_destination.rs
+++ b/src/construct/partial_destination.rs
@@ -90,7 +90,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(tokenizer.tokenize_state.token_3.clone());
tokenizer.consume();
tokenizer.exit(tokenizer.tokenize_state.token_3.clone());
- State::Fn(StateName::DestinationEnclosedBefore)
+ State::Next(StateName::DestinationEnclosedBefore)
}
// ASCII control, space, closing paren, but *not* `\0`.
None | Some(0x01..=0x1F | b' ' | b')' | 0x7F) => State::Nok,
@@ -141,11 +141,11 @@ pub fn enclosed(tokenizer: &mut Tokenizer) -> State {
}
Some(b'\\') => {
tokenizer.consume();
- State::Fn(StateName::DestinationEnclosedEscape)
+ State::Next(StateName::DestinationEnclosedEscape)
}
_ => {
tokenizer.consume();
- State::Fn(StateName::DestinationEnclosed)
+ State::Next(StateName::DestinationEnclosed)
}
}
}
@@ -160,7 +160,7 @@ pub fn enclosed_escape(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'<' | b'>' | b'\\') => {
tokenizer.consume();
- State::Fn(StateName::DestinationEnclosed)
+ State::Next(StateName::DestinationEnclosed)
}
_ => enclosed(tokenizer),
}
@@ -185,7 +185,7 @@ pub fn raw(tokenizer: &mut Tokenizer) -> State {
Some(b'(') if tokenizer.tokenize_state.size < tokenizer.tokenize_state.size_other => {
tokenizer.consume();
tokenizer.tokenize_state.size += 1;
- State::Fn(StateName::DestinationRaw)
+ State::Next(StateName::DestinationRaw)
}
// ASCII control (but *not* `\0`) and space and `(`.
None | Some(0x01..=0x1F | b' ' | b'(' | 0x7F) => {
@@ -195,15 +195,15 @@ pub fn raw(tokenizer: &mut Tokenizer) -> State {
Some(b')') => {
tokenizer.consume();
tokenizer.tokenize_state.size -= 1;
- State::Fn(StateName::DestinationRaw)
+ State::Next(StateName::DestinationRaw)
}
Some(b'\\') => {
tokenizer.consume();
- State::Fn(StateName::DestinationRawEscape)
+ State::Next(StateName::DestinationRawEscape)
}
Some(_) => {
tokenizer.consume();
- State::Fn(StateName::DestinationRaw)
+ State::Next(StateName::DestinationRaw)
}
}
}
@@ -218,7 +218,7 @@ pub fn raw_escape(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'(' | b')' | b'\\') => {
tokenizer.consume();
- State::Fn(StateName::DestinationRaw)
+ State::Next(StateName::DestinationRaw)
}
_ => raw(tokenizer),
}
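
The raw-destination states above track paren balance in `tokenize_state.size`, capped by `size_other`, and treat `\` as an escape. The same rule collapsed into one hypothetical helper (a sketch of the balance logic, not the construct's real code):

fn raw_destination_len(bytes: &[u8], max_balance: usize) -> Option<usize> {
    let mut size = 0; // currently open `(`, like `tokenize_state.size`
    let mut index = 0;
    while let Some(&byte) = bytes.get(index) {
        match byte {
            b'(' if size < max_balance => size += 1,
            b'(' => return None,                // nested too deep
            b')' if size == 0 => break,         // closes the resource itself
            b')' => size -= 1,
            b'\\' => index += 1,                // skip the escaped byte
            b' ' | 0x01..=0x1F | 0x7F => break, // space/control ends it
            _ => {}
        }
        index += 1;
    }
    if size == 0 { Some(index) } else { None }
}

fn main() {
    assert_eq!(raw_destination_len(b"a(b)c)", 32), Some(5));
    assert_eq!(raw_destination_len(b"a(b", 32), None); // unbalanced
}
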
diff --git a/src/construct/partial_label.rs b/src/construct/partial_label.rs
index 070bdc0..718af06 100644
--- a/src/construct/partial_label.rs
+++ b/src/construct/partial_label.rs
@@ -78,7 +78,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.consume();
tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
tokenizer.enter(tokenizer.tokenize_state.token_3.clone());
- State::Fn(StateName::LabelAtBreak)
+ State::Next(StateName::LabelAtBreak)
}
_ => State::Nok,
}
@@ -102,7 +102,7 @@ pub fn at_break(tokenizer: &mut Tokenizer) -> State {
} else {
match tokenizer.current {
Some(b'\n') => {
- let state_name = space_or_tab_eol_with_options(
+ let name = space_or_tab_eol_with_options(
tokenizer,
EolOptions {
content_type: Some(ContentType::String),
@@ -110,9 +110,9 @@ pub fn at_break(tokenizer: &mut Tokenizer) -> State {
},
);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::LabelEolAfter),
- State::Fn(StateName::LabelAtBlankLine),
+ name,
+ State::Next(StateName::LabelEolAfter),
+ State::Next(StateName::LabelAtBlankLine),
)
}
Some(b']') => {
@@ -177,7 +177,7 @@ pub fn inside(tokenizer: &mut Tokenizer) -> State {
if !tokenizer.tokenize_state.seen && !matches!(byte, b'\t' | b' ') {
tokenizer.tokenize_state.seen = true;
}
- State::Fn(if matches!(byte, b'\\') {
+ State::Next(if matches!(byte, b'\\') {
StateName::LabelEscape
} else {
StateName::LabelInside
@@ -198,7 +198,7 @@ pub fn escape(tokenizer: &mut Tokenizer) -> State {
Some(b'[' | b'\\' | b']') => {
tokenizer.consume();
tokenizer.tokenize_state.size += 1;
- State::Fn(StateName::LabelInside)
+ State::Next(StateName::LabelInside)
}
_ => inside(tokenizer),
}
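
`State::Next(if matches!(byte, b'\\') { ... } else { ... })` in partial_label.rs highlights a side benefit of the rename: since the next state is plain data, it can be chosen with an ordinary expression. A minimal toy version (hypothetical state names):

#[derive(Clone, Copy, Debug, PartialEq)]
enum StateName {
    Inside,
    Escape,
}

enum State {
    Next(StateName),
    Ok,
}

// Pick the follow-up state based on the byte just consumed.
fn after_consume(byte: u8) -> State {
    State::Next(if matches!(byte, b'\\') {
        StateName::Escape
    } else {
        StateName::Inside
    })
}

fn main() {
    assert!(matches!(after_consume(b'\\'), State::Next(StateName::Escape)));
    assert!(matches!(after_consume(b'a'), State::Next(StateName::Inside)));
}
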
diff --git a/src/construct/partial_non_lazy_continuation.rs b/src/construct/partial_non_lazy_continuation.rs
index 6d5cd7a..9d19860 100644
--- a/src/construct/partial_non_lazy_continuation.rs
+++ b/src/construct/partial_non_lazy_continuation.rs
@@ -26,7 +26,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(Token::LineEnding);
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::NonLazyContinuationAfter)
+ State::Next(StateName::NonLazyContinuationAfter)
}
_ => State::Nok,
}
diff --git a/src/construct/partial_space_or_tab.rs b/src/construct/partial_space_or_tab.rs
index 0b63b0e..1ca2520 100644
--- a/src/construct/partial_space_or_tab.rs
+++ b/src/construct/partial_space_or_tab.rs
@@ -133,7 +133,7 @@ pub fn inside(tokenizer: &mut Tokenizer) -> State {
{
tokenizer.consume();
tokenizer.tokenize_state.space_or_tab_size += 1;
- State::Fn(StateName::SpaceOrTabInside)
+ State::Next(StateName::SpaceOrTabInside)
}
_ => {
tokenizer.exit(tokenizer.tokenize_state.space_or_tab_token.clone());
@@ -166,7 +166,7 @@ pub fn after(tokenizer: &mut Tokenizer) -> State {
}

pub fn eol_start(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab_with_options(
+ let name = space_or_tab_with_options(
tokenizer,
Options {
kind: Token::SpaceOrTab,
@@ -181,9 +181,9 @@ pub fn eol_start(tokenizer: &mut Tokenizer) -> State {
);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::SpaceOrTabEolAfterFirst),
- State::Fn(StateName::SpaceOrTabEolAtEol),
+ name,
+ State::Next(StateName::SpaceOrTabEolAfterFirst),
+ State::Next(StateName::SpaceOrTabEolAtEol),
)
}
@@ -231,7 +231,7 @@ pub fn eol_at_eol(tokenizer: &mut Tokenizer) -> State {
tokenizer.consume();
tokenizer.exit(Token::LineEnding);
- State::Fn(StateName::SpaceOrTabEolAfterEol)
+ State::Next(StateName::SpaceOrTabEolAfterEol)
} else {
let ok = tokenizer.tokenize_state.space_or_tab_eol_ok;
tokenizer.tokenize_state.space_or_tab_eol_content_type = None;
@@ -254,7 +254,7 @@ pub fn eol_at_eol(tokenizer: &mut Tokenizer) -> State {
/// ```
#[allow(clippy::needless_pass_by_value)]
pub fn eol_after_eol(tokenizer: &mut Tokenizer) -> State {
- let state_name = space_or_tab_with_options(
+ let name = space_or_tab_with_options(
tokenizer,
Options {
kind: Token::SpaceOrTab,
@@ -268,9 +268,9 @@ pub fn eol_after_eol(tokenizer: &mut Tokenizer) -> State {
},
);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::SpaceOrTabEolAfterMore),
- State::Fn(StateName::SpaceOrTabEolAfterMore),
+ name,
+ State::Next(StateName::SpaceOrTabEolAfterMore),
+ State::Next(StateName::SpaceOrTabEolAfterMore),
)
}
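
The repeated `let name = space_or_tab_...(tokenizer)` edits reflect the factory pattern these helpers use: a name cannot capture options the way a closure could, so the helper stashes its configuration on the tokenizer and returns the `StateName` for `attempt` to run. A simplified sketch (hypothetical field and type names):

#[derive(Clone, Copy, Debug, PartialEq)]
enum StateName {
    SpaceOrTabStart,
}

struct TokenizeState {
    space_or_tab_min: usize,
    space_or_tab_max: usize,
}

struct Tokenizer {
    tokenize_state: TokenizeState,
}

fn space_or_tab_min_max(tokenizer: &mut Tokenizer, min: usize, max: usize) -> StateName {
    // Stash configuration where the state functions can read it back.
    tokenizer.tokenize_state.space_or_tab_min = min;
    tokenizer.tokenize_state.space_or_tab_max = max;
    StateName::SpaceOrTabStart
}

fn main() {
    let mut tokenizer = Tokenizer {
        tokenize_state: TokenizeState { space_or_tab_min: 0, space_or_tab_max: 0 },
    };
    // `name` is now a plain value that can be stored or passed to `attempt`.
    let name = space_or_tab_min_max(&mut tokenizer, 1, 4);
    assert_eq!(name, StateName::SpaceOrTabStart);
    assert_eq!(tokenizer.tokenize_state.space_or_tab_min, 1);
    assert_eq!(tokenizer.tokenize_state.space_or_tab_max, 4);
}
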
diff --git a/src/construct/partial_title.rs b/src/construct/partial_title.rs
index 1d3e9b2..9521d32 100644
--- a/src/construct/partial_title.rs
+++ b/src/construct/partial_title.rs
@@ -50,7 +50,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
tokenizer.enter(tokenizer.tokenize_state.token_2.clone());
tokenizer.consume();
tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
- State::Fn(StateName::TitleBegin)
+ State::Next(StateName::TitleBegin)
}
_ => State::Nok,
}
@@ -98,7 +98,7 @@ pub fn at_break(tokenizer: &mut Tokenizer) -> State {
State::Nok
}
Some(b'\n') => {
- let state_name = space_or_tab_eol_with_options(
+ let name = space_or_tab_eol_with_options(
tokenizer,
EolOptions {
content_type: Some(ContentType::String),
@@ -107,9 +107,9 @@ pub fn at_break(tokenizer: &mut Tokenizer) -> State {
);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::TitleAfterEol),
- State::Fn(StateName::TitleAtBlankLine),
+ name,
+ State::Next(StateName::TitleAfterEol),
+ State::Next(StateName::TitleAtBlankLine),
)
}
Some(b'"' | b'\'' | b')')
@@ -166,7 +166,7 @@ pub fn inside(tokenizer: &mut Tokenizer) -> State {
}
Some(byte) => {
tokenizer.consume();
- State::Fn(if matches!(byte, b'\\') {
+ State::Next(if matches!(byte, b'\\') {
StateName::TitleEscape
} else {
StateName::TitleInside
@@ -185,7 +185,7 @@ pub fn escape(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'"' | b'\'' | b')') => {
tokenizer.consume();
- State::Fn(StateName::TitleInside)
+ State::Next(StateName::TitleInside)
}
_ => inside(tokenizer),
}
diff --git a/src/construct/thematic_break.rs b/src/construct/thematic_break.rs
index 30fabe4..beefe5b 100644
--- a/src/construct/thematic_break.rs
+++ b/src/construct/thematic_break.rs
@@ -62,7 +62,7 @@ use crate::tokenizer::{State, StateName, Tokenizer};
pub fn start(tokenizer: &mut Tokenizer) -> State {
if tokenizer.parse_state.constructs.thematic_break {
tokenizer.enter(Token::ThematicBreak);
- let state_name = space_or_tab_min_max(
+ let name = space_or_tab_min_max(
tokenizer,
0,
if tokenizer.parse_state.constructs.code_indented {
@@ -73,8 +73,8 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::ThematicBreakBefore),
+ name,
+ State::Next(StateName::ThematicBreakBefore),
State::Nok,
)
} else {
@@ -141,15 +141,15 @@ pub fn sequence(tokenizer: &mut Tokenizer) -> State {
{
tokenizer.consume();
tokenizer.tokenize_state.size += 1;
- State::Fn(StateName::ThematicBreakSequence)
+ State::Next(StateName::ThematicBreakSequence)
}
_ => {
tokenizer.exit(Token::ThematicBreakSequence);
- let state_name = space_or_tab(tokenizer);
+ let name = space_or_tab(tokenizer);
tokenizer.attempt(
- state_name,
- State::Fn(StateName::ThematicBreakAtBreak),
- State::Fn(StateName::ThematicBreakAtBreak),
+ name,
+ State::Next(StateName::ThematicBreakAtBreak),
+ State::Next(StateName::ThematicBreakAtBreak),
)
}
}
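
thematic_break.rs closes out the same sequence-counting shape: `tokenize_state.size` counts markers until the CommonMark minimum of three is met, with interior spaces and tabs allowed. A standalone sketch of just that rule (hypothetical helper, ignoring leading-indent limits):

const THEMATIC_BREAK_MARKER_COUNT_MIN: usize = 3;

fn is_thematic_break(line: &[u8]) -> bool {
    let mut size = 0;
    let mut marker = None;
    for &byte in line {
        match byte {
            // All markers in one break must be the same byte.
            b'*' | b'-' | b'_' if marker.is_none() || marker == Some(byte) => {
                marker = Some(byte);
                size += 1;
            }
            b' ' | b'\t' => {} // interior whitespace is allowed
            _ => return false,
        }
    }
    size >= THEMATIC_BREAK_MARKER_COUNT_MIN
}

fn main() {
    assert!(is_thematic_break(b"* * *"));
    assert!(is_thematic_break(b"---"));
    assert!(!is_thematic_break(b"**"));  // too few markers
    assert!(!is_thematic_break(b"*-*")); // mixed markers
}
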