path: root/src/construct/character_escape.rs
author    Titus Wormer <tituswormer@gmail.com>    2022-07-22 17:16:38 +0200
committer Titus Wormer <tituswormer@gmail.com>    2022-07-22 17:16:38 +0200
commit    b945e43103544fc31a0755841b380358b2c161e6 (patch)
tree      80c6091c4268e6fec5cce02a08cdf6fa2b434300 /src/construct/character_escape.rs
parent    41fc406af206e21014eaaba94bcf6b1854f892b3 (diff)
Refactor to remove unneeded tuples in every states
Diffstat (limited to 'src/construct/character_escape.rs')
-rw-r--r--  src/construct/character_escape.rs  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/src/construct/character_escape.rs b/src/construct/character_escape.rs
index eb79486..8403765 100644
--- a/src/construct/character_escape.rs
+++ b/src/construct/character_escape.rs
@@ -34,7 +34,7 @@
 //! [hard_break_escape]: crate::construct::hard_break_escape

 use crate::token::Token;
-use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
+use crate::tokenizer::{Code, State, Tokenizer};

 /// Start of a character escape.
 ///
@@ -42,16 +42,16 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
 /// > | a\*b
 ///      ^
 /// ```
-pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
+pub fn start(tokenizer: &mut Tokenizer, code: Code) -> State {
     match code {
         Code::Char('\\') if tokenizer.parse_state.constructs.character_escape => {
             tokenizer.enter(Token::CharacterEscape);
             tokenizer.enter(Token::CharacterEscapeMarker);
             tokenizer.consume(code);
             tokenizer.exit(Token::CharacterEscapeMarker);
-            (State::Fn(Box::new(inside)), 0)
+            State::Fn(Box::new(inside))
         }
-        _ => (State::Nok, 0),
+        _ => State::Nok,
     }
 }
@@ -61,15 +61,15 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
 /// > | a\*b
 ///       ^
 /// ```
-fn inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
+fn inside(tokenizer: &mut Tokenizer, code: Code) -> State {
     match code {
         Code::Char(char) if char.is_ascii_punctuation() => {
             tokenizer.enter(Token::CharacterEscapeValue);
             tokenizer.consume(code);
             tokenizer.exit(Token::CharacterEscapeValue);
             tokenizer.exit(Token::CharacterEscape);
-            (State::Ok, 0)
+            State::Ok(0)
         }
-        _ => (State::Nok, 0),
+        _ => State::Nok,
     }
 }
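What this commit changes, in short: state functions such as `start` and `inside` used to return `StateFnResult`, and after the refactor they return `State` directly, with the give-back count carried on the `Ok` variant (`State::Ok(0)`) rather than in a tuple. The actual `State`, `Code`, and `Tokenizer` definitions live in src/tokenizer.rs and are not part of this diff, so the following is only a minimal, self-contained sketch inferred from the call sites above: `StateFnResult` is assumed to have been a `(State, usize)` alias, and the stubbed types and the `demo_start`/`demo_inside` functions are hypothetical, not code from the crate.

// Minimal sketch of the before/after shape implied by this diff.
// `Tokenizer` and `Code` are stubbed so the example compiles on its own;
// they are NOT the crate's real definitions.
struct Tokenizer;

enum Code {
    Char(char),
}

// Before this commit (assumption, based on the removed import): every state
// function returned a tuple pairing the state with a count, e.g.
//
//     type StateFnResult = (State, usize);
//     fn inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult { /* ... */ }

// After this commit: the tuple is gone and the count rides on `Ok`.
enum State {
    // Continue in another state function with the next code.
    Fn(Box<dyn FnOnce(&mut Tokenizer, Code) -> State>),
    // Done; the `usize` is the number of codes to give back.
    Ok(usize),
    // Not a match at this position.
    Nok,
}

// Hypothetical state functions in the new style, mirroring `start`/`inside`.
fn demo_start(_tokenizer: &mut Tokenizer, code: Code) -> State {
    match code {
        Code::Char('\\') => State::Fn(Box::new(demo_inside)),
        _ => State::Nok,
    }
}

fn demo_inside(_tokenizer: &mut Tokenizer, code: Code) -> State {
    match code {
        Code::Char(char) if char.is_ascii_punctuation() => State::Ok(0),
        _ => State::Nok,
    }
}

fn main() {
    let mut tokenizer = Tokenizer;
    // Drive two steps by hand: `\` then `*`, as in the `a\*b` example above.
    match demo_start(&mut tokenizer, Code::Char('\\')) {
        State::Fn(next) => match next(&mut tokenizer, Code::Char('*')) {
            State::Ok(back) => println!("escape matched, give back {} codes", back),
            _ => println!("no match"),
        },
        _ => println!("no match"),
    }
}

Folding the count into `State::Ok` means only states that actually finish need to say how many codes to hand back, which is presumably why the `(State, usize)` tuples could be dropped from every state function in this refactor.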