about summary refs log tree commit diff stats
path: root/src/content/flow.rs
diff options
context:
space:
mode:
author: Titus Wormer <tituswormer@gmail.com> 2022-06-10 16:47:43 +0200
committer: Titus Wormer <tituswormer@gmail.com> 2022-06-10 16:47:43 +0200
commit17f4eec55ad0a5f74aedbcff6c2f0119ad52e584 (patch)
tree1839c796de977421456d1b9006f2f2c1e23cf809 /src/content/flow.rs
parent5133042973f31a3992f216e591d840bb491bfd45 (diff)
downloadmarkdown-rs-17f4eec55ad0a5f74aedbcff6c2f0119ad52e584.tar.gz
markdown-rs-17f4eec55ad0a5f74aedbcff6c2f0119ad52e584.tar.bz2
markdown-rs-17f4eec55ad0a5f74aedbcff6c2f0119ad52e584.zip
Add text content type
* Add character references and character escapes in text
* Add recursive subtokenization
Diffstat (limited to 'src/content/flow.rs')
-rw-r--r--src/content/flow.rs14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/src/content/flow.rs b/src/content/flow.rs
index 0d1bd22..6fa8c25 100644
--- a/src/content/flow.rs
+++ b/src/content/flow.rs
@@ -34,7 +34,11 @@ use crate::util::get_span;
pub fn flow(codes: &[Code], point: Point, index: usize) -> Vec<Event> {
let mut tokenizer = Tokenizer::new(point, index);
tokenizer.feed(codes, Box::new(start), true);
- subtokenize(tokenizer.events, codes)
+ let mut result = (tokenizer.events, false);
+ while !result.1 {
+ result = subtokenize(result.0, codes);
+ }
+ result.0
}
/// Before flow.
@@ -165,7 +169,7 @@ fn content_before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
}
_ => {
tokenizer.enter(TokenType::Content);
- tokenizer.enter(TokenType::ContentChunk);
+ tokenizer.enter(TokenType::ChunkContent);
content(tokenizer, code, tokenizer.events.len() - 1)
}
}
@@ -259,8 +263,8 @@ fn continuation_construct_after_prefix(tokenizer: &mut Tokenizer, code: Code) ->
fn content_continue(tokenizer: &mut Tokenizer, code: Code, previous_index: usize) -> StateFnResult {
tokenizer.consume(code);
- tokenizer.exit(TokenType::ContentChunk);
- tokenizer.enter(TokenType::ContentChunk);
+ tokenizer.exit(TokenType::ChunkContent);
+ tokenizer.enter(TokenType::ChunkContent);
let next_index = tokenizer.events.len() - 1;
tokenizer.events[previous_index].next = Some(next_index);
tokenizer.events[next_index].previous = Some(previous_index);
@@ -271,7 +275,7 @@ fn content_continue(tokenizer: &mut Tokenizer, code: Code, previous_index: usize
}
fn content_end(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
- tokenizer.exit(TokenType::ContentChunk);
+ tokenizer.exit(TokenType::ChunkContent);
tokenizer.exit(TokenType::Content);
after(tokenizer, code)
}