about summary refs log tree commit diff stats
path: root/src
diff options
context:
space:
mode:
authorLibravatar Titus Wormer <tituswormer@gmail.com>2022-07-08 13:26:23 +0200
committerLibravatar Titus Wormer <tituswormer@gmail.com>2022-07-08 13:26:23 +0200
commitc1b325a6dcf4bb8795dd2e5b2cdb1dcfcf61faf5 (patch)
treea94fc69206c3d493028845030f5d9beb3fce48bd /src
parentbd0cb0d0395abb06941960938aacc3639148a96c (diff)
downloadmarkdown-rs-c1b325a6dcf4bb8795dd2e5b2cdb1dcfcf61faf5.tar.gz
markdown-rs-c1b325a6dcf4bb8795dd2e5b2cdb1dcfcf61faf5.tar.bz2
markdown-rs-c1b325a6dcf4bb8795dd2e5b2cdb1dcfcf61faf5.zip
Fix closing of flow when exiting containers
Diffstat (limited to '')
-rw-r--r--src/construct/code_fenced.rs1
-rw-r--r--src/content/document.rs45
-rw-r--r--src/tokenizer.rs44
3 files changed, 63 insertions, 27 deletions
diff --git a/src/construct/code_fenced.rs b/src/construct/code_fenced.rs
index e2165a9..c7b2334 100644
--- a/src/construct/code_fenced.rs
+++ b/src/construct/code_fenced.rs
@@ -568,6 +568,7 @@ fn content_continue(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateF
/// console.log('1')
/// ~~~|
/// ```
+#[allow(clippy::needless_pass_by_value)]
fn after(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
tokenizer.exit(Token::CodeFenced);
// Feel free to interrupt.
diff --git a/src/content/document.rs b/src/content/document.rs
index feffb62..2934890 100644
--- a/src/content/document.rs
+++ b/src/content/document.rs
@@ -155,7 +155,7 @@ fn document_continue(
// assert(point, 'could not find previous flow chunk')
let size = info.continued;
- exit_containers(tokenizer, &mut info, size);
+ info = exit_containers(tokenizer, info, size);
// // Fix positions.
// let index = indexBeforeExits
@@ -195,8 +195,7 @@ fn check_new_containers(
// step 1 before creating the new block as a child of the last matched
// block.
if info.continued == info.stack.len() {
- println!(" to do: interrupt ({:?})?", tokenizer.interrupt);
- // // No need to `check` whether there’s a container, of `exitContainers`
+ // // No need to `check` whether there’s a container, if `exitContainers`
// // would be moot.
// // We can instead immediately `attempt` to parse one.
// if (!childFlow) {
@@ -211,6 +210,7 @@ fn check_new_containers(
return flow_start(tokenizer, code, info);
}
+ println!(" to do: interrupt ({:?})?", tokenizer.interrupt);
// // If we do have flow, it could still be a blank line,
// // but we’d be interrupting it w/ a new container if there’s a current
// // construct.
@@ -237,17 +237,26 @@ fn there_is_a_new_container(
name: String,
) -> StateFnResult {
println!("there_is_a_new_container");
- println!(" todo: close_flow");
- // if (childFlow) closeFlow()
let size = info.continued;
- exit_containers(tokenizer, &mut info, size);
+ info = exit_containers(tokenizer, info, size);
info.stack.push(name);
info.continued += 1;
document_continued(tokenizer, code, info)
}
/// Exit open containers.
-fn exit_containers(tokenizer: &mut Tokenizer, info: &mut DocumentInfo, size: usize) {
+fn exit_containers(tokenizer: &mut Tokenizer, mut info: DocumentInfo, size: usize) -> DocumentInfo {
+ if info.stack.len() > size {
+ println!("closing flow. To do: are these resulting exits okay?");
+ let index_before = tokenizer.events.len();
+ let result = tokenizer.flush(info.next);
+ info.next = Box::new(flow); // This is weird but Rust needs a function there.
+ assert!(matches!(result.0, State::Ok));
+ assert!(result.1.is_none());
+ let shift = tokenizer.events.len() - index_before;
+ info.last_line_ending_index = info.last_line_ending_index.map(|d| d + shift);
+ }
+
while info.stack.len() > size {
let name = info.stack.pop().unwrap();
@@ -281,6 +290,8 @@ fn exit_containers(tokenizer: &mut Tokenizer, info: &mut DocumentInfo, size: usi
while index < token_types.len() {
let token_type = &token_types[index];
+ println!("injected exit for `{:?}`", token_type);
+
info.map.add(
insert_index,
0,
@@ -314,6 +325,8 @@ fn exit_containers(tokenizer: &mut Tokenizer, info: &mut DocumentInfo, size: usi
index += 1;
}
}
+
+ info
}
fn there_is_no_new_container(
@@ -373,14 +386,15 @@ fn container_continue(
}
fn flow_start(tokenizer: &mut Tokenizer, code: Code, mut info: DocumentInfo) -> StateFnResult {
- println!("flow_start");
- let next = info.next;
- info.next = Box::new(flow); // This is weird but Rust needs a function there.
+ println!("flow_start {:?}", code);
let size = info.continued;
- exit_containers(tokenizer, &mut info, size);
+ info = exit_containers(tokenizer, info, size);
- tokenizer.go_until(next, eof_eol, move |(state, remainder)| {
+ let state = info.next;
+ info.next = Box::new(flow); // This is weird but Rust needs a function there.
+
+ tokenizer.go_until(state, eof_eol, move |(state, remainder)| {
(
State::Fn(Box::new(move |t, c| flow_end(t, c, info, state))),
remainder,
@@ -414,10 +428,15 @@ fn flow_end(
info.last_line_ending_index = None;
}
+ println!(
+ "set `last_line_ending_index` to {:?}",
+ info.last_line_ending_index
+ );
+
match result {
State::Ok => {
println!("State::Ok");
- exit_containers(tokenizer, &mut info, 0);
+ info = exit_containers(tokenizer, info, 0);
tokenizer.events = info.map.consume(&mut tokenizer.events);
(State::Ok, Some(vec![code]))
}
diff --git a/src/tokenizer.rs b/src/tokenizer.rs
index efd8068..dcbcb09 100644
--- a/src/tokenizer.rs
+++ b/src/tokenizer.rs
@@ -413,12 +413,7 @@ impl<'a> Tokenizer<'a> {
vec![],
|result: (Vec<Code>, Vec<Code>), ok, tokenizer: &mut Tokenizer, _state| {
if ok {
- feed_impl(
- tokenizer,
- &if ok { result.1 } else { result.0 },
- after,
- false,
- )
+ feed_impl(tokenizer, &if ok { result.1 } else { result.0 }, after)
} else {
(State::Nok, None)
}
@@ -468,7 +463,7 @@ impl<'a> Tokenizer<'a> {
vec![],
|result: (Vec<Code>, Vec<Code>), ok, tokenizer: &mut Tokenizer, _state| {
tokenizer.free(previous);
- feed_impl(tokenizer, &result.0, done(ok), false)
+ feed_impl(tokenizer, &result.0, done(ok))
},
)
}
@@ -508,7 +503,7 @@ impl<'a> Tokenizer<'a> {
codes,
tokenizer.point
);
- feed_impl(tokenizer, &codes, done(ok), false)
+ feed_impl(tokenizer, &codes, done(ok))
},
)
}
@@ -556,9 +551,16 @@ impl<'a> Tokenizer<'a> {
) -> StateFnResult {
assert!(!self.drained, "cannot feed after drain");
- let result = feed_impl(self, codes, start, drain);
+ let mut result = feed_impl(self, codes, start);
if drain {
+ let func = match result.0 {
+ State::Fn(func) => func,
+ _ => unreachable!("expected next state"),
+ };
+
+ result = flush_impl(self, func);
+
self.drained = true;
while !self.resolvers.is_empty() {
@@ -569,6 +571,14 @@ impl<'a> Tokenizer<'a> {
result
}
+
+ /// To do.
+ pub fn flush(
+ &mut self,
+ start: impl FnOnce(&mut Tokenizer, Code) -> StateFnResult + 'static,
+ ) -> StateFnResult {
+ flush_impl(self, start)
+ }
}
/// Internal utility to wrap states to also capture codes.
@@ -635,7 +645,6 @@ fn feed_impl(
tokenizer: &mut Tokenizer,
codes: &[Code],
start: impl FnOnce(&mut Tokenizer, Code) -> StateFnResult + 'static,
- drain: bool,
) -> StateFnResult {
let codes = codes;
let mut state = State::Fn(Box::new(start));
@@ -665,10 +674,17 @@ fn feed_impl(
}
}
- // Yield to a higher loop if we shouldn’t feed EOFs.
- if !drain {
- return check_statefn_result((state, Some(codes[index..].to_vec())));
- }
+ // Yield to a higher loop.
+ check_statefn_result((state, Some(codes[index..].to_vec())))
+}
+
+/// To do.
+fn flush_impl(
+ tokenizer: &mut Tokenizer,
+ start: impl FnOnce(&mut Tokenizer, Code) -> StateFnResult + 'static,
+) -> StateFnResult {
+ let mut state = State::Fn(Box::new(start));
+ tokenizer.consumed = true;
loop {
// Feed EOF.