Diffstat (limited to 'src')
 src/construct/document.rs                 | 68 +++++++++++++++++++++++++-----
 src/construct/gfm_task_list_item_check.rs |  2 +-
 src/tokenizer.rs                          |  6 +++---
 3 files changed, 63 insertions(+), 13 deletions(-)
diff --git a/src/construct/document.rs b/src/construct/document.rs
index 4ef6acc..b438808 100644
--- a/src/construct/document.rs
+++ b/src/construct/document.rs
@@ -337,16 +337,65 @@ pub fn flow_end(tokenizer: &mut Tokenizer) -> State {
         state,
     );
-    let paragraph = matches!(state, State::Next(StateName::ParagraphInside))
-        || (!child.events.is_empty()
-            && child.events
-                [skip::opt_back(&child.events, child.events.len() - 1, &[Name::LineEnding])]
-            .name
-            == Name::Paragraph);
-
     tokenizer.tokenize_state.document_child_state = Some(state);
-    if child.lazy && paragraph && tokenizer.tokenize_state.document_paragraph_before {
+    // If we’re in a lazy line, and the previous (lazy or not) line is
+    // something that can be lazily continued, and this line is too, allow it.
+    //
+    // Accept:
+    //
+    // ```markdown
+    //   | * a
+    // > | b
+    //     ^
+    //   | ```
+    // ```
+    //
+    // Do not accept:
+    //
+    // ```markdown
+    //   | * # a
+    // > | b
+    //     ^
+    //   | ```
+    // ```
+    //
+    // Do not accept:
+    //
+    // ```markdown
+    //   | * a
+    // > | # b
+    //     ^
+    //   | ```
+    // ```
+    let mut document_lazy_continuation_current = false;
+    let mut stack_index = child.stack.len();
+
+    // Use two checks: (a) one for when we’re suspended in multiline
+    // constructs such as definitions, and (b) one for when the line ending
+    // was already fed and the construct was closed.
+    while !document_lazy_continuation_current && stack_index > 0 {
+        stack_index -= 1;
+        let name = &child.stack[stack_index];
+        if name == &Name::Paragraph || name == &Name::Definition {
+            document_lazy_continuation_current = true;
+        }
+    }
+
+    // (b) Each “rest” line is parsed as a paragraph, and we’ve already
+    // passed an EOL, so look back past it for a closed paragraph.
+    if !document_lazy_continuation_current && !child.events.is_empty() {
+        let before = skip::opt_back(&child.events, child.events.len() - 1, &[Name::LineEnding]);
+        let name = &child.events[before].name;
+        if name == &Name::Paragraph {
+            document_lazy_continuation_current = true;
+        }
+    }
+
+    if child.lazy
+        && tokenizer.tokenize_state.document_lazy_accepting_before
+        && document_lazy_continuation_current
+    {
         tokenizer.tokenize_state.document_continued =
             tokenizer.tokenize_state.document_container_stack.len();
     }
@@ -366,7 +415,8 @@ pub fn flow_end(tokenizer: &mut Tokenizer) -> State {
         }
         Some(_) => {
             tokenizer.tokenize_state.document_continued = 0;
-            tokenizer.tokenize_state.document_paragraph_before = paragraph;
+            tokenizer.tokenize_state.document_lazy_accepting_before =
+                document_lazy_continuation_current;
             // Containers would only be interrupting if we’ve continued.
             tokenizer.interrupt = false;
             State::Retry(StateName::DocumentContainerExistingBefore)
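
The two checks above are easier to see in isolation. Below is a minimal, self-contained sketch of the same logic; `Name` and `Event` are simplified stand-ins for the crate's types, and `lazy_continuation_current` is a hypothetical helper, not a function in the codebase:

```rust
/// Stand-in for the crate's construct name type (assumed shape).
#[derive(PartialEq)]
enum Name {
    Definition,
    LineEnding,
    Paragraph,
}

/// Stand-in for the crate's `Event` type (assumed shape).
struct Event {
    name: Name,
}

/// Model of the lazy-continuation check: a lazy line may continue if
/// (a) a paragraph or definition is still open on the stack, or
/// (b) the last event before any trailing line endings was a paragraph.
fn lazy_continuation_current(stack: &[Name], events: &[Event]) -> bool {
    // (a) Suspended in a multiline construct: an open paragraph or
    // definition anywhere on the stack allows lazy continuation.
    if stack
        .iter()
        .any(|name| matches!(name, Name::Paragraph | Name::Definition))
    {
        return true;
    }

    // (b) The line ending was already fed: skip trailing line endings
    // (as `skip::opt_back` does) and inspect the event before them.
    let mut index = events.len();
    while index > 0 && events[index - 1].name == Name::LineEnding {
        index -= 1;
    }
    index > 0 && events[index - 1].name == Name::Paragraph
}

fn main() {
    // A closed paragraph followed by a line ending accepts a lazy line…
    let events = vec![
        Event { name: Name::Paragraph },
        Event { name: Name::LineEnding },
    ];
    assert!(lazy_continuation_current(&[], &events));
    // …but an empty stack with no prior paragraph does not.
    assert!(!lazy_continuation_current(&[], &[]));
}
```

The design point, per the diff's own comments: check (a) covers constructs still open mid-parse (definitions can span lines), while check (b) covers the common case where the line ending already closed a paragraph.
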
diff --git a/src/construct/gfm_task_list_item_check.rs b/src/construct/gfm_task_list_item_check.rs
index 62ff8aa..b30659a 100644
--- a/src/construct/gfm_task_list_item_check.rs
+++ b/src/construct/gfm_task_list_item_check.rs
@@ -81,7 +81,7 @@ pub fn start(tokenizer: &mut Tokenizer) -> State {
 /// ```
 pub fn inside(tokenizer: &mut Tokenizer) -> State {
     match tokenizer.current {
-        Some(b'\t' | b' ') => {
+        Some(b'\t' | b'\n' | b' ') => {
             tokenizer.enter(Name::GfmTaskListItemValueUnchecked);
             tokenizer.consume();
             tokenizer.exit(Name::GfmTaskListItemValueUnchecked);
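
The one-byte widening of that match arm means the byte inside the brackets may now also be a line ending, not only a tab or a space, and still count as an unchecked box. A minimal sketch of the predicate, with a hypothetical helper name:

```rust
/// Hypothetical predicate mirroring the widened match arm: inside the
/// brackets, any markdown whitespace byte (tab, line ending, space)
/// now reads as an unchecked box.
fn is_unchecked_value(byte: Option<u8>) -> bool {
    matches!(byte, Some(b'\t' | b'\n' | b' '))
}

fn main() {
    assert!(is_unchecked_value(Some(b'\n'))); // newly accepted
    assert!(is_unchecked_value(Some(b' ')));  // accepted before and after
    assert!(!is_unchecked_value(Some(b'x'))); // “checked” is handled by another arm
}
```
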
diff --git a/src/tokenizer.rs b/src/tokenizer.rs
index 731b829..83514cb 100644
--- a/src/tokenizer.rs
+++ b/src/tokenizer.rs
@@ -142,8 +142,8 @@ pub struct TokenizeState<'a> {
     pub document_data_index: Option<usize>,
     /// Container exits by line number.
     pub document_exits: Vec<Option<Vec<Event>>>,
-    /// Whether the previous flow was a paragraph.
-    pub document_paragraph_before: bool,
+    /// Whether the previous flow was a paragraph or a definition.
+    pub document_lazy_accepting_before: bool,
     /// Whether this is the first paragraph (potentially after definitions) in
     /// a list item.
     /// Used for GFM task list items.
@@ -282,7 +282,7 @@ impl<'a> Tokenizer<'a> {
             document_container_stack: vec![],
             document_exits: vec![],
             document_continued: 0,
-            document_paragraph_before: false,
+            document_lazy_accepting_before: false,
             document_data_index: None,
             document_child_state: None,
             document_child: None,
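
The rename in `TokenizeState` mirrors the new check: the flag no longer records "the previous flow was a paragraph" but the broader "the previous flow accepts lazy continuation" (a paragraph or a definition). A trimmed-down sketch of how such a flag is threaded from one line to the next; the field name is taken from the diff, everything else is illustrative:

```rust
/// Trimmed-down model of the document state; the field name comes from
/// the diff above, the methods are hypothetical.
#[derive(Default)]
struct DocState {
    /// Whether the previous flow was a paragraph or a definition,
    /// i.e. something a lazy line may continue.
    document_lazy_accepting_before: bool,
}

impl DocState {
    /// Read when a lazy line arrives: both the previous line and the
    /// current one must accept lazy continuation.
    fn allows_lazy(&self, current_accepts: bool) -> bool {
        self.document_lazy_accepting_before && current_accepts
    }

    /// Written at the end of every line, carrying the answer forward.
    fn finish_line(&mut self, current_accepts: bool) {
        self.document_lazy_accepting_before = current_accepts;
    }
}

fn main() {
    let mut state = DocState::default();
    state.finish_line(true);           // line 1 ended in a paragraph
    assert!(state.allows_lazy(true));  // line 2 may be a lazy continuation
    state.finish_line(false);          // line 2 was, say, a heading
    assert!(!state.allows_lazy(true)); // line 3 may not
}
```
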