//! Paragraph is a construct that occurs in the [flow] content type.
//!
//! They’re formed with the following BNF:
//!
//! ```bnf
//! ; Restriction: lines cannot start other flow constructs.
//! ; Restriction: lines cannot be blank.
//! paragraph ::= 1*line *( eol 1*line )
//! ```
//!
//! Paragraphs in markdown relate to the `<p>` element in HTML.
//! See [*§ 4.4.1 The `p` element* in the HTML spec][html] for more info.
//!
//! Paragraphs can contain line endings and whitespace, but they are not
//! allowed to contain blank lines, or to be blank themselves.
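//!
//! For example (an illustrative case, not taken verbatim from the spec), the
//! two adjacent lines below form a single paragraph, while the blank line
//! after them ends it and starts a new one:
//!
//! ```markdown
//! alpha
//! bravo
//!
//! charlie
//! ```
//!
//! In HTML, that corresponds to two `<p>` elements: one containing `alpha`
//! and `bravo` separated by a line ending, and one containing `charlie`.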
//!
//! The paragraph is interpreted as the [text][] content type.
//! That means that [autolinks][autolink], [code (text)][code_text], etc. are allowed.
//!
//! ## Tokens
//!
//! * [`Paragraph`][TokenType::Paragraph]
//!
//! ## References
//!
//! * [`content.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/content.js)
//! * [*§ 4.8 Paragraphs* in `CommonMark`](https://spec.commonmark.org/0.30/#paragraphs)
//!
//! [flow]: crate::content::flow
//! [text]: crate::content::text
//! [autolink]: crate::construct::autolink
//! [code_text]: crate::construct::code_text
//! [html]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-p-element

use crate::tokenizer::{
Code, ContentType, Event, EventType, State, StateFnResult, TokenType, Tokenizer,
};
use crate::util::edit_map::EditMap;

/// Before a paragraph.
///
/// ```markdown
/// |qwe
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
Code::None | Code::CarriageReturnLineFeed | Code::Char('\n' | '\r') => {
unreachable!("unexpected eol/eof")
}
_ => {
tokenizer.enter(TokenType::Paragraph);
tokenizer.enter_with_content(TokenType::Data, Some(ContentType::Text));
inside(tokenizer, code)
}
}
}

/// In a paragraph.
///
/// ```markdown
/// al|pha
/// ```
fn inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
Code::None | Code::CarriageReturnLineFeed | Code::Char('\n' | '\r') => {
tokenizer.exit(TokenType::Data);
tokenizer.exit(TokenType::Paragraph);
tokenizer.register_resolver_before("paragraph".to_string(), Box::new(resolve));
            // Whatever construct comes next would be interrupting this paragraph.
tokenizer.interrupt = true;
(State::Ok, Some(vec![code]))
}
_ => {
tokenizer.consume(code);
(State::Fn(Box::new(inside)), None)
}
}
}

/// Merge “`Paragraph`”s, which currently span a single line, into actual
/// `Paragraph`s that span multiple lines.
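///
/// For instance, two adjacent one-line paragraphs produce an event stream
/// along these lines (a rough sketch: positions and other fields omitted):
///
/// ```text
/// Enter:Paragraph Enter:Data Exit:Data Exit:Paragraph
/// Enter:LineEnding Exit:LineEnding
/// Enter:Paragraph Enter:Data Exit:Data Exit:Paragraph
/// ```
///
/// The resolver removes the middle `Exit:Paragraph`, `Enter:LineEnding`,
/// `Exit:LineEnding`, and `Enter:Paragraph` events, extends the first line’s
/// `Data` to cover the line ending, and links the `Data` events of both lines
/// together.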
pub fn resolve(tokenizer: &mut Tokenizer) -> Vec<Event> {
let mut edit_map = EditMap::new();
let len = tokenizer.events.len();
let mut index = 0;
while index < len {
let event = &tokenizer.events[index];
if event.event_type == EventType::Enter && event.token_type == TokenType::Paragraph {
            // A paragraph spans four events (`Enter:Paragraph`, `Enter:Data`,
            // `Exit:Data`, `Exit:Paragraph`), so its `Exit:Paragraph` is three
            // events ahead.
            let mut exit_index = index + 3;
            // The `Enter:Paragraph` of a directly following paragraph, with an
            // `Enter:LineEnding` and `Exit:LineEnding` in between.
            let mut enter_next_index = exit_index + 3;
// Find future `Paragraphs`.
// There will be `LineEnding` between.
while enter_next_index < len
&& tokenizer.events[enter_next_index].token_type == TokenType::Paragraph
{
// Remove Exit:Paragraph, Enter:LineEnding, Exit:LineEnding, Enter:Paragraph.
edit_map.add(exit_index, 4, vec![]);
// Add Exit:LineEnding position info to Exit:Data.
let line_ending_exit = &tokenizer.events[enter_next_index - 1];
let line_ending_point = line_ending_exit.point.clone();
let line_ending_index = line_ending_exit.index;
let data_exit = &mut tokenizer.events[exit_index - 1];
data_exit.point = line_ending_point;
data_exit.index = line_ending_index;
// Link Enter:Data on the previous line to Enter:Data on this line.
let data_enter_prev = &mut tokenizer.events[exit_index - 2];
data_enter_prev.next = Some(enter_next_index + 1);
let data_enter_next = &mut tokenizer.events[enter_next_index + 1];
data_enter_next.previous = Some(exit_index - 2);
// Potential next start.
exit_index = enter_next_index + 3;
enter_next_index = exit_index + 3;
}
// Move to `Exit:Paragraph`.
index = exit_index;
}
index += 1;
}
edit_map.consume(&mut tokenizer.events)
}