aboutsummaryrefslogtreecommitdiffstats
path: root/src/construct
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--src/construct/attention.rs6
-rw-r--r--src/construct/autolink.rs46
-rw-r--r--src/construct/blank_line.rs14
-rw-r--r--src/construct/block_quote.rs22
-rw-r--r--src/construct/character_escape.rs10
-rw-r--r--src/construct/character_reference.rs57
-rw-r--r--src/construct/code_fenced.rs145
-rw-r--r--src/construct/code_indented.rs47
-rw-r--r--src/construct/code_text.rs22
-rw-r--r--src/construct/definition.rs70
-rw-r--r--src/construct/hard_break_escape.rs8
-rw-r--r--src/construct/hard_break_trailing.rs10
-rw-r--r--src/construct/heading_atx.rs23
-rw-r--r--src/construct/heading_setext.rs39
-rw-r--r--src/construct/html_flow.rs168
-rw-r--r--src/construct/html_text.rs140
-rw-r--r--src/construct/label_end.rs99
-rw-r--r--src/construct/label_start_image.rs6
-rw-r--r--src/construct/label_start_link.rs3
-rw-r--r--src/construct/paragraph.rs6
-rw-r--r--src/construct/partial_data.rs9
-rw-r--r--src/construct/partial_destination.rs21
-rw-r--r--src/construct/partial_label.rs13
-rw-r--r--src/construct/partial_space_or_tab.rs37
-rw-r--r--src/construct/partial_title.rs28
-rw-r--r--src/construct/thematic_break.rs16
26 files changed, 605 insertions, 460 deletions
diff --git a/src/construct/attention.rs b/src/construct/attention.rs
index 1750692..e1fa82f 100644
--- a/src/construct/attention.rs
+++ b/src/construct/attention.rs
@@ -170,7 +170,8 @@ struct Sequence {
/// Before a sequence.
///
/// ```markdown
-/// |**
+/// > | **
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -185,7 +186,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a sequence.
///
/// ```markdown
-/// *|*
+/// > | **
+/// ^^
/// ```
fn inside(tokenizer: &mut Tokenizer, code: Code, marker: MarkerKind) -> StateFnResult {
match code {
diff --git a/src/construct/autolink.rs b/src/construct/autolink.rs
index db4365f..fe8f380 100644
--- a/src/construct/autolink.rs
+++ b/src/construct/autolink.rs
@@ -108,8 +108,10 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
/// Start of an autolink.
///
/// ```markdown
-/// a|<https://example.com>b
-/// a|<user@example.com>b
+/// > | a<https://example.com>b
+/// ^
+/// > | a<user@example.com>b
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -128,8 +130,10 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<`, before the protocol.
///
/// ```markdown
-/// a<|https://example.com>b
-/// a<|user@example.com>b
+/// > | a<https://example.com>b
+/// ^
+/// > | a<user@example.com>b
+/// ^
/// ```
fn open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -145,8 +149,10 @@ fn open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After the first character of the protocol or email name.
///
/// ```markdown
-/// a<h|ttps://example.com>b
-/// a<u|ser@example.com>b
+/// > | a<https://example.com>b
+/// ^
+/// > | a<user@example.com>b
+/// ^
/// ```
fn scheme_or_email_atext(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -160,8 +166,10 @@ fn scheme_or_email_atext(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult
/// Inside an ambiguous protocol or email name.
///
/// ```markdown
-/// a<ht|tps://example.com>b
-/// a<us|er@example.com>b
+/// > | a<https://example.com>b
+/// ^
+/// > | a<user@example.com>b
+/// ^
/// ```
fn scheme_inside_or_email_atext(
tokenizer: &mut Tokenizer,
@@ -191,7 +199,8 @@ fn scheme_inside_or_email_atext(
/// Inside a URL, after the protocol.
///
/// ```markdown
-/// a<https:|//example.com>b
+/// > | a<https://example.com>b
+/// ^
/// ```
fn url_inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -213,7 +222,8 @@ fn url_inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Inside email atext.
///
/// ```markdown
-/// a<user.na|me@example.com>b
+/// > | a<user.name@example.com>b
+/// ^
/// ```
fn email_atext(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -235,8 +245,8 @@ fn email_atext(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After an at-sign or a dot in the label.
///
/// ```markdown
-/// a<user.name@|example.com>b
-/// a<user.name@example.|com>b
+/// > | a<user.name@example.com>b
+/// ^ ^
/// ```
fn email_at_sign_or_dot(tokenizer: &mut Tokenizer, code: Code, size: usize) -> StateFnResult {
match code {
@@ -248,7 +258,8 @@ fn email_at_sign_or_dot(tokenizer: &mut Tokenizer, code: Code, size: usize) -> S
/// In the label, where `.` and `>` are allowed.
///
/// ```markdown
-/// a<user.name@ex|ample.com>b
+/// > | a<user.name@example.com>b
+/// ^
/// ```
fn email_label(tokenizer: &mut Tokenizer, code: Code, size: usize) -> StateFnResult {
match code {
@@ -276,7 +287,8 @@ fn email_label(tokenizer: &mut Tokenizer, code: Code, size: usize) -> StateFnRes
/// Though, this is also used in `email_label` to parse other values.
///
/// ```markdown
-/// a<user.name@ex-|ample.com>b
+/// > | a<user.name@ex-ample.com>b
+/// ^
/// ```
fn email_value(tokenizer: &mut Tokenizer, code: Code, size: usize) -> StateFnResult {
match code {
@@ -301,8 +313,10 @@ fn email_value(tokenizer: &mut Tokenizer, code: Code, size: usize) -> StateFnRes
/// At the `>`.
///
/// ```markdown
-/// a<https://example.com|>b
-/// a<user@example.com|>b
+/// > | a<https://example.com>b
+/// ^
+/// > | a<user@example.com>b
+/// ^
/// ```
fn end(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/blank_line.rs b/src/construct/blank_line.rs
index bbfb61f..cf51aec 100644
--- a/src/construct/blank_line.rs
+++ b/src/construct/blank_line.rs
@@ -40,8 +40,10 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
/// > 👉 **Note**: `␠` represents a space character.
///
/// ```markdown
-/// |โ โ 
-/// |
+/// > | โ โ 
+/// ^
+/// > |
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.attempt_opt(space_or_tab(), after)(tokenizer, code)
@@ -49,11 +51,11 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After zero or more spaces or tabs, before a line ending or EOF.
///
-/// > 👉 **Note**: `␠` represents a space character.
-///
/// ```markdown
-/// |␠␠
-/// |
+/// > | ␠␠
+/// ^
+/// > |
+/// ^
/// ```
fn after(_tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/block_quote.rs b/src/construct/block_quote.rs
index 8e8c8b0..6c3f270 100644
--- a/src/construct/block_quote.rs
+++ b/src/construct/block_quote.rs
@@ -41,7 +41,8 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
/// Start of block quote.
///
/// ```markdown
-/// | > a
+/// > | > a
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
// To do: allow arbitrary when code (indented) is turned off.
@@ -51,7 +52,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Start of block quote, after whitespace, before `>`.
///
/// ```markdown
-/// |> a
+/// > | > a
+/// ^
/// ```
fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -66,8 +68,9 @@ fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Start of block quote continuation.
///
/// ```markdown
-/// > a
-/// |> b
+/// | > a
+/// > | > b
+/// ^
/// ```
pub fn cont(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
// To do: allow arbitrary when code (indented) is turned off.
@@ -77,8 +80,9 @@ pub fn cont(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After whitespace, before `>`.
///
/// ```markdown
-/// > a
-/// |> b
+/// | > a
+/// > | > b
+/// ^
/// ```
fn cont_before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -96,8 +100,10 @@ fn cont_before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `>`, before optional whitespace.
///
/// ```markdown
-/// >| a
-/// >|b
+/// > | > a
+/// ^
+/// > | >b
+/// ^
/// ```
fn cont_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/character_escape.rs b/src/construct/character_escape.rs
index 55d321a..811adcf 100644
--- a/src/construct/character_escape.rs
+++ b/src/construct/character_escape.rs
@@ -39,9 +39,8 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
/// Start of a character escape.
///
/// ```markdown
-/// a|\*b
-/// a|\b
-/// a|\ b
+/// > | a\*b
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -59,9 +58,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Inside a character escape, after `\`.
///
/// ```markdown
-/// a\|*b
-/// a\|b
-/// a\| b
+/// > | a\*b
+/// ^
/// ```
fn inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/character_reference.rs b/src/construct/character_reference.rs
index 2bfed45..544458a 100644
--- a/src/construct/character_reference.rs
+++ b/src/construct/character_reference.rs
@@ -71,16 +71,32 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
/// Kind of a character reference.
#[derive(Debug, Clone, PartialEq)]
pub enum Kind {
- /// Numeric decimal character reference (`&#x9;`).
+ /// Numeric decimal character reference.
+ ///
+ /// ```markdown
+ /// > | a&#123;b
+ /// ^^^^^^
+ /// ```
Decimal,
- /// Numeric hexadecimal character reference (`&#123;`).
+ /// Numeric hexadecimal character reference.
+ ///
+ /// ```markdown
+ /// > | a&#x9;b
+ /// ^^^^^
+ /// ```
Hexadecimal,
- /// Named character reference (`&amp;`).
+ /// Named character reference.
+ ///
+ /// ```markdown
+ /// > | a&amp;b
+ /// ^^^^^
+ /// ```
Named,
}
impl Kind {
- /// Get the maximum size of characters allowed in a character reference.
+ /// Get the maximum size of characters allowed in the value of a character
+ /// reference.
fn max(&self) -> usize {
match self {
Kind::Hexadecimal => CHARACTER_REFERENCE_HEXADECIMAL_SIZE_MAX,
@@ -113,9 +129,12 @@ struct Info {
/// Start of a character reference.
///
/// ```markdown
-/// a|&amp;b
-/// a|&#123;b
-/// a|&#x9;b
+/// > | a&amp;b
+/// ^
+/// > | a&#123;b
+/// ^
+/// > | a&#x9;b
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -134,9 +153,12 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// or an alphanumeric for named references.
///
/// ```markdown
-/// a&|amp;b
-/// a&|#123;b
-/// a&|#x9;b
+/// > | a&amp;b
+/// ^
+/// > | a&#123;b
+/// ^
+/// > | a&#x9;b
+/// ^
/// ```
fn open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
let info = Info {
@@ -158,8 +180,10 @@ fn open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// or a digit for decimals.
///
/// ```markdown
-/// a&#|123;b
-/// a&#|x9;b
+/// > | a&#123;b
+/// ^
+/// > | a&#x9;b
+/// ^
/// ```
fn numeric(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
if let Code::Char('x' | 'X') = code {
@@ -182,9 +206,12 @@ fn numeric(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResu
/// allowed.
///
/// ```markdown
-/// a&a|mp;b
-/// a&#1|23;b
-/// a&#x|9;b
+/// > | a&amp;b
+/// ^^^
+/// > | a&#123;b
+/// ^^^
+/// > | a&#x9;b
+/// ^
/// ```
fn value(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
diff --git a/src/construct/code_fenced.rs b/src/construct/code_fenced.rs
index 622cc3d..d5001e7 100644
--- a/src/construct/code_fenced.rs
+++ b/src/construct/code_fenced.rs
@@ -183,9 +183,10 @@ struct Info {
/// Start of fenced code.
///
/// ```markdown
-/// | ~~~js
-/// console.log(1);
-/// ~~~
+/// > | ~~~js
+/// ^
+/// | console.log(1)
+/// | ~~~
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.enter(Token::CodeFenced);
@@ -197,9 +198,10 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Inside the opening fence, after an optional prefix, before a sequence.
///
/// ```markdown
-/// |~~~js
-/// console.log(1);
-/// ~~~
+/// > | ~~~js
+/// ^
+/// | console.log(1)
+/// | ~~~
/// ```
fn before_sequence_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
let tail = tokenizer.events.last();
@@ -232,9 +234,10 @@ fn before_sequence_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult
/// Inside the opening fence sequence.
///
/// ```markdown
-/// ~|~~js
-/// console.log(1);
-/// ~~~
+/// > | ~~~js
+/// ^
+/// | console.log(1)
+/// | ~~~
/// ```
fn sequence_open(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -259,9 +262,10 @@ fn sequence_open(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> State
/// Inside the opening fence, after the sequence (and optional whitespace), before the info.
///
/// ```markdown
-/// ~~~|js
-/// console.log(1);
-/// ~~~
+/// > | ~~~js
+/// ^
+/// | console.log(1)
+/// | ~~~
/// ```
fn info_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -282,9 +286,10 @@ fn info_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResu
/// Inside the opening fence info.
///
/// ```markdown
-/// ~~~j|s
-/// console.log(1);
-/// ~~~
+/// > | ~~~js
+/// ^
+/// | console.log(1)
+/// | ~~~
/// ```
fn info_inside(
tokenizer: &mut Tokenizer,
@@ -321,9 +326,10 @@ fn info_inside(
/// Inside the opening fence, after the info and whitespace, before the meta.
///
/// ```markdown
-/// ~~~js |eval
-/// console.log(1);
-/// ~~~
+/// > | ~~~js eval
+/// ^
+/// | console.log(1)
+/// | ~~~
/// ```
fn meta_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -344,9 +350,10 @@ fn meta_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResu
/// Inside the opening fence meta.
///
/// ```markdown
-/// ~~~js e|val
-/// console.log(1);
-/// ~~~
+/// > | ~~~js eval
+/// ^
+/// | console.log(1)
+/// | ~~~
/// ```
fn meta(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -369,9 +376,11 @@ fn meta(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
/// At an eol/eof in code, before a closing fence or before content.
///
/// ```markdown
-/// ~~~js|
-/// aa|
-/// ~~~
+/// > | ~~~js
+/// ^
+/// > | console.log(1)
+/// ^
+/// | ~~~
/// ```
fn at_break(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
tokenizer.check(partial_non_lazy_continuation, |ok| {
@@ -386,9 +395,11 @@ fn at_break(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult
/// At an eol/eof in code, before a non-lazy closing fence or content.
///
/// ```markdown
-/// ~~~js|
-/// aa|
-/// ~~~
+/// > | ~~~js
+/// ^
+/// > | console.log(1)
+/// ^
+/// | ~~~
/// ```
fn at_non_lazy_break(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
let clone = info.clone();
@@ -408,9 +419,10 @@ fn at_non_lazy_break(tokenizer: &mut Tokenizer, code: Code, info: Info) -> State
/// Before a closing fence, at the line ending.
///
/// ```markdown
-/// ~~~js
-/// console.log('1')|
-/// ~~~
+/// | ~~~js
+/// > | console.log(1)
+/// ^
+/// | ~~~
/// ```
fn close_begin(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -427,13 +439,10 @@ fn close_begin(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResu
/// Before a closing fence, before optional whitespace.
///
/// ```markdown
-/// ~~~js
-/// console.log('1')
-/// |~~~
-///
-/// ~~~js
-/// console.log('1')
-/// | ~~~
+/// | ~~~js
+/// | console.log(1)
+/// > | ~~~
+/// ^
/// ```
fn close_start(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
tokenizer.enter(Token::CodeFencedFence);
@@ -445,13 +454,10 @@ fn close_start(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResu
/// In a closing fence, after optional whitespace, before sequence.
///
/// ```markdown
-/// ~~~js
-/// console.log('1')
-/// |~~~
-///
-/// ~~~js
-/// console.log('1')
-/// |~~~
+/// | ~~~js
+/// | console.log(1)
+/// > | ~~~
+/// ^
/// ```
fn close_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -466,9 +472,10 @@ fn close_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnRes
/// In the closing fence sequence.
///
/// ```markdown
-/// ~~~js
-/// console.log('1')
-/// ~|~~
+/// | ~~~js
+/// | console.log(1)
+/// > | ~~~
+/// ^
/// ```
fn close_sequence(tokenizer: &mut Tokenizer, code: Code, info: Info, size: usize) -> StateFnResult {
match code {
@@ -490,9 +497,10 @@ fn close_sequence(tokenizer: &mut Tokenizer, code: Code, info: Info, size: usize
/// After the closing fence sequence after optional whitespace.
///
/// ```markdown
-/// ~~~js
-/// console.log('1')
-/// ~~~ |
+/// | ~~~js
+/// | console.log(1)
+/// > | ~~~
+/// ^
/// ```
fn close_sequence_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -507,9 +515,10 @@ fn close_sequence_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult
/// Before a closing fence, at the line ending.
///
/// ```markdown
-/// ~~~js
-/// console.log('1')|
-/// ~~~
+/// | ~~~js
+/// > | console.log(1)
+/// ^
+/// | ~~~
/// ```
fn content_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
tokenizer.enter(Token::LineEnding);
@@ -520,9 +529,10 @@ fn content_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnR
/// Before code content, definitely not before a closing fence.
///
/// ```markdown
-/// ~~~js
-/// |aa
-/// ~~~
+/// | ~~~js
+/// > | console.log(1)
+/// ^
+/// | ~~~
/// ```
fn content_start(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
tokenizer.go(space_or_tab_min_max(0, info.prefix), |t, c| {
@@ -533,9 +543,10 @@ fn content_start(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnRe
/// Before code content, after a prefix.
///
/// ```markdown
-/// ~~~js
-/// | aa
-/// ~~~
+/// | ~~~js
+/// > | console.log(1)
+/// ^
+/// | ~~~
/// ```
fn content_begin(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -552,11 +563,10 @@ fn content_begin(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnRe
/// In code content.
///
/// ```markdown
-/// ~~~js
-/// |ab
-/// a|b
-/// ab|
-/// ~~~
+/// | ~~~js
+/// > | console.log(1)
+/// ^^^^^^^^^^^^^^
+/// | ~~~
/// ```
fn content_continue(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -577,9 +587,10 @@ fn content_continue(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateF
/// After fenced code.
///
/// ```markdown
-/// ~~~js
-/// console.log('1')
-/// ~~~|
+/// | ~~~js
+/// | console.log(1)
+/// > | ~~~
+/// ^
/// ```
fn after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.exit(Token::CodeFenced);
diff --git a/src/construct/code_indented.rs b/src/construct/code_indented.rs
index 74a0938..6778b62 100644
--- a/src/construct/code_indented.rs
+++ b/src/construct/code_indented.rs
@@ -52,13 +52,14 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
/// Start of code (indented).
///
-/// ```markdown
-/// | asd
-/// ```
-///
/// > **Parsing note**: it is not needed to check if this first line is a
/// > filled line (that it has a non-whitespace character), because blank lines
/// > are parsed already, so we never run into that.
+///
+/// ```markdown
+/// > | aaa
+/// ^
+/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
// Do not interrupt paragraphs.
if tokenizer.interrupt {
@@ -72,8 +73,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// At a break.
///
/// ```markdown
-/// |asd
-/// asd|
+/// > | aaa
+/// ^ ^
/// ```
fn at_break(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -92,9 +93,8 @@ fn at_break(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Inside code content.
///
/// ```markdown
-/// |ab
-/// a|b
-/// ab|
+/// > | aaa
+/// ^^^^
/// ```
fn content(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -112,7 +112,8 @@ fn content(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After indented code.
///
/// ```markdown
-/// ab|
+/// > | aaa
+/// ^
/// ```
fn after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.exit(Token::CodeIndented);
@@ -124,8 +125,9 @@ fn after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Right at a line ending, trying to parse another indent.
///
/// ```markdown
-/// ab|
-/// cd
+/// > | aaa
+/// ^
+/// | bbb
/// ```
fn further_start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
if tokenizer.lazy {
@@ -148,8 +150,9 @@ fn further_start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After a proper indent.
///
/// ```markdown
-/// asd
-/// |asd
+/// | aaa
+/// > | bbb
+/// ^
/// ```
fn further_end(_tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
(State::Ok, Some(vec![code]))
@@ -157,23 +160,21 @@ fn further_end(_tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// At the beginning of a line that is not indented enough.
///
-/// > 👉 **Note**: `␠` represents a space character.
-///
/// ```markdown
-/// asd
-/// |โ โ 
-/// asd
+/// | aaa
+/// > | bbb
+/// ^
/// ```
fn further_begin(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.attempt_opt(space_or_tab(), further_after)(tokenizer, code)
}
-/// After whitespace.
+/// After whitespace, not indented enough.
///
/// ```markdown
-/// asd
-/// โ โ |
-/// asd
+/// | aaa
+/// > | bbb
+/// ^
/// ```
fn further_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/code_text.rs b/src/construct/code_text.rs
index 039f4b2..5e40d03 100644
--- a/src/construct/code_text.rs
+++ b/src/construct/code_text.rs
@@ -89,11 +89,10 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
/// Start of code (text).
///
/// ```markdown
-/// |`a`
-///
-/// |\``a`
-///
-/// |``a`
+/// > | `a`
+/// ^
+/// > | \`a`
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
let len = tokenizer.events.len();
@@ -114,7 +113,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In the opening sequence.
///
/// ```markdown
-/// `|`a``
+/// > | `a`
+/// ^
/// ```
fn sequence_open(tokenizer: &mut Tokenizer, code: Code, size: usize) -> StateFnResult {
if let Code::Char('`') = code {
@@ -132,8 +132,8 @@ fn sequence_open(tokenizer: &mut Tokenizer, code: Code, size: usize) -> StateFnR
/// Between something and something else
///
/// ```markdown
-/// `|a`
-/// `a|`
+/// > | `a`
+/// ^^
/// ```
fn between(tokenizer: &mut Tokenizer, code: Code, size_open: usize) -> StateFnResult {
match code {
@@ -161,7 +161,8 @@ fn between(tokenizer: &mut Tokenizer, code: Code, size_open: usize) -> StateFnRe
/// In data.
///
/// ```markdown
-/// `a|b`
+/// > | `a`
+/// ^
/// ```
fn data(tokenizer: &mut Tokenizer, code: Code, size_open: usize) -> StateFnResult {
match code {
@@ -179,7 +180,8 @@ fn data(tokenizer: &mut Tokenizer, code: Code, size_open: usize) -> StateFnResul
/// In the closing sequence.
///
/// ```markdown
-/// ``a`|`
+/// > | `a`
+/// ^
/// ```
fn sequence_close(
tokenizer: &mut Tokenizer,
diff --git a/src/construct/definition.rs b/src/construct/definition.rs
index 4d14653..6ce3a04 100644
--- a/src/construct/definition.rs
+++ b/src/construct/definition.rs
@@ -101,17 +101,23 @@ use crate::construct::{
};
use crate::token::Token;
use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
+use crate::util::skip::opt_back as skip_opt_back;
/// At the start of a definition.
///
/// ```markdown
-/// |[a]: b "c"
+/// > | [a]: b "c"
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
- let index = tokenizer.events.len();
- let definition_before = index > 3
- && tokenizer.events[index - 1].token_type == Token::LineEnding
- && tokenizer.events[index - 3].token_type == Token::Definition;
+ let definition_before = !tokenizer.events.is_empty()
+ && tokenizer.events[skip_opt_back(
+ &tokenizer.events,
+ tokenizer.events.len() - 1,
+ &[Token::LineEnding, Token::SpaceOrTab],
+ )]
+ .token_type
+ == Token::Definition;
// Do not interrupt paragraphs (but do follow definitions).
if tokenizer.interrupt && !definition_before {
@@ -126,7 +132,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// At the start of a definition, after whitespace.
///
/// ```markdown
-/// |[a]: b "c"
+/// > | [a]: b "c"
+/// ^
/// ```
fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -151,7 +158,8 @@ fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After the label of a definition.
///
/// ```markdown
-/// [a]|: b "c"
+/// > | [a]: b "c"
+/// ^
/// ```
fn label_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -173,10 +181,8 @@ fn label_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Before a destination.
///
/// ```markdown
-/// [a]: |b "c"
-///
-/// [a]:
-/// |b "c"
+/// > | [a]: b "c"
+/// ^
/// ```
fn destination_before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.go(
@@ -201,10 +207,8 @@ fn destination_before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After a destination.
///
/// ```markdown
-/// [a]: b| "c"
-///
-/// [a]: b| โŠ
-/// "c"
+/// > | [a]: b "c"
+/// ^
/// ```
fn destination_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.attempt_opt(title_before, after)(tokenizer, code)
@@ -213,8 +217,10 @@ fn destination_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After a definition.
///
/// ```markdown
-/// [a]: b|
-/// [a]: b "c"|
+/// > | [a]: b
+/// ^
+/// > | [a]: b "c"
+/// ^
/// ```
fn after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.attempt_opt(space_or_tab(), after_whitespace)(tokenizer, code)
@@ -223,8 +229,10 @@ fn after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After a definition, after optional whitespace.
///
/// ```markdown
-/// [a]: b |
-/// [a]: b "c"|
+/// > | [a]: b
+/// ^
+/// > | [a]: b "c"
+/// ^
/// ```
fn after_whitespace(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -241,10 +249,10 @@ fn after_whitespace(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After a destination, presumably before a title.
///
/// ```markdown
-/// [a]: b| "c"
-///
-/// [a]: b| โŠ
-/// "c"
+/// > | [a]: b
+/// ^
+/// > | [a]: b "c"
+/// ^
/// ```
fn title_before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.go(space_or_tab_eol(), title_before_marker)(tokenizer, code)
@@ -253,8 +261,9 @@ fn title_before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Before a title, after a line ending.
///
/// ```markdown
-/// [a]: bโŠ
-/// | "c"
+/// | [a]: b
+/// > | "c"
+/// ^
/// ```
fn title_before_marker(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.go(
@@ -276,10 +285,8 @@ fn title_before_marker(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After a title.
///
/// ```markdown
-/// [a]: b "c"|
-///
-/// [a]: bโŠ
-/// "c"|
+/// > | [a]: b "c"
+/// ^
/// ```
fn title_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.attempt_opt(space_or_tab(), title_after_after_optional_whitespace)(tokenizer, code)
@@ -288,9 +295,8 @@ fn title_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After a title, after optional whitespace.
///
/// ```markdown
-/// [a]: b "c"|
-///
-/// [a]: b "c" |
+/// > | [a]: b "c"
+/// ^
/// ```
fn title_after_after_optional_whitespace(_tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/hard_break_escape.rs b/src/construct/hard_break_escape.rs
index 4fb87bf..de8afe6 100644
--- a/src/construct/hard_break_escape.rs
+++ b/src/construct/hard_break_escape.rs
@@ -46,7 +46,9 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
/// Start of a hard break (escape).
///
/// ```markdown
-/// a|\
+/// > | a\
+/// ^
+/// | b
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -64,7 +66,9 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// At the end of a hard break (escape), after `\`.
///
/// ```markdown
-/// a\|
+/// > | a\
+/// ^
+/// | b
/// ```
fn inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/hard_break_trailing.rs b/src/construct/hard_break_trailing.rs
index 6626675..d83bf60 100644
--- a/src/construct/hard_break_trailing.rs
+++ b/src/construct/hard_break_trailing.rs
@@ -47,8 +47,9 @@ use crate::tokenizer::{Code, State, StateFnResult, Tokenizer};
/// Start of a hard break (trailing).
///
/// ```markdown
-/// a| โŠ
-/// b
+/// > | aโ โ 
+/// ^
+/// | b
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -65,8 +66,9 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Inside the hard break (trailing).
///
/// ```markdown
-/// a |โŠ
-/// b
+/// > | aโ โ 
+/// ^
+/// | b
/// ```
fn inside(tokenizer: &mut Tokenizer, code: Code, size: usize) -> StateFnResult {
match code {
diff --git a/src/construct/heading_atx.rs b/src/construct/heading_atx.rs
index 4a4992a..8947f64 100644
--- a/src/construct/heading_atx.rs
+++ b/src/construct/heading_atx.rs
@@ -63,7 +63,8 @@ use crate::util::edit_map::EditMap;
/// Start of a heading (atx).
///
/// ```markdown
-/// |## alpha
+/// > | ## aa
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.enter(Token::HeadingAtx);
@@ -74,7 +75,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Start of a heading (atx), after whitespace.
///
/// ```markdown
-/// |## alpha
+/// > | ## aa
+/// ^
/// ```
fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
if Code::Char('#') == code {
@@ -88,7 +90,8 @@ fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In the opening sequence.
///
/// ```markdown
-/// #|# alpha
+/// > | ## aa
+/// ^
/// ```
fn sequence_open(tokenizer: &mut Tokenizer, code: Code, rank: usize) -> StateFnResult {
match code {
@@ -116,11 +119,8 @@ fn sequence_open(tokenizer: &mut Tokenizer, code: Code, rank: usize) -> StateFnR
/// After something but before something else.
///
/// ```markdown
-/// ## |alpha
-/// ## alpha| bravo
-/// ## alpha |bravo
-/// ## alpha bravo|##
-/// ## alpha bravo ##|
+/// > | ## aa
+/// ^
/// ```
fn at_break(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -146,10 +146,12 @@ fn at_break(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
}
/// In a further sequence (after whitespace).
+///
/// Could be normal “visible” hashes in the heading or a final sequence.
///
/// ```markdown
-/// ## alpha #|#
+/// > | ## aa ##
+/// ^
/// ```
fn further_sequence(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
if let Code::Char('#') = code {
@@ -164,7 +166,8 @@ fn further_sequence(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In text.
///
/// ```markdown
-/// ## al|pha
+/// > | ## aa
+/// ^
/// ```
fn data(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/heading_setext.rs b/src/construct/heading_setext.rs
index cb426a9..deab558 100644
--- a/src/construct/heading_setext.rs
+++ b/src/construct/heading_setext.rs
@@ -112,23 +112,19 @@ impl Kind {
/// At a line ending, presumably an underline.
///
/// ```markdown
-/// alpha|
-/// ==
+/// | aa
+/// > | ==
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
- let index = tokenizer.events.len();
- let previous = if index > 1 {
- skip_opt_back(
+ let paragraph_before = !tokenizer.events.is_empty()
+ && tokenizer.events[skip_opt_back(
&tokenizer.events,
- index - 1,
- &[Token::SpaceOrTab, Token::BlockQuotePrefix],
- )
- } else {
- 0
- };
- let previous = skip_opt_back(&tokenizer.events, previous, &[Token::LineEnding]);
- let paragraph_before =
- previous > 1 && tokenizer.events[previous].token_type == Token::Paragraph;
+ tokenizer.events.len() - 1,
+ &[Token::LineEnding, Token::SpaceOrTab],
+ )]
+ .token_type
+ == Token::Paragraph;
// Require a paragraph before and do not allow on a lazy line.
if paragraph_before && !tokenizer.lazy {
@@ -142,8 +138,9 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After optional whitespace, presumably an underline.
///
/// ```markdown
-/// alpha
-/// |==
+/// | aa
+/// > | ==
+/// ^
/// ```
fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -158,8 +155,9 @@ fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In an underline sequence.
///
/// ```markdown
-/// alpha
-/// =|=
+/// | aa
+/// > | ==
+/// ^
/// ```
fn inside(tokenizer: &mut Tokenizer, code: Code, kind: Kind) -> StateFnResult {
match code {
@@ -177,8 +175,9 @@ fn inside(tokenizer: &mut Tokenizer, code: Code, kind: Kind) -> StateFnResult {
/// After an underline sequence, after optional whitespace.
///
/// ```markdown
-/// alpha
-/// ==|
+/// | aa
+/// > | ==
+/// ^
/// ```
fn after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/html_flow.rs b/src/construct/html_flow.rs
index 445165f..822b9dd 100644
--- a/src/construct/html_flow.rs
+++ b/src/construct/html_flow.rs
@@ -200,9 +200,9 @@ struct Info {
/// Start of HTML (flow), before optional whitespace.
///
/// ```markdown
-/// |<x />
+/// > | <x />
+/// ^
/// ```
-///
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.enter(Token::HtmlFlow);
// To do: allow arbitrary when code (indented) is turned off.
@@ -221,7 +221,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After optional whitespace, before `<`.
///
/// ```markdown
-/// |<x />
+/// > | <x />
+/// ^
/// ```
fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
if Code::Char('<') == code {
@@ -236,9 +237,12 @@ fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<`, before a tag name or other stuff.
///
/// ```markdown
-/// <|x />
-/// <|!doctype>
-/// <|!--xxx-->
+/// > | <x />
+/// ^
+/// > | <!doctype>
+/// ^
+/// > | <!--xxx-->
+/// ^
/// ```
fn open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
let mut info = Info {
@@ -289,9 +293,12 @@ fn open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<!`, so inside a declaration, comment, or CDATA.
///
/// ```markdown
-/// <!|doctype>
-/// <!|--xxx-->
-/// <!|[CDATA[>&<]]>
+/// > | <!doctype>
+/// ^
+/// > | <!--xxx-->
+/// ^
+/// > | <![CDATA[>&<]]>
+/// ^
/// ```
fn declaration_open(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -330,7 +337,8 @@ fn declaration_open(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> St
/// After `<!-`, inside a comment, before another `-`.
///
/// ```markdown
-/// <!-|-xxx-->
+/// > | <!--xxx-->
+/// ^
/// ```
fn comment_open_inside(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -350,11 +358,8 @@ fn comment_open_inside(tokenizer: &mut Tokenizer, code: Code, info: Info) -> Sta
/// After `<![`, inside CDATA, expecting `CDATA[`.
///
/// ```markdown
-/// <![|CDATA[>&<]]>
-/// <![CD|ATA[>&<]]>
-/// <![CDA|TA[>&<]]>
-/// <![CDAT|A[>&<]]>
-/// <![CDATA|[>&<]]>
+/// > | <![CDATA[>&<]]>
+/// ^^^^^^
/// ```
fn cdata_open_inside(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
if code == info.buffer[info.index] {
@@ -380,7 +385,8 @@ fn cdata_open_inside(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> S
/// After `</`, in a closing tag, before a tag name.
///
/// ```markdown
-/// </|x>
+/// > | </x>
+/// ^
/// ```
fn tag_close_start(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -396,8 +402,10 @@ fn tag_close_start(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> Sta
/// In a tag name.
///
/// ```markdown
-/// <a|b>
-/// </a|b>
+/// > | <ab>
+/// ^^
+/// > | </ab>
+/// ^^
/// ```
fn tag_name(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -454,7 +462,8 @@ fn tag_name(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnRes
/// After a closing slash of a basic tag name.
///
/// ```markdown
-/// <div/|>
+/// > | <div/>
+/// ^
/// ```
fn basic_self_closing(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -471,8 +480,8 @@ fn basic_self_closing(tokenizer: &mut Tokenizer, code: Code, info: Info) -> Stat
/// After a closing slash of a complete tag name.
///
/// ```markdown
-/// <x/|>
-/// </x/|>
+/// > | <x/>
+/// ^
/// ```
fn complete_closing_tag_after(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -495,12 +504,16 @@ fn complete_closing_tag_after(tokenizer: &mut Tokenizer, code: Code, info: Info)
/// attributes.
///
/// ```markdown
-/// <x |/>
-/// <x |:asd>
-/// <x |_asd>
-/// <x |asd>
-/// <x | >
-/// <x |>
+/// > | <a />
+/// ^
+/// > | <a :b>
+/// ^
+/// > | <a _b>
+/// ^
+/// > | <a b>
+/// ^
+/// > | <a >
+/// ^
/// ```
fn complete_attribute_name_before(
tokenizer: &mut Tokenizer,
@@ -533,9 +546,12 @@ fn complete_attribute_name_before(
/// In an attribute name.
///
/// ```markdown
-/// <x :|>
-/// <x _|>
-/// <x a|>
+/// > | <a :b>
+/// ^
+/// > | <a _b>
+/// ^
+/// > | <a b>
+/// ^
/// ```
fn complete_attribute_name(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -554,9 +570,10 @@ fn complete_attribute_name(tokenizer: &mut Tokenizer, code: Code, info: Info) ->
/// tag, or whitespace.
///
/// ```markdown
-/// <x a|>
-/// <x a|=b>
-/// <x a|="c">
+/// > | <a b>
+/// ^
+/// > | <a b=c>
+/// ^
/// ```
fn complete_attribute_name_after(
tokenizer: &mut Tokenizer,
@@ -586,8 +603,10 @@ fn complete_attribute_name_after(
/// allowing whitespace.
///
/// ```markdown
-/// <x a=|b>
-/// <x a=|"c">
+/// > | <a b=c>
+/// ^
+/// > | <a b="c">
+/// ^
/// ```
fn complete_attribute_value_before(
tokenizer: &mut Tokenizer,
@@ -618,8 +637,10 @@ fn complete_attribute_value_before(
/// In a double or single quoted attribute value.
///
/// ```markdown
-/// <x a="|">
-/// <x a='|'>
+/// > | <a b="c">
+/// ^
+/// > | <a b='c'>
+/// ^
/// ```
fn complete_attribute_value_quoted(
tokenizer: &mut Tokenizer,
@@ -650,7 +671,8 @@ fn complete_attribute_value_quoted(
/// In an unquoted attribute value.
///
/// ```markdown
-/// <x a=b|c>
+/// > | <a b=c>
+/// ^
/// ```
fn complete_attribute_value_unquoted(
tokenizer: &mut Tokenizer,
@@ -680,7 +702,8 @@ fn complete_attribute_value_unquoted(
/// end of the tag.
///
/// ```markdown
-/// <x a="b"|>
+/// > | <a b="c">
+/// ^
/// ```
fn complete_attribute_value_quoted_after(
tokenizer: &mut Tokenizer,
@@ -698,7 +721,8 @@ fn complete_attribute_value_quoted_after(
/// In certain circumstances of a complete tag where only an `>` is allowed.
///
/// ```markdown
-/// <x a="b"|>
+/// > | <a b="c">
+/// ^
/// ```
fn complete_end(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -713,7 +737,8 @@ fn complete_end(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnRes
/// After `>` in a complete tag.
///
/// ```markdown
-/// <x>|
+/// > | <x>
+/// ^
/// ```
fn complete_after(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -733,7 +758,8 @@ fn complete_after(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnR
/// Inside continuation of any HTML kind.
///
/// ```markdown
-/// <!--x|xx-->
+/// > | <!--xxx-->
+/// ^
/// ```
fn continuation(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -800,8 +826,9 @@ fn continuation(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnRes
/// In continuation, at an eol.
///
/// ```markdown
-/// <x>|
-/// asd
+/// > | <x>
+/// ^
+/// | asd
/// ```
fn continuation_start(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
tokenizer.check(partial_non_lazy_continuation, |ok| {
@@ -816,8 +843,9 @@ fn continuation_start(tokenizer: &mut Tokenizer, code: Code, info: Info) -> Stat
/// In continuation, at an eol, before non-lazy content.
///
/// ```markdown
-/// <x>|
-/// asd
+/// > | <x>
+/// ^
+/// | asd
/// ```
fn continuation_start_non_lazy(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -837,8 +865,9 @@ fn continuation_start_non_lazy(tokenizer: &mut Tokenizer, code: Code, info: Info
/// In continuation, after an eol, before non-lazy content.
///
/// ```markdown
-/// <x>
-/// |asd
+/// | <x>
+/// > | asd
+/// ^
/// ```
fn continuation_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -855,7 +884,8 @@ fn continuation_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> Sta
/// In comment continuation, after one `-`, expecting another.
///
/// ```markdown
-/// <!--xxx-|->
+/// > | <!--xxx-->
+/// ^
/// ```
fn continuation_comment_inside(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -873,7 +903,8 @@ fn continuation_comment_inside(tokenizer: &mut Tokenizer, code: Code, info: Info
/// In raw continuation, after `<`, expecting a `/`.
///
/// ```markdown
-/// <script>console.log(1)<|/script>
+/// > | <script>console.log(1)</script>
+/// ^
/// ```
fn continuation_raw_tag_open(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -891,9 +922,8 @@ fn continuation_raw_tag_open(tokenizer: &mut Tokenizer, code: Code, info: Info)
/// In raw continuation, after `</`, expecting or inside a raw tag name.
///
/// ```markdown
-/// <script>console.log(1)</|script>
-/// <script>console.log(1)</s|cript>
-/// <script>console.log(1)</script|>
+/// > | <script>console.log(1)</script>
+/// ^^^^^^
/// ```
fn continuation_raw_end_tag(
tokenizer: &mut Tokenizer,
@@ -933,7 +963,8 @@ fn continuation_raw_end_tag(
/// In cdata continuation, after `]`, expecting `]>`.
///
/// ```markdown
-/// <![CDATA[>&<]|]>
+/// > | <![CDATA[>&<]]>
+/// ^
/// ```
fn continuation_character_data_inside(
tokenizer: &mut Tokenizer,
@@ -955,14 +986,16 @@ fn continuation_character_data_inside(
/// In declaration or instruction continuation, waiting for `>` to close it.
///
/// ```markdown
-/// <!--|>
-/// <?ab?|>
-/// <?|>
-/// <!q|>
-/// <!--ab--|>
-/// <!--ab--|->
-/// <!--ab---|>
-/// <![CDATA[>&<]]|>
+/// > | <!-->
+/// ^
+/// > | <?>
+/// ^
+/// > | <!q>
+/// ^
+/// > | <!--ab-->
+/// ^
+/// > | <![CDATA[>&<]]>
+/// ^
/// ```
fn continuation_declaration_inside(
tokenizer: &mut Tokenizer,
@@ -991,7 +1024,8 @@ fn continuation_declaration_inside(
/// In closed continuation: everything we get until the eol/eof is part of it.
///
/// ```markdown
-/// <!doctype>|
+/// > | <!doctype>
+/// ^
/// ```
fn continuation_close(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -1012,7 +1046,8 @@ fn continuation_close(tokenizer: &mut Tokenizer, code: Code, info: Info) -> Stat
/// Done.
///
/// ```markdown
-/// <!doctype>|
+/// > | <!doctype>
+/// ^
/// ```
fn continuation_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.exit(Token::HtmlFlow);
@@ -1026,8 +1061,9 @@ fn continuation_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Before a line ending, expecting a blank line.
///
/// ```markdown
-/// <div>|
-///
+/// > | <div>
+/// ^
+/// |
/// ```
fn blank_line_before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.enter(Token::LineEnding);
diff --git a/src/construct/html_text.rs b/src/construct/html_text.rs
index cdd7c69..be1f1fe 100644
--- a/src/construct/html_text.rs
+++ b/src/construct/html_text.rs
@@ -62,7 +62,8 @@ use crate::util::codes::parse;
/// Start of HTML (text)
///
/// ```markdown
-/// a |<x> b
+/// > | a <b> c
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
if Code::Char('<') == code {
@@ -78,9 +79,12 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<`, before a tag name or other stuff.
///
/// ```markdown
-/// a <|x /> b
-/// a <|!doctype> b
-/// a <|!--xxx--/> b
+/// > | a <b> c
+/// ^
+/// > | a <!doctype> c
+/// ^
+/// > | a <!--b--> c
+/// ^
/// ```
fn open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -107,9 +111,12 @@ fn open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<!`, so inside a declaration, comment, or CDATA.
///
/// ```markdown
-/// a <!|doctype> b
-/// a <!|--xxx--> b
-/// a <!|[CDATA[>&<]]> b
+/// > | a <!doctype> c
+/// ^
+/// > | a <!--b--> c
+/// ^
+/// > | a <![CDATA[>&<]]> c
+/// ^
/// ```
fn declaration_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -136,7 +143,8 @@ fn declaration_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<!-`, inside a comment, before another `-`.
///
/// ```markdown
-/// a <!-|-xxx--> b
+/// > | a <!--b--> c
+/// ^
/// ```
fn comment_open_inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -150,13 +158,14 @@ fn comment_open_inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<!--`, inside a comment
///
-/// > **Note**: [html (flow)][html_flow] does allow `<!-->` or `<!--->` as
+/// > ๐Ÿ‘‰ **Note**: [html (flow)][html_flow] does allow `<!-->` or `<!--->` as
/// > empty comments.
/// > This is prohibited in html (text).
/// > See: <https://github.com/commonmark/commonmark-spec/issues/712>.
///
/// ```markdown
-/// a <!--|xxx--> b
+/// > | a <!--b--> c
+/// ^
/// ```
///
/// [html_flow]: crate::construct::html_flow
@@ -173,13 +182,14 @@ fn comment_start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<!---`, inside a comment
///
-/// > **Note**: [html (flow)][html_flow] does allow `<!--->` as an empty
-/// > comment.
+/// > ๐Ÿ‘‰ **Note**: [html (flow)][html_flow] does allow `<!-->` or `<!--->` as
+/// > empty comments.
/// > This is prohibited in html (text).
/// > See: <https://github.com/commonmark/commonmark-spec/issues/712>.
///
/// ```markdown
-/// a <!---|xxx--> b
+/// > | a <!---b--> c
+/// ^
/// ```
///
/// [html_flow]: crate::construct::html_flow
@@ -193,8 +203,8 @@ fn comment_start_dash(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a comment.
///
/// ```markdown
-/// a <!--|xxx--> b
-/// a <!--x|xx--> b
+/// > | a <!--b--> c
+/// ^
/// ```
fn comment(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -216,8 +226,8 @@ fn comment(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a comment, after `-`.
///
/// ```markdown
-/// a <!--xxx-|-> b
-/// a <!--xxx-|yyy--> b
+/// > | a <!--b--> c
+/// ^
/// ```
fn comment_close(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -232,11 +242,8 @@ fn comment_close(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<![`, inside CDATA, expecting `CDATA[`.
///
/// ```markdown
-/// a <![|CDATA[>&<]]> b
-/// a <![CD|ATA[>&<]]> b
-/// a <![CDA|TA[>&<]]> b
-/// a <![CDAT|A[>&<]]> b
-/// a <![CDATA|[>&<]]> b
+/// > | a <![CDATA[>&<]]> b
+/// ^^^^^^
/// ```
fn cdata_open_inside(
tokenizer: &mut Tokenizer,
@@ -265,7 +272,8 @@ fn cdata_open_inside(
/// In CDATA.
///
/// ```markdown
-/// a <![CDATA[|>&<]]> b
+/// > | a <![CDATA[>&<]]> b
+/// ^^^
/// ```
fn cdata(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -287,7 +295,8 @@ fn cdata(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In CDATA, after `]`.
///
/// ```markdown
-/// a <![CDATA[>&<]|]> b
+/// > | a <![CDATA[>&<]]> b
+/// ^
/// ```
fn cdata_close(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -302,7 +311,8 @@ fn cdata_close(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In CDATA, after `]]`.
///
/// ```markdown
-/// a <![CDATA[>&<]]|> b
+/// > | a <![CDATA[>&<]]> b
+/// ^
/// ```
fn cdata_end(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -315,7 +325,8 @@ fn cdata_end(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a declaration.
///
/// ```markdown
-/// a <!a|b> b
+/// > | a <!b> c
+/// ^
/// ```
fn declaration(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -333,8 +344,8 @@ fn declaration(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In an instruction.
///
/// ```markdown
-/// a <?|ab?> b
-/// a <?a|b?> b
+/// > | a <?b?> c
+/// ^
/// ```
fn instruction(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -356,8 +367,8 @@ fn instruction(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In an instruction, after `?`.
///
/// ```markdown
-/// a <?aa?|> b
-/// a <?aa?|bb?> b
+/// > | a <?b?> c
+/// ^
/// ```
fn instruction_close(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -369,7 +380,8 @@ fn instruction_close(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `</`, in a closing tag, before a tag name.
///
/// ```markdown
-/// a </|x> b
+/// > | a </b> c
+/// ^
/// ```
fn tag_close_start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -384,8 +396,8 @@ fn tag_close_start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `</x`, in a tag name.
///
/// ```markdown
-/// a </x|> b
-/// a </x|y> b
+/// > | a </b> c
+/// ^
/// ```
fn tag_close(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -400,8 +412,8 @@ fn tag_close(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a closing tag, after the tag name.
///
/// ```markdown
-/// a </x| > b
-/// a </xy |> b
+/// > | a </b> c
+/// ^
/// ```
fn tag_close_between(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -419,7 +431,8 @@ fn tag_close_between(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `<x`, in an opening tag name.
///
/// ```markdown
-/// a <x|> b
+/// > | a <b> c
+/// ^
/// ```
fn tag_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -437,9 +450,8 @@ fn tag_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In an opening tag, after the tag name.
///
/// ```markdown
-/// a <x| y> b
-/// a <x |y="z"> b
-/// a <x |/> b
+/// > | a <b> c
+/// ^
/// ```
fn tag_open_between(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -465,9 +477,8 @@ fn tag_open_between(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In an attribute name.
///
/// ```markdown
-/// a <x :|> b
-/// a <x _|> b
-/// a <x a|> b
+/// > | a <b c> d
+/// ^
/// ```
fn tag_open_attribute_name(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -483,9 +494,8 @@ fn tag_open_attribute_name(tokenizer: &mut Tokenizer, code: Code) -> StateFnResu
/// tag, or whitespace.
///
/// ```markdown
-/// a <x a|> b
-/// a <x a|=b> b
-/// a <x a|="c"> b
+/// > | a <b c> d
+/// ^
/// ```
fn tag_open_attribute_name_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -508,8 +518,8 @@ fn tag_open_attribute_name_after(tokenizer: &mut Tokenizer, code: Code) -> State
/// allowing whitespace.
///
/// ```markdown
-/// a <x a=|b> b
-/// a <x a=|"c"> b
+/// > | a <b c=d> e
+/// ^
/// ```
fn tag_open_attribute_value_before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -540,8 +550,8 @@ fn tag_open_attribute_value_before(tokenizer: &mut Tokenizer, code: Code) -> Sta
/// In a double or single quoted attribute value.
///
/// ```markdown
-/// a <x a="|"> b
-/// a <x a='|'> b
+/// > | a <b c="d"> e
+/// ^
/// ```
fn tag_open_attribute_value_quoted(
tokenizer: &mut Tokenizer,
@@ -577,7 +587,8 @@ fn tag_open_attribute_value_quoted(
/// In an unquoted attribute value.
///
/// ```markdown
-/// a <x a=b|c> b
+/// > | a <b c=d> e
+/// ^
/// ```
fn tag_open_attribute_value_unquoted(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -596,7 +607,8 @@ fn tag_open_attribute_value_unquoted(tokenizer: &mut Tokenizer, code: Code) -> S
/// end of the tag.
///
/// ```markdown
-/// a <x a="b"|> b
+/// > | a <b c="d"> e
+/// ^
/// ```
fn tag_open_attribute_value_quoted_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -610,9 +622,8 @@ fn tag_open_attribute_value_quoted_after(tokenizer: &mut Tokenizer, code: Code)
/// In certain circumstances of a complete tag where only an `>` is allowed.
///
/// ```markdown
-/// a <x a="b"|> b
-/// a <!--xx--|> b
-/// a <x /|> b
+/// > | a <b c="d"> e
+/// ^
/// ```
fn end(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -628,12 +639,13 @@ fn end(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// At an allowed line ending.
///
-/// > **Note**: we canโ€™t have blank lines in text, so no need to worry about
+/// > ๐Ÿ‘‰ **Note**: we canโ€™t have blank lines in text, so no need to worry about
/// > empty tokens.
///
/// ```markdown
-/// a <!--a|
-/// b--> b
+/// > | a <!--a
+/// ^
+/// | b-->
/// ```
fn at_line_ending(
tokenizer: &mut Tokenizer,
@@ -657,12 +669,13 @@ fn at_line_ending(
/// After a line ending.
///
-/// > **Note**: we canโ€™t have blank lines in text, so no need to worry about
+/// > ๐Ÿ‘‰ **Note**: we canโ€™t have blank lines in text, so no need to worry about
/// > empty tokens.
///
/// ```markdown
-/// a <!--a
-/// |b--> b
+/// | a <!--a
+/// > | b-->
+/// ^
/// ```
fn after_line_ending(
tokenizer: &mut Tokenizer,
@@ -676,12 +689,13 @@ fn after_line_ending(
/// After a line ending, after indent.
///
-/// > **Note**: we canโ€™t have blank lines in text, so no need to worry about
+/// > ๐Ÿ‘‰ **Note**: we canโ€™t have blank lines in text, so no need to worry about
/// > empty tokens.
///
/// ```markdown
-/// a <!--a
-/// |b--> b
+/// | a <!--a
+/// > | b-->
+/// ^
/// ```
fn after_line_ending_prefix(
tokenizer: &mut Tokenizer,
diff --git a/src/construct/label_end.rs b/src/construct/label_end.rs
index 29ac6f9..2db68b5 100644
--- a/src/construct/label_end.rs
+++ b/src/construct/label_end.rs
@@ -344,12 +344,13 @@ pub fn resolve_media(tokenizer: &mut Tokenizer) -> Vec<Event> {
/// Start of label end.
///
/// ```markdown
-/// [a|](b) c
-/// [a|][b] c
-/// [a|][] b
-/// [a|] b
-///
-/// [a]: z
+/// > | [a](b) c
+/// ^
+/// > | [a][b] c
+/// ^
+/// > | [a][] b
+/// ^
+/// > | [a] b
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
if Code::Char(']') == code {
@@ -410,12 +411,14 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `]`.
///
/// ```markdown
-/// [a]|(b) c
-/// [a]|[b] c
-/// [a]|[] b
-/// [a]| b
-///
-/// [a]: z
+/// > | [a](b) c
+/// ^
+/// > | [a][b] c
+/// ^
+/// > | [a][] b
+/// ^
+/// > | [a] b
+/// ^
/// ```
fn after(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
let defined = tokenizer.parse_state.definitions.contains(&info.media.id);
@@ -460,9 +463,10 @@ fn after(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
/// > ๐Ÿ‘‰ **Note**: we only get here if the label is defined.
///
/// ```markdown
-/// [a]|[] b
-///
-/// [a]: z
+/// > | [a][] b
+/// ^
+/// > | [a] b
+/// ^
/// ```
fn reference_not_full(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
tokenizer.attempt(collapsed_reference, move |is_ok| {
@@ -479,12 +483,14 @@ fn reference_not_full(tokenizer: &mut Tokenizer, code: Code, info: Info) -> Stat
/// Done, we found something.
///
/// ```markdown
-/// [a](b)| c
-/// [a][b]| c
-/// [a][]| b
-/// [a]| b
-///
-/// [a]: z
+/// > | [a](b) c
+/// ^
+/// > | [a][b] c
+/// ^
+/// > | [a][] b
+/// ^
+/// > | [a] b
+/// ^
/// ```
fn ok(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
// Remove this one and everything after it.
@@ -520,12 +526,12 @@ fn ok(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
/// There was an okay opening, but we didnโ€™t match anything.
///
/// ```markdown
-/// [a]|(b c
-/// [a]|[b c
-/// [b]|[ c
-/// [b]| c
-///
-/// [a]: z
+/// > | [a](b c
+/// ^
+/// > | [a][b c
+/// ^
+/// > | [a] b
+/// ^
/// ```
fn nok(tokenizer: &mut Tokenizer, _code: Code, label_start_index: usize) -> StateFnResult {
let label_start = tokenizer
@@ -539,7 +545,8 @@ fn nok(tokenizer: &mut Tokenizer, _code: Code, label_start_index: usize) -> Stat
/// Before a resource, at `(`.
///
/// ```markdown
-/// [a]|(b) c
+/// > | [a](b) c
+/// ^
/// ```
fn resource(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -554,10 +561,11 @@ fn resource(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
}
}
-/// At the start of a resource, after `(`, before a definition.
+/// At the start of a resource, after `(`, before a destination.
///
/// ```markdown
-/// [a](|b) c
+/// > | [a](b) c
+/// ^
/// ```
fn resource_start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.attempt_opt(space_or_tab_eol(), resource_open)(tokenizer, code)
@@ -566,7 +574,8 @@ fn resource_start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// At the start of a resource, after optional whitespace.
///
/// ```markdown
-/// [a](|b) c
+/// > | [a](b) c
+/// ^
/// ```
fn resource_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -594,8 +603,8 @@ fn resource_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a resource, after a destination, before optional whitespace.
///
/// ```markdown
-/// [a](b|) c
-/// [a](b| "c") d
+/// > | [a](b) c
+/// ^
/// ```
fn destination_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.attempt(space_or_tab_eol(), |ok| {
@@ -606,8 +615,8 @@ fn destination_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a resource, after a destination, after whitespace.
///
/// ```markdown
-/// [a](b |) c
-/// [a](b |"c") d
+/// > | [a](b ) c
+/// ^
/// ```
fn resource_between(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -632,7 +641,8 @@ fn resource_between(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a resource, after a title.
///
/// ```markdown
-/// [a](b "c"|) d
+/// > | [a](b "c") d
+/// ^
/// ```
fn title_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.attempt_opt(space_or_tab_eol(), resource_end)(tokenizer, code)
@@ -641,9 +651,8 @@ fn title_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a resource, at the `)`.
///
/// ```markdown
-/// [a](b|) c
-/// [a](b |) c
-/// [a](b "c"|) d
+/// > | [a](b) d
+/// ^
/// ```
fn resource_end(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -661,7 +670,8 @@ fn resource_end(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a reference (full), at the `[`.
///
/// ```markdown
-/// [a]|[b]
+/// > | [a][b] d
+/// ^
/// ```
fn full_reference(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -686,7 +696,8 @@ fn full_reference(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a reference (full), after `]`.
///
/// ```markdown
-/// [a][b]|
+/// > | [a][b] d
+/// ^
/// ```
fn full_reference_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
let events = &tokenizer.events;
@@ -731,7 +742,8 @@ fn full_reference_after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult
/// > ๐Ÿ‘‰ **Note**: we only get here if the label is defined.
///
/// ```markdown
-/// [a]|[]
+/// > | [a][] d
+/// ^
/// ```
fn collapsed_reference(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -751,7 +763,8 @@ fn collapsed_reference(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// > ๐Ÿ‘‰ **Note**: we only get here if the label is defined.
///
/// ```markdown
-/// [a][|]
+/// > | [a][] d
+/// ^
/// ```
fn collapsed_reference_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/label_start_image.rs b/src/construct/label_start_image.rs
index f9b8300..b4e0433 100644
--- a/src/construct/label_start_image.rs
+++ b/src/construct/label_start_image.rs
@@ -35,7 +35,8 @@ use crate::tokenizer::{Code, LabelStart, State, StateFnResult, Tokenizer};
/// Start of label (image) start.
///
/// ```markdown
-/// a |![ b
+/// > | a ![b] c
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -53,7 +54,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After `!`, before a `[`.
///
/// ```markdown
-/// a !|[ b
+/// > | a ![b] c
+/// ^
/// ```
pub fn open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/label_start_link.rs b/src/construct/label_start_link.rs
index 59729cc..7e8e511 100644
--- a/src/construct/label_start_link.rs
+++ b/src/construct/label_start_link.rs
@@ -34,7 +34,8 @@ use crate::tokenizer::{Code, LabelStart, State, StateFnResult, Tokenizer};
/// Start of label (link) start.
///
/// ```markdown
-/// a |[ b
+/// > | a [b] c
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/paragraph.rs b/src/construct/paragraph.rs
index 967e009..74dca87 100644
--- a/src/construct/paragraph.rs
+++ b/src/construct/paragraph.rs
@@ -39,7 +39,8 @@ use crate::util::{edit_map::EditMap, skip::opt as skip_opt};
/// Before a paragraph.
///
/// ```markdown
-/// |qwe
+/// > | abc
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -57,7 +58,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// In a paragraph.
///
/// ```markdown
-/// al|pha
+/// > | abc
+/// ^^^
/// ```
fn inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
diff --git a/src/construct/partial_data.rs b/src/construct/partial_data.rs
index 186665d..b4dfda0 100644
--- a/src/construct/partial_data.rs
+++ b/src/construct/partial_data.rs
@@ -13,7 +13,8 @@ use crate::util::edit_map::EditMap;
/// At the beginning of data.
///
/// ```markdown
-/// |&qwe
+/// > | abc
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code, stop: Vec<Code>) -> StateFnResult {
if stop.contains(&code) {
@@ -28,7 +29,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code, stop: Vec<Code>) -> StateFnR
/// Before something.
///
/// ```markdown
-/// |qwe| |&
+/// > | abc
+/// ^
/// ```
fn at_break(tokenizer: &mut Tokenizer, code: Code, stop: Vec<Code>) -> StateFnResult {
match code {
@@ -53,7 +55,8 @@ fn at_break(tokenizer: &mut Tokenizer, code: Code, stop: Vec<Code>) -> StateFnRe
/// In data.
///
/// ```markdown
-/// q|w|e
+/// > | abc
+/// ^^^
/// ```
fn data(tokenizer: &mut Tokenizer, code: Code, stop: Vec<Code>) -> StateFnResult {
let done = match code {
diff --git a/src/construct/partial_destination.rs b/src/construct/partial_destination.rs
index daa968a..f5299d2 100644
--- a/src/construct/partial_destination.rs
+++ b/src/construct/partial_destination.rs
@@ -105,8 +105,10 @@ struct Info {
/// Before a destination.
///
/// ```markdown
-/// |<ab>
-/// |ab
+/// > | <aa>
+/// ^
+/// > | aa
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code, options: Options) -> StateFnResult {
let info = Info {
@@ -143,7 +145,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code, options: Options) -> StateFn
/// After `<`, before an enclosed destination.
///
/// ```markdown
-/// <|ab>
+/// > | <aa>
+/// ^
/// ```
fn enclosed_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
if let Code::Char('>') = code {
@@ -163,7 +166,8 @@ fn enclosed_before(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFn
/// In an enclosed destination.
///
/// ```markdown
-/// <u|rl>
+/// > | <aa>
+/// ^
/// ```
fn enclosed(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -192,7 +196,8 @@ fn enclosed(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult
/// After `\`, in an enclosed destination.
///
/// ```markdown
-/// <a\|>b>
+/// > | <a\*a>
+/// ^
/// ```
fn enclosed_escape(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -207,7 +212,8 @@ fn enclosed_escape(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFn
/// In a raw destination.
///
/// ```markdown
-/// a|b
+/// > | aa
+/// ^
/// ```
fn raw(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -265,7 +271,8 @@ fn raw(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
/// After `\`, in a raw destination.
///
/// ```markdown
-/// a\|)b
+/// > | a\*a
+/// ^
/// ```
fn raw_escape(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
diff --git a/src/construct/partial_label.rs b/src/construct/partial_label.rs
index f380c7d..b1d02e8 100644
--- a/src/construct/partial_label.rs
+++ b/src/construct/partial_label.rs
@@ -93,7 +93,8 @@ struct Info {
/// Before a label.
///
/// ```markdown
-/// |[a]
+/// > | [a]
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code, options: Options) -> StateFnResult {
match code {
@@ -118,8 +119,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code, options: Options) -> StateFn
/// In a label, at something.
///
/// ```markdown
-/// [|a]
-/// [a|]
+/// > | [a]
+/// ^
/// ```
fn at_break(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -162,7 +163,8 @@ fn at_break(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnRes
/// In a label, in text.
///
/// ```markdown
-/// [a|b]
+/// > | [a]
+/// ^
/// ```
fn label(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -201,7 +203,8 @@ fn label(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult
/// After `\` in a label.
///
/// ```markdown
-/// [a\|[b]
+/// > | [a\*a]
+/// ^
/// ```
fn escape(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
diff --git a/src/construct/partial_space_or_tab.rs b/src/construct/partial_space_or_tab.rs
index 78477de..9ee8b6c 100644
--- a/src/construct/partial_space_or_tab.rs
+++ b/src/construct/partial_space_or_tab.rs
@@ -129,7 +129,8 @@ pub fn space_or_tab_eol_with_options(options: EolOptions) -> Box<StateFn> {
/// Before `space_or_tab`.
///
/// ```markdown
-/// alpha| bravo
+/// > | a␠␠b
+/// ^
/// ```
fn start(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -159,8 +160,8 @@ fn start(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult
/// In `space_or_tab`.
///
/// ```markdown
-/// alpha |bravo
-/// alpha | bravo
+/// > | a␠␠b
+/// ^
/// ```
fn inside(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -186,13 +187,9 @@ fn inside(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResul
/// `space_or_tab_eol`: after optionally first `space_or_tab`.
///
/// ```markdown
-/// alpha |
-/// bravo
-/// ```
-///
-/// ```markdown
-/// alpha|
-/// bravo
+/// > | a
+/// ^
+/// | b
/// ```
fn after_space_or_tab(tokenizer: &mut Tokenizer, code: Code, mut info: EolInfo) -> StateFnResult {
match code {
@@ -218,13 +215,9 @@ fn after_space_or_tab(tokenizer: &mut Tokenizer, code: Code, mut info: EolInfo)
/// `space_or_tab_eol`: after eol.
///
/// ```markdown
-/// alpha
-/// |bravo
-/// ```
-///
-/// ```markdown
-/// alpha
-/// |bravo
+/// | a
+/// > | b
+/// ^
/// ```
#[allow(clippy::needless_pass_by_value)]
fn after_eol(tokenizer: &mut Tokenizer, code: Code, info: EolInfo) -> StateFnResult {
@@ -243,13 +236,9 @@ fn after_eol(tokenizer: &mut Tokenizer, code: Code, info: EolInfo) -> StateFnRes
/// `space_or_tab_eol`: after more (optional) `space_or_tab`.
///
/// ```markdown
-/// alpha
-/// |bravo
-/// ```
-///
-/// ```markdown
-/// alpha
-/// |bravo
+/// | a
+/// > | b
+/// ^
/// ```
fn after_more_space_or_tab(_tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
// Blank line not allowed.
diff --git a/src/construct/partial_title.rs b/src/construct/partial_title.rs
index 6303da8..852693a 100644
--- a/src/construct/partial_title.rs
+++ b/src/construct/partial_title.rs
@@ -56,7 +56,7 @@ enum Kind {
/// ## Example
///
/// ```markdown
- /// [a] b (c)
+ /// (a)
/// ```
Paren,
/// In a double quoted (`"`) title.
@@ -64,7 +64,7 @@ enum Kind {
/// ## Example
///
/// ```markdown
- /// [a] b "c"
+ /// "a"
/// ```
Double,
/// In a single quoted (`'`) title.
@@ -72,7 +72,7 @@ enum Kind {
/// ## Example
///
/// ```markdown
- /// [a] b 'c'
+ /// 'a'
/// ```
Single,
}
@@ -132,9 +132,8 @@ struct Info {
/// Before a title.
///
/// ```markdown
-/// |"a"
-/// |'a'
-/// |(a)
+/// > | "a"
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code, options: Options) -> StateFnResult {
match code {
@@ -159,9 +158,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code, options: Options) -> StateFn
/// This is also used when at the closing marker.
///
/// ```markdown
-/// "|a"
-/// '|a'
-/// (|a)
+/// > | "a"
+/// ^
/// ```
fn begin(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -182,10 +180,8 @@ fn begin(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
/// At something, before something else.
///
/// ```markdown
-/// "|a"
-/// 'a|'
-/// (a|
-/// b)
+/// > | "a"
+/// ^
/// ```
fn at_break(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {
@@ -222,7 +218,8 @@ fn at_break(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnRes
/// In title text.
///
/// ```markdown
-/// "a|b"
+/// > | "a"
+/// ^
/// ```
fn title(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -248,7 +245,8 @@ fn title(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
/// After `\`, in title text.
///
/// ```markdown
-/// "a\|"b"
+/// > | "a\*b"
+/// ^
/// ```
fn escape(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
diff --git a/src/construct/thematic_break.rs b/src/construct/thematic_break.rs
index 96b328a..f0b6052 100644
--- a/src/construct/thematic_break.rs
+++ b/src/construct/thematic_break.rs
@@ -131,7 +131,8 @@ struct Info {
/// Start of a thematic break.
///
/// ```markdown
-/// |***
+/// > | ***
+/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
tokenizer.enter(Token::ThematicBreak);
@@ -142,7 +143,8 @@ pub fn start(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Start of a thematic break, after whitespace.
///
/// ```markdown
-/// |***
+/// > | ***
+/// ^
/// ```
fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
match code {
@@ -161,9 +163,8 @@ fn before(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// After something but before something else.
///
/// ```markdown
-/// |***
-/// *| * *
-/// * |* *
+/// > | ***
+/// ^
/// ```
fn at_break(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult {
match code {
@@ -186,9 +187,8 @@ fn at_break(tokenizer: &mut Tokenizer, code: Code, info: Info) -> StateFnResult
/// In a sequence of markers.
///
/// ```markdown
-/// |***
-/// *|**
-/// **|*
+/// > | ***
+/// ^
/// ```
fn sequence(tokenizer: &mut Tokenizer, code: Code, mut info: Info) -> StateFnResult {
match code {