about summary refs log tree commit diff stats
path: root/src/construct
diff options
context:
space:
mode:
author     Titus Wormer <tituswormer@gmail.com>  2022-07-20 12:34:06 +0200
committer  Titus Wormer <tituswormer@gmail.com>  2022-07-20 12:34:06 +0200
commit  7894ec75a7070591c3499fce1f409563c4edc7d7 (patch)
tree    170d736268a30b728f28b164213a0a0ac47414da /src/construct
parent  7ec35068c86a546dac8172e74e8a34e3b6813eb2 (diff)
download  markdown-rs-7894ec75a7070591c3499fce1f409563c4edc7d7.tar.gz
          markdown-rs-7894ec75a7070591c3499fce1f409563c4edc7d7.tar.bz2
          markdown-rs-7894ec75a7070591c3499fce1f409563c4edc7d7.zip
Refactor to use less vecs for events
Diffstat (limited to 'src/construct')
-rw-r--r--  src/construct/attention.rs       4
-rw-r--r--  src/construct/heading_atx.rs     4
-rw-r--r--  src/construct/heading_setext.rs  6
-rw-r--r--  src/construct/label_end.rs       4
-rw-r--r--  src/construct/list.rs            6
-rw-r--r--  src/construct/paragraph.rs       6
-rw-r--r--  src/construct/partial_data.rs    6
7 files changed, 18 insertions, 18 deletions
diff --git a/src/construct/attention.rs b/src/construct/attention.rs
index 7e99600..2cbc563 100644
--- a/src/construct/attention.rs
+++ b/src/construct/attention.rs
@@ -205,7 +205,7 @@ fn inside(tokenizer: &mut Tokenizer, code: Code, marker: MarkerKind) -> StateFnR
/// Resolve attention sequences.
#[allow(clippy::too_many_lines)]
-fn resolve_attention(tokenizer: &mut Tokenizer) -> Vec<Event> {
+fn resolve_attention(tokenizer: &mut Tokenizer) {
let codes = &tokenizer.parse_state.codes;
let mut edit_map = EditMap::new();
let mut start = 0;
@@ -523,7 +523,7 @@ fn resolve_attention(tokenizer: &mut Tokenizer) -> Vec<Event> {
index += 1;
}
- edit_map.consume(tokenizer.events.split_off(0))
+ edit_map.consume(&mut tokenizer.events);
}
/// Classify whether a character code represents whitespace, punctuation, or
diff --git a/src/construct/heading_atx.rs b/src/construct/heading_atx.rs
index 4546924..feb1e9d 100644
--- a/src/construct/heading_atx.rs
+++ b/src/construct/heading_atx.rs
@@ -193,7 +193,7 @@ fn data(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
}
/// Resolve heading (atx).
-pub fn resolve(tokenizer: &mut Tokenizer) -> Vec<Event> {
+pub fn resolve(tokenizer: &mut Tokenizer) {
let mut edit_map = EditMap::new();
let mut index = 0;
let mut heading_start: Option<usize> = None;
@@ -258,5 +258,5 @@ pub fn resolve(tokenizer: &mut Tokenizer) -> Vec<Event> {
index += 1;
}
- edit_map.consume(tokenizer.events.split_off(0))
+ edit_map.consume(&mut tokenizer.events);
}
diff --git a/src/construct/heading_setext.rs b/src/construct/heading_setext.rs
index 841bf53..d1e7d57 100644
--- a/src/construct/heading_setext.rs
+++ b/src/construct/heading_setext.rs
@@ -60,7 +60,7 @@
use crate::constant::TAB_SIZE;
use crate::construct::partial_space_or_tab::{space_or_tab, space_or_tab_min_max};
use crate::token::Token;
-use crate::tokenizer::{Code, Event, EventType, State, StateFnResult, Tokenizer};
+use crate::tokenizer::{Code, EventType, State, StateFnResult, Tokenizer};
use crate::util::{edit_map::EditMap, skip::opt_back as skip_opt_back};
/// Kind of underline.
@@ -196,7 +196,7 @@ fn after(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
}
/// Resolve heading (setext).
-pub fn resolve(tokenizer: &mut Tokenizer) -> Vec<Event> {
+pub fn resolve(tokenizer: &mut Tokenizer) {
let mut edit_map = EditMap::new();
let mut index = 0;
let mut paragraph_enter: Option<usize> = None;
@@ -236,5 +236,5 @@ pub fn resolve(tokenizer: &mut Tokenizer) -> Vec<Event> {
index += 1;
}
- edit_map.consume(tokenizer.events.split_off(0))
+ edit_map.consume(&mut tokenizer.events);
}
diff --git a/src/construct/label_end.rs b/src/construct/label_end.rs
index 2124681..6bd634f 100644
--- a/src/construct/label_end.rs
+++ b/src/construct/label_end.rs
@@ -612,7 +612,7 @@ fn collapsed_reference_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnRes
/// This turns correct label start (image, link) and label end into links and
/// images, or turns them back into data.
#[allow(clippy::too_many_lines)]
-pub fn resolve_media(tokenizer: &mut Tokenizer) -> Vec<Event> {
+pub fn resolve_media(tokenizer: &mut Tokenizer) {
let mut left = tokenizer.label_start_list_loose.split_off(0);
let mut left_2 = tokenizer.label_start_stack.split_off(0);
let media = tokenizer.media_list.split_off(0);
@@ -773,5 +773,5 @@ pub fn resolve_media(tokenizer: &mut Tokenizer) -> Vec<Event> {
index += 1;
}
- edit_map.consume(tokenizer.events.split_off(0))
+ edit_map.consume(&mut tokenizer.events);
}
diff --git a/src/construct/list.rs b/src/construct/list.rs
index db8af36..48ed291 100644
--- a/src/construct/list.rs
+++ b/src/construct/list.rs
@@ -50,7 +50,7 @@ use crate::construct::{
thematic_break::start as thematic_break,
};
use crate::token::Token;
-use crate::tokenizer::{Code, Event, EventType, State, StateFnResult, Tokenizer};
+use crate::tokenizer::{Code, EventType, State, StateFnResult, Tokenizer};
use crate::util::{
edit_map::EditMap,
skip,
@@ -390,7 +390,7 @@ fn nok(_tokenizer: &mut Tokenizer, _code: Code) -> StateFnResult {
}
/// Find adjacent list items with the same marker.
-pub fn resolve_list_item(tokenizer: &mut Tokenizer) -> Vec<Event> {
+pub fn resolve_list_item(tokenizer: &mut Tokenizer) {
let mut edit_map = EditMap::new();
let mut index = 0;
let mut balance = 0;
@@ -492,5 +492,5 @@ pub fn resolve_list_item(tokenizer: &mut Tokenizer) -> Vec<Event> {
index += 1;
}
- edit_map.consume(tokenizer.events.split_off(0))
+ edit_map.consume(&mut tokenizer.events);
}
diff --git a/src/construct/paragraph.rs b/src/construct/paragraph.rs
index 53030f4..1b186e3 100644
--- a/src/construct/paragraph.rs
+++ b/src/construct/paragraph.rs
@@ -33,7 +33,7 @@
//! [html]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-p-element
use crate::token::Token;
-use crate::tokenizer::{Code, ContentType, Event, EventType, State, StateFnResult, Tokenizer};
+use crate::tokenizer::{Code, ContentType, EventType, State, StateFnResult, Tokenizer};
use crate::util::{edit_map::EditMap, skip::opt as skip_opt};
/// Before a paragraph.
@@ -80,7 +80,7 @@ fn inside(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult {
/// Merge “`Paragraph`”s, which currently span a single line, into actual
/// `Paragraph`s that span multiple lines.
-pub fn resolve(tokenizer: &mut Tokenizer) -> Vec<Event> {
+pub fn resolve(tokenizer: &mut Tokenizer) {
let mut edit_map = EditMap::new();
let len = tokenizer.events.len();
let mut index = 0;
@@ -142,5 +142,5 @@ pub fn resolve(tokenizer: &mut Tokenizer) -> Vec<Event> {
index += 1;
}
- edit_map.consume(tokenizer.events.split_off(0))
+ edit_map.consume(&mut tokenizer.events);
}
diff --git a/src/construct/partial_data.rs b/src/construct/partial_data.rs
index b59bb76..b56efd2 100644
--- a/src/construct/partial_data.rs
+++ b/src/construct/partial_data.rs
@@ -7,7 +7,7 @@
//! [text]: crate::content::text
use crate::token::Token;
-use crate::tokenizer::{Code, Event, EventType, State, StateFnResult, Tokenizer};
+use crate::tokenizer::{Code, EventType, State, StateFnResult, Tokenizer};
use crate::util::edit_map::EditMap;
/// At the beginning of data.
@@ -75,7 +75,7 @@ fn data(tokenizer: &mut Tokenizer, code: Code, stop: Vec<Code>) -> StateFnResult
}
/// Merge adjacent data events.
-pub fn resolve_data(tokenizer: &mut Tokenizer) -> Vec<Event> {
+pub fn resolve_data(tokenizer: &mut Tokenizer) {
let mut edit_map = EditMap::new();
let len = tokenizer.events.len();
let mut index = 0;
@@ -114,5 +114,5 @@ pub fn resolve_data(tokenizer: &mut Tokenizer) -> Vec<Event> {
index += 1;
}
- edit_map.consume(tokenizer.events.split_off(0))
+ edit_map.consume(&mut tokenizer.events);
}