author     Titus Wormer <tituswormer@gmail.com>  2022-07-19 11:06:42 +0200
committer  Titus Wormer <tituswormer@gmail.com>  2022-07-19 11:07:07 +0200
commit     ed42e65627f3283f0e002490fbcb652649fd3afc (patch)
tree       8eba438f2b234714bcf00c5d4e9c9a836ff97baf
parent     f98836b85ce71157adaa10c9d5ef5c0ee527e67a (diff)
Replace use of `HashSet` with `vec`
Diffstat
-rw-r--r--  readme.md                 2
-rw-r--r--  src/content/document.rs  11
-rw-r--r--  src/parser.rs             5
3 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/readme.md b/readme.md
index d5641df..f446535 100644
--- a/readme.md
+++ b/readme.md
@@ -71,7 +71,7 @@ cargo doc --document-private-items
#### Misc
-- [ ] (3) `no_std`: remove all `HashSet`s/`HashMap` to use vecs, vecs w/ tuples?
+- [ ] (3) `no_std`: remove all `HashMap` to use vecs, vecs w/ tuples?
- [ ] (3) Remove splicing and cloning in subtokenizer
- [ ] (3) Pass more references around
- [ ] (1) Get markers from constructs (`string`, `text`)
diff --git a/src/content/document.rs b/src/content/document.rs
index 7656db7..d2c2426 100644
--- a/src/content/document.rs
+++ b/src/content/document.rs
@@ -26,7 +26,6 @@ use crate::util::{
    skip,
    span::{from_exit_event, serialize},
};
-use std::collections::HashSet;
/// Phases where we can exit containers.
#[derive(Debug, PartialEq)]
@@ -84,20 +83,24 @@ pub fn document(parse_state: &mut ParseState, point: Point, index: usize) -> Vec<Event> {
    tokenizer.push(&parse_state.codes, Box::new(start), true);
    let mut index = 0;
-    let mut next_definitions: HashSet<String> = HashSet::new();
+    let mut next_definitions = vec![];
    while index < tokenizer.events.len() {
        let event = &tokenizer.events[index];
        if event.event_type == EventType::Exit && event.token_type == Token::DefinitionLabelString {
-            next_definitions.insert(normalize_identifier(
+            let id = normalize_identifier(
                serialize(
                    &parse_state.codes,
                    &from_exit_event(&tokenizer.events, index),
                    false,
                )
                .as_str(),
-            ));
+            );
+
+            if !next_definitions.contains(&id) {
+                next_definitions.push(id);
+            }
        }
        index += 1;
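The replacement keeps `next_definitions` duplicate-free by hand: checking `contains` before `push` plays the role `HashSet::insert` used to, at the cost of a linear scan per identifier. A standalone sketch of the same pattern (the function name is illustrative, not part of the crate):

```rust
/// Collect unique identifiers into a `Vec`, mirroring the contains-then-push
/// pattern used above instead of `HashSet::insert`.
fn collect_unique(ids: impl IntoIterator<Item = String>) -> Vec<String> {
    let mut unique: Vec<String> = vec![];
    for id in ids {
        // Linear membership test: O(n) per check, but definition lists in a
        // markdown document are normally tiny.
        if !unique.contains(&id) {
            unique.push(id);
        }
    }
    unique
}
```

For example, feeding it `["a", "b", "a"].map(String::from)` yields `["a", "b"]`.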
diff --git a/src/parser.rs b/src/parser.rs
index 409e812..3361baf 100644
--- a/src/parser.rs
+++ b/src/parser.rs
@@ -4,7 +4,6 @@ use crate::content::document::document;
use crate::tokenizer::{Code, Event, Point};
use crate::util::codes::parse as parse_codes;
use crate::{Constructs, Options};
-use std::collections::HashSet;
/// Information needed, in all content types, when parsing markdown.
///
@@ -16,7 +15,7 @@ pub struct ParseState {
    /// List of codes.
    pub codes: Vec<Code>,
    /// Set of defined identifiers.
-    pub definitions: HashSet<String>,
+    pub definitions: Vec<String>,
}
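With `definitions` now a plain `Vec<String>`, membership checks elsewhere in the crate would presumably become slice `contains` calls (or an `iter().any(..)` scan) rather than `HashSet::contains`. A sketch under that assumption; `is_defined` is a hypothetical helper, not a function in this repository:

```rust
// Hypothetical helper: check whether a normalized identifier was defined.
// With a `Vec<String>` this is a linear scan rather than a hash lookup.
fn is_defined(definitions: &[String], normalized_id: &str) -> bool {
    definitions.iter().any(|d| d.as_str() == normalized_id)
}
```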
/// Turn a string of markdown into events.
@@ -26,7 +25,7 @@ pub fn parse(value: &str, options: &Options) -> (Vec<Event>, ParseState) {
    let mut parse_state = ParseState {
        constructs: options.constructs.clone(),
        codes: parse_codes(value),
-        definitions: HashSet::new(),
+        definitions: vec![],
    };
    let events = document(