author     Titus Wormer <tituswormer@gmail.com>  2022-06-14 13:47:32 +0200
committer  Titus Wormer <tituswormer@gmail.com>  2022-06-14 13:55:03 +0200
commit     a1ce43e428754084474a7ecf88ae6debf88b9164 (patch)
tree       a1df0ec515f624431d3e398f7d24e7b411c18e6e /src
parent     c587aee9512119e61918bfbe81c3cca3de7e70aa (diff)
Reorganize to split util
Diffstat (limited to 'src')
-rw-r--r--  src/compiler.rs                           44
-rw-r--r--  src/construct/character_reference.rs       4
-rw-r--r--  src/construct/code_fenced.rs                6
-rw-r--r--  src/construct/mod.rs                        1
-rw-r--r--  src/content/flow.rs                         4
-rw-r--r--  src/subtokenize.rs                         10
-rw-r--r--  src/tokenizer.rs                            2
-rw-r--r--  src/util.rs                               341
-rw-r--r--  src/util/decode_character_reference.rs    103
-rw-r--r--  src/util/encode.rs                         29
-rw-r--r--  src/util/mod.rs                             6
-rw-r--r--  src/util/sanitize_uri.rs                  111
-rw-r--r--  src/util/span.rs                          112
13 files changed, 398 insertions(+), 375 deletions(-)
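The reorganization splits the single flat `src/util.rs` into one submodule per utility, so call sites switch from flat imports to module-qualified paths. A minimal sketch of the import-level change, taken from the `compiler.rs` hunk below (summary only, not part of the patch):

// Before: one flat `util` module with long names.
use crate::util::{
    decode_named_character_reference, decode_numeric_character_reference, encode, get_span,
    sanitize_uri, slice_serialize,
};

// After: one submodule per concern, with shorter names inside each.
use crate::util::{
    decode_character_reference::{decode_named, decode_numeric},
    encode::encode,
    sanitize_uri::sanitize_uri,
    span::{from_exit_event, serialize},
};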
diff --git a/src/compiler.rs b/src/compiler.rs
index 619bbe5..e1ce440 100644
--- a/src/compiler.rs
+++ b/src/compiler.rs
@@ -2,8 +2,10 @@
use crate::construct::character_reference::Kind as CharacterReferenceKind;
use crate::tokenizer::{Code, Event, EventType, TokenType};
use crate::util::{
- decode_named_character_reference, decode_numeric_character_reference, encode, get_span,
- sanitize_uri, slice_serialize,
+ decode_character_reference::{decode_named, decode_numeric},
+ encode::encode,
+ sanitize_uri::sanitize_uri,
+ span::{from_exit_event, serialize},
};
/// Configuration (optional).
@@ -141,7 +143,7 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
ignore_encode = false;
}
TokenType::HtmlFlowData | TokenType::HtmlTextData => {
- let slice = slice_serialize(codes, &get_span(events, index), false);
+ let slice = serialize(codes, &from_exit_event(events, index), false);
let res = if ignore_encode { slice } else { encode(&slice) };
@@ -208,9 +210,9 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
}
TokenType::CodeFlowChunk => {
code_flow_seen_data = Some(true);
- buf_tail_mut(buffers).push(encode(&slice_serialize(
+ buf_tail_mut(buffers).push(encode(&serialize(
codes,
- &get_span(events, index),
+ &from_exit_event(events, index),
false,
)));
}
@@ -224,13 +226,17 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
if let Some(buf) = atx_heading_buffer {
atx_heading_buffer = Some(
buf.to_string()
- + &encode(&slice_serialize(codes, &get_span(events, index), false)),
+ + &encode(&serialize(
+ codes,
+ &from_exit_event(events, index),
+ false,
+ )),
);
}
// First fence we see.
if None == atx_opening_sequence_size {
- let rank = slice_serialize(codes, &get_span(events, index), false).len();
+ let rank = serialize(codes, &from_exit_event(events, index), false).len();
atx_opening_sequence_size = Some(rank);
buf_tail_mut(buffers).push(format!("<h{}>", rank));
}
@@ -246,7 +252,7 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
atx_heading_buffer = Some("".to_string());
}
- let slice = encode(&slice_serialize(codes, &get_span(events, index), false));
+ let slice = encode(&serialize(codes, &from_exit_event(events, index), false));
println!("slice: {:?}", slice);
buf_tail_mut(buffers).push(slice);
}
@@ -258,7 +264,7 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
atx_heading_buffer = None;
}
TokenType::AutolinkProtocol => {
- let slice = slice_serialize(codes, &get_span(events, index), false);
+ let slice = serialize(codes, &from_exit_event(events, index), false);
let buf = buf_tail_mut(buffers);
buf.push(format!(
"<a href=\"{}\">",
@@ -268,7 +274,7 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
buf.push("</a>".to_string());
}
TokenType::AutolinkEmail => {
- let slice = slice_serialize(codes, &get_span(events, index), false);
+ let slice = serialize(codes, &from_exit_event(events, index), false);
let buf = buf_tail_mut(buffers);
buf.push(format!(
"<a href=\"mailto:{}\">",
@@ -289,9 +295,9 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
// } else if code_text_inside {
// buf_tail_mut(buffers).push(" ".to_string());
} else {
- buf_tail_mut(buffers).push(encode(&slice_serialize(
+ buf_tail_mut(buffers).push(encode(&serialize(
codes,
- &get_span(events, index),
+ &from_exit_event(events, index),
false,
)));
}
@@ -308,18 +314,16 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
TokenType::CharacterReferenceValue => {
let kind = character_reference_kind
.expect("expected `character_reference_kind` to be set");
- let reference = slice_serialize(codes, &get_span(events, index), false);
+ let reference = serialize(codes, &from_exit_event(events, index), false);
let ref_string = reference.as_str();
let value = match kind {
CharacterReferenceKind::Decimal => {
- decode_numeric_character_reference(ref_string, 10).to_string()
+ decode_numeric(ref_string, 10).to_string()
}
CharacterReferenceKind::Hexadecimal => {
- decode_numeric_character_reference(ref_string, 16).to_string()
- }
- CharacterReferenceKind::Named => {
- decode_named_character_reference(ref_string)
+ decode_numeric(ref_string, 16).to_string()
}
+ CharacterReferenceKind::Named => decode_named(ref_string),
};
buf_tail_mut(buffers).push(value);
@@ -329,9 +333,9 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
// This branch below currently acts as the resulting `data` tokens.
TokenType::Data | TokenType::CharacterEscapeValue => {
// last_was_tag = false;
- buf_tail_mut(buffers).push(encode(&slice_serialize(
+ buf_tail_mut(buffers).push(encode(&serialize(
codes,
- &get_span(events, index),
+ &from_exit_event(events, index),
false,
)));
}
diff --git a/src/construct/character_reference.rs b/src/construct/character_reference.rs
index 943d70f..ba2ad61 100644
--- a/src/construct/character_reference.rs
+++ b/src/construct/character_reference.rs
@@ -20,7 +20,7 @@
//! However, for security reasons, several numeric character references parse
//! fine but are not rendered as their corresponding character and they are
//! instead replaced by a U+FFFD REPLACEMENT CHARACTER (`�`).
-//! See [`decode_numeric_character_reference`][decode_numeric] for more info.
+//! See [`decode_numeric`][decode_numeric] for more info.
//!
//! To escape ASCII punctuation characters, use the terser
//! [character escape][character_escape] construct instead (as in, `\&`).
@@ -48,7 +48,7 @@
//! [string]: crate::content::string
//! [text]: crate::content::text
//! [character_escape]: crate::construct::character_reference
-//! [decode_numeric]: crate::util::decode_numeric_character_reference
+//! [decode_numeric]: crate::util::decode_character_reference::decode_numeric
//! [character_reference_names]: crate::constant::CHARACTER_REFERENCE_NAMES
//! [html]: https://html.spec.whatwg.org/multipage/parsing.html#character-reference-state
//!
diff --git a/src/construct/code_fenced.rs b/src/construct/code_fenced.rs
index a440d40..c852e8d 100644
--- a/src/construct/code_fenced.rs
+++ b/src/construct/code_fenced.rs
@@ -91,7 +91,7 @@
use crate::constant::{CODE_FENCED_SEQUENCE_SIZE_MIN, TAB_SIZE};
use crate::construct::partial_whitespace::start as whitespace;
use crate::tokenizer::{Code, State, StateFnResult, TokenType, Tokenizer};
-use crate::util::get_span;
+use crate::util::span::from_exit_event;
/// Kind of fences.
#[derive(Debug, Clone, PartialEq)]
@@ -147,7 +147,7 @@ fn before_sequence_open(tokenizer: &mut Tokenizer, code: Code) -> StateFnResult
if let Some(event) = tail {
if event.token_type == TokenType::Whitespace {
- let span = get_span(&tokenizer.events, tokenizer.events.len() - 1);
+ let span = from_exit_event(&tokenizer.events, tokenizer.events.len() - 1);
prefix = span.end_index - span.start_index;
}
}
@@ -415,7 +415,7 @@ fn close_sequence_before(tokenizer: &mut Tokenizer, info: Info, code: Code) -> S
if let Some(event) = tail {
if event.token_type == TokenType::Whitespace {
- let span = get_span(&tokenizer.events, tokenizer.events.len() - 1);
+ let span = from_exit_event(&tokenizer.events, tokenizer.events.len() - 1);
prefix = span.end_index - span.start_index;
}
}
diff --git a/src/construct/mod.rs b/src/construct/mod.rs
index d2203d2..14f53a0 100644
--- a/src/construct/mod.rs
+++ b/src/construct/mod.rs
@@ -53,7 +53,6 @@
//! example `ascii_punctuation` refers to
//! [`char::is_ascii_punctuation`][char::is_ascii_punctuation].
-
pub mod autolink;
pub mod blank_line;
pub mod character_escape;
diff --git a/src/content/flow.rs b/src/content/flow.rs
index 6fa8c25..4d2ece1 100644
--- a/src/content/flow.rs
+++ b/src/content/flow.rs
@@ -28,7 +28,7 @@ use crate::construct::{
};
use crate::subtokenize::subtokenize;
use crate::tokenizer::{Code, Event, Point, State, StateFnResult, TokenType, Tokenizer};
-use crate::util::get_span;
+use crate::util::span::from_exit_event;
/// Turn `codes` as the flow content type into events.
pub fn flow(codes: &[Code], point: Point, index: usize) -> Vec<Event> {
@@ -240,7 +240,7 @@ fn continuation_construct_after_prefix(tokenizer: &mut Tokenizer, code: Code) ->
if let Some(event) = tail {
if event.token_type == TokenType::Whitespace {
- let span = get_span(&tokenizer.events, tokenizer.events.len() - 1);
+ let span = from_exit_event(&tokenizer.events, tokenizer.events.len() - 1);
prefix = span.end_index - span.start_index;
}
}
diff --git a/src/subtokenize.rs b/src/subtokenize.rs
index e004f7b..35d7672 100644
--- a/src/subtokenize.rs
+++ b/src/subtokenize.rs
@@ -1,10 +1,8 @@
-use crate::content::content::start as content;
-use crate::content::string::start as string;
-use crate::content::text::start as text;
+use crate::content::{content::start as content, string::start as string, text::start as text};
use crate::tokenizer::{
Code, Event, EventType, State, StateFn, StateFnResult, TokenType, Tokenizer,
};
-use crate::util::{slice_codes, Span};
+use crate::util::span;
use std::collections::HashMap;
/// To do.
@@ -51,7 +49,7 @@ pub fn subtokenize(events: Vec<Event>, codes: &[Code]) -> (Vec<Event>, bool) {
while let Some(index_ptr) = index_opt {
let enter = &events[index_ptr];
assert_eq!(enter.event_type, EventType::Enter);
- let span = Span {
+ let span = span::Span {
start_index: enter.index,
end_index: events[index_ptr + 1].index,
};
@@ -66,7 +64,7 @@ pub fn subtokenize(events: Vec<Event>, codes: &[Code]) -> (Vec<Event>, bool) {
_ => unreachable!("cannot be ok/nok"),
};
- result = tokenizer.feed(slice_codes(codes, &span), func, enter.next == None);
+ result = tokenizer.feed(span::codes(codes, &span), func, enter.next == None);
if let Some(ref x) = result.1 {
if !x.is_empty() {
diff --git a/src/tokenizer.rs b/src/tokenizer.rs
index 8a2f477..486bc75 100644
--- a/src/tokenizer.rs
+++ b/src/tokenizer.rs
@@ -513,6 +513,8 @@ impl Tokenizer {
let mut state = State::Fn(Box::new(start));
let mut index = 0;
+ println!("feed: {:?} {:?}", codes, drain);
+
self.consumed = true;
while index < codes.len() {
diff --git a/src/util.rs b/src/util.rs
deleted file mode 100644
index accc48e..0000000
--- a/src/util.rs
+++ /dev/null
@@ -1,341 +0,0 @@
-//! Some utilities helpful when parsing and compiling markdown.
-
-use crate::constant::{CHARACTER_REFERENCE_NAMES, CHARACTER_REFERENCE_VALUES};
-use crate::tokenizer::{Code, Event, EventType};
-
-/// Encode dangerous html characters.
-///
-/// This ensures that certain characters which have special meaning in HTML are
-/// dealt with.
-/// Technically, we can skip `>` and `"` in many cases, but CM includes them.
-///
-/// This behavior is not explained in prose in `CommonMark` but can be inferred
-/// from the input/output test cases.
-///
-/// ## Examples
-///
-/// ```rust ignore
-/// use micromark::util::encode;
-///
-/// assert_eq!(encode("I <3 🦀"), "I &lt;3 🦀");
-/// ```
-///
-/// ## References
-///
-/// * [`micromark-util-encode` in `micromark`](https://github.com/micromark/micromark/tree/main/packages/micromark-util-encode)
-pub fn encode(value: &str) -> String {
- value
- .replace('&', "&amp;")
- .replace('"', "&quot;")
- .replace('<', "&lt;")
- .replace('>', "&gt;")
-}
-
-/// Make a value safe for injection as a URL.
-///
-/// This encodes unsafe characters with percent-encoding and skips already
-/// encoded sequences (see `normalize_uri` below).
-/// Further unsafe characters are encoded as character references (see
-/// `encode`).
-///
-/// Then, a vec of (lowercase) allowed protocols can be given, in which case
-/// the URL is sanitized.
-///
-/// For example, `Some(vec!["http", "https", "irc", "ircs", "mailto", "xmpp"])`
-/// can be used for `a[href]`, or `Some(vec!["http", "https"])` for `img[src]`.
-/// If the URL includes an unknown protocol (one not matched by `protocol`, such
-/// as a dangerous example, `javascript:`), the value is ignored.
-pub fn sanitize_uri(value: &str, protocols: &Option<Vec<&str>>) -> String {
- let value = encode(&normalize_uri(value));
-
- if let Some(protocols) = protocols {
- let chars: Vec<char> = value.chars().collect();
- let mut index = 0;
- let mut colon: Option<usize> = None;
-
- while index < chars.len() {
- let char = chars[index];
-
- match char {
- ':' => {
- colon = Some(index);
- break;
- }
- '?' | '#' | '/' => break,
- _ => {}
- }
-
- index += 1;
- }
-
- // If there is no protocol, or the first colon is after `?`, `#`, or `/`, it’s relative.
- // It is a protocol, it should be allowed.
- if let Some(colon) = colon {
- let protocol = chars[0..colon].iter().collect::<String>().to_lowercase();
- if !protocols.contains(&protocol.as_str()) {
- return "".to_string();
- }
- }
- }
-
- value
-}
-
-/// Normalize a URL (such as used in definitions).
-///
-/// Encode unsafe characters with percent-encoding, skipping already encoded
-/// sequences.
-fn normalize_uri(value: &str) -> String {
- let chars: Vec<char> = value.chars().collect();
- let mut result: Vec<String> = vec![];
- let mut index = 0;
- let mut start = 0;
- let mut buff = [0; 4];
-
- while index < chars.len() {
- let char = chars[index];
-
- // A correct percent encoded value.
- if char == '%'
- && index + 2 < chars.len()
- && chars[index + 1].is_ascii_alphanumeric()
- && chars[index + 2].is_ascii_alphanumeric()
- {
- index += 3;
- continue;
- }
-
- // Note: Rust already takes care of lone astral surrogates.
- // Non-ascii or not allowed ascii.
- if char >= '\u{0080}'
- || !matches!(char, '!' | '#' | '$' | '&'..=';' | '=' | '?'..='Z' | '_' | 'a'..='z' | '~')
- {
- result.push(chars[start..index].iter().collect::<String>());
-
- char.encode_utf8(&mut buff);
- result.push(
- buff[0..char.len_utf8()]
- .iter()
- .map(|&byte| format!("%{:X}", byte))
- .collect::<String>(),
- );
-
- start = index + 1;
- }
-
- index += 1;
- }
-
- result.push(chars[start..].iter().collect::<String>());
-
- result.join("")
-}
-
-/// Decode numeric character references.
-///
-/// Turn the number (in string form as either hexadecimal or decimal) coming
-/// from a numeric character reference into a character.
-/// Whether the base of the string form is `10` (decimal) or `16` (hexadecimal)
-/// must be passed as the `radix` parameter.
-///
-/// This returns the `char` associated with that number or a replacement
-/// character for C0 control characters (except for ASCII whitespace), C1
-/// control characters, lone surrogates, noncharacters, and out of range
-/// characters.
-///
-/// ## Examples
-///
-/// ```rust ignore
-/// use micromark::util::decode_numeric_character_reference;
-///
-/// assert_eq!(decode_numeric_character_reference("123", 10), '{');
-/// assert_eq!(decode_numeric_character_reference("9", 16), '\t');
-/// assert_eq!(decode_numeric_character_reference("0", 10), '�'); // Not allowed.
-/// ```
-///
-/// ## Panics
-///
-/// This function panics if an invalid string or an out of bounds valid string
-/// is given.
-/// It is expected that figuring out whether a number is allowed is handled in
-/// the parser.
-/// When `micromark` is used, this function never panics.
-///
-/// ## References
-///
-/// * [`micromark-util-decode-numeric-character-reference` in `micromark`](https://github.com/micromark/micromark/tree/main/packages/micromark-util-decode-numeric-character-reference)
-/// * [*§ 2.5 Entity and numeric character references* in `CommonMark`](https://spec.commonmark.org/0.30/#entity-and-numeric-character-references)
-pub fn decode_numeric_character_reference(value: &str, radix: u32) -> char {
- let code = u32::from_str_radix(value, radix).expect("expected `value` to be an int");
-
- if
- // C0 except for HT, LF, FF, CR, space
- code < 0x09 ||
- code == 0x0B ||
- (code > 0x0D && code < 0x20) ||
- // Control character (DEL) of the basic block and C1 controls.
- (code > 0x7E && code < 0xA0) ||
- // Lone high surrogates and low surrogates.
- (code > 0xd7ff && code < 0xe000) ||
- // Noncharacters.
- (code > 0xfdcf && code < 0xfdf0) ||
- ((code & 0xffff) == 0xffff) ||
- ((code & 0xffff) == 0xfffe) ||
- // Out of range
- code > 0x0010_ffff
- {
- '�'
- } else {
- char::from_u32(code).expect("expected valid `code`")
- }
-}
-
-/// Decode named character references.
-///
-/// Turn the name coming from a named character reference (without the `&` or
-/// `;`) into a string.
-/// This looks the given string up in [`CHARACTER_REFERENCE_NAMES`][] and then
-/// takes the corresponding value from [`CHARACTER_REFERENCE_VALUES`][].
-///
-/// The result is `String` instead of `char` because named character references
-/// can expand into multiple characters.
-///
-/// ## Examples
-///
-/// ```rust ignore
-/// use micromark::util::decode_named_character_reference;
-///
-/// assert_eq!(decode_named_character_reference("amp"), "&");
-/// assert_eq!(decode_named_character_reference("AElig"), "Æ");
-/// assert_eq!(decode_named_character_reference("aelig"), "æ");
-/// ```
-///
-/// ## Panics
-///
-/// This function panics if a name not in [`CHARACTER_REFERENCE_NAMES`][] is
-/// given.
-/// It is expected that figuring out whether a name is allowed is handled in
-/// the parser.
-/// When `micromark` is used, this function never panics.
-///
-/// ## References
-///
-/// * [`wooorm/decode-named-character-reference`](https://github.com/wooorm/decode-named-character-reference)
-/// * [*§ 2.5 Entity and numeric character references* in `CommonMark`](https://spec.commonmark.org/0.30/#entity-and-numeric-character-references)
-pub fn decode_named_character_reference(value: &str) -> String {
- let position = CHARACTER_REFERENCE_NAMES.iter().position(|&x| x == value);
- if let Some(index) = position {
- CHARACTER_REFERENCE_VALUES[index].to_string()
- } else {
- unreachable!("expected valid `name`")
- }
-}
-
-/// A struct representing the span of an opening and closing event of a token.
-#[derive(Debug)]
-pub struct Span {
- // To do: probably needed in the future.
- // start: Point,
- /// Absolute offset (and `index` in `codes`) of where this span starts.
- pub start_index: usize,
- // To do: probably needed in the future.
- // end: Point,
- /// Absolute offset (and `index` in `codes`) of where this span ends.
- pub end_index: usize,
- // To do: probably needed in the future.
- // token_type: TokenType,
-}
-
-/// Get a span from an event.
-///
-/// Get the span of an `exit` event, by looking backwards through the events to
-/// find the corresponding `enter` event.
-/// This assumes that tokens with the same type are not nested.
-///
-/// ## Panics
-///
-/// This function panics if an enter event is given.
-/// When `micromark` is used, this function never panics.
-pub fn get_span(events: &[Event], index: usize) -> Span {
- let exit = &events[index];
- // let end = exit.point.clone();
- let end_index = exit.index;
- let token_type = exit.token_type.clone();
- // To do: support `enter` events if needed and walk forwards?
- assert_eq!(
- exit.event_type,
- EventType::Exit,
- "expected `get_span` to be called on `exit` event"
- );
- let mut enter_index = index - 1;
-
- loop {
- let enter = &events[enter_index];
- if enter.event_type == EventType::Enter && enter.token_type == token_type {
- return Span {
- // start: enter.point.clone(),
- start_index: enter.index,
- // end,
- end_index,
- // token_type,
- };
- }
-
- enter_index -= 1;
- }
-}
-
-/// Serialize a span, optionally expanding tabs.
-pub fn slice_serialize(codes: &[Code], span: &Span, expand_tabs: bool) -> String {
- serialize_chunks(slice_codes(codes, span), expand_tabs)
-}
-
-/// Get a slice of codes from a span.
-pub fn slice_codes<'a>(codes: &'a [Code], span: &Span) -> &'a [Code] {
- &codes[span.start_index..span.end_index]
-}
-
-/// Serialize a slice of codes, optionally expanding tabs.
-pub fn serialize_chunks(codes: &[Code], expand_tabs: bool) -> String {
- let mut at_tab = false;
- let mut index = 0;
- let mut value: Vec<char> = vec![];
-
- while index < codes.len() {
- let code = codes[index];
- let mut at_tab_next = false;
-
- match code {
- Code::CarriageReturnLineFeed => {
- value.push('\r');
- value.push('\n');
- }
- Code::Char(char) if char == '\n' || char == '\r' => {
- value.push(char);
- }
- Code::Char(char) if char == '\t' => {
- at_tab_next = true;
- value.push(if expand_tabs { ' ' } else { char });
- }
- Code::VirtualSpace => {
- if !expand_tabs && at_tab {
- index += 1;
- continue;
- }
- value.push(' ');
- }
- Code::Char(char) => {
- value.push(char);
- }
- Code::None => {
- unreachable!("unexpected EOF code in codes");
- }
- }
-
- at_tab = at_tab_next;
-
- index += 1;
- }
-
- value.into_iter().collect()
-}
diff --git a/src/util/decode_character_reference.rs b/src/util/decode_character_reference.rs
new file mode 100644
index 0000000..4a9317e
--- /dev/null
+++ b/src/util/decode_character_reference.rs
@@ -0,0 +1,103 @@
+//! Utilities to decode character references.
+
+use crate::constant::{CHARACTER_REFERENCE_NAMES, CHARACTER_REFERENCE_VALUES};
+
+/// Decode named character references.
+///
+/// Turn the name coming from a named character reference (without the `&` or
+/// `;`) into a string.
+/// This looks the given string up in [`CHARACTER_REFERENCE_NAMES`][] and then
+/// takes the corresponding value from [`CHARACTER_REFERENCE_VALUES`][].
+///
+/// The result is `String` instead of `char` because named character references
+/// can expand into multiple characters.
+///
+/// ## Examples
+///
+/// ```rust ignore
+/// use micromark::util::decode_character_reference::decode_named;
+///
+/// assert_eq!(decode_named("amp"), "&");
+/// assert_eq!(decode_named("AElig"), "Æ");
+/// assert_eq!(decode_named("aelig"), "æ");
+/// ```
+///
+/// ## Panics
+///
+/// This function panics if a name not in [`CHARACTER_REFERENCE_NAMES`][] is
+/// given.
+/// It is expected that figuring out whether a name is allowed is handled in
+/// the parser.
+/// When `micromark` is used, this function never panics.
+///
+/// ## References
+///
+/// * [`wooorm/decode-named-character-reference`](https://github.com/wooorm/decode-named-character-reference)
+/// * [*§ 2.5 Entity and numeric character references* in `CommonMark`](https://spec.commonmark.org/0.30/#entity-and-numeric-character-references)
+pub fn decode_named(value: &str) -> String {
+ let position = CHARACTER_REFERENCE_NAMES.iter().position(|&x| x == value);
+ if let Some(index) = position {
+ CHARACTER_REFERENCE_VALUES[index].to_string()
+ } else {
+ unreachable!("expected valid `name`")
+ }
+}
+
+/// Decode numeric character references.
+///
+/// Turn the number (in string form as either hexadecimal or decimal) coming
+/// from a numeric character reference into a character.
+/// Whether the base of the string form is `10` (decimal) or `16` (hexadecimal)
+/// must be passed as the `radix` parameter.
+///
+/// This returns the `char` associated with that number or a replacement
+/// character for C0 control characters (except for ASCII whitespace), C1
+/// control characters, lone surrogates, noncharacters, and out of range
+/// characters.
+///
+/// ## Examples
+///
+/// ```rust ignore
+/// use micromark::util::decode_character_reference::decode_numeric;
+///
+/// assert_eq!(decode_numeric("123", 10), '{');
+/// assert_eq!(decode_numeric("9", 16), '\t');
+/// assert_eq!(decode_numeric("0", 10), '�'); // Not allowed.
+/// ```
+///
+/// ## Panics
+///
+/// This function panics if an invalid string or an out of bounds valid string
+/// is given.
+/// It is expected that figuring out whether a number is allowed is handled in
+/// the parser.
+/// When `micromark` is used, this function never panics.
+///
+/// ## References
+///
+/// * [`micromark-util-decode-numeric-character-reference` in `micromark`](https://github.com/micromark/micromark/tree/main/packages/micromark-util-decode-numeric-character-reference)
+/// * [*§ 2.5 Entity and numeric character references* in `CommonMark`](https://spec.commonmark.org/0.30/#entity-and-numeric-character-references)
+pub fn decode_numeric(value: &str, radix: u32) -> char {
+ let code = u32::from_str_radix(value, radix).expect("expected `value` to be an int");
+
+ if
+ // C0 except for HT, LF, FF, CR, space
+ code < 0x09 ||
+ code == 0x0B ||
+ (code > 0x0D && code < 0x20) ||
+ // Control character (DEL) of the basic block and C1 controls.
+ (code > 0x7E && code < 0xA0) ||
+ // Lone high surrogates and low surrogates.
+ (code > 0xd7ff && code < 0xe000) ||
+ // Noncharacters.
+ (code > 0xfdcf && code < 0xfdf0) ||
+ ((code & 0xffff) == 0xffff) ||
+ ((code & 0xffff) == 0xfffe) ||
+ // Out of range
+ code > 0x0010_ffff
+ {
+ '�'
+ } else {
+ char::from_u32(code).expect("expected valid `code`")
+ }
+}
diff --git a/src/util/encode.rs b/src/util/encode.rs
new file mode 100644
index 0000000..f79c8ea
--- /dev/null
+++ b/src/util/encode.rs
@@ -0,0 +1,29 @@
+//! Utilities to encode HTML.
+
+/// Encode dangerous html characters.
+///
+/// This ensures that certain characters which have special meaning in HTML are
+/// dealt with.
+/// Technically, we can skip `>` and `"` in many cases, but CM includes them.
+///
+/// This behavior is not explained in prose in `CommonMark` but can be inferred
+/// from the input/output test cases.
+///
+/// ## Examples
+///
+/// ```rust ignore
+/// use micromark::util::encode::encode;
+///
+/// assert_eq!(encode("I <3 🦀"), "I &lt;3 🦀");
+/// ```
+///
+/// ## References
+///
+/// * [`micromark-util-encode` in `micromark`](https://github.com/micromark/micromark/tree/main/packages/micromark-util-encode)
+pub fn encode(value: &str) -> String {
+ value
+ .replace('&', "&amp;")
+ .replace('"', "&quot;")
+ .replace('<', "&lt;")
+ .replace('>', "&gt;")
+}
diff --git a/src/util/mod.rs b/src/util/mod.rs
new file mode 100644
index 0000000..c3db267
--- /dev/null
+++ b/src/util/mod.rs
@@ -0,0 +1,6 @@
+//! Utilities used when compiling markdown.
+
+pub mod decode_character_reference;
+pub mod encode;
+pub mod sanitize_uri;
+pub mod span;
diff --git a/src/util/sanitize_uri.rs b/src/util/sanitize_uri.rs
new file mode 100644
index 0000000..1dffc50
--- /dev/null
+++ b/src/util/sanitize_uri.rs
@@ -0,0 +1,111 @@
+//! Utilities to make URLs safe.
+
+use crate::util::encode::encode;
+
+/// Make a value safe for injection as a URL.
+///
+/// This encodes unsafe characters with percent-encoding and skips already
+/// encoded sequences (see `normalize_uri` below).
+/// Further unsafe characters are encoded as character references (see
+/// `encode`).
+///
+/// Then, a vec of (lowercase) allowed protocols can be given, in which case
+/// the URL is sanitized.
+///
+/// For example, `Some(vec!["http", "https", "irc", "ircs", "mailto", "xmpp"])`
+/// can be used for `a[href]`, or `Some(vec!["http", "https"])` for `img[src]`.
+/// If the URL includes an unknown protocol (one not matched by `protocols`, such
+/// as a dangerous example, `javascript:`), the value is ignored.
+///
+/// ## References
+///
+/// * [`micromark-util-sanitize-uri` in `micromark`](https://github.com/micromark/micromark/tree/main/packages/micromark-util-sanitize-uri)
+pub fn sanitize_uri(value: &str, protocols: &Option<Vec<&str>>) -> String {
+ let value = encode(&normalize_uri(value));
+
+ if let Some(protocols) = protocols {
+ let chars: Vec<char> = value.chars().collect();
+ let mut index = 0;
+ let mut colon: Option<usize> = None;
+
+ while index < chars.len() {
+ let char = chars[index];
+
+ match char {
+ ':' => {
+ colon = Some(index);
+ break;
+ }
+ '?' | '#' | '/' => break,
+ _ => {}
+ }
+
+ index += 1;
+ }
+
+ // If there is no protocol, or the first colon is after `?`, `#`, or `/`, it’s relative.
+ // If there is a protocol, it must be in the allowed list.
+ if let Some(colon) = colon {
+ let protocol = chars[0..colon].iter().collect::<String>().to_lowercase();
+ if !protocols.contains(&protocol.as_str()) {
+ return "".to_string();
+ }
+ }
+ }
+
+ value
+}
+
+/// Normalize a URL (such as used in definitions).
+///
+/// Encode unsafe characters with percent-encoding, skipping already encoded
+/// sequences.
+///
+/// ## References
+///
+/// * [`micromark-util-sanitize-uri` in `micromark`](https://github.com/micromark/micromark/tree/main/packages/micromark-util-sanitize-uri)
+fn normalize_uri(value: &str) -> String {
+ let chars: Vec<char> = value.chars().collect();
+ let mut result: Vec<String> = vec![];
+ let mut index = 0;
+ let mut start = 0;
+ let mut buff = [0; 4];
+
+ while index < chars.len() {
+ let char = chars[index];
+
+ // A correct percent encoded value.
+ if char == '%'
+ && index + 2 < chars.len()
+ && chars[index + 1].is_ascii_alphanumeric()
+ && chars[index + 2].is_ascii_alphanumeric()
+ {
+ index += 3;
+ continue;
+ }
+
+ // Note: Rust already takes care of lone astral surrogates.
+ // Non-ascii or not allowed ascii.
+ if char >= '\u{0080}'
+ || !matches!(char, '!' | '#' | '$' | '&'..=';' | '=' | '?'..='Z' | '_' | 'a'..='z' | '~')
+ {
+ result.push(chars[start..index].iter().collect::<String>());
+
+ char.encode_utf8(&mut buff);
+ result.push(
+ buff[0..char.len_utf8()]
+ .iter()
+ .map(|&byte| format!("%{:X}", byte))
+ .collect::<String>(),
+ );
+
+ start = index + 1;
+ }
+
+ index += 1;
+ }
+
+ result.push(chars[start..].iter().collect::<String>());
+
+ result.join("")
+}
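A rough usage sketch for the new `sanitize_uri` module, based on the doc comments above and not part of this patch: with an allow list of protocols, values with an unknown protocol are dropped, while allowed and relative URLs pass through.

use crate::util::sanitize_uri::sanitize_uri;

// Allowed protocol: the value is kept (after encoding/normalizing).
assert_eq!(
    sanitize_uri("https://example.com", &Some(vec!["http", "https"])),
    "https://example.com"
);
// Unknown protocol such as `javascript:`: the value is ignored.
assert_eq!(
    sanitize_uri("javascript:alert(1)", &Some(vec!["http", "https"])),
    ""
);
// No colon before `?`, `#`, or `/`: treated as relative and kept.
assert_eq!(
    sanitize_uri("./image.png", &Some(vec!["http", "https"])),
    "./image.png"
);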
diff --git a/src/util/span.rs b/src/util/span.rs
new file mode 100644
index 0000000..c48549b
--- /dev/null
+++ b/src/util/span.rs
@@ -0,0 +1,112 @@
+//! Utilities to deal with semantic labels.
+
+use crate::tokenizer::{Code, Event, EventType};
+
+/// A struct representing the span of an opening and closing event of a token.
+#[derive(Debug)]
+pub struct Span {
+ // To do: probably needed in the future.
+ // start: Point,
+ /// Absolute offset (and `index` in `codes`) of where this span starts.
+ pub start_index: usize,
+ // To do: probably needed in the future.
+ // end: Point,
+ /// Absolute offset (and `index` in `codes`) of where this span ends.
+ pub end_index: usize,
+ // To do: probably needed in the future.
+ // token_type: TokenType,
+}
+
+/// Get a span from an event.
+///
+/// Get the span of an `exit` event, by looking backwards through the events to
+/// find the corresponding `enter` event.
+/// This assumes that tokens with the same type are not nested.
+///
+/// ## Panics
+///
+/// This function panics if an enter event is given.
+/// When `micromark` is used, this function never panics.
+pub fn from_exit_event(events: &[Event], index: usize) -> Span {
+ let exit = &events[index];
+ // let end = exit.point.clone();
+ let end_index = exit.index;
+ let token_type = exit.token_type.clone();
+ // To do: support `enter` events if needed and walk forwards?
+ assert_eq!(
+ exit.event_type,
+ EventType::Exit,
+ "expected `get_span` to be called on `exit` event"
+ );
+ let mut enter_index = index - 1;
+
+ loop {
+ let enter = &events[enter_index];
+ if enter.event_type == EventType::Enter && enter.token_type == token_type {
+ return Span {
+ // start: enter.point.clone(),
+ start_index: enter.index,
+ // end,
+ end_index,
+ // token_type,
+ };
+ }
+
+ enter_index -= 1;
+ }
+}
+
+/// Serialize a span, optionally expanding tabs.
+pub fn serialize(all_codes: &[Code], span: &Span, expand_tabs: bool) -> String {
+ serialize_codes(codes(all_codes, span), expand_tabs)
+}
+
+/// Get a slice of codes from a span.
+pub fn codes<'a>(codes: &'a [Code], span: &Span) -> &'a [Code] {
+ &codes[span.start_index..span.end_index]
+}
+
+/// Serialize a slice of codes, optionally expanding tabs.
+fn serialize_codes(codes: &[Code], expand_tabs: bool) -> String {
+ let mut at_tab = false;
+ let mut index = 0;
+ let mut value: Vec<char> = vec![];
+
+ while index < codes.len() {
+ let code = codes[index];
+ let mut at_tab_next = false;
+
+ match code {
+ Code::CarriageReturnLineFeed => {
+ value.push('\r');
+ value.push('\n');
+ }
+ Code::Char(char) if char == '\n' || char == '\r' => {
+ value.push(char);
+ }
+ Code::Char(char) if char == '\t' => {
+ at_tab_next = true;
+ value.push(if expand_tabs { ' ' } else { char });
+ }
+ Code::VirtualSpace => {
+ if !expand_tabs && at_tab {
+ index += 1;
+ continue;
+ }
+ value.push(' ');
+ }
+ Code::Char(char) => {
+ value.push(char);
+ }
+ Code::None => {
+ unreachable!("unexpected EOF code in codes");
+ }
+ }
+
+ at_tab = at_tab_next;
+
+ index += 1;
+ }
+
+ value.into_iter().collect()
+}
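For context, the call pattern that replaces the old `slice_serialize(codes, &get_span(events, index), false)` throughout `compiler.rs` above; a sketch only, with `events`, `codes`, and `index` as they appear in the compiler:

use crate::util::span::{from_exit_event, serialize};

// Find the `Enter` event matching the `Exit` event at `index`, then
// serialize the codes between them without expanding tabs.
let slice = serialize(codes, &from_exit_event(events, index), false);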