Diffstat (limited to 'src')
-rw-r--r--  src/compiler.rs            |  34
-rw-r--r--  src/construct/autolink.rs  |  10
-rw-r--r--  src/lib.rs                 |   1
-rw-r--r--  src/util.rs                | 100
4 files changed, 130 insertions(+), 15 deletions(-)
diff --git a/src/compiler.rs b/src/compiler.rs
index df26f1b..c451887 100644
--- a/src/compiler.rs
+++ b/src/compiler.rs
@@ -3,7 +3,7 @@
 use crate::construct::character_reference::Kind as CharacterReferenceKind;
 use crate::tokenizer::{Code, Event, EventType, TokenType};
 use crate::util::{
     decode_named_character_reference, decode_numeric_character_reference, encode, get_span,
-    slice_serialize,
+    sanitize_uri, slice_serialize,
 };
 
@@ -13,6 +13,11 @@ pub struct CompileOptions {
     /// The default is `false`, you can turn it on to `true` for trusted
     /// content.
     pub allow_dangerous_html: bool,
+
+    /// Whether to allow (dangerous) protocols in links and images.
+    /// The default is `false`, you can turn it on to `true` for trusted
+    /// content.
+    pub allow_dangerous_protocol: bool,
 }
 
 /// Turn events and codes into a string of HTML.
@@ -28,6 +33,17 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
     let mut slurp_one_line_ending = false;
     let mut ignore_encode = false;
     let mut character_reference_kind: Option<CharacterReferenceKind> = None;
+    let protocol_href = if options.allow_dangerous_protocol {
+        None
+    } else {
+        Some(vec!["http", "https", "irc", "ircs", "mailto", "xmpp"])
+    };
+    // let protocol_src = if options.allow_dangerous_protocol {
+    //     None
+    // } else {
+    //     Some(vec!["http", "https"])
+    // };
+
     // let mut slurp_all_line_endings = false;
 
     println!("events: {:#?}", events);
@@ -238,20 +254,20 @@ pub fn compile(events: &[Event], codes: &[Code], options: &CompileOptions) -> St
            TokenType::AutolinkProtocol => {
                let slice = slice_serialize(codes, &get_span(events, index), false);
                let buf = buf_tail_mut(buffers);
-                // To do: options.allowDangerousProtocol ? undefined : protocolHref
-                // let url = sanitize_uri(slice);
-                let url = encode(&slice);
-                buf.push(format!("<a href=\"{}\">", url));
+                buf.push(format!(
+                    "<a href=\"{}\">",
+                    sanitize_uri(slice.as_str(), &protocol_href)
+                ));
                buf.push(encode(&slice));
                buf.push("</a>".to_string());
            }
            TokenType::AutolinkEmail => {
                let slice = slice_serialize(codes, &get_span(events, index), false);
                let buf = buf_tail_mut(buffers);
-                // To do: options.allowDangerousProtocol ? undefined : protocolHref
-                // let url = sanitize_uri(slice);
-                let url = encode(&slice);
-                buf.push(format!("<a href=\"mailto:{}\">", url));
+                buf.push(format!(
+                    "<a href=\"mailto:{}\">",
+                    sanitize_uri(slice.as_str(), &protocol_href)
+                ));
                buf.push(encode(&slice));
                buf.push("</a>".to_string());
            }
diff --git a/src/construct/autolink.rs b/src/construct/autolink.rs
index 24f2c20..c190d40 100644
--- a/src/construct/autolink.rs
+++ b/src/construct/autolink.rs
@@ -41,12 +41,12 @@
 //! Interestingly, there are a couple of things that are valid autolinks in
 //! markdown but in HTML would be valid tags, such as `<svg:rect>` and
 //! `<xml:lang/>`.
-//! However, because CommonMark employs a naïve HTML parsing algorithm, those
+//! However, because `CommonMark` employs a naïve HTML parsing algorithm, those
 //! are not considered HTML.
 //!
-//! While CommonMark restricts links from occurring in other links in the case
-//! of bracketed links, this restriction is not in place for autolinks inside
-//! autolinks:
+//! While `CommonMark` restricts links from occurring in other links in the
+//! case of bracketed links, this restriction is not in place for autolinks
+//! inside autolinks:
 //!
 //! ```markdown
 //! [<https://example.com>](#)
@@ -74,8 +74,6 @@
 //! [autolink_scheme_size_max]: crate::constant::AUTOLINK_SCHEME_SIZE_MAX
 //! [autolink_domain_size_max]: crate::constant::AUTOLINK_DOMAIN_SIZE_MAX
 //! [html-a]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-a-element
-//!
-//! <!-- To do: link to `encode` -->
 
 use crate::constant::{AUTOLINK_DOMAIN_SIZE_MAX, AUTOLINK_SCHEME_SIZE_MAX};
 use crate::tokenizer::{Code, State, StateFnResult, TokenType, Tokenizer};
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -42,6 +42,7 @@ pub fn micromark(value: &str) -> String {
 ///
 /// let result = micromark_with_options("<div>\n\n# Hello, world!\n\n</div>", &CompileOptions {
 ///     allow_dangerous_html: true,
+///     allow_dangerous_protocol: true,
 /// });
 ///
 /// assert_eq!(result, "<div>\n<h1>Hello, world!</h1>\n</div>");
diff --git a/src/util.rs b/src/util.rs
index 5a916cd..accc48e 100644
--- a/src/util.rs
+++ b/src/util.rs
@@ -31,6 +31,106 @@ pub fn encode(value: &str) -> String {
         .replace('>', "&gt;")
 }
 
+/// Make a value safe for injection as a URL.
+///
+/// This encodes unsafe characters with percent-encoding and skips already
+/// encoded sequences (see `normalize_uri` below).
+/// Further unsafe characters are encoded as character references (see
+/// `encode`).
+///
+/// Additionally, a vec of (lowercase) allowed protocols can be given, in
+/// which case the URL is sanitized.
+///
+/// For example, `Some(vec!["http", "https", "irc", "ircs", "mailto", "xmpp"])`
+/// can be used for `a[href]`, or `Some(vec!["http", "https"])` for `img[src]`.
+/// If the URL includes an unknown protocol (one not in `protocols`, such as
+/// a dangerous example, `javascript:`), the value is ignored.
+pub fn sanitize_uri(value: &str, protocols: &Option<Vec<&str>>) -> String {
+    let value = encode(&normalize_uri(value));
+
+    if let Some(protocols) = protocols {
+        let chars: Vec<char> = value.chars().collect();
+        let mut index = 0;
+        let mut colon: Option<usize> = None;
+
+        while index < chars.len() {
+            let char = chars[index];
+
+            match char {
+                ':' => {
+                    colon = Some(index);
+                    break;
+                }
+                '?' | '#' | '/' => break,
+                _ => {}
+            }
+
+            index += 1;
+        }
+
+        // If the first colon is missing, or comes after `?`, `#`, or `/`,
+        // the URL is relative and fine as-is.
+        // Otherwise everything before the colon is a protocol, and it must
+        // be in the allowed list.
+        if let Some(colon) = colon {
+            let protocol = chars[0..colon].iter().collect::<String>().to_lowercase();
+            if !protocols.contains(&protocol.as_str()) {
+                return "".to_string();
+            }
+        }
+    }
+
+    value
+}
+
+/// Normalize a URL (such as used in definitions).
+///
+/// Encode unsafe characters with percent-encoding, skipping already encoded
+/// sequences.
+fn normalize_uri(value: &str) -> String {
+    let chars: Vec<char> = value.chars().collect();
+    let mut result: Vec<String> = vec![];
+    let mut index = 0;
+    let mut start = 0;
+    let mut buff = [0; 4];
+
+    while index < chars.len() {
+        let char = chars[index];
+
+        // A correct percent encoded value.
+        if char == '%'
+            && index + 2 < chars.len()
+            && chars[index + 1].is_ascii_alphanumeric()
+            && chars[index + 2].is_ascii_alphanumeric()
+        {
+            index += 3;
+            continue;
+        }
+
+        // Note: Rust already takes care of lone astral surrogates.
+        // Non-ascii or not allowed ascii.
+        if char >= '\u{0080}'
+            || !matches!(char, '!' | '#' | '$' | '&'..=';' | '=' | '?'..='Z' | '_' | 'a'..='z' | '~')
+        {
+            result.push(chars[start..index].iter().collect::<String>());
+
+            char.encode_utf8(&mut buff);
+            result.push(
+                buff[0..char.len_utf8()]
+                    .iter()
+                    .map(|&byte| format!("%{:X}", byte))
+                    .collect::<String>(),
+            );
+
+            start = index + 1;
+        }
+
+        index += 1;
+    }
+
+    result.push(chars[start..].iter().collect::<String>());
+
+    result.join("")
+}
+
 /// Decode numeric character references.
 ///
 /// Turn the number (in string form as either hexadecimal or decimal) coming
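A minimal usage sketch of the new option, assuming the crate is consumed as `micromark` (as in the `src/lib.rs` doc example) and that autolinks compile inside a paragraph; the exact HTML here is an expectation for illustration, not output captured from this commit:

use micromark::{micromark, micromark_with_options, CompileOptions};

fn main() {
    // Default: `javascript` is not in the `protocol_href` allow-list, so
    // `sanitize_uri` empties the `href`; the label is still encoded and kept.
    assert_eq!(
        micromark("<javascript:alert(1)>"),
        "<p><a href=\"\">javascript:alert(1)</a></p>"
    );

    // Opting in to dangerous protocols keeps the URL intact.
    assert_eq!(
        micromark_with_options(
            "<javascript:alert(1)>",
            &CompileOptions {
                allow_dangerous_html: false,
                allow_dangerous_protocol: true,
            }
        ),
        "<p><a href=\"javascript:alert(1)\">javascript:alert(1)</a></p>"
    );
}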
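A hypothetical test module for `sanitize_uri` itself, tracing the checks above; the module name and cases are illustrative and not part of the commit:

#[cfg(test)]
mod sanitize_uri_illustration {
    use super::sanitize_uri;

    #[test]
    fn filters_unknown_protocols() {
        let allowed = Some(vec!["http", "https"]);
        // A listed protocol passes through (already-safe characters are
        // left alone by `normalize_uri` and `encode`).
        assert_eq!(sanitize_uri("https://example.com", &allowed), "https://example.com");
        // Matching is case-insensitive: the protocol is lowercased before
        // the `contains` check, but the value keeps its original casing.
        assert_eq!(sanitize_uri("HTTPS://example.com", &allowed), "HTTPS://example.com");
        // An unlisted protocol empties the whole value.
        assert_eq!(sanitize_uri("javascript:alert(1)", &allowed), "");
        // No colon before `/`, `?`, or `#` means the URL is relative.
        assert_eq!(sanitize_uri("./a?b#c", &allowed), "./a?b#c");
        // `None` turns the protocol check off entirely.
        assert_eq!(sanitize_uri("javascript:alert(1)", &None), "javascript:alert(1)");
    }
}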
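The byte-wise percent-encoding in `normalize_uri` can be sanity-checked in isolation; this standalone sketch repeats the `encode_utf8` logic from the hunk above:

fn main() {
    // 'é' (U+00E9) is at or above U+0080, so it is percent-encoded per
    // UTF-8 byte: 0xC3 0xA9 becomes "%C3%A9", as in `normalize_uri`.
    let mut buff = [0; 4];
    let char = 'é';
    char.encode_utf8(&mut buff);
    let escaped = buff[0..char.len_utf8()]
        .iter()
        .map(|&byte| format!("%{:X}", byte))
        .collect::<String>();
    assert_eq!(escaped, "%C3%A9");
}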