Skip to content

Commit

Permalink
🔧 Make sure docstrings are ///
Browse files Browse the repository at this point in the history
Just a minor change, to help with intellisense
  • Loading branch information
chrisjsewell authored and rlidwka committed Jul 6, 2023
1 parent 78914cd commit 1a22358
Show file tree
Hide file tree
Showing 9 changed files with 50 additions and 50 deletions.
20 changes: 10 additions & 10 deletions src/generics/inline/emph_pair.rs
Original file line number Diff line number Diff line change
Expand Up @@ -135,8 +135,8 @@ impl<const MARKER: char, const CAN_SPLIT_WORD: bool> InlineRule for EmphPairScan
}
}

// Assuming last token is a closing delimiter we just inserted,
// try to find opener(s). If any are found, move stuff to nested emph node.
/// Assuming last token is a closing delimiter we just inserted,
/// try to find opener(s). If any are found, move stuff to nested emph node.
fn scan_and_match_delimiters<const MARKER: char>(state: &mut InlineState, mut closer_token: Node) -> Node {
if state.node.children.is_empty() { return closer_token; } // must have at least opener and closer

Expand Down Expand Up @@ -270,14 +270,14 @@ impl CoreRule for FragmentsJoin {
}


// Clean up tokens after emphasis and strikethrough postprocessing:
// merge adjacent text nodes into one and re-calculate all token levels
//
// This is necessary because initially emphasis delimiter markers (*, _, ~)
// are treated as their own separate text tokens. Then emphasis rule either
// leaves them as text (needed to merge with adjacent text) or turns them
// into opening/closing tags (which messes up levels inside).
//
/// Clean up tokens after emphasis and strikethrough postprocessing:
/// merge adjacent text nodes into one and re-calculate all token levels
///
/// This is necessary because initially emphasis delimiter markers (*, _, ~)
/// are treated as their own separate text tokens. Then emphasis rule either
/// leaves them as text (needed to merge with adjacent text) or turns them
/// into opening/closing tags (which messes up levels inside).
///
fn fragments_join(node: &mut Node) {
// replace all emph markers with text tokens
for token in node.children.iter_mut() {
Expand Down
4 changes: 2 additions & 2 deletions src/generics/inline/full_link.rs
Original file line number Diff line number Diff line change
Expand Up @@ -90,8 +90,8 @@ impl<const PREFIX: char, const ENABLE_NESTED: bool> InlineRule for LinkPrefixSca
}

#[doc(hidden)]
// this rule makes sure that parser is stopped on "]" character,
// but it actually doesn't do anything
/// this rule makes sure that parser is stopped on "]" character,
/// but it actually doesn't do anything
pub struct LinkScannerEnd;
impl InlineRule for LinkScannerEnd {
const MARKER: char = ']';
Expand Down
8 changes: 4 additions & 4 deletions src/parser/block/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,8 @@ impl BlockParser {
Self::default()
}

// Generate tokens for input range
//
/// Generate tokens for input range
///
pub fn tokenize(&self, state: &mut BlockState) {
stacker::maybe_grow(64*1024, 1024*1024, || {
let mut has_empty_lines = false;
Expand Down Expand Up @@ -104,8 +104,8 @@ impl BlockParser {
});
}

// Process input string and push block tokens into `out_tokens`
//
/// Process input string and push block tokens into `out_tokens`
///
pub fn parse(&self, src: &str, node: Node, md: &MarkdownIt, root_ext: &mut RootExtSet) -> Node {
let mut state = BlockState::new(src, md, root_ext, node);
self.tokenize(&mut state);
Expand Down
10 changes: 5 additions & 5 deletions src/parser/block/state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -197,8 +197,8 @@ impl<'a, 'b> BlockState<'a, 'b> {
line
}

// return line indent of specific line, taking into account blockquotes and lists;
// it may be negative if a text has less indentation than current list item
/// return line indent of specific line, taking into account blockquotes and lists;
/// it may be negative if a text has less indentation than current list item
#[must_use]
#[inline]
pub fn line_indent(&self, line: usize) -> i32 {
Expand All @@ -209,7 +209,7 @@ impl<'a, 'b> BlockState<'a, 'b> {
}
}

// return a single line, trimming initial spaces
/// return a single line, trimming initial spaces
#[must_use]
#[inline]
pub fn get_line(&self, line: usize) -> &str {
Expand All @@ -222,8 +222,8 @@ impl<'a, 'b> BlockState<'a, 'b> {
}
}

// Cut a range of lines begin..end (not including end) from the source without preceding indent.
// Returns a string (lines) plus a mapping (start of each line in result -> start of each line in source).
/// Cut a range of lines begin..end (not including end) from the source without preceding indent.
/// Returns a string (lines) plus a mapping (start of each line in result -> start of each line in source).
pub fn get_lines(&self, begin: usize, end: usize, indent: usize, keep_last_lf: bool) -> (String, Vec<(usize, usize)>) {
debug_assert!(begin <= end);

Expand Down
22 changes: 11 additions & 11 deletions src/parser/inline/builtin/skip_text.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// Skip text characters for text token, place those to pending buffer
// and increment current pos
//
//! Skip text characters for text token, place those to pending buffer
//! and increment current pos
//!
use regex::{self, Regex};

use crate::parser::inline::{InlineRule, InlineState};
Expand Down Expand Up @@ -43,14 +43,14 @@ pub(crate) enum TextScannerImpl {
SkipRegex(Regex),
}

// Rule to skip pure text
// '{}$%@~+=:' reserved for extentions
//
// !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~
//
// !!!! Don't confuse with "Markdown ASCII Punctuation" chars
// http://spec.commonmark.org/0.15/#ascii-punctuation-character
//
/// Rule to skip pure text
/// '{}$%@~+=:' reserved for extensions
///
/// !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~
///
/// !!!! Don't confuse with "Markdown ASCII Punctuation" chars
/// http://spec.commonmark.org/0.15/#ascii-punctuation-character
///
pub struct TextScanner;

impl TextScanner {
Expand Down
14 changes: 7 additions & 7 deletions src/parser/inline/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,9 +40,9 @@ impl InlineParser {
Self::default()
}

// Skip single token by running all rules in validation mode;
// returns `true` if any rule reported success
//
/// Skip single token by running all rules in validation mode;
/// returns `true` if any rule reported success
///
pub fn skip_token(&self, state: &mut InlineState) {
stacker::maybe_grow(64*1024, 1024*1024, || {
let mut ok = None;
Expand Down Expand Up @@ -78,8 +78,8 @@ impl InlineParser {
});
}

// Generate tokens for input range
//
/// Generate tokens for input range
///
pub fn tokenize(&self, state: &mut InlineState) {
stacker::maybe_grow(64*1024, 1024*1024, || {
let end = state.pos_max;
Expand Down Expand Up @@ -120,8 +120,8 @@ impl InlineParser {
});
}

// Process input string and push inline tokens into `out_tokens`
//
/// Process input string and push inline tokens into `out_tokens`
///
pub fn parse(
&self,
src: String,
Expand Down
12 changes: 6 additions & 6 deletions src/parser/inline/state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -140,12 +140,12 @@ impl<'a, 'b> InlineState<'a, 'b> {
}
}

// Scan a sequence of emphasis-like markers, and determine whether
// it can start an emphasis sequence or end an emphasis sequence.
//
// - start - position to scan from (it should point at a valid marker);
// - can_split_word - determine if these markers can be found inside a word
//
/// Scan a sequence of emphasis-like markers, and determine whether
/// it can start an emphasis sequence or end an emphasis sequence.
///
/// - start - position to scan from (it should point at a valid marker);
/// - can_split_word - determine if these markers can be found inside a word
///
#[must_use]
pub fn scan_delims(&self, start: usize, can_split_word: bool) -> DelimiterRun {
let mut left_flanking = true;
Expand Down
6 changes: 3 additions & 3 deletions src/plugins/html/utils/blocks.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// List of valid html blocks names, accorting to commonmark spec
// http://jgm.github.io/CommonMark/spec.html#html-blocks
//
//! List of valid html blocks names, according to commonmark spec
//! http://jgm.github.io/CommonMark/spec.html#html-blocks
//!

pub const HTML_BLOCKS: [&str; 62] = [
"address",
Expand Down
4 changes: 2 additions & 2 deletions src/plugins/html/utils/regexps.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
// Regexps to match html elements
//
//! Regexps to match html elements
//!
#![allow(non_upper_case_globals)]
use const_format::formatcp;
use once_cell::sync::Lazy;
Expand Down

0 comments on commit 1a22358

Please sign in to comment.