feat(#162): parse hashtags
carbontwelve committed Jan 24, 2023
1 parent e83216d commit 37ff3e0
Showing 2 changed files with 38 additions and 68 deletions.
69 changes: 35 additions & 34 deletions .eleventy.js
@@ -9,6 +9,7 @@ const PostCSSPlugin = require("eleventy-plugin-postcss");
const linkMapCache = require("./utils/helpers/map");

const UpgradeHelper = require("@11ty/eleventy-upgrade-help");
const {setupMarkdownIt} = require("./utils/helpers/hashtags");

module.exports = function (eleventyConfig) {
eleventyConfig.addPlugin(UpgradeHelper);
@@ -78,48 +79,48 @@ module.exports = function (eleventyConfig) {
require('prismjs/components/prism-markdown')
require('prismjs/components/prism-basic')
require('prismjs/components/prism-go')
require('prismjs/components/prism-regex')
}
});

// Markdown libraries
let markdownIt = require("markdown-it");
let markdownItAnchor = require("markdown-it-anchor");
let markdownFootnote = require("markdown-it-footnote");
const markdownIt = require('markdown-it')({
html: true,
breaks: true,
linkify: true,
}).use(require("markdown-it-anchor"), {
permalink: false,
slugify: input => slugify(input),
}).use(function(md) {
// Recognize Mediawiki links ([[text]])
md.linkify.add("[[", {
validate: /^\s?([^\[\]\|\n\r]+)(\|[^\[\]\|\n\r]+)?\s?\]\]/,
normalize: match => {
const parts = match.raw.slice(2, -2).split("|");
const slug = slugify(parts[0].replace(/.(md|markdown)\s?$/i, "").trim());
const found = linkMapCache.get(slug);

if (!found) {
throw new Error(`Unable to find page linked by wikilink slug [${slug}]`)
}

match.text = parts.length === 2
? parts[1]
: found.title;

match.url = found.permalink.substring(0,1) === '/'
? found.permalink
: `/${found.permalink}`;
}
})
}).use(require("markdown-it-footnote"));

setupMarkdownIt(markdownIt);

eleventyConfig.on('eleventy.after', async () => {
const all = linkMapCache.all();
const n = 1;
});

eleventyConfig
.setLibrary("md", markdownIt({
html: true,
breaks: true,
linkify: true
}).use(markdownItAnchor, {
permalink: false,
slugify: input => slugify(input)
}).use(markdownFootnote).use(function(md) {
// Recognize Mediawiki links ([[text]])
md.linkify.add("[[", {
validate: /^\s?([^\[\]\|\n\r]+)(\|[^\[\]\|\n\r]+)?\s?\]\]/,
normalize: match => {
const parts = match.raw.slice(2, -2).split("|");
const slug = slugify(parts[0].replace(/.(md|markdown)\s?$/i, "").trim());
const found = linkMapCache.get(slug);

if (!found) {
throw new Error(`Unable to find page linked by wikilink slug [${slug}]`)
}

match.text = parts.length === 2
? parts[1]
: found.title;

match.url = found.permalink.substring(0,1) === '/'
? found.permalink
: `/${found.permalink}`;
}
})
}));
eleventyConfig.setLibrary("md", markdownIt);
};
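
Note: the `./utils/helpers/hashtags` module referenced above is not one of the two files changed in this commit, so its contents are not shown in this diff. Judging by the inline configuration it replaces in `utils/collections.js` below, `setupMarkdownIt` presumably just registers the `markdown-it-hashtag` plugin on a markdown-it instance and returns it. A minimal sketch, assuming that structure:

```js
// utils/helpers/hashtags.js (sketch only; this file is not part of this diff)
// Assumption: setupMarkdownIt wires up markdown-it-hashtag with the same
// pattern used by the inline code removed from utils/collections.js below.
const setupMarkdownIt = (md) => md.use(require('markdown-it-hashtag'), {
  // at least three word characters, and not a bare number
  hashtagRegExp: '(?!\\d+\\b)\\w{3,}'
});

// The real module also exports parseCollectionHashtags, sketched after the
// utils/collections.js changes below.
module.exports = { setupMarkdownIt };
```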
37 changes: 3 additions & 34 deletions utils/collections.js
@@ -1,5 +1,6 @@
const {chunk} = require('./helpers')
const {slugify} = require("./filters");
const {setupMarkdownIt, parseCollectionHashtags} = require('./utils/helpers/hashtags');

// Written with inspiration from:
// @see https://www.webstoemp.com/blog/basic-custom-taxonomies-with-eleventy/
@@ -33,45 +34,13 @@ const paginateContentTaxonomy = (baseSlug = '', perPage = 10) => {
};
};

const md = require('markdown-it')()
.use(require('markdown-it-hashtag'), {
hashtagRegExp: '(?!\\d+\\b)\\w{3,}'
});
const md = setupMarkdownIt(require('markdown-it')());

// Filter draft posts when deployed into production
const post = (collection) => ((process.env.ELEVENTY_ENV !== 'production')
? [...collection.getFilteredByGlob('./content/**/*.md')]
: [...collection.getFilteredByGlob('./content/**/*.md')].filter((post) => !post.data.draft)
).map(post => {
if (!post.data.hashtagsMapped) {
// Identify Hashtags and append to Tags
const tags = new Set(post.data.tags ?? []);
const found = new Set();
const content = post.template?.frontMatter?.content;

// Only do the expensive markdown parse if content contains potential hashtags
if (content && content.match(/#([\w-]+)/g)) {
const tokens = md.parseInline(content, {});
for (const token of tokens) {
for (const child of token.children) {
if (child.type === 'hashtag_text') found.add(child.content);
}
}

if (found.size > 0) {
found.forEach(tag => {
tags.add(tag);
});

post.data.tags = Array.from(tags);
}
}
// Mark this post as processed, so we don't do so again each time this collection is requested
post.data.hashtagsMapped = true;
}

return post;
});
).map(parseCollectionHashtags(md));

// Written for #20, this creates a collection of all tags
// @see https://github.com/photogabble/website/issues/20
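
The per-post mapping deleted above now runs through `parseCollectionHashtags`, which is also not shown in this diff. Since it is called as `.map(parseCollectionHashtags(md))`, it is presumably a curried function that closes over the configured markdown-it instance and reproduces the deleted inline logic; a sketch along those lines:

```js
// utils/helpers/hashtags.js (sketch only; not shown in this commit)
// Assumption: parseCollectionHashtags takes a configured markdown-it instance
// and returns a mapper for Array.prototype.map, replicating the inline
// hashtag-to-tag logic deleted from utils/collections.js above.
const parseCollectionHashtags = (md) => (post) => {
  if (post.data.hashtagsMapped) return post;

  const tags = new Set(post.data.tags ?? []);
  const found = new Set();
  const content = post.template?.frontMatter?.content;

  // Only run the expensive markdown parse when the raw content looks like
  // it contains hashtags
  if (content && content.match(/#([\w-]+)/g)) {
    for (const token of md.parseInline(content, {})) {
      for (const child of token.children) {
        if (child.type === 'hashtag_text') found.add(child.content);
      }
    }

    if (found.size > 0) {
      found.forEach(tag => tags.add(tag));
      post.data.tags = Array.from(tags);
    }
  }

  // Mark the post as processed so repeated collection lookups skip the parse
  post.data.hashtagsMapped = true;
  return post;
};
```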
