fix(html-serializer): Disables tokenizers if marks/nodes are not found in the editor schema (#86)
rfgamaral committed Jan 6, 2023
1 parent bf5e5bb commit 0ed4a9b
Showing 14 changed files with 734 additions and 451 deletions.
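In short: for each Markdown feature, the serializer now checks whether the editor schema contains the corresponding mark or node and, if not, overrides the matching marked tokenizer to return `undefined` so that rule never produces a token. A minimal sketch of that pattern (standalone, not the project's actual html.ts wiring; the `marked.use` call and import paths are assumptions):

```typescript
// Sketch only: disable marked's blockquote tokenizer when the editor schema
// has no `blockquote` node, so `> quoted` input is not parsed as a blockquote.
import { getSchema } from '@tiptap/core'
import { marked } from 'marked'

import { RichTextKit } from './extensions/rich-text/rich-text-kit' // path assumed

const schema = getSchema([RichTextKit])

const tokenizer: marked.TokenizerObject = {}

if (!schema.nodes.blockquote) {
    Object.assign(tokenizer, {
        // Returning `undefined` keeps this rule from producing a token at all.
        blockquote() {
            return undefined
        },
    })
}

// Assumed wiring; the commit's serializer may configure marked differently.
marked.use({ tokenizer })
```

When tokenizer overrides are registered through `marked.use`, marked treats a returned `false` as "fall back to the previous tokenizer", so the overrides return `undefined` instead — presumably the reason for the `MarkedTokenizerObjectAsUndefined` helper type introduced below.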
21 changes: 20 additions & 1 deletion src/helpers/serializer.test.ts
@@ -1,10 +1,29 @@
  import { getSchema } from '@tiptap/core'

  import { RichTextKit } from '../extensions/rich-text/rich-text-kit'
+ import { createSuggestionExtension } from '../factories/create-suggestion-extension'

- import { extractTagsFromParseRules } from './serializer'
+ import { buildSuggestionSchemaPartialRegex, extractTagsFromParseRules } from './serializer'

  describe('Helper: Serializer', () => {
+     describe('#buildSuggestionSchemaPartialRegex', () => {
+         test('returns `null` when there are no suggestion nodes in the schema', () => {
+             expect(buildSuggestionSchemaPartialRegex(getSchema([RichTextKit]))).toBeNull()
+         })
+
+         test('returns a partial regular expression including valid URL schemas', () => {
+             expect(
+                 buildSuggestionSchemaPartialRegex(
+                     getSchema([
+                         RichTextKit,
+                         createSuggestionExtension('mention'),
+                         createSuggestionExtension('channel'),
+                     ]),
+                 ),
+             ).toBe('(?:mention|channel)://')
+         })
+     })
+
      describe('#extractTagsFromParseRules', () => {
          test('returns an array of all tags from the given parse rules', () => {
              expect(
29 changes: 27 additions & 2 deletions src/helpers/serializer.ts
@@ -1,4 +1,29 @@
- import type { ParseRule } from 'prosemirror-model'
+ import { kebabCase } from 'lodash-es'
+
+ import type { ParseRule, Schema } from 'prosemirror-model'
+
+ /**
+  * Builds a partial regular expression that includes valid URL schemas used by all the available
+  * suggestion nodes from the given editor schema.
+  *
+  * @param schema The editor schema to be used for suggestion nodes detection.
+  *
+  * @returns A partial regular expression with valid URL schemas for the available suggestion nodes,
+  * `null` if there are no suggestion nodes in the editor schema.
+  */
+ function buildSuggestionSchemaPartialRegex(schema: Schema) {
+     const suggestionNodes = Object.values(schema.nodes).filter((node) =>
+         node.name.endsWith('Suggestion'),
+     )
+
+     if (suggestionNodes.length === 0) {
+         return null
+     }
+
+     return `(?:${suggestionNodes
+         .map((suggestionNode) => kebabCase(suggestionNode.name.replace(/Suggestion$/, '')))
+         .join('|')})://`
+ }

  /**
   * Extract all tags from the given parse rules argument, and returns an array of said tags.

@@ -19,4 +44,4 @@ function extractTagsFromParseRules(
          .map((rule) => rule.tag as keyof HTMLElementTagNameMap)
  }

- export { extractTagsFromParseRules }
+ export { buildSuggestionSchemaPartialRegex, extractTagsFromParseRules }
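What the helper's return value is for, sketched with made-up links (the composition below mirrors the one used in disabled.ts further down; only the partial schema string comes from the helper):

```typescript
// Illustrative only: the partial '(?:mention|channel)://' is embedded into a
// full pattern that detects Markdown links pointing at suggestion URLs.
const partial = '(?:mention|channel)://' // e.g. buildSuggestionSchemaPartialRegex(schema)

const suggestionLinkRegex = new RegExp(`^\\[[^\\]]*\\]\\(${partial}`)

suggestionLinkRegex.test('[Jane Doe](mention://user-1)') // true
suggestionLinkRegex.test('[Engineering](channel://chan-7)') // true
suggestionLinkRegex.test('[Example](https://example.com)') // false
```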
160 changes: 160 additions & 0 deletions src/serializers/html/extensions/disabled.ts
@@ -0,0 +1,160 @@
import { marked } from 'marked'

import { buildSuggestionSchemaPartialRegex } from '../../../helpers/serializer'
import { INITIAL_MARKED_OPTIONS } from '../html'

import type { Schema } from 'prosemirror-model'

/**
 * A version of `marked.TokenizerObject` that allows to return an `undefined` tokenizer.
 */
type MarkedTokenizerObjectAsUndefined = Partial<
    Omit<marked.Tokenizer<undefined>, 'constructor' | 'options'>
>

/**
 * A Marked extension which disables multiple parsing rules by disabling the rules respective
 * tokenizers based on the availability of marks and/or nodes in the editor schema.
 *
 * @param schema The editor schema to be used for nodes and marks detection.
 */
function disabled(schema: Schema) {
    const markedTokenizer = new marked.Tokenizer(INITIAL_MARKED_OPTIONS)

    const tokenizer: marked.TokenizerObject = {}

    if (!schema.nodes.blockquote) {
        Object.assign(tokenizer, {
            blockquote() {
                return undefined
            },
        })
    }

    if (!schema.marks.bold || !schema.marks.italic) {
        Object.assign(tokenizer, {
            emStrong() {
                return undefined
            },
        })
    }

    // Given that there isn't a one to one mapping between the bullet/ordered list nodes and Marked
    // tokenizers, we need to conditionally disable the `list` tokenizer based on the input
    if (!schema.nodes.bulletList || !schema.nodes.orderedList) {
        Object.assign<marked.TokenizerObject, MarkedTokenizerObjectAsUndefined>(tokenizer, {
            list(src) {
                const isOrdered = /^\d+/.test(src)

                if (
                    (isOrdered && schema.nodes.orderedList) ||
                    (!isOrdered && schema.nodes.bulletList)
                ) {
                    return markedTokenizer.list.apply(this, [src])
                }

                return undefined
            },
        })
    }

    if (!schema.marks.code) {
        Object.assign(tokenizer, {
            codespan() {
                return undefined
            },
        })
    }

    if (!schema.nodes.codeBlock) {
        Object.assign(tokenizer, {
            code() {
                return undefined
            },
            fences() {
                return undefined
            },
        })
    }

    if (!schema.nodes.hardBreak) {
        Object.assign(tokenizer, {
            br() {
                return undefined
            },
        })
    }

    if (!schema.nodes.heading) {
        Object.assign(tokenizer, {
            heading() {
                return undefined
            },
        })
    }

    if (!schema.nodes.horizontalRule) {
        Object.assign(tokenizer, {
            hr() {
                return undefined
            },
        })
    }

    if (!schema.marks.link) {
        Object.assign(tokenizer, {
            url() {
                return undefined
            },
        })
    }

    // Given that there isn't a one to one mapping between the link/image mark/node and Marked
    // tokenizers, nor Marked supports our custom Markdown syntax for suggestions, we need to
    // conditionally disable the `link` tokenizer based on the input
    if (!schema.marks.link || !schema.nodes.image) {
        const suggestionSchemaPartialRegex = buildSuggestionSchemaPartialRegex(schema)
        const suggestionSchemaRegex = suggestionSchemaPartialRegex
            ? new RegExp(`^\\[[^\\]]*\\]\\(${suggestionSchemaPartialRegex}`)
            : null

        Object.assign<marked.TokenizerObject, MarkedTokenizerObjectAsUndefined>(tokenizer, {
            link(src) {
                const isImage = /^!\[[^\]]*\]\([^)]+\)/.test(src)
                const isSuggestion = suggestionSchemaRegex?.test(src)

                if (
                    (isImage && schema.nodes.image) ||
                    (!isImage && schema.marks.link) ||
                    isSuggestion
                ) {
                    return markedTokenizer.link.apply(this, [src])
                }

                return undefined
            },
        })
    }

    if (!schema.marks.strike) {
        Object.assign(tokenizer, {
            del() {
                return undefined
            },
        })
    }

    if (!schema.nodes.table) {
        Object.assign(tokenizer, {
            table() {
                return undefined
            },
        })
    }

    return {
        tokenizer,
    }
}

export { disabled }
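A sketch of how the new extension might be consumed; the actual serializer setup lives elsewhere in this commit (presumably src/serializers/html/html.ts, which is not shown here), so the `marked.use` wiring and import paths below are assumptions:

```typescript
import { getSchema } from '@tiptap/core'
import { marked } from 'marked'

import { RichTextKit } from '../../../extensions/rich-text/rich-text-kit' // path assumed
import { disabled } from './disabled'

const schema = getSchema([RichTextKit])

// Register the conditional tokenizer overrides; whichever marks/nodes are
// missing from `schema` have their Markdown rules disabled.
marked.use(disabled(schema))

// For any disabled rule, the corresponding Markdown syntax no longer produces
// its dedicated token and falls through to marked's remaining rules.
console.log(marked.parse('> only a blockquote if the schema has one'))
```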
16 changes: 4 additions & 12 deletions src/serializers/html/extensions/link.ts
@@ -1,27 +1,19 @@
- import { kebabCase } from 'lodash-es'
  import { marked } from 'marked'

- import type { NodeType } from 'prosemirror-model'
-
  const markedRenderer = new marked.Renderer()

  /**
   * A Marked extension which tweaks the `link` renderer to add support for suggestion nodes, while
   * preserving the original renderer for standard links.
   *
-  * @param suggestionNodes An array of the suggestion nodes to serialize.
+  * @param suggestionSchemaRegex A regular expression with valid URL schemas for the available
+  * suggestion nodes.
   */
- function link(suggestionNodes: NodeType[]): marked.MarkedExtension {
-     const linkSchemaRegex = new RegExp(
-         `^(?:${suggestionNodes
-             .map((suggestionNode) => kebabCase(suggestionNode.name.replace(/Suggestion$/, '')))
-             .join('|')})://`,
-     )
-
+ function link(suggestionSchemaRegex: RegExp): marked.MarkedExtension {
      return {
          renderer: {
              link(href, title, text) {
-                 if (href && linkSchemaRegex.test(href)) {
+                 if (href && suggestionSchemaRegex?.test(href)) {
                      const [, schema, id] = /^([a-z-]+):\/\/(\S+)$/i.exec(href) || []

                      if (schema && id && text) {
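For concreteness, the shape of href this renderer branch is looking for; the href below is made up, and the HTML emitted for suggestions sits in the truncated part of the diff:

```typescript
// Illustrative href only; the regex is the one used by the renderer above.
const href = 'mention://u42' // hypothetical suggestion URL

const [, schema, id] = /^([a-z-]+):\/\/(\S+)$/i.exec(href) || []
// schema === 'mention', id === 'u42' → handled as a suggestion;
// other hrefs presumably fall back to markedRenderer, the stock link renderer,
// per the doc comment (that branch is below the fold).
```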
