diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ec65e548..6a352980 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -1,4 +1,4 @@ -name: Deploy +name: Docs on: push: branches: @@ -34,11 +34,14 @@ jobs: mkdir mdbook curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.27/mdbook-v0.4.27-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook echo `pwd`/mdbook >> $GITHUB_PATH + cargo install mdbook-mermaid - name: Deploy GitHub Pages run: | # This assumes your book is in the root of your repository. # Just add a `cd` here if you need to change to another directory. + cp CHANGELOG.md docs/src/CHANGELOG.md cd docs + mdbook-mermaid install . mdbook build - name: Setup Pages uses: actions/configure-pages@f156874f8191504dae5b037505266ed5dda6c382 # v3.0.6 diff --git a/docs/book.toml b/docs/book.toml index b4951a29..ef6a60c6 100644 --- a/docs/book.toml +++ b/docs/book.toml @@ -6,10 +6,12 @@ src = "src" title = "Obsidian Confluence Integration" create-missing = false +[preprocessor.mermaid] +command = "mdbook-mermaid" [output.html] git-repository-url = "https://github.com/obsidian-confluence/obsidian-confluence" git-repository-icon = "fa-github" edit-url-template = "https://github.com/obsidian-confluence/obsidian-confluence/edit/main/docs/{path}" cname = "obsidian-confluence.com" - +additional-js = ["mermaid.min.js", "mermaid-init.js"] diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 29622692..de719e8e 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -6,6 +6,7 @@ - [Installation](./getting-started/installation.md) - [Installation - BRAT](./getting-started/installation-brat.md) - [Supported Features](./features/index.md) + - [Comments](./features/comment-handling.md) - [Supported Markdown](./features/supported-markdown.md) - [Folder Note](./features/folder-note.md) - [Folder Structure](./features/folder-structure.md) @@ -14,4 +15,5 @@ - [YAML Frontmatter](./features/yaml-frontmatter.md) - [Callouts](./features/callouts.md) - [Wikilinks](./features/wikilinks.md) -- [Thanks](thanks.md) \ No newline at end of file +- [Thanks](thanks.md) +- [Changelog](CHANGELOG.md) \ No newline at end of file diff --git a/docs/src/features/comment-handling.md b/docs/src/features/comment-handling.md new file mode 100644 index 00000000..7f96408e --- /dev/null +++ b/docs/src/features/comment-handling.md @@ -0,0 +1,51 @@ +# Comment Handling + +This plugin attempts to find the original location of a comment and reattach it. If it cannot determine the correct location, it will add a section to the bottom of the page and place the comment there. + +# Comment Matching Process + +## No matches found +If no matches for the text the comment was attached to are found, the comment is moved to the unmatched comment section. + +## Exact Match +The plugin first tries to find an exact match for the comment by comparing the text before and after the comment. If an exact match is found, the comment is attached at that location. + +## Distance of whole before and after +If an exact match is not found, the plugin calculates the "distance" between the text before and after the comment in the original location and each potential new location. This distance is calculated using the Levenshtein distance algorithm, which measures the number of changes (insertions, deletions, or substitutions) required to transform one string into another. 
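+
+As an illustration, here is a minimal sketch of that calculation, roughly mirroring the `levenshteinDistance` helper added to `packages/lib/src/AdfProcessing.ts` in this change (the example strings are hypothetical):
+
+```typescript
+// Counts the insertions, deletions, and substitutions needed to turn `a` into `b`,
+// using the standard dynamic-programming formulation.
+function levenshteinDistance(a: string, b: string): number {
+	// matrix[i][j] = edits needed to turn the first i chars of `a` into the first j chars of `b`
+	const matrix: number[][] = Array.from({ length: a.length + 1 }, () =>
+		new Array<number>(b.length + 1).fill(0)
+	);
+	for (let i = 0; i <= a.length; i++) matrix[i][0] = i;
+	for (let j = 0; j <= b.length; j++) matrix[0][j] = j;
+
+	for (let i = 1; i <= a.length; i++) {
+		for (let j = 1; j <= b.length; j++) {
+			const cost = a[i - 1] === b[j - 1] ? 0 : 1;
+			matrix[i][j] = Math.min(
+				matrix[i - 1][j] + 1, // deletion
+				matrix[i][j - 1] + 1, // insertion
+				matrix[i - 1][j - 1] + cost // substitution
+			);
+		}
+	}
+	return matrix[a.length][b.length];
+}
+
+levenshteinDistance("the quick brown fox", "the quick brown dog"); // => 2
+```
+
+A candidate location is only kept while the distances for the surrounding text stay within the change budget described next.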
If there are more than 40 changes before and after the comment text the node is excluded from being a viable option. The potential matches are sorted based on the calculated distances, and the match with the smallest distance is chosen. + +## Distance of 2 words before and after +If there are still multiple matches with similar distances, the plugin narrows down the selection by comparing only the words immediately surrounding the comment. The Levenshtein distance is calculated again, and the best match is chosen based on the smallest distance. + +## No ability to match +If no suitable match is found, the function returns undefined. + +## Flow chart +```mermaid +flowchart TD + Start --> DoesCommentTextExist{Does comment text exist on page?} + DoesCommentTextExist -->|Yes| DoesExactMatchExist{Does exact match before and after comment text?} + DoesCommentTextExist -->|No| UnmatchedCommentSection[Unmatched Comment Section] + DoesExactMatchExist -->|Yes| AttachComment[Attach Comment To This ADF Node] + DoesExactMatchExist -->|No| CalculateLevenshtein + + subgraph WholeBeforeAfter + CalculateLevenshtein[Calculate Levenshtein Distance Between Before and After text] --> SortByMinimumDistance[Sort by minimum distance] + SortByMinimumDistance --> IsFirstItemMinimumDistanceUnder40{Is first item minimum distance under 40 changes?} + end + + IsFirstItemMinimumDistanceUnder40 -->|Yes| AttachComment + IsFirstItemMinimumDistanceUnder40 -->|No| GetXWordsBeforeAfterComment[Get X Words before and after comment] + + subgraph WordsBeforeAfter + GetXWordsBeforeAfterComment --> TrimBeforeAndAfterToSameLength + TrimBeforeAndAfterToSameLength --> CalculateWordsLevenshtein + CalculateWordsLevenshtein --> IsDistanceLessThan50percentCharacters{IsDistanceLessThan50percentCharacters} + IsDistanceLessThan50percentCharacters -->|Yes| AddToChecks + IsDistanceLessThan50percentCharacters -->|Yes| CalculateWordsLevenshtein + IsDistanceLessThan50percentCharacters -->|No| CalculateWordsLevenshtein + AddToChecks --> SortByWordsMinimumDistance + SortByWordsMinimumDistance --> IsThereAnyItems{IsThereAnyItems} + end + + IsThereAnyItems -->|Yes| AttachComment + IsThereAnyItems -->|No| UnmatchedCommentSection diff --git a/package-lock.json b/package-lock.json index f2c6e7ed..f498bd64 100644 --- a/package-lock.json +++ b/package-lock.json @@ -3958,6 +3958,12 @@ "version": "2.0.6", "license": "MIT" }, + "node_modules/@types/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-rFT3ak0/2trgvp4yYZo5iKFEPsET7vKydKF+VRCxlQ9bpheehyAJH89dAkaLEq/j/RZXJIqcgsmPJKUP1Z28HA==", + "dev": true + }, "node_modules/@types/yargs": { "version": "17.0.24", "dev": true, @@ -15020,7 +15026,8 @@ "prosemirror-markdown": "^1.10.1", "prosemirror-model": "1.14.3", "sort-any": "^4.0.5", - "spark-md5": "^3.0.2" + "spark-md5": "^3.0.2", + "uuid": "^9.0.0" }, "devDependencies": { "@jest/globals": "^29.5.0", @@ -15031,6 +15038,7 @@ "@types/prosemirror-transform": "^1.4.2", "@types/prosemirror-view": "^1.23.1", "@types/spark-md5": "^3.0.2", + "@types/uuid": "^9.0.0", "jest": "^29.5.0", "markdown-it": "^13.0.1" } @@ -17669,6 +17677,7 @@ "@types/prosemirror-transform": "^1.4.2", "@types/prosemirror-view": "^1.23.1", "@types/spark-md5": "^3.0.2", + "@types/uuid": "^9.0.0", "confluence.js": "^1.6.3", "formdata-node": "^5.0.0", "gray-matter": "^4.0.3", @@ -17680,7 +17689,8 @@ "prosemirror-markdown": "^1.10.1", "prosemirror-model": "1.14.3", "sort-any": "^4.0.5", - "spark-md5": "^3.0.2" + 
"spark-md5": "^3.0.2", + "uuid": "^9.0.0" }, "dependencies": { "@atlaskit/adf-schema": { @@ -18020,6 +18030,12 @@ "@types/unist": { "version": "2.0.6" }, + "@types/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-rFT3ak0/2trgvp4yYZo5iKFEPsET7vKydKF+VRCxlQ9bpheehyAJH89dAkaLEq/j/RZXJIqcgsmPJKUP1Z28HA==", + "dev": true + }, "@types/yargs": { "version": "17.0.24", "dev": true, diff --git a/packages/lib/package.json b/packages/lib/package.json index e730fc97..8595954b 100644 --- a/packages/lib/package.json +++ b/packages/lib/package.json @@ -26,6 +26,7 @@ "@types/prosemirror-transform": "^1.4.2", "@types/prosemirror-view": "^1.23.1", "@types/spark-md5": "^3.0.2", + "@types/uuid": "^9.0.0", "jest": "^29.5.0", "markdown-it": "^13.0.1" }, @@ -43,7 +44,8 @@ "prosemirror-markdown": "^1.10.1", "prosemirror-model": "1.14.3", "sort-any": "^4.0.5", - "spark-md5": "^3.0.2" + "spark-md5": "^3.0.2", + "uuid": "^9.0.0" }, "resolutions": { "prosemirror-model": "1.14.3" diff --git a/packages/lib/src/AdfEqual.ts b/packages/lib/src/AdfEqual.ts index 0e3be2e9..6dd4cfe4 100644 --- a/packages/lib/src/AdfEqual.ts +++ b/packages/lib/src/AdfEqual.ts @@ -1,8 +1,7 @@ import sortAny from "sort-any"; import { mapValues } from "lodash"; import { traverse } from "@atlaskit/adf-utils/traverse"; -import { JSONDocNode } from "@atlaskit/editor-json-transformer"; -import { ADFEntityMark } from "@atlaskit/adf-utils/types"; +import { ADFEntity, ADFEntityMark } from "@atlaskit/adf-utils/types"; import { isEqual } from "./isEqual"; // eslint-disable-next-line @typescript-eslint/no-explicit-any @@ -25,7 +24,7 @@ const sortDeep = (object: unknown): any => { return sortAny(object.map(sortDeep)); }; -export function orderMarks(adf: JSONDocNode) { +export function orderMarks(adf: ADFEntity) { return traverse(adf, { any: (node, __parent) => { if (node.marks) { @@ -36,7 +35,7 @@ export function orderMarks(adf: JSONDocNode) { }); } -export function adfEqual(first: JSONDocNode, second: JSONDocNode): boolean { +export function adfEqual(first: ADFEntity, second: ADFEntity): boolean { return isEqual(orderMarks(first), orderMarks(second)); } diff --git a/packages/lib/src/AdfProcessing.ts b/packages/lib/src/AdfProcessing.ts index b6edd442..4c7464e5 100644 --- a/packages/lib/src/AdfProcessing.ts +++ b/packages/lib/src/AdfProcessing.ts @@ -2,9 +2,33 @@ import { traverse } from "@atlaskit/adf-utils/traverse"; import { JSONDocNode } from "@atlaskit/editor-json-transformer"; import { ConfluenceAdfFile, ConfluenceNode } from "./Publisher"; import { ConfluenceSettings } from "./Settings"; -import { marksEqual } from "./AdfEqual"; -import { ADFEntity, ADFEntityMark } from "@atlaskit/adf-utils/types"; -import { p, tr } from "@atlaskit/adf-utils/builders"; +import { adfEqual, marksEqual } from "./AdfEqual"; +import { ADFEntity } from "@atlaskit/adf-utils/types"; +import { heading, li, ol, p, text } from "@atlaskit/adf-utils/builders"; +import { v4 as uuidv4 } from "uuid"; +import { TextDefinition } from "@atlaskit/adf-schema"; + +type ExtractedInlineComment = { + pluginInternalId: string; + inlineCommentId: string; + textForComment: string | undefined; + beforeText: string; + afterText: string; +}; + +type PossibleMatchForInlineComment = { + pluginInternalId: string; + inlineCommentId: string; + textForComment: string | undefined; + beforeTextOutsideMyNode: string; + afterTextOutsideMyNode: string; + beforeTextMyNode: string; + afterTextMyNode: string; + myParentNode: ADFEntity 
| undefined; + myNode: ADFEntity | undefined; + commentStart: number; + commentEnd: number; +}; export function prepareAdfToUpload( confluencePagesToPublish: ConfluenceNode[], @@ -27,10 +51,588 @@ export function prepareAdfToUpload( result = mergeTextNodes(result); + const pageInlineComments = extractInlineComments( + confluenceNode.existingAdf + ); + + result = applyInlineComments(result, pageInlineComments); + confluenceNode.file.contents = result; }); } +function applyInlineComments( + adf: JSONDocNode, + pageInlineComments: ExtractedInlineComment[] +) { + const unfoundInlineComments: ExtractedInlineComment[] = []; + let result = adf; + + for (const comment of pageInlineComments) { + const commentOptions: PossibleMatchForInlineComment[] = []; + traverse(result, { + any: (node) => { + if (!node.content) { + return; + } + for ( + let nodeIndex = 0; + nodeIndex < node.content.length; + nodeIndex++ + ) { + const child = node.content[nodeIndex]; + if ( + node.content && + child && + child.type === "text" && + child.text && + comment.textForComment && + child.text.includes(comment.textForComment) + ) { + const beforeNodes: (ADFEntity | undefined)[] = [], + afterNodes: (ADFEntity | undefined)[] = []; + + node.content?.forEach((siblingContent, index) => { + if (nodeIndex > index) { + beforeNodes.push(siblingContent); + } else if (nodeIndex < index) { + afterNodes.push(siblingContent); + } + }); + + const beforeText = beforeNodes.reduce((prev, curr) => { + return prev + curr?.text; + }, ""); + + const afterText = afterNodes.reduce((prev, curr) => { + return prev + curr?.text; + }, ""); + + let index = 0; + + const childText = child.text ?? ""; + while ( + (index = childText.indexOf( + comment.textForComment, + index + )) !== -1 + ) { + const beforeTextMyNode = childText.slice(0, index); + const afterTextMyNode = childText.slice( + index + comment.textForComment.length + ); + + const found: PossibleMatchForInlineComment = { + pluginInternalId: comment.pluginInternalId, + inlineCommentId: comment.inlineCommentId, + beforeTextOutsideMyNode: beforeText, + afterTextOutsideMyNode: afterText, + myParentNode: node, + myNode: child, + beforeTextMyNode, + textForComment: comment.textForComment, + afterTextMyNode, + commentStart: index, + commentEnd: + index + comment.textForComment.length, + }; + + commentOptions.push(found); + + index += comment.textForComment.length; + } + } + } + }, + }); + + const whereToApplyComment = pickBestMatchForComment( + comment, + commentOptions + ); + if ( + whereToApplyComment?.myNode && + whereToApplyComment?.myNode !== undefined + ) { + let appliedComment = false; + result = traverse(result, { + any: (node, _parent) => { + if (!node.content || appliedComment) { + return; + } + const newContent: (ADFEntity | undefined)[] = []; + for ( + let nodeIndex = 0; + nodeIndex < node.content.length; + nodeIndex++ + ) { + const child = node.content[nodeIndex]; + if ( + node.content && + child && + child.type === "text" && + child.text && + whereToApplyComment?.myNode && + whereToApplyComment?.myParentNode && + adfEqual(whereToApplyComment.myNode, child) && + adfEqual(whereToApplyComment.myParentNode, node) + ) { + appliedComment = true; + const beforeText = child.text.slice( + 0, + whereToApplyComment.commentStart + ); + const afterText = child.text.slice( + whereToApplyComment.commentEnd, + child.text.length + ); + + if (beforeText) { + newContent.push({ + ...child, + text: beforeText, + }); + } + + newContent.push({ + ...child, + text: comment.textForComment, + marks: [ + 
...(child.marks ? child.marks : []), + { + type: "annotation", + attrs: { + annotationType: "inlineComment", + id: comment.inlineCommentId, + }, + }, + ], + }); + + newContent.push({ + ...child, + text: afterText, + }); + } else { + newContent.push(child); + } + } + + if (newContent.length > 0) { + node.content = newContent; + } + + return node; + }, + }) as JSONDocNode; + + if (!appliedComment) { + unfoundInlineComments.push(comment); + } + } else { + unfoundInlineComments.push(comment); + } + } + + if (unfoundInlineComments.length > 0) { + const comments = [ + heading({ level: 1 })( + text("Inline comments that couldn't be mapped") + ), + ol({ order: 1 })( + ...unfoundInlineComments.map((item) => + li([ + p( + commentedText( + item.textForComment ?? "", + item.inlineCommentId + ) + ), + ]) + ) + ), + ]; + + comments.forEach((item) => result.content.push(item)); + } + + return result; +} + +function commentedText(text: string, inlineCommentId: string): TextDefinition { + return { + type: "text", + text: text, + marks: [ + { + type: "annotation", + attrs: { + annotationType: "inlineComment", + id: inlineCommentId, + }, + }, + ], + }; +} + +function pickBestMatchForComment( + inlineComment: ExtractedInlineComment, + possibleMatchsForInlineComment: PossibleMatchForInlineComment[] +): PossibleMatchForInlineComment | undefined { + if (possibleMatchsForInlineComment.length === 0) { + return undefined; + } + + const exactMatch = possibleMatchsForInlineComment.find( + (possibleSpot) => + inlineComment.beforeText === + possibleSpot.beforeTextOutsideMyNode + + possibleSpot.beforeTextMyNode && + inlineComment.afterText === + possibleSpot.afterTextMyNode + + possibleSpot.afterTextOutsideMyNode + ); + if (exactMatch) { + return exactMatch; + } + + const distancesBeforeAfter: { + possibleSpot: PossibleMatchForInlineComment; + beforeTextDistance: number; + afterTextDistance: number; + }[] = []; + for ( + let index = 0; + index < possibleMatchsForInlineComment.length; + index++ + ) { + const possibleSpot = possibleMatchsForInlineComment[index]; + + const beforeText = + possibleSpot.beforeTextOutsideMyNode + + possibleSpot.beforeTextMyNode; + const afterText = + possibleSpot.afterTextMyNode + possibleSpot.afterTextOutsideMyNode; + if ( + (inlineComment.beforeText.length > 0 && + beforeText.length > 0 && + isSpecialCharacter( + inlineComment.beforeText.charAt( + inlineComment.beforeText.length - 1 + ) + ) !== + isSpecialCharacter( + beforeText.charAt(beforeText.length - 1) + )) || + (inlineComment.afterText.length > 0 && + afterText.length > 0 && + isSpecialCharacter(inlineComment.afterText.charAt(0)) !== + isSpecialCharacter(afterText.charAt(0))) + ) { + continue; + } + + const beforeTextDistance = levenshteinDistance( + inlineComment.beforeText, + beforeText + ); + const afterTextDistance = levenshteinDistance( + inlineComment.afterText, + afterText + ); + + if (beforeTextDistance > 40 && afterTextDistance > 40) { + continue; + } + + distancesBeforeAfter.push({ + possibleSpot, + beforeTextDistance, + afterTextDistance, + }); + } + const sortedDistances = distancesBeforeAfter.sort((a, b) => { + const minDistanceA = Math.min( + a.beforeTextDistance, + a.afterTextDistance + ); + const minDistanceB = Math.min( + b.beforeTextDistance, + b.afterTextDistance + ); + + return minDistanceA - minDistanceB; + }); + + if (sortedDistances.length > 1) { + return sortedDistances.at(0)?.possibleSpot; + } + + const distancesBeforeAfterWords: { + possibleSpot: PossibleMatchForInlineComment; + beforeTextDistance: number; + 
afterTextDistance: number; + }[] = []; + // Look at words immediately around comment to see if multiple match + for ( + let index = 0; + index < possibleMatchsForInlineComment.length; + index++ + ) { + const possibleSpot = possibleMatchsForInlineComment[index]; + + const beforeText = + possibleSpot.beforeTextOutsideMyNode + + possibleSpot.beforeTextMyNode; + const afterText = + possibleSpot.afterTextMyNode + possibleSpot.afterTextOutsideMyNode; + if ( + (inlineComment.beforeText.length > 0 && + beforeText.length > 0 && + isSpecialCharacter( + inlineComment.beforeText.charAt( + inlineComment.beforeText.length - 1 + ) + ) !== + isSpecialCharacter( + beforeText.charAt(beforeText.length - 1) + )) || + (inlineComment.afterText.length > 0 && + afterText.length > 0 && + isSpecialCharacter(inlineComment.afterText.charAt(0)) !== + isSpecialCharacter(afterText.charAt(0))) + ) { + continue; + } + + const wordsFromBeforeText = getStringAfterXSpace(beforeText, 2); + const wordsFromAfterText = getStringBeforeXSpace(afterText, 2); + + const wordsBeforeComment = getStringAfterXSpace( + inlineComment.beforeText, + 2 + ); + const wordsAfterComment = getStringBeforeXSpace( + inlineComment.afterText, + 2 + ); + + const minBefore = Math.min( + wordsBeforeComment.length, + wordsFromBeforeText.length + ); + const minAfter = Math.min( + wordsAfterComment.length, + wordsFromAfterText.length + ); + + const trimmedWordsFromAfterText = wordsFromAfterText.substring( + 0, + minAfter + ); + const trimmedWordsAfterComment = wordsAfterComment.substring( + 0, + minAfter + ); + + const trimmedWordsFromBeforeText = wordsFromBeforeText.substring( + wordsFromBeforeText.length - minBefore, + wordsFromBeforeText.length + ); + const trimmedWordsBeforeComment = wordsBeforeComment.substring( + wordsBeforeComment.length - minBefore, + wordsBeforeComment.length + ); + + const beforeTextDistance = levenshteinDistance( + trimmedWordsFromBeforeText, + trimmedWordsBeforeComment + ); + const afterTextDistance = levenshteinDistance( + trimmedWordsFromAfterText, + trimmedWordsAfterComment + ); + + if ( + beforeTextDistance > minBefore / 2 && + afterTextDistance > minAfter / 2 + ) { + continue; + } + + distancesBeforeAfterWords.push({ + possibleSpot, + beforeTextDistance, + afterTextDistance, + }); + } + const sortedDistancesWords = distancesBeforeAfterWords.sort((a, b) => { + const minDistanceA = Math.min( + a.beforeTextDistance, + a.afterTextDistance + ); + const minDistanceB = Math.min( + b.beforeTextDistance, + b.afterTextDistance + ); + + return minDistanceA - minDistanceB; + }); + + if (sortedDistancesWords.length > 0) { + return sortedDistancesWords.at(0)?.possibleSpot; + } + + return undefined; +} + +function getStringBeforeXSpace(str: string, numOfSpaces: number): string { + if (numOfSpaces < 0) { + throw new Error("The number of spaces should be non-negative."); + } + + // Find the index where contiguous special characters and spaces end + const startOfActualString = str.search(/\w/); + + // Check if the number of spaces is 0, then return the whole string + if (numOfSpaces === 0) { + return str; + } + + let count = 0; + let lastIndex = -1; + + for (let i = startOfActualString; i < str.length; i++) { + if (str[i] === " ") { + count++; + if (count === numOfSpaces) { + lastIndex = i; + break; + } + } + } + + if (lastIndex === -1) { + return str; + } + + return str.substring(0, lastIndex); +} + +function getStringAfterXSpace(str: string, numOfSpaces: number): string { + if (numOfSpaces < 0) { + throw new Error("The number of spaces 
should be non-negative."); + } + + // Find the index where contiguous special characters and spaces end at the end of the string + const endOfActualString = str.search(/(\w)[^\w]*$/); + + // Check if the number of spaces is 0, then return the whole string + if (numOfSpaces === 0) { + return str; + } + + let count = 0; + let startIndex = -1; + + for (let i = endOfActualString; i >= 0; i--) { + if (str[i] === " ") { + count++; + if (count === numOfSpaces) { + startIndex = i; + break; + } + } + } + + if (startIndex === -1) { + return str; + } + + return str.substring(startIndex + 1); +} + +function isSpecialCharacter(char: string): boolean { + // The regex pattern checks if the character is not a letter, number, or underscore + // \p{L} matches any kind of letter from any language + // \p{N} matches any kind of numeric character in any script + // \p{M} matches a character intended to be combined with another character (e.g. accents, umlauts, etc.) + const regex = /[^\p{L}\p{N}\p{M}_]/u; + return regex.test(char); +} + +function levenshteinDistance(a: string, b: string): number { + const matrix: number[][] = Array.from({ length: a.length + 1 }, (_, i) => [ + i, + ]); + matrix[0] = Array.from({ length: b.length + 1 }, (_, i) => i); + + for (let i = 1; i <= a.length; i++) { + for (let j = 1; j <= b.length; j++) { + const cost = a[i - 1] === b[j - 1] ? 0 : 1; + matrix[i][j] = Math.min( + matrix[i - 1][j] + 1, + matrix[i][j - 1] + 1, + matrix[i - 1][j - 1] + cost + ); + } + } + + return matrix[a.length][b.length]; +} + +function extractInlineComments(adf: JSONDocNode) { + const result: ExtractedInlineComment[] = []; + traverse(adf, { + text: (node, parent, nodeIndex) => { + if ( + parent.node && + node.marks && + node.marks.some( + (mark) => + mark.type === "annotation" && + mark.attrs?.annotationType === "inlineComment" + ) + ) { + const inlineCommentMark = node.marks?.find( + (mark) => + mark.type === "annotation" && + mark.attrs?.annotationType === "inlineComment" + ); + const beforeNodes: (ADFEntity | undefined)[] = [], + afterNodes: (ADFEntity | undefined)[] = []; + + parent.node.content?.forEach((siblingContent, index) => { + if (nodeIndex > index) { + beforeNodes.push(siblingContent); + } else if (nodeIndex < index) { + afterNodes.push(siblingContent); + } + }); + + const beforeText = beforeNodes.reduce((prev, curr, index) => { + return prev + curr?.text; + }, ""); + + const afterText = afterNodes.reduce((prev, curr, index) => { + return prev + curr?.text; + }, ""); + + const extract: ExtractedInlineComment = { + pluginInternalId: uuidv4(), + inlineCommentId: inlineCommentMark?.attrs?.id, + textForComment: node.text, + beforeText, + afterText, + }; + + result.push(extract); + } + }, + }); + return result; +} + function processWikilinkToActualLink( adf: JSONDocNode, fileToPageIdMap: Record, @@ -93,7 +695,7 @@ function removeEmptyProperties(adf: JSONDocNode) { delete node.marks; } } catch (e: unknown) { - console.log({ marks: node.marks, e }); + console.warn({ marks: node.marks, e }); } return node; }, @@ -162,26 +764,3 @@ function mergeTextNodes(adf: JSONDocNode) { return result; } - -export function removeInlineComments(adf: JSONDocNode) { - let result = traverse(adf, { - text: (node, _parent) => { - if (node.marks) { - node.marks = node.marks.reduce((prev, curr) => { - if ( - curr.type === "annotation" && - curr.attrs?.annotationType === "inlineComment" - ) { - return prev; - } - return [...prev, curr]; - }, [] as ADFEntityMark[]); - } - return node; - }, - }) as JSONDocNode; - - result = 
mergeTextNodes(result); - - return result; -} diff --git a/packages/lib/src/Attachments.ts b/packages/lib/src/Attachments.ts index d9f1ba75..f2d90d98 100644 --- a/packages/lib/src/Attachments.ts +++ b/packages/lib/src/Attachments.ts @@ -1,5 +1,5 @@ import SparkMD5 from "spark-md5"; -import { CustomConfluenceClient, LoaderAdaptor } from "./adaptors"; +import { RequiredConfluenceClient, LoaderAdaptor } from "./adaptors"; import sizeOf from "image-size"; export type ConfluenceImageStatus = "existing" | "uploaded"; @@ -14,7 +14,7 @@ export interface UploadedImageData { } export async function uploadBuffer( - confluenceClient: CustomConfluenceClient, + confluenceClient: RequiredConfluenceClient, pageId: string, uploadFilename: string, fileBuffer: Buffer, @@ -70,7 +70,7 @@ export async function uploadBuffer( } export async function uploadFile( - confluenceClient: CustomConfluenceClient, + confluenceClient: RequiredConfluenceClient, adaptor: LoaderAdaptor, pageId: string, pageFilePath: string, diff --git a/packages/lib/src/Publisher.ts b/packages/lib/src/Publisher.ts index d69f1e4c..38bc9efc 100644 --- a/packages/lib/src/Publisher.ts +++ b/packages/lib/src/Publisher.ts @@ -1,11 +1,10 @@ import { ConfluenceSettings } from "./Settings"; import { traverse, filter } from "@atlaskit/adf-utils/traverse"; -import { CustomConfluenceClient, LoaderAdaptor } from "./adaptors"; +import { RequiredConfluenceClient, LoaderAdaptor } from "./adaptors"; import { JSONDocNode } from "@atlaskit/editor-json-transformer"; import { createFolderStructure as createLocalAdfTree } from "./TreeLocal"; import { ensureAllFilesExistInConfluence } from "./TreeConfluence"; import { uploadBuffer, UploadedImageData, uploadFile } from "./Attachments"; -import { prepareAdfToUpload, removeInlineComments } from "./AdfProcessing"; import { adfEqual } from "./AdfEqual"; import { getMermaidFileName, @@ -78,14 +77,14 @@ export interface ConfluenceNode { file: ConfluenceAdfFile; version: number; lastUpdatedBy: string; - existingAdf: string; + existingAdf: JSONDocNode; parentPageId: string; } export interface ConfluenceTreeNode { file: ConfluenceAdfFile; version: number; lastUpdatedBy: string; - existingAdf: string; + existingAdf: JSONDocNode; children: ConfluenceTreeNode[]; } @@ -97,7 +96,7 @@ export interface UploadAdfFileResult { } export class Publisher { - confluenceClient: CustomConfluenceClient; + confluenceClient: RequiredConfluenceClient; adaptor: LoaderAdaptor; settings: ConfluenceSettings; mermaidRenderer: MermaidRenderer; @@ -106,7 +105,7 @@ export class Publisher { constructor( adaptor: LoaderAdaptor, settings: ConfluenceSettings, - confluenceClient: CustomConfluenceClient, + confluenceClient: RequiredConfluenceClient, mermaidRenderer: MermaidRenderer ) { this.adaptor = adaptor; @@ -141,11 +140,10 @@ export class Publisher { folderTree, spaceToPublishTo.key, parentPage.id, - parentPage.id + parentPage.id, + this.settings ); - prepareAdfToUpload(confluencePagesToPublish, this.settings); - if (publishFilter) { confluencePagesToPublish = confluencePagesToPublish.filter( (file) => file.file.absoluteFilePath === publishFilter @@ -220,7 +218,7 @@ export class Publisher { private async updatePageContent( parentPageId: string, pageVersionNumber: number, - currentContents: string, + currentContents: JSONDocNode, adfFile: ConfluenceAdfFile, lastUpdatedBy: string ): Promise { @@ -236,15 +234,15 @@ export class Publisher { imageResult: "same", labelResult: "same", }; - const uploadResult = await this.uploadFiles( + const imageUploadResult = 
await this.uploadFiles( adfFile.pageId, adfFile.absoluteFilePath, adfFile.contents ); - const imageResult = Object.keys(uploadResult.imageMap).reduce( + const imageResult = Object.keys(imageUploadResult.imageMap).reduce( (prev, curr) => { - const value = uploadResult.imageMap[curr]; + const value = imageUploadResult.imageMap[curr]; if (value === null) { return prev; } @@ -260,23 +258,20 @@ export class Publisher { } as Record ); - if (!adfEqual(adfFile.contents, uploadResult.adf)) { + if (!adfEqual(adfFile.contents, imageUploadResult.adf)) { result.imageResult = imageResult["uploaded"] > 0 ? "updated" : "same"; } - const cleanedExistingContents = removeInlineComments( - JSON.parse(currentContents) - ); - if (!adfEqual(cleanedExistingContents, uploadResult.adf)) { + if (!adfEqual(currentContents, imageUploadResult.adf)) { result.contentResult = "updated"; console.log(`TESTING DIFF - ${adfFile.absoluteFilePath}`); const replacer = (key: unknown, value: unknown) => typeof value === "undefined" ? null : value; - console.log(JSON.stringify(cleanedExistingContents, replacer)); - console.log(JSON.stringify(uploadResult.adf, replacer)); + console.log(JSON.stringify(currentContents, replacer)); + console.log(JSON.stringify(imageUploadResult.adf, replacer)); const updateContentDetails = { id: adfFile.pageId, @@ -290,7 +285,7 @@ export class Publisher { body: { // eslint-disable-next-line @typescript-eslint/naming-convention atlas_doc_format: { - value: JSON.stringify(uploadResult.adf), + value: JSON.stringify(imageUploadResult.adf), representation: "atlas_doc_format", }, }, diff --git a/packages/lib/src/TreeConfluence.ts b/packages/lib/src/TreeConfluence.ts index 21f969a1..fae2475e 100644 --- a/packages/lib/src/TreeConfluence.ts +++ b/packages/lib/src/TreeConfluence.ts @@ -6,7 +6,10 @@ import { LocalAdfFileTreeNode, } from "./Publisher"; import { doc, p } from "@atlaskit/adf-utils/builders"; -import { CustomConfluenceClient, LoaderAdaptor } from "./adaptors"; +import { RequiredConfluenceClient, LoaderAdaptor } from "./adaptors"; +import { JSONDocNode } from "@atlaskit/editor-json-transformer"; +import { prepareAdfToUpload } from "./AdfProcessing"; +import { ConfluenceSettings } from "./Settings"; const blankPageAdf: string = JSON.stringify(doc(p("Page not published yet"))); @@ -37,12 +40,13 @@ function flattenTree( } export async function ensureAllFilesExistInConfluence( - confluenceClient: CustomConfluenceClient, + confluenceClient: RequiredConfluenceClient, adaptor: LoaderAdaptor, node: LocalAdfFileTreeNode, spaceKey: string, parentPageId: string, - topPageId: string + topPageId: string, + settings: ConfluenceSettings ): Promise { const confluenceNode = await createFileStructureInConfluence( confluenceClient, @@ -56,11 +60,13 @@ export async function ensureAllFilesExistInConfluence( const pages = flattenTree(confluenceNode); + prepareAdfToUpload(pages, settings); + return pages; } async function createFileStructureInConfluence( - confluenceClient: CustomConfluenceClient, + confluenceClient: RequiredConfluenceClient, adaptor: LoaderAdaptor, node: LocalAdfFileTreeNode, spaceKey: string, @@ -73,7 +79,7 @@ async function createFileStructureInConfluence( } let version: number; - let existingAdf: string | undefined; + let existingAdf: JSONDocNode | undefined; let lastUpdatedBy: string | undefined; const file: ConfluenceAdfFile = { ...node.file, @@ -93,11 +99,13 @@ async function createFileStructureInConfluence( file.pageId = pageDetails.id; file.spaceKey = pageDetails.spaceKey; version = 
pageDetails.version; - existingAdf = pageDetails.existingAdf; + existingAdf = JSON.parse( + pageDetails.existingAdf ?? "{}" + ) as JSONDocNode; lastUpdatedBy = pageDetails.lastUpdatedBy; } else { version = 0; - existingAdf = ""; + existingAdf = doc(p()); } const childDetailsTasks = node.children.map((childNode) => { @@ -118,13 +126,13 @@ async function createFileStructureInConfluence( file: file, version, lastUpdatedBy: lastUpdatedBy ?? "", - existingAdf: existingAdf ?? "", + existingAdf, children: childDetails, }; } async function ensurePageExists( - confluenceClient: CustomConfluenceClient, + confluenceClient: RequiredConfluenceClient, adaptor: LoaderAdaptor, file: LocalAdfFile, spaceKey: string, diff --git a/packages/lib/src/adaptors/index.ts b/packages/lib/src/adaptors/index.ts index f0587bdc..3166a914 100644 --- a/packages/lib/src/adaptors/index.ts +++ b/packages/lib/src/adaptors/index.ts @@ -33,7 +33,7 @@ export interface LoaderAdaptor { ): Promise; } -export interface CustomConfluenceClient { +export interface RequiredConfluenceClient { content: Api.Content; space: Api.Space; contentAttachments: Api.ContentAttachments; diff --git a/packages/obsidian/src/MyBaseClient.ts b/packages/obsidian/src/MyBaseClient.ts index 29aea811..51c36c16 100644 --- a/packages/obsidian/src/MyBaseClient.ts +++ b/packages/obsidian/src/MyBaseClient.ts @@ -7,6 +7,7 @@ import { AuthenticationService, } from "confluence.js"; import { requestUrl } from "obsidian"; +import { RequiredConfluenceClient } from "@markdown-confluence/lib"; const ATLASSIAN_TOKEN_CHECK_FLAG = "X-Atlassian-Token"; const ATLASSIAN_TOKEN_CHECK_NOCHECK_VALUE = "no-check"; @@ -186,7 +187,10 @@ export class MyBaseClient implements Client { } } -export class CustomConfluenceClient extends MyBaseClient { +export class ObsidianConfluenceClient + extends MyBaseClient + implements RequiredConfluenceClient +{ content = new Api.Content(this); space = new Api.Space(this); contentAttachments = new Api.ContentAttachments(this); diff --git a/packages/obsidian/src/main.ts b/packages/obsidian/src/main.ts index 59ba9105..988376fb 100644 --- a/packages/obsidian/src/main.ts +++ b/packages/obsidian/src/main.ts @@ -15,7 +15,7 @@ import { ElectronMermaidRenderer } from "@markdown-confluence/mermaid-electron-r import { ConfluenceSettingTab } from "./ConfluenceSettingTab"; import ObsidianAdaptor from "./adaptors/obsidian"; import { CompletedModal } from "./CompletedModal"; -import { CustomConfluenceClient } from "./MyBaseClient"; +import { ObsidianConfluenceClient } from "./MyBaseClient"; import AdfView, { ADF_VIEW_TYPE } from "./AdfView"; import { ConfluencePerPageForm, @@ -77,7 +77,7 @@ export default class ConfluencePlugin extends Plugin { this.app ); const mermaidRenderer = new ElectronMermaidRenderer(); - const confluenceClient = new CustomConfluenceClient({ + const confluenceClient = new ObsidianConfluenceClient({ host: this.settings.confluenceBaseUrl, authentication: { basic: {