// @flow
import type { PrismToken, Token } from "../types";
const newlineRe = /\r\n|\r|\n/;
// Empty lines need to contain a single empty token, denoted with { empty: true }
const normalizeEmptyLines = (line: Token[]) => {
if (line.length === 0) {
line.push({
types: ["plain"],
content: "",
empty: true
});
} else if (line.length === 1 && line[0].content === "") {
line[0].empty = true;
}
};
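// For example (illustrative sketch): a blank source line arrives here either as
// [] or as [{ types: ["plain"], content: "" }]; both cases end up as
// [{ types: ["plain"], content: "", empty: true }], so downstream rendering can
// treat empty lines uniformly.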
const appendTypes = (
types: string[],
add: string[] | string
): string[] => {
  const typesSize = types.length;
if (typesSize > 0 && types[typesSize - 1] === add) {
return types;
}
return types.concat(add);
};
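// For example (illustrative sketch of appendTypes): appendTypes(["tag"], "tag")
// returns the array unchanged, while appendTypes(["tag"], "punctuation") yields
// ["tag", "punctuation"]. When `add` is an alias array such as ["important"],
// the strict-equality check never matches, so the alias is always concatenated.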
// Takes an array of Prism's tokens and groups them by line, turning plain
// strings into tokens as well. Tokens can become recursive in some cases,
// which means that their types are concatenated. Plain-string tokens, however,
// are always of type "plain".
// This is implemented iteratively rather than recursively to avoid exceeding the
// call-stack limit, since it's unclear how deeply nested Prism's tokens can become.
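// For example (illustrative input shaped like Prism's token stream, not actual
// Prism output):
//   normalizeTokens(["if (", { type: "boolean", content: "true" }, ")\n"])
// yields two lines:
//   [
//     [
//       { types: ["plain"], content: "if (" },
//       { types: ["boolean"], content: "true" },
//       { types: ["plain"], content: ")" }
//     ],
//     [{ types: ["plain"], content: "", empty: true }]
//   ]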
const normalizeTokens = (tokens: Array<PrismToken | string>): Token[][] => {
const typeArrStack: string[][] = [[]];
const tokenArrStack = [tokens];
const tokenArrIndexStack = [0];
const tokenArrSizeStack = [tokens.length];
let i = 0;
let stackIndex = 0;
let currentLine = [];
const acc = [currentLine];
while (stackIndex > -1) {
while (
(i = tokenArrIndexStack[stackIndex]++) < tokenArrSizeStack[stackIndex]
) {
let content;
let types = typeArrStack[stackIndex];
const tokenArr = tokenArrStack[stackIndex];
const token = tokenArr[i];
// Determine content and append type to types if necessary
if (typeof token === "string") {
types = stackIndex > 0 ? types : ["plain"];
content = token;
} else {
types = appendTypes(types, token.type);
if (token.alias) {
types = appendTypes(types, token.alias);
}
content = token.content;
}
// If token.content is an array, increase the stack depth and repeat this while-loop
if (typeof content !== "string") {
stackIndex++;
typeArrStack.push(types);
tokenArrStack.push(content);
tokenArrIndexStack.push(0);
tokenArrSizeStack.push(content.length);
continue;
}
// Split by newlines
const splitByNewlines = content.split(newlineRe);
const newlineCount = splitByNewlines.length;
currentLine.push({ types, content: splitByNewlines[0] });
// Create a new line for each string on a new line
for (let i = 1; i < newlineCount; i++) {
normalizeEmptyLines(currentLine);
acc.push((currentLine = []));
currentLine.push({ types, content: splitByNewlines[i] });
}
}
    // Decrease the stack depth
stackIndex--;
typeArrStack.pop();
tokenArrStack.pop();
tokenArrIndexStack.pop();
tokenArrSizeStack.pop();
}
normalizeEmptyLines(currentLine);
return acc;
};
export default normalizeTokens;
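// Usage sketch (illustrative only, not part of this module): a consumer would
// typically feed Prism's tokenize output straight in, assuming the JavaScript
// grammar has been loaded:
//
//   import Prism from "prismjs";
//   import normalizeTokens from "./normalizeTokens";
//
//   const code = 'const answer = 42;\n';
//   const prismTokens = Prism.tokenize(code, Prism.languages.javascript);
//   const lines = normalizeTokens(prismTokens);
//   // `lines` is a Token[][]: one array per source line, each entry shaped as
//   // { types: string[], content: string, empty?: boolean }, ready to be
//   // rendered as one <span> per token.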