
Commit

🏗️ Remove source code mapping
88250 committed Nov 19, 2019
1 parent f220bdd commit 9cc3009
Showing 40 changed files with 563 additions and 851 deletions.
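
The change repeated across these files is mechanical: the token wrapper type items/item, whose underlying byte had to be read through term(), is replaced by plain []byte/byte, and strToItems becomes strToBytes. Below is a minimal, self-contained before/after sketch; the definition of the removed item wrapper is an assumption (only its call sites appear in this diff), so treat it as illustrative rather than the actual removed type.

package main

import "fmt"

// item/items reconstruct the wrapper type this commit removes. The real
// definition is not part of this diff; only its call sites (strToItems,
// tokens[j].term(), items{...}) are, so the fields here are assumptions.
type item struct {
	b  byte // the underlying byte
	ln int  // hypothetical source-mapping payload, e.g. a line number
}

func (it item) term() byte { return it.b }

type items []item

func strToItems(s string) items {
	ret := make(items, 0, len(s))
	for i := 0; i < len(s); i++ {
		ret = append(ret, item{b: s[i]})
	}
	return ret
}

// Before: every read unwraps the byte via term().
func indexOfAtBefore(tokens items) int {
	for i := 0; i < len(tokens); i++ {
		if '@' == tokens[i].term() {
			return i
		}
	}
	return -1
}

// After: tokens are plain []byte and are indexed directly;
// strToItems becomes strToBytes (essentially []byte(s)).
func indexOfAtAfter(tokens []byte) int {
	for i := 0; i < len(tokens); i++ {
		if '@' == tokens[i] {
			return i
		}
	}
	return -1
}

func main() {
	fmt.Println(indexOfAtBefore(strToItems("user@example.com"))) // 4
	fmt.Println(indexOfAtAfter([]byte("user@example.com")))      // 4
}
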
100 changes: 50 additions & 50 deletions auto_link.go
@@ -42,8 +42,8 @@ func (t *Tree) parseGFMAutoLink(node *Node) {
}
}

var mailto = strToItems("mailto:")
var at = strToItems("@")
var mailto = strToBytes("mailto:")
var at = strToBytes("@")

func (t *Tree) parseGFMAutoEmailLink0(node *Node) {
tokens := node.tokens
@@ -58,13 +58,13 @@ func (t *Tree) parseGFMAutoEmailLink0(node *Node) {
// Split into groups on whitespace and process each one
loopPart:
for i < length {
var group items
var group []byte
atIndex = 0
j = i

// Accumulate a group until a whitespace character is hit
for ; j < length; j++ {
token = tokens[j].term()
token = tokens[j]
if !isWhitespace(token) {
group = append(group, tokens[j])
if '@' == token {
@@ -77,7 +77,7 @@
}
if i == j {
// The first character hit while accumulating the group was whitespace, so insert that whitespace character in front as a text node
text := &Node{typ: NodeText, tokens: items{tokens[j]}}
text := &Node{typ: NodeText, tokens: []byte{tokens[j]}}
node.InsertBefore(text)
i++
continue
@@ -96,7 +96,7 @@ loopPart:

k = 0
for ; k < atIndex; k++ {
token = group[k].term()
token = group[k]
if !t.isValidEmailSegment1(token) {
text := &Node{typ: NodeText, tokens: group}
node.InsertBefore(text)
@@ -105,10 +105,10 @@ loopPart:
}

k++ // Skip the @ and check the part after it
var item item
var item byte
for ; k < len(group); k++ {
item = group[k]
token = group[k].term()
token = group[k]
if !t.isValidEmailSegment2(token) {
text := &Node{typ: NodeText, tokens: group}
node.InsertBefore(text)
@@ -123,7 +123,7 @@ loopPart:
link := t.newLink(NodeLink, group, append(mailto, group...), nil, 2)
node.InsertBefore(link)
// Insert the . as a text node
node.InsertBefore(&Node{typ: NodeText, tokens: items{item}})
node.InsertBefore(&Node{typ: NodeText, tokens: []byte{item}})
} else if itemHyphen == token || itemUnderscore == token {
// If it ends with - or _, the whole string cannot be treated as an email link
text := &Node{typ: NodeText, tokens: group}
@@ -154,18 +154,18 @@ func (t *Tree) isValidEmailSegment2(token byte) bool {
}

var (
httpProto = strToItems("http://")
httpProto = strToBytes("http://")

// validAutoLinkDomainSuffix lists the domain suffixes used to validate domains during GFM autolink parsing.
validAutoLinkDomainSuffix = []items{strToItems("top"), strToItems("com"), strToItems("net"), strToItems("org"), strToItems("edu"), strToItems("gov"),
strToItems("cn"), strToItems("io"), strToItems("me"), strToItems("biz"), strToItems("co"), strToItems("live"), strToItems("pro"), strToItems("xyz"),
strToItems("win"), strToItems("club"), strToItems("tv"), strToItems("wiki"), strToItems("site"), strToItems("tech"), strToItems("space"), strToItems("cc"),
strToItems("name"), strToItems("social"), strToItems("band"), strToItems("pub"), strToItems("info")}
validAutoLinkDomainSuffix = [][]byte{strToBytes("top"), strToBytes("com"), strToBytes("net"), strToBytes("org"), strToBytes("edu"), strToBytes("gov"),
strToBytes("cn"), strToBytes("io"), strToBytes("me"), strToBytes("biz"), strToBytes("co"), strToBytes("live"), strToBytes("pro"), strToBytes("xyz"),
strToBytes("win"), strToBytes("club"), strToBytes("tv"), strToBytes("wiki"), strToBytes("site"), strToBytes("tech"), strToBytes("space"), strToBytes("cc"),
strToBytes("name"), strToBytes("social"), strToBytes("band"), strToBytes("pub"), strToBytes("info")}
)

// AddAutoLinkDomainSuffix adds suffix to the list of domain suffixes recognized by autolink parsing.
func (lute *Lute) AddAutoLinkDomainSuffix(suffix string) {
validAutoLinkDomainSuffix = append(validAutoLinkDomainSuffix, strToItems(suffix))
validAutoLinkDomainSuffix = append(validAutoLinkDomainSuffix, strToBytes(suffix))
}

func (t *Tree) parseGFMAutoLink0(node *Node) {
@@ -177,26 +177,26 @@ func (t *Tree) parseGFMAutoLink0(node *Node) {
return
}

var token item
var consumed = make(items, 0, 256)
var tmp = make(items, 0, 16)
var token byte
var consumed = make([]byte, 0, 256)
var tmp = make([]byte, 0, 16)
www := false
for i < length {
token = tokens[i]
var protocol items
var protocol []byte
// Check the prefix
tmp = tokens[i:]
tmpLen := len(tmp)
if 10 <= tmpLen /* www.xxx.xx */ && 'w' == tmp[0].term() && 'w' == tmp[1].term() && 'w' == tmp[2].term() && '.' == tmp[3].term() {
if 10 <= tmpLen /* www.xxx.xx */ && 'w' == tmp[0] && 'w' == tmp[1] && 'w' == tmp[2] && '.' == tmp[3] {
protocol = httpProto
www = true
} else if 13 <= tmpLen /* http://xxx.xx */ && 'h' == tmp[0].term() && 't' == tmp[1].term() && 't' == tmp[2].term() && 'p' == tmp[3].term() && ':' == tmp[4].term() && '/' == tmp[5].term() && '/' == tmp[6].term() {
} else if 13 <= tmpLen /* http://xxx.xx */ && 'h' == tmp[0] && 't' == tmp[1] && 't' == tmp[2] && 'p' == tmp[3] && ':' == tmp[4] && '/' == tmp[5] && '/' == tmp[6] {
protocol = tmp[0:7]
i += 7
} else if 14 <= tmpLen /* https://xxx.xx */ && 'h' == tmp[0].term() && 't' == tmp[1].term() && 't' == tmp[2].term() && 'p' == tmp[3].term() && 's' == tmp[4].term() && ':' == tmp[5].term() && '/' == tmp[6].term() && '/' == tmp[7].term() {
} else if 14 <= tmpLen /* https://xxx.xx */ && 'h' == tmp[0] && 't' == tmp[1] && 't' == tmp[2] && 'p' == tmp[3] && 's' == tmp[4] && ':' == tmp[5] && '/' == tmp[6] && '/' == tmp[7] {
protocol = tmp[0:8]
i += 8
} else if 12 <= tmpLen /* ftp://xxx.xx */ && 'f' == tmp[0].term() && 't' == tmp[1].term() && 'p' == tmp[2].term() && ':' == tmp[3].term() && '/' == tmp[4].term() && '/' == tmp[5].term() {
} else if 12 <= tmpLen /* ftp://xxx.xx */ && 'f' == tmp[0] && 't' == tmp[1] && 'p' == tmp[2] && ':' == tmp[3] && '/' == tmp[4] && '/' == tmp[5] {
protocol = tmp[0:6]
i += 6
} else {
@@ -215,26 +215,26 @@ func (t *Tree) parseGFMAutoLink0(node *Node) {
if 0 < len(consumed) {
text := &Node{typ: NodeText, tokens: consumed}
node.InsertBefore(text)
consumed = make(items, 0, 256)
consumed = make([]byte, 0, 256)
}

var url items
var url []byte
j = i
for ; j < length; j++ {
token = tokens[j]
if (isWhitespace(token.term()) || itemLess == token.term()) || (!isASCIIPunct(token.term()) && !isASCIILetterNum(token.term())) {
if (isWhitespace(token) || itemLess == token) || (!isASCIIPunct(token) && !isASCIILetterNum(token)) {
break
}
url = append(url, token)
}
if i == j { // broke off at the very first character
if utf8.RuneSelf <= token.term() {
if utf8.RuneSelf <= token {
if !www {
url = append(url, protocol...)
}
for ; i < length; i++ {
token = tokens[i]
if utf8.RuneSelf > token.term() {
if utf8.RuneSelf > token {
break
}
url = append(url, token)
@@ -254,13 +254,13 @@ func (t *Tree) parseGFMAutoLink0(node *Node) {
k = 0
for ; k < len(url); k++ {
token = url[k]
if itemSlash == token.term() {
if itemSlash == token {
break
}
}
domain := url[:k]
if !t.isValidDomain(domain) {
var part items
var part []byte
if www {
part = url
} else {
@@ -280,23 +280,23 @@ func (t *Tree) parseGFMAutoLink0(node *Node) {
// Count the parentheses
for l = 0; l < length; l++ {
token = path[l]
if itemOpenParen == token.term() {
if itemOpenParen == token {
openParens++
} else if itemCloseParen == token.term() {
} else if itemCloseParen == token {
closeParens++
}
}

trimmed := false
lastToken := path[length-1]
if itemCloseParen == lastToken.term() {
if itemCloseParen == lastToken {
// Ends with ), so parenthesis matching needs to be computed
unmatches := closeParens - openParens
if 0 < unmatches {
// Walk backward from the end
for l = length - 1; 0 < unmatches; l-- {
token = path[l]
if itemCloseParen != token.term() {
if itemCloseParen != token {
break
}
unmatches--
@@ -308,13 +308,13 @@ func (t *Tree) parseGFMAutoLink0(node *Node) {
// Counts as fully matched; no need to process the trailing punctuation
trimmed = true
}
} else if itemSemicolon == lastToken.term() {
} else if itemSemicolon == lastToken {
// Check for an HTML entity
foundAmp := false
// Scan backward to check whether an & is present
for l = length - 1; 0 <= l; l-- {
token = path[l]
if itemAmpersand == token.term() {
if itemAmpersand == token {
foundAmp = true
break
}
Expand All @@ -325,7 +325,7 @@ func (t *Tree) parseGFMAutoLink0(node *Node) {
// Check whether the extracted substring looks like an entity (only letters or digits are allowed between & and ;)
isEntity := true
for j = 1; j < len(entity)-1; j++ {
if !isASCIILetterNum(entity[j].term()) {
if !isASCIILetterNum(entity[j]) {
isEntity = false
break
}
@@ -340,14 +340,14 @@ func (t *Tree) parseGFMAutoLink0(node *Node) {
}

// If neither the ) nor the ; case above was handled, apply the trailing-punctuation rule: the punctuation is not part of the link and must be trimmed off
if !trimmed && isASCIIPunct(lastToken.term()) && itemSlash != lastToken.term() {
if !trimmed && isASCIIPunct(lastToken) && itemSlash != lastToken {
path = path[:length-1]
i--
}
} else {
length = len(domain)
lastToken := domain[length-1]
if isASCIIPunct(lastToken.term()) {
if isASCIIPunct(lastToken) {
domain = domain[:length-1]
i--
}
Expand All @@ -356,7 +356,7 @@ func (t *Tree) parseGFMAutoLink0(node *Node) {
dest := protocol
dest = append(dest, domain...)
dest = append(dest, path...)
var addr items
var addr []byte
if !www {
addr = append(addr, protocol...)
}
@@ -379,7 +379,7 @@ func (t *Tree) parseGFMAutoLink0(node *Node) {

// isValidDomain validates a domain according to the valid-domain rule defined by the GFM autolink spec.
// https://github.github.com/gfm/#valid-domain
func (t *Tree) isValidDomain(domain items) bool {
func (t *Tree) isValidDomain(domain []byte) bool {
segments := split(domain, '.')
length := len(segments)
if 2 > length { // a domain must be split by . into at least two segments; anything less is invalid
@@ -395,7 +395,7 @@ func (t *Tree) isValidDomain(domain items) bool {
}

for j := 0; j < segLen; j++ {
token = segment[j].term()
token = segment[j]
if !isASCIILetterNumHyphen(token) {
return false
}
@@ -427,14 +427,14 @@ var markers = strToBytes(".!#$%&'*+/=?^_`{|}~")

func (t *Tree) parseAutoEmailLink(ctx *InlineContext) (ret *Node) {
tokens := ctx.tokens[1:]
var dest items
var dest []byte
var token byte
length := len(tokens)
passed := 0
i := 0
at := false
for ; i < length; i++ {
token = tokens[i].term()
token = tokens[i]
dest = append(dest, tokens[i])
passed++
if '@' == token {
@@ -456,7 +456,7 @@ func (t *Tree) parseAutoEmailLink(ctx *InlineContext) (ret *Node) {
i = 0
closed := false
for ; i < length; i++ {
token = domainPart[i].term()
token = domainPart[i]
passed++
if itemGreater == token {
closed = true
@@ -479,7 +479,7 @@ func (t *Tree) parseAutoEmailLink(ctx *InlineContext) (ret *Node) {
return t.newLink(NodeLink, dest, append(mailto, dest...), nil, 2)
}

func (t *Tree) newLink(typ nodeType, text, dest, title items, linkType int) (ret *Node) {
func (t *Tree) newLink(typ nodeType, text, dest, title []byte, linkType int) (ret *Node) {
ret = &Node{typ: typ, linkType: linkType}
if NodeImage == typ {
ret.AppendChild(&Node{typ: NodeBang})
@@ -499,11 +499,11 @@ func (t *Tree) newLink(typ nodeType, text, dest, title items, linkType int) (ret
func (t *Tree) parseAutolink(ctx *InlineContext) (ret *Node) {
schemed := false
scheme := ""
var dest items
var dest []byte
var token byte
i := ctx.pos + 1
for ; i < ctx.tokensLen && itemGreater != ctx.tokens[i].term(); i++ {
token = ctx.tokens[i].term()
for ; i < ctx.tokensLen && itemGreater != ctx.tokens[i]; i++ {
token = ctx.tokens[i]
if itemSpace == token {
return nil
}
@@ -521,7 +521,7 @@ func (t *Tree) parseAutolink(ctx *InlineContext) (ret *Node) {
return nil
}

if itemGreater != ctx.tokens[i].term() {
if itemGreater != ctx.tokens[i] {
return nil
}
ctx.pos = 1 + i
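One user-visible piece of this file is AddAutoLinkDomainSuffix, which now appends a []byte suffix to the [][]byte list consulted by isValidDomain. A small usage sketch follows; lute.New() is assumed from the library's public API, and the import path reflects the repository's current location, which may have differed at the time of this commit (Nov 2019).

package main

import "github.com/88250/lute"

func main() {
	// Extend the autolink suffix whitelist so that text like "www.example.dev"
	// passes isValidDomain and is turned into a link during GFM autolink parsing.
	// AddAutoLinkDomainSuffix is shown in the diff above; lute.New() is assumed.
	engine := lute.New()
	engine.AddAutoLinkDomainSuffix("dev")
	engine.AddAutoLinkDomainSuffix("app")
}
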
4 changes: 2 additions & 2 deletions blockquote.go
@@ -14,10 +14,10 @@ package lute

func (blockquote *Node) blockquoteContinue(context *Context) int {
var ln = context.currentLine
if !context.indented && ln.peek(context.nextNonspace).term() == itemGreater {
if !context.indented && peek(ln, context.nextNonspace) == itemGreater {
context.advanceNextNonspace()
context.advanceOffset(1, false)
if token := ln.peek(context.offset); itemSpace == token.term() || itemTab == token.term() {
if token := peek(ln, context.offset); itemSpace == token || itemTab == token {
context.advanceOffset(1, true)
}
return 0
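
The blockquote.go hunk also swaps the method-style ln.peek(pos).term() for a package-level peek(ln, pos) that returns a plain byte. A minimal sketch of such a helper is shown below; the real implementation lives in one of the other changed files and is not part of this excerpt, so the out-of-range return value is an assumption.

package lute

// peek returns the byte at pos, a sketch of the package-level helper used by
// blockquoteContinue above. The actual implementation is in another changed
// file, so returning 0 when pos is out of range is an assumption.
func peek(tokens []byte, pos int) byte {
	if pos < len(tokens) {
		return tokens[pos]
	}
	return 0
}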
