From c6b64e823bb9732d14b5b9b347b1e661651ba432 Mon Sep 17 00:00:00 2001
From: Jason Dent
Date: Mon, 19 Feb 2018 21:28:34 +0100
Subject: [PATCH] Remove the output from the unit test.

---
 src/grammar/display.ts       | 38 ++++++++++++++++++++++++++++++++
 src/grammar/grammar.test.ts  | 42 ++++++------------------------------
 src/grammar/tokenize.test.ts | 12 ++++-------
 src/grammar/tokenize.ts      |  8 -------
 4 files changed, 48 insertions(+), 52 deletions(-)
 create mode 100644 src/grammar/display.ts

diff --git a/src/grammar/display.ts b/src/grammar/display.ts
new file mode 100644
index 00000000000..308e154ec02
--- /dev/null
+++ b/src/grammar/display.ts
@@ -0,0 +1,38 @@
+import chalk, { Chalk } from 'chalk';
+import { Grammar } from './grammar';
+
+type ColorMap = [RegExp, Chalk][];
+
+const colorMap: ColorMap = [
+    [/ keyword/, chalk.yellow],
+    [/ entity.name/, chalk.blue],
+    [/ variable/, chalk.greenBright],
+    [/ string/, chalk.yellowBright],
+    [/comment/, chalk.dim.green],
+    [/ punctuation/, chalk.yellow],
+    [/support.function/, chalk.greenBright],
+    [/^source$/, chalk.gray]
+];
+
+export function colorize(text: string, scopes: string): string {
+    for (const [reg, fn] of colorMap) {
+        if (reg.test(scopes)) {
+            return fn(text);
+        }
+    }
+    return text;
+}
+
+export function *formatTokenizeText(text: string, grammar: Grammar, colorizer: (text: string, scopes: string) => string = colorize) {
+    for (const tr of grammar.tokenizeText(text.split('\n'))) {
+        const {line: rawLine, lineNumber, tokens} = tr;
+        const line = rawLine.replace(/\t/g, ' ');
+        yield `${lineNumber} ${line}`;
+        const results = tokens.map(t => ({ text: line.slice(t.startIndex, t.endIndex), scopes: t.scopes.join(' ')}));
+        const w = Math.max(...results.map(t => t.text.length));
+        for (const {text, scopes} of results) {
+            yield `  ${colorizer(text.padEnd(w + 2), scopes)} => ${scopes}`;
+        }
+        yield '';
+    }
+}
diff --git a/src/grammar/grammar.test.ts b/src/grammar/grammar.test.ts
index 368ec31e13e..57244f58ad6 100644
--- a/src/grammar/grammar.test.ts
+++ b/src/grammar/grammar.test.ts
@@ -2,7 +2,7 @@ import { Grammar } from './grammar';
 import { expect } from 'chai';
 import * as path from 'path';
 import * as fs from 'fs-extra';
-import chalk, { Chalk } from 'chalk';
+import { formatTokenizeText } from './display';
 
 const javascriptGrammarFile = path.join(__dirname, '..', '..', 'samples', 'syntax', 'javascript.tmLanguage.json');
 const sampleJavascriptFile = path.join(__dirname, '..', '..', 'samples', 'src', 'sample.js');
@@ -23,7 +23,7 @@ describe('Validate Grammar', () => {
         const sampleFile = sampleGolangFile;
         const file = await fs.readFile(sampleFile, 'utf8');
         for (const s of formatTokenizeText(file, grammar)) {
-            console.log(s);
+            output(s);
         }
     });
 
@@ -45,44 +45,14 @@ describe('Validate Grammar', () => {
             expect(last).to.be.eq(line.length);
         }
         for (const s of formatTokenizeText(file, grammar)) {
-            console.log(s);
+            output(s);
         }
     });
 });
 
-type ColorMap = [RegExp, Chalk][];
-
-const colorMap: ColorMap = [
-    [/ keyword/, chalk.yellow],
-    [/ entity.name/, chalk.blue],
-    [/ variable/, chalk.greenBright],
-    [/ string/, chalk.yellowBright],
-    [/comment/, chalk.dim.green],
-    [/ punctuation/, chalk.yellow],
-    [/support.function/, chalk.greenBright],
-    [/^source$/, chalk.gray]
-];
-
-function colorize(text: string, scopes: string): string {
-    for (const [reg, fn] of colorMap) {
-        if (reg.test(scopes)) {
-            return fn(text);
-        }
-    }
-    return text;
+function output(text: string) {
+    expect(text).to.not.be.undefined;
+    // console.log(text);
 }
 
-function *formatTokenizeText(text: string, grammar: Grammar) {
-    for (const tr of grammar.tokenizeText(text.split('\n'))) {
-        const {line: rawLine, lineNumber, tokens} = tr;
-        const line = rawLine.replace(/\t/g, ' ');
-        yield `${lineNumber} ${line}`;
-        const results = tokens.map(t => ({ text: line.slice(t.startIndex, t.endIndex), scopes: t.scopes.join(' ')}));
-        const w = Math.max(...results.map(t => t.text.length));
-        for (const {text, scopes} of results) {
-            yield `  ${colorize(text.padEnd(w + 2), scopes)} => ${scopes}`;
-        }
-        yield '';
-    }
-}
 
diff --git a/src/grammar/tokenize.test.ts b/src/grammar/tokenize.test.ts
index f21027395a3..c77ecfbbae0 100644
--- a/src/grammar/tokenize.test.ts
+++ b/src/grammar/tokenize.test.ts
@@ -143,7 +143,7 @@ describe('Validate Tokenizer', () => {
         const text = `const x = 'it\\'s good'; // comment`;
         const r = tokenizeLine(text, rule);
         const tokens = r.tokens;
-        console.log(r);
+        // console.log(r);
         let startIndex = 0;
         for (const t of tokens) {
             expect(t.startIndex).to.equal(startIndex);
@@ -171,16 +171,12 @@ describe('Validate Tokenizer', () => {
         let rule = sampleJavascriptGrammarRule;
         expect(lines).to.be.not.empty;
         for (const line of lines) {
-            console.log(line);
             const r = tokenizeLine(line, rule);
-            r.tokens.forEach(t => {
-                const text = JSON.stringify(line.slice(t.startIndex, t.endIndex));
-                const scope = t.scopes.join(', ');
-                console.log(`${text} => ${scope}`);
-            });
+            if (line !== '') {
+                expect(r.tokens).to.not.be.empty;
+            }
             rule = r.state;
         }
-        expect(true).to.be.true;
     });
 });
 
diff --git a/src/grammar/tokenize.ts b/src/grammar/tokenize.ts
index 8ed6e6318b9..1192c5db50c 100644
--- a/src/grammar/tokenize.ts
+++ b/src/grammar/tokenize.ts
@@ -49,9 +49,6 @@ export function tokenizeLine(text: string, rule: Rule): TokenizeLineResult {
     while (offset < text.length) {
         const { match, rule: matchingRule } = matchRule(text, offset, rule);
         if (match && match.index <= endOffset) {
-            if (matchingRule.comment) {
-                console.log(matchingRule.comment);
-            }
             if (match.index > offset) {
                 tokens.push({ startIndex: offset, endIndex: match.index, scopes: extractScopes(rule) });
             }
@@ -101,13 +98,8 @@ export function tokenizeLine(text: string, rule: Rule): TokenizeLineResult {
         }
         if (offset === endOffset) {
             // process ending rule.
-            console.log(`End of match: ${rule.comment} EndRegEx: ${endMatch}`);
             if (rule.parent && endMatch) {
                 rule = findBoundingRule(rule);
-                if (isPatternBeginEnd(rule.pattern)) {
-                    const pattern = rule.pattern;
-                    console.log(`End ${rule.depth}: ${pattern.begin} <--> ${pattern.end} # ` + (pattern.name || pattern.comment || ''));
-                }
                 tokens.push(...tokenizeCapture(rule, endMatch, endCaptures(rule.pattern)));
                 offset = endMatch.index + endMatch[0].length;
             }
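
The display code that used to live in grammar.test.ts is now exported from
src/grammar/display.ts, and the tests route every formatted line through
output(), which only asserts that the line is defined, so running the suite no
longer prints the tokenized sample files. A minimal sketch of how the exported
helpers can still be used for ad-hoc debugging, assuming a Grammar instance has
already been constructed (the tests do that in setup code outside these hunks);
printTokens is a hypothetical wrapper, not part of this patch:

    import { Grammar } from './grammar';
    import { formatTokenizeText, colorize } from './display';

    // formatTokenizeText yields one string per output line: the numbered source
    // line, followed by a "<token> => <scopes>" line for each token on it.
    function printTokens(sourceText: string, grammar: Grammar): void {
        for (const line of formatTokenizeText(sourceText, grammar, colorize)) {
            console.log(line);
        }
    }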