// SingleDocumentProcessorTest.spec.ts
import { resolve } from 'path';
import { Document } from '../../modules/ast/Document';
import { NLPBasedSentenceRecognizer } from '../../modules/nlp/NLPBasedSentenceRecognizer';
import { SingleDocumentProcessor } from '../../modules/app/SingleDocumentProcessor';
import { Parser } from '../../modules/parser/Parser';
import { Lexer } from '../../modules/lexer/Lexer';
import { NLPTrainer } from '../../modules/nlp/NLPTrainer';
import { Options } from '../../modules/app/Options';
import { LanguageContentLoader, JsonLanguageContentLoader } from '../../modules/dict/LanguageContentLoader';
import { LexerBuilder } from '../../modules/lexer/LexerBuilder';
/**
 * Tests for SingleDocumentProcessor: lexes source lines, parses the resulting
 * nodes, and runs NLP-based sentence recognition over a Document.
 *
 * @author Thiago Delgado Pinto
 */
describe( 'SingleDocumentProcessorTest', () => {

    // Language under test: Portuguese ('pt'); its dictionary is loaded below.
    const LANGUAGE = 'pt';

    // Language content is loaded from the compiled output directory ('dist/').
    const options: Options = new Options( resolve( process.cwd(), 'dist/' ) );
    const langLoader: LanguageContentLoader =
        new JsonLanguageContentLoader( options.languageDir, {}, options.encoding );

    // Shared pipeline pieces — never reassigned, so declared `const` (were `let`).
    const lexer: Lexer = ( new LexerBuilder( langLoader ) ).build( options, LANGUAGE );
    const parser = new Parser();
    const nlpTrainer = new NLPTrainer( langLoader );
    const nlpRec: NLPBasedSentenceRecognizer = new NLPBasedSentenceRecognizer( nlpTrainer );
    const singleDocProcessor: SingleDocumentProcessor = new SingleDocumentProcessor();

    // Runs the full analysis pipeline over `doc`, populating its properties
    // (e.g. fileErrors, databases). Parameters are now explicitly typed
    // (were implicit `any`), and `aLexer` avoids shadowing the suite-level `lexer`.
    const analyze = ( doc: Document, aLexer: Lexer ): void => {
        singleDocProcessor.analyzeNodes( doc, aLexer, parser, nlpRec, LANGUAGE );
    };

    it( 'is able to recognize a defined database', () => {

        // Feed a Portuguese database declaration into the lexer, line by line
        // (line numbers are 1-based).
        [
            'Banco de dados: acme',
            '- tipo é "mysql"',
            '- host é "127.0.0.2"',
            '- username é "root"',
            '- password é ""',
        ].forEach( ( val, index ) => lexer.addNodeFromLine( val, index + 1 ) );

        const doc: Document = {};
        analyze( doc, lexer );

        // The declaration must produce no errors and exactly one database node.
        expect( doc.fileErrors ).toHaveLength( 0 );
        expect( doc.databases ).toHaveLength( 1 );
    } );

} );