[Lexer] Refactor Tokens definitions.
aquariuslt committed Sep 18, 2017
1 parent ff0894c commit 6e824c3
Showing 8 changed files with 148 additions and 116 deletions.
1 change: 1 addition & 0 deletions src/lexer/index.ts
@@ -1,4 +1,5 @@
import * as marked from 'marked';
import {TokensList} from '../shared/token';


export default class Lexer {
2 changes: 2 additions & 0 deletions src/parser/index.ts
@@ -1,3 +1,5 @@
import {TokensList} from '../shared/token';

export default class Parser {
constructor(src: TokensList, options?: ParserOptions) {

6 changes: 4 additions & 2 deletions src/shared/token.d.ts → src/shared/token.ts
@@ -1,4 +1,6 @@
-declare type Token =
+import * as Tokens from 'shared/tokens';
+
+export type Token =
Tokens.Space
| Tokens.Code
| Tokens.Heading
@@ -16,7 +18,7 @@ declare type Token =
| Tokens.Text;


-declare type TokensList = Token[] & {
+export type TokensList = Token[] & {
links: {
[key: string]: { href: string; title: string; }
}
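Since token.ts now exports Token and TokensList instead of declaring them globally, downstream modules such as the Lexer and Parser above pull them in with an explicit import. A minimal consumer sketch, assuming the same relative path used in the diff (the helper function is illustrative, not part of this commit):

import {TokensList} from '../shared/token';

// TokensList is Token[] intersected with a `links` map, so both parts are usable here.
function countLinkDefinitions(tokens: TokensList): number {
  return Object.keys(tokens.links).length;
}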
18 changes: 18 additions & 0 deletions src/shared/token.types.ts
@@ -0,0 +1,18 @@
export default class TokenTypes {
SPACE = 'space';
CODE = 'code';
HEADING = 'heading';
TABLE = 'table';
HR = 'hr';
BLOCKQUOTE_START = 'blockquote_start';
BLOCKQUOTE_END = 'blockquote_end';
LIST_START = 'list_start';
LOOSE_ITEM_START = 'loose_item_start';
LIST_ITEM_START = 'list_item_start';
LIST_ITEM_END = 'list_item_end';
LIST_END = 'list_end';
PARAGRAPH = 'paragraph';
HTML = 'html';
TEXT = 'text';

}
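One way to consume these constants is to instantiate the class once and compare against its fields instead of scattering string literals through the lexer and parser. A small sketch under that assumption (the import paths and the isHeading helper are illustrative, not part of this commit):

import TokenTypes from './shared/token.types';
import {Token} from './shared/token';

const tokenTypes = new TokenTypes();

// Compare against the named constant rather than repeating the 'heading' literal.
function isHeading(token: Token): boolean {
  return token.type === tokenTypes.HEADING;
}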
74 changes: 0 additions & 74 deletions src/shared/tokens.d.ts

This file was deleted.

75 changes: 75 additions & 0 deletions src/shared/tokens.ts
@@ -0,0 +1,75 @@

export interface Space {
type: 'space';
}

export interface Code {
type: 'code';
lang?: string;
text: string;
}

export interface Heading {
type: 'heading';
depth: number;
text: string;
}

export interface Table {
type: 'table';
header: string[];
align: Array<'center' | 'left' | 'right' | null>;
cells: string[][];
}

export interface Hr {
type: 'hr';
}

export interface BlockquoteStart {
type: 'blockquote_start';
}

export interface BlockquoteEnd {
type: 'blockquote_end';
}

export interface ListStart {
type: 'list_start';
ordered: boolean;
}

export interface LooseItemStart {
type: 'loose_item_start';
}

export interface ListItemStart {
type: 'list_item_start';
}

export interface ListItemEnd {
type: 'list_item_end';
}

export interface ListEnd {
type: 'list_end';
}

export interface Paragraph {
type: 'paragraph';
pre?: boolean;
text: string;
}

export interface HTML {
type: 'html';
pre: boolean;
text: string;
}

export interface Text {
type: 'text';
text: string;
}


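Because each interface carries a string-literal type field, the Token union assembled from them in token.ts behaves as a discriminated union: switching on type narrows the token so fields like depth, lang, or ordered type-check per branch. A hedged sketch (the describeToken function and import path are illustrative, not part of this commit):

import {Token} from './shared/token';

// Each case narrows `token` to the matching interface from tokens.ts.
function describeToken(token: Token): string {
  switch (token.type) {
    case 'heading':
      return `heading(depth=${token.depth}): ${token.text}`;
    case 'code':
      return token.lang ? `code block (${token.lang})` : 'code block';
    case 'list_start':
      return token.ordered ? 'ordered list' : 'unordered list';
    default:
      return token.type;
  }
}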
72 changes: 44 additions & 28 deletions test/unit/specs/lexer-heading.spec.js
@@ -4,66 +4,82 @@ import Lexer from '@/lexer';
/**
* Lexer().lex testing
* Test Cases from http://wowubuntu.com/markdown/ example
*
* */
describe('lexer:headings', () => {
/**
* @example
* ```
* new Lexer().lex('# Hello Title')
* ```
*
* @output
* [
* {
* type:'heading',
* depth:1,
* text: 'Hello Title'
* }
* ]
* */
it('should lex h1 heading tokens', () => {
const mdString = '# Hello Title';
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 1)).to.equal(true);
-    expect(_.isEqual(_.head(tokens).type, 'heading')).to.equal(true);
-    expect(_.isEqual(_.head(tokens).depth, 1)).to.equal(true);
+    expect(tokens.length).to.equal(1);
+    expect(_.head(tokens).type).to.equal('heading');
+    expect(_.head(tokens).depth).to.equal(1);
});

it('should lex h2 heading tokens', () => {
const mdString = '## Hello h2 Title';
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 1)).to.equal(true);
-    expect(_.isEqual(_.head(tokens).type, 'heading')).to.equal(true);
-    expect(_.isEqual(_.head(tokens).depth, 2)).to.equal(true);
+    expect(tokens.length).to.equal(1);
+    expect(_.head(tokens).type).to.equal('heading');
+    expect(_.head(tokens).depth).to.equal(2);
});

it('should lex h3 heading tokens', () => {
const mdString = '### Hello h3 Title';
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 1)).to.equal(true);
-    expect(_.isEqual(_.head(tokens).type, 'heading')).to.equal(true);
-    expect(_.isEqual(_.head(tokens).depth, 3)).to.equal(true);
+    expect(tokens.length).to.equal(1);
+    expect(_.head(tokens).type).to.equal('heading');
+    expect(_.head(tokens).depth).to.equal(3);
});

it('should lex h4 heading tokens', () => {
const mdString = '#### Hello h4 Title';
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 1)).to.equal(true);
-    expect(_.isEqual(_.head(tokens).type, 'heading')).to.equal(true);
-    expect(_.isEqual(_.head(tokens).depth, 4)).to.equal(true);
+    expect(tokens.length).to.equal(1);
+    expect(_.head(tokens).type).to.equal('heading');
+    expect(_.head(tokens).depth).to.equal(4);
});

it('should lex h5 heading tokens', () => {
const mdString = '##### Hello h5 Title';
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 1)).to.equal(true);
-    expect(_.isEqual(_.head(tokens).type, 'heading')).to.equal(true);
-    expect(_.isEqual(_.head(tokens).depth, 5)).to.equal(true);
+    expect(tokens.length).to.equal(1);
+    expect(_.head(tokens).type).to.equal('heading');
+    expect(_.head(tokens).depth).to.equal(5);
});

it('should lex h6 heading tokens', () => {
const mdString = '###### Hello h6 Title';
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 1)).to.equal(true);
-    expect(_.isEqual(_.head(tokens).type, 'heading')).to.equal(true);
-    expect(_.isEqual(_.head(tokens).depth, 6)).to.equal(true);
+    expect(tokens.length).to.equal(1);
+    expect(_.head(tokens).type).to.equal('heading');
+    expect(_.head(tokens).depth).to.equal(6);
});

/**
@@ -74,17 +90,17 @@ describe('lexer:headings', () => {
* This is an H1
* =============
* `
-   * @output
+   * @html
* <h1>This is an H1</h1>
* */
it('should lex h1 heading tokens from setext mode', () => {
const mdString = `This is an H1\n=============`;
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 1)).to.equal(true);
-    expect(_.isEqual(_.head(tokens).type, 'heading')).to.equal(true);
-    expect(_.isEqual(_.head(tokens).depth, 1)).to.equal(true);
+    expect(tokens.length).to.equal(1);
+    expect(_.head(tokens).type).to.equal('heading');
+    expect(_.head(tokens).depth).to.equal(1);
});

/**
@@ -103,9 +119,9 @@ describe('lexer:headings', () => {
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 1)).to.equal(true);
-    expect(_.isEqual(_.head(tokens).type, 'heading')).to.equal(true);
-    expect(_.isEqual(_.head(tokens).depth, 2)).to.equal(true);
+    expect(tokens.length).to.equal(1);
+    expect(_.head(tokens).type).to.equal('heading');
+    expect(_.head(tokens).depth).to.equal(2);
});

/**
@@ -123,8 +139,8 @@ describe('lexer:headings', () => {
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 1)).to.equal(true);
-    expect(_.isEqual(_.head(tokens).type, 'heading')).to.equal(true);
-    expect(_.isEqual(_.head(tokens).depth, 1)).to.equal(true);
+    expect(tokens.length).to.equal(1);
+    expect(_.head(tokens).type).to.equal('heading');
+    expect(_.head(tokens).depth).to.equal(1);
});
});
16 changes: 4 additions & 12 deletions test/unit/specs/lexer-list.spec.js
@@ -10,9 +10,9 @@ describe('lexer:lists', () => {
let lexer = new Lexer();
let tokens = lexer.lex(mdString);

-    expect(_.isEqual(tokens.length, 5)).to.eq(true);
-    expect(_.isEqual(_.head(tokens).ordered, false)).to.eq(true);
-    expect(_.isEqual(_.head(tokens).type,'list_start')).to.eq(true);
+    expect(tokens.length).to.eq(5);
+    expect(_.head(tokens).ordered).to.eq(false);
+    expect(_.head(tokens).type).to.eq('list_start');
});

  it('should lex * as an unordered list', () => {
@@ -31,13 +31,5 @@ describe('lexer:lists', () => {

  it('should lex - as an unordered list', () => {
});

-  it('', () => {
-  });
-
-  it('', () => {
-  });
-
-  it('', () => {
-  });

});
