Skip to content

Commit

Permalink
beautify
Browse files Browse the repository at this point in the history
  • Loading branch information
ichiriac committed Jan 7, 2017
1 parent 65d16e4 commit caa80e6
Show file tree
Hide file tree
Showing 11 changed files with 300 additions and 133 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
node_modules/
coverage/
npm-debug.log
5 changes: 5 additions & 0 deletions .jsbeautifyrc
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
  "indent_size": 2,
"jslint_happy": true,
"wrap_line_length": 80
}
59 changes: 55 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,16 +1,67 @@
# docblock-parser
# DocBlock & Annotations Parser

[![npm version](https://badge.fury.io/js/docblock-parser.svg)](https://www.npmjs.com/package/parsedoc)
[![Build Status](https://travis-ci.org/glayzzle/docblock-parser.svg?branch=master)](https://travis-ci.org/glayzzle/docblock-parser)
[![Coverage Status](https://coveralls.io/repos/github/glayzzle/docblock-parser/badge.svg?branch=master)](https://coveralls.io/github/glayzzle/docblock-parser?branch=master)
[![XO code style](https://img.shields.io/badge/code_style-XO-5ed9c7.svg)](https://github.com/sindresorhus/xo)
[![Gitter](https://img.shields.io/badge/GITTER-join%20chat-green.svg)](https://gitter.im/glayzzle/Lobby)

Parses docblock comments

This library is a javascript LALR(1) parser that parses docblocks and extracts
annotations into a structured syntax tree.

# Install

```
```sh
npm install parsedoc --save
```

A simple usage example:

```js
var ParseDoc = require('parsedoc');
var reader = new ParseDoc();
var data = reader.parse('/** @hello world */');
```


# Supported syntaxes

```php
/**
* Some description
* @return boolean
* @return map<string, SomeClass>
* @author Ioan CHIRIAC <me@domain.com>
* @throws Exception
* @deprecated
* @table('tableName', true)
* @table(
* name='tableName',
* primary=true
* )
* @annotation1 @annotation2
* @Target(["METHOD", "PROPERTY"])
* @Attributes([
* @Attribute("stringProperty", type = "string"),
* @Attribute("annotProperty", type = "SomeAnnotationClass"),
* ])
* @json({
* "key": "value",
* "object": { "inner": true },
* "list": [1, 2, 3]
* })
* <node>
* Some inner multi line content
* </node>
*/
```

# AST structure

```js

```

# Misc

This library is released under the BSD 3-Clause license.
38 changes: 20 additions & 18 deletions gruntfile.js
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
module.exports = function(grunt) {

/**
* The automated build configuration
*/
module.exports = function (grunt) {
// Project configuration.
grunt.initConfig({
pkg: grunt.file.readJSON('package.json'),
Expand All @@ -19,11 +21,11 @@ module.exports = function(grunt) {
documentation: {
parser: {
options: {
destination: "docs/",
format: "md",
version: "<%= pkg.version %>",
name: "<%= pkg.name %>",
filename: "parser.md",
destination: 'docs/',
format: 'md',
version: '<%= pkg.version %>',
name: '<%= pkg.name %>',
filename: 'parser.md',
shallow: false
},
files: [{
Expand All @@ -32,11 +34,11 @@ module.exports = function(grunt) {
},
lexer: {
options: {
destination: "docs/",
format: "md",
version: "<%= pkg.version %>",
name: "<%= pkg.name %>",
filename: "lexer.md",
destination: 'docs/',
format: 'md',
version: '<%= pkg.version %>',
name: '<%= pkg.name %>',
filename: 'lexer.md',
shallow: false
},
files: [{
Expand All @@ -45,11 +47,11 @@ module.exports = function(grunt) {
},
main: {
options: {
destination: "docs/",
format: "md",
version: "<%= pkg.version %>",
name: "<%= pkg.name %>",
filename: "README.md",
destination: 'docs/',
format: 'md',
version: '<%= pkg.version %>',
name: '<%= pkg.name %>',
filename: 'README.md',
shallow: true
},
files: [{
Expand All @@ -60,6 +62,7 @@ module.exports = function(grunt) {
uglify: {
options: {
compress: {
// eslint-disable-next-line camelcase
keep_fnames: true
},
sourceMap: true,
Expand All @@ -81,5 +84,4 @@ module.exports = function(grunt) {
// Default task(s).
grunt.registerTask('default', ['browserify', 'uglify']);
grunt.registerTask('doc', ['documentation']);

};
6 changes: 5 additions & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@
"type": "git",
"url": "git+https://github.com/glayzzle/docblock-parser.git"
},
"xo": {
"space": 2,
"envs": ["node", "mocha"]
},
"keywords": [
"doc",
"parser",
Expand All @@ -36,4 +40,4 @@
"should": "^11.1.2",
"xo": "^0.17.1"
}
}
}
11 changes: 5 additions & 6 deletions src/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,7 @@
* @authors https://github.com/glayzzle/docblock-parser/graphs/contributors
* @url http://glayzzle.com/docblock-parser
*/

"use strict";
'use strict';

var token = require('./token');
var Lexer = require('./lexer');
Expand All @@ -16,18 +15,18 @@ var Parser = require('./parser');
* @property {Lexer} lexer
* @property {Parser} parser
*/
var API = function() {
var API = function (grammar) {
this.token = token;
this.lexer = new Lexer(this.token);
this.parser = new Parser(this.lexer);
this.parser = new Parser(this.lexer, grammar);
};

/**
* Parsing the specified input
* @return {Array} AST
*/
API.prototype.parse = function(input) {
API.prototype.parse = function (input) {
return this.parser.parse(input);
}
};

module.exports = API;
81 changes: 41 additions & 40 deletions src/lexer.js
Original file line number Diff line number Diff line change
Expand Up @@ -3,20 +3,18 @@
* @authors https://github.com/glayzzle/docblock-parser/graphs/contributors
* @url http://glayzzle.com/docblock-parser
*/

"use strict";
'use strict';

/**
* @constructor Lexer
* @property {String} text Current parsed text (attached to current token)
* @property {Number} offset Current offset
* @property {String|Number} token Current parsed token
*/
var Lexer = function(tokens) {
var Lexer = function (tokens) {
this._t = tokens;
};


// breaking symbols
var lexerSymbols = [
',', '=', ':', '(', ')', '[', ']', '{', '}', '@'
Expand All @@ -28,32 +26,31 @@ var lexerWhiteSpace = [' ', '\t', '\r', '\n'];
/**
* Initialize the lexer with specified text
*/
Lexer.prototype.read = function(input) {
Lexer.prototype.read = function (input) {
this._input = input;
this.offset = 0;
this.text = "";
this.text = '';
this.token = null;
};

/**
* Consumes a char
* @return {String}
*/
Lexer.prototype.input = function() {
Lexer.prototype.input = function () {
if (this.offset < this._input.length) {
this.ch = this._input[this.offset++];
this.text += this.ch;
return this.ch;
} else {
return null;
}
return null;
};

/**
* Revert back the current consumed char
* @return {void}
*/
Lexer.prototype.unput = function() {
Lexer.prototype.unput = function () {
this.offset--;
this.text = this.text.substring(0, this.text.length - 1);
};
Expand All @@ -62,7 +59,7 @@ Lexer.prototype.unput = function() {
* Revert back the current consumed token
* @return {String|Number} the previous token
*/
Lexer.prototype.unlex = function() {
Lexer.prototype.unlex = function () {
this.offset = this.__offset;
this.text = this.__text;
this.token = this.__token;
Expand All @@ -73,7 +70,7 @@ Lexer.prototype.unlex = function() {
* Consumes the next token (ignores whitespaces)
* @return {String|Number} the current token
*/
Lexer.prototype.lex = function() {
Lexer.prototype.lex = function () {
// backup
this.__offset = this.offset;
this.__text = this.text;
Expand All @@ -91,11 +88,12 @@ Lexer.prototype.lex = function() {
* Eats a token (see lex for public usage) including whitespace
* @return {String|Number} the current token
*/
Lexer.prototype.next = function() {
this.text = "";
Lexer.prototype.next = function () {
this.text = '';
var ch = this.input();
if (ch === null) return this._t.T_EOF;
if (ch === '"' || ch === "'") {
if (ch === null) {
return this._t.T_EOF;
} else if (ch === '"' || ch === '\'') {
var tKey = ch;
do {
ch = this.input();
Expand All @@ -105,9 +103,9 @@ Lexer.prototype.next = function() {
} while (ch !== tKey && this.offset < this._input.length);
return this._t.T_TEXT;
} else if (lexerSymbols.indexOf(ch) > -1) {
if (ch === ':')
if (ch === ':') {
ch = '=>'; // alias
if (ch === '=' && this._input[this.offset] === '>') {
} else if (ch === '=' && this._input[this.offset] === '>') {
ch += this.input();
}
return ch;
Expand All @@ -116,32 +114,35 @@ Lexer.prototype.next = function() {
while (lexerWhiteSpace.indexOf(ch) > -1) {
ch = this.input();
}
if (ch !== null) this.unput();
if (ch !== null) {
this.unput();
}
return this._t.T_WHITESPACE;
} else {
ch = ch.charCodeAt(0);
if (ch > 47 && ch < 58) {
while (ch > 47 && ch < 58 && ch !== null) {
ch = this.input();
if (ch !== null)
ch = ch.charCodeAt(0);
}
ch = ch.charCodeAt(0);
if (ch > 47 && ch < 58) {
while (ch > 47 && ch < 58 && ch !== null) {
ch = this.input();
if (ch !== null) {
ch = ch.charCodeAt(0);
}
if (ch !== null) this.unput();
return this._t.T_NUM;
} else {
do {
ch = this.input();
if (
lexerSymbols.indexOf(ch) > -1 ||
lexerWhiteSpace.indexOf(ch) > -1
) {
this.unput();
break;
}
} while (this.offset < this._input.length);
return this._t.T_STRING;
}
if (ch !== null) {
this.unput();
}
return this._t.T_NUM;
}
do {
ch = this.input();
if (
lexerSymbols.indexOf(ch) > -1 ||
lexerWhiteSpace.indexOf(ch) > -1
) {
this.unput();
break;
}
} while (this.offset < this._input.length);
return this._t.T_STRING;
};

// exports
Expand Down
Loading

0 comments on commit caa80e6

Please sign in to comment.