// ReSharper disable InconsistentNaming
namespace dotless.Core.Parser
{
    using System.Collections.Generic;
    using Exceptions;
    using Importers;
    using Infrastructure;
    using Stylizers;
    using Tree;
    //
    // less.js - parser
    //
    // A relatively straight-forward predictive parser.
    // There is no tokenization/lexing stage; the input is parsed
    // in one sweep.
    //
    // To make the parser fast enough to run in the browser, several
    // optimizations had to be made:
    //
    //   - Instead of the more commonly used technique of slicing the
    //     input string on every match, we use global regexps (/g)
    //     and move the `lastIndex` pointer on match, foregoing `slice()`
    //     completely. This gives us a 3x speed-up.
    //
    //   - Matching on a huge input is often a cause of slowdowns.
    //     The solution is to chunkify the input into smaller strings.
    //
    //   - In many cases, we don't need to match individual tokens;
    //     for example, if a value doesn't hold any variables, operations
    //     or dynamic references, the parser can effectively 'skip' it,
    //     treating it as a literal.
    //     An example would be '1px solid #000', which evaluates to itself;
    //     we don't need to know what the individual components are.
    //     The drawback, of course, is that you don't get the benefit of
    //     syntax-checking the CSS. This gives us a 50% speed-up in the
    //     parser, and a smaller speed-up in the code-gen.
    //
    // Token matching is done with the `Match` function, which either takes
    // a terminal string or regexp, or a non-terminal function to call.
    // It also takes care of moving all the indices forward (see the
    // illustrative sketch below this comment).
    //
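    // The sketch below is illustrative only and not part of the original
    // source: it shows the same "advance an index, never slice" matching
    // technique in C#, where Regex.Match(input, startat) plays the role of
    // a /g regexp's lastIndex pointer. The class and member names here are
    // hypothetical.
    internal static class MatchingSketch
    {
        // Tries `pattern` at the current position; on success, returns the
        // matched text and advances `index` past it instead of slicing the
        // remaining input.
        public static string MatchAt(string input, ref int index,
                                     System.Text.RegularExpressions.Regex pattern)
        {
            var match = pattern.Match(input, index);
            if (!match.Success || match.Index != index)
                return null; // must match exactly at the current position

            index += match.Length; // move the pointer forward, no slice()
            return match.Value;
        }
    }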
    public class Parser
    {
        public Tokenizer Tokenizer { get; set; }
        public IStylizer Stylizer { get; set; }

        private Importer _importer;

        // Assigning the importer also hands it a parser factory, so that
        // any @import'ed file is parsed with the same optimization level,
        // stylizer and importer as the current parser.
        public Importer Importer
        {
            get { return _importer; }
            set
            {
                _importer = value;
                _importer.Parser = () => new Parser(Tokenizer.Optimization, Stylizer, _importer);
            }
        }

        private const int defaultOptimization = 1;

        public Parser()
            : this(defaultOptimization)
        {
        }

        public Parser(int optimization)
            : this(optimization, new PlainStylizer(), new Importer())
        {
        }

        public Parser(IStylizer stylizer, Importer importer)
            : this(defaultOptimization, stylizer, importer)
        {
        }

        // All other overloads funnel into this constructor.
        public Parser(int optimization, IStylizer stylizer, Importer importer)
        {
            Stylizer = stylizer;
            Importer = importer;
            Tokenizer = new Tokenizer(optimization);
        }
        public Ruleset Parse(string input, string fileName)
        {
            Tokenizer.SetupInput(input);

            ParsingException parsingException = null;
            Ruleset root = null;
            try
            {
                var parsers = new Parsers(new DefaultNodeProvider());
                root = new Root(parsers.Primary(this), e => GenerateParserError(e, fileName));
            }
            catch (ParsingException e)
            {
                parsingException = e;
            }

            if (Tokenizer.HasCompletedParsing() && parsingException == null)
                return root;

            throw GenerateParserError(parsingException, fileName);
        }
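
        // Illustrative usage sketch only, not part of the original source.
        // ToCSS(new Env()) is assumed here to be the usual dotless entry
        // point for serializing the resulting tree; adjust to the actual API
        // of your dotless version.
        private static string ExampleUsage()
        {
            var parser = new Parser();

            // Throws ParserException (with a stylized error zone) on bad input.
            Ruleset root = parser.Parse(".box { width: 1px; }", "inline.less");

            return root.ToCSS(new Env());
        }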
        private ParserException GenerateParserError(ParsingException parsingException, string fileName)
        {
            var errorLocation = Tokenizer.Location.Index;
            var error = "Parse Error";
            var call = 0;

            if (parsingException != null)
            {
                errorLocation = parsingException.Index;
                error = parsingException.Message;
                call = parsingException.Call;
            }

            var zone = Tokenizer.GetZone(error, errorLocation, call, fileName);
            var message = Stylizer.Stylize(zone);

            return new ParserException(message, parsingException);
        }
    }
}