// Lexer test suite bootstrap. The tests run in two environments:
//  - Node (CommonJS): `require` exists, so modules are loaded from disk.
//  - Browser: scripts were loaded via <script> tags, so the assertion
//    library hangs off `window` and the nunjucks modules come from the
//    bundle's own module registry (`nunjucks.require`).
(function() {
var expect, lib, lexer;

if(typeof require != 'undefined') {
    // Node: resolve expect.js and the library sources relative to /tests.
    expect = require('expect.js');
    lib = require('../src/lib');
    lexer = require('../src/lexer');
}
else {
    // Browser: globals were installed by previously loaded scripts.
    expect = window.expect;
    lib = nunjucks.require('lib');
    lexer = nunjucks.require('lexer');
}
|
14 |
|
// Pull tokens off `tokens` (a lexer token stream) and compare them, in
// order, against `types`. Each entry of `types` is either a bare token
// type, or a [type, value] pair when the token's value must match too.
// When `ws` is false, whitespace tokens are skipped before each check.
function _hasTokens(ws, tokens, types) {
    var idx;
    for(idx = 0; idx < types.length; idx++) {
        var expected = types[idx];
        var tok = tokens.nextToken();

        // Unless whitespace is significant for this assertion, discard
        // any run of whitespace tokens before comparing.
        while(!ws && tok && tok.type == lexer.TOKEN_WHITESPACE) {
            tok = tokens.nextToken();
        }

        if(!lib.isArray(expected)) {
            expect(tok.type).to.be(expected);
        }
        else {
            expect(tok.type).to.be(expected[0]);
            expect(tok.value).to.be(expected[1]);
        }
    }
}
|
35 |
|
// Assert that the stream yields the given token types (whitespace
// tokens are ignored). Extra arguments after `tokens` are the expected
// types, each either a type constant or a [type, value] pair.
function hasTokens(tokens /*, types */) {
    var expected = lib.toArray(arguments).slice(1);
    return _hasTokens(false, tokens, expected);
}
|
39 |
|
// Same as hasTokens, but whitespace tokens are significant and must be
// listed explicitly among the expected types.
function hasTokensWithWS(tokens /*, types */) {
    var expected = lib.toArray(arguments).slice(1);
    return _hasTokens(true, tokens, expected);
}
|
43 |
|
// Token-level tests for the nunjucks lexer. Fix: three `it(...)` calls
// were terminated with `}),` — the comma operator chained them into the
// following statement, which worked only by accident and would silently
// drop a test if one were ever moved to trailing position. They are now
// proper `;`-terminated statements.
describe('lexer', function() {
    var tok, tmpl, tokens;

    it('should parse template data', function() {
        tok = lexer.lex('3').nextToken();
        expect(tok.type).to.be(lexer.TOKEN_DATA);
        expect(tok.value).to.be('3');

        tmpl = 'foo bar bizzle 3 [1,2] !@#$%^&*()<>?:"{}|';
        tok = lexer.lex(tmpl).nextToken();
        expect(tok.type).to.be(lexer.TOKEN_DATA);
        expect(tok.value).to.be(tmpl);
    });

    it('should keep track of whitespace', function() {
        tokens = lexer.lex('data {% 1 2\n 3 %} data');
        hasTokensWithWS(tokens,
                        lexer.TOKEN_DATA,
                        lexer.TOKEN_BLOCK_START,
                        [lexer.TOKEN_WHITESPACE, ' '],
                        lexer.TOKEN_INT,
                        [lexer.TOKEN_WHITESPACE, ' '],
                        lexer.TOKEN_INT,
                        [lexer.TOKEN_WHITESPACE, '\n '],
                        lexer.TOKEN_INT,
                        [lexer.TOKEN_WHITESPACE, ' '],
                        lexer.TOKEN_BLOCK_END,
                        lexer.TOKEN_DATA);
    });

    it('should parse variable start and end', function() {
        tokens = lexer.lex('data {{ foo }} bar bizzle');
        hasTokens(tokens,
                  lexer.TOKEN_DATA,
                  lexer.TOKEN_VARIABLE_START,
                  lexer.TOKEN_SYMBOL,
                  lexer.TOKEN_VARIABLE_END,
                  lexer.TOKEN_DATA);
    });

    it('should parse block start and end', function() {
        tokens = lexer.lex('data {% foo %} bar bizzle');
        hasTokens(tokens,
                  lexer.TOKEN_DATA,
                  lexer.TOKEN_BLOCK_START,
                  lexer.TOKEN_SYMBOL,
                  lexer.TOKEN_BLOCK_END,
                  lexer.TOKEN_DATA);
    });

    it('should parse basic types', function() {
        tokens = lexer.lex('{{ 3 4.5 true false foo "hello" \'boo\' }}');
        hasTokens(tokens,
                  lexer.TOKEN_VARIABLE_START,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_FLOAT,
                  lexer.TOKEN_BOOLEAN,
                  lexer.TOKEN_BOOLEAN,
                  lexer.TOKEN_SYMBOL,
                  lexer.TOKEN_STRING,
                  lexer.TOKEN_STRING,
                  lexer.TOKEN_VARIABLE_END);
    });

    it('should parse function calls', function() {
        tokens = lexer.lex('{{ foo(bar) }}');
        hasTokens(tokens,
                  lexer.TOKEN_VARIABLE_START,
                  [lexer.TOKEN_SYMBOL, 'foo'],
                  lexer.TOKEN_LEFT_PAREN,
                  [lexer.TOKEN_SYMBOL, 'bar'],
                  lexer.TOKEN_RIGHT_PAREN,
                  lexer.TOKEN_VARIABLE_END);
    });

    it('should parse groups', function() {
        tokens = lexer.lex('{{ (1, 2, 3) }}');
        hasTokens(tokens,
                  lexer.TOKEN_VARIABLE_START,
                  lexer.TOKEN_LEFT_PAREN,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_COMMA,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_COMMA,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_RIGHT_PAREN,
                  lexer.TOKEN_VARIABLE_END);
    });

    it('should parse arrays', function() {
        tokens = lexer.lex('{{ [1, 2, 3] }}');
        hasTokens(tokens,
                  lexer.TOKEN_VARIABLE_START,
                  lexer.TOKEN_LEFT_BRACKET,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_COMMA,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_COMMA,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_RIGHT_BRACKET,
                  lexer.TOKEN_VARIABLE_END);
    });

    it('should parse dicts', function() {
        tokens = lexer.lex('{{ {one:1, "two":2} }}');
        hasTokens(tokens,
                  lexer.TOKEN_VARIABLE_START,
                  lexer.TOKEN_LEFT_CURLY,
                  [lexer.TOKEN_SYMBOL, 'one'],
                  lexer.TOKEN_COLON,
                  [lexer.TOKEN_INT, '1'],
                  lexer.TOKEN_COMMA,
                  [lexer.TOKEN_STRING, 'two'],
                  lexer.TOKEN_COLON,
                  [lexer.TOKEN_INT, '2'],
                  lexer.TOKEN_RIGHT_CURLY,
                  lexer.TOKEN_VARIABLE_END);
    });

    it('should parse blocks without whitespace', function() {
        tokens = lexer.lex('data{{hello}}{%if%}data');
        hasTokens(tokens,
                  lexer.TOKEN_DATA,
                  lexer.TOKEN_VARIABLE_START,
                  [lexer.TOKEN_SYMBOL, 'hello'],
                  lexer.TOKEN_VARIABLE_END,
                  lexer.TOKEN_BLOCK_START,
                  [lexer.TOKEN_SYMBOL, 'if'],
                  lexer.TOKEN_BLOCK_END,
                  lexer.TOKEN_DATA);
    });

    it('should parse filters', function() {
        hasTokens(lexer.lex('{{ foo|bar }}'),
                  lexer.TOKEN_VARIABLE_START,
                  [lexer.TOKEN_SYMBOL, 'foo'],
                  lexer.TOKEN_PIPE,
                  [lexer.TOKEN_SYMBOL, 'bar'],
                  lexer.TOKEN_VARIABLE_END);
    });

    it('should parse operators', function() {
        hasTokens(lexer.lex('{{ 3+3-3*3/3 }}'),
                  lexer.TOKEN_VARIABLE_START,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_VARIABLE_END);

        hasTokens(lexer.lex('{{ 3**4//5 }}'),
                  lexer.TOKEN_VARIABLE_START,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_VARIABLE_END);

        hasTokens(lexer.lex('{{ 3 != 4 == 5 <= 6 >= 7 < 8 > 9 }}'),
                  lexer.TOKEN_VARIABLE_START,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_OPERATOR,
                  lexer.TOKEN_INT,
                  lexer.TOKEN_VARIABLE_END);
    });

    it('should parse comments', function() {
        tokens = lexer.lex('data data {# comment #} data');
        hasTokens(tokens,
                  lexer.TOKEN_DATA,
                  lexer.TOKEN_COMMENT,
                  lexer.TOKEN_DATA);
    });
});
|
234 | })();
|