/*
  Copyright © 2018 Andrew Powell

  This Source Code Form is subject to the terms of the Mozilla Public
  License, v. 2.0. If a copy of the MPL was not distributed with this
  file, You can obtain one at http://mozilla.org/MPL/2.0/.

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of this Source Code Form.
*/
const Input = require('postcss/lib/input');
const tokenizer = require('postcss/lib/tokenize');

const operators = ['*', '-', '%', '+', '/'];
const operRegex = /([*/])/g;

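// Splits a postcss `brackets` token into an explicit `(` token, the
// re-tokenized contents of the brackets, and a closing `)` token, then
// pushes them back onto the tokenizer so they are emitted individually.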
const brackets = (token, tokenize) => {
  const [, , startLine, startChar, endLine, endChar] = token;
  const part = token[1].slice(1, token[1].length - 1);
  const subTokens = getTokens(part); // eslint-disable-line no-use-before-define

  // adjust line position numbers
  for (const sub of subTokens) {
    if (sub[0] !== 'space') {
      const length = sub[5] - sub[3];
      sub[2] = startLine;
      sub[3] += startChar;
      sub[4] += endLine - 1;
      sub[5] = sub[3] + length;
    }
  }

  const tokens = [['(', '(', startLine, startChar, startLine, startChar], ...subTokens];
  tokens.push([')', ')', startLine, endChar, endLine, endChar]);

  for (const tokn of tokens.reverse()) {
    tokenize.back(tokn);
  }
};

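// Splits a `word` token containing commas (e.g. `10px,20px`) into separate
// `word` and `comma` tokens and pushes them back onto the tokenizer.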
const comma = (token, tokenize) => {
  const bits = token[1].split(/([,])/);
  const tokens = [];
  const [, , startLine, , endLine] = token;
  let [, , , startChar, , endChar] = token;

  for (let bit of bits) {
    bit = bit || ',';
    const name = bit === ',' ? 'comma' : 'word';

    if (bit !== bits[0]) {
      startChar = endChar + 1;
    }

    endChar = startChar + bit.length - 1;

    tokens.push([name, bit, startLine, startChar, endLine, endChar]);
  }

  for (const tokn of tokens.reverse()) {
    tokenize.back(tokn);
  }
};

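// Tokenizes an arbitrary string with the wrapped tokenizer and returns the
// resulting tokens as an array.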
const getTokens = (what) => {
  const input = new Input(what, {});
  const tokenize = wrapTokenizer(input); // eslint-disable-line no-use-before-define
  const result = [];

  // this shouldn't ever be slow as the string being tokenized will always be small
  while (!tokenize.endOfFile()) {
    const token = tokenize.nextToken();
    result.push(token);
  }

  return result;
};

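// Splits a `word` token containing `*` or `/` (e.g. `10px/2`) into separate
// `word` and `operator` tokens and pushes them back onto the tokenizer.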
const operator = (token, tokenize) => {
  const [, value, startLine, , endLine, endChar] = token;
  const parts = value.split(operRegex);
  let [, , , startChar] = token;

  const tokens = parts.map((part) => {
    const type = operators.includes(part) ? 'operator' : 'word';
    const newToken = [type, part, startLine, startChar, endLine, endChar];

    startChar += part.length;

    return newToken;
  });

  for (const tokn of tokens.reverse()) {
    tokenize.back(tokn);
  }
};

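// Wraps the stock postcss tokenizer, overriding `nextToken` so that brackets,
// operators, and comma-separated words are broken out into finer-grained
// tokens before being returned.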
const wrapTokenizer = (...args) => {
  const tokenize = tokenizer(...args);
  const ogNextToken = tokenize.nextToken;

  tokenize.nextToken = (...nextArgs) => {
    let token = ogNextToken(...nextArgs);

    if (!token) {
      return token;
    }

    const [type, value] = token;

    // TODO: need to adjust the line/char offsets
    if (type === 'brackets') {
      brackets(token, tokenize);
      token = ogNextToken(...nextArgs);
    } else if (type === 'word') {
      if (operators.includes(value)) {
        token[0] = 'operator';
      } else if (operRegex.test(value)) {
        operator(token, tokenize);
        token = ogNextToken(...nextArgs);
      } else if (value.length > 1 && value.includes(',')) {
        comma(token, tokenize);
        token = ogNextToken(...nextArgs);
      }
    }

    return token;
  };

  return tokenize;
};

module.exports = { getTokens, tokenizer: wrapTokenizer };
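
// A minimal usage sketch (illustrative only; the local path `./tokenize` and
// the exact token positions are assumptions, not part of the module):
//
//   const { getTokens } = require('./tokenize');
//
//   getTokens('10px / 2');
//   // -> roughly: word `10px`, space, operator `/`, space, word `2`
//
//   getTokens('calc(1px + 2px)');
//   // -> roughly: word `calc`, `(`, word `1px`, space, operator `+`,
//   //    space, word `2px`, `)`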