// Wraps the stock PostCSS tokenizer so that brackets, comma-separated words,
// and words containing arithmetic operators are re-emitted as finer-grained
// tokens (parens, `comma`, `operator`, `word`).
const Input = require('postcss/lib/input');
const tokenizer = require('postcss/lib/tokenize');

// Arithmetic operators that should surface as standalone `operator` tokens.
const operators = ['*', '-', '%', '+', '/'];
// Captures `*` and `/` so that split() keeps the delimiters. Non-global, so
// `.test()` does not carry `lastIndex` state between calls.
const operRegex = /([*/])/;

// Re-tokenizes the contents of a `brackets` token (e.g. `(100% - 20px)`) and
// pushes the result back onto the tokenizer, wrapped in explicit `(` and `)`
// tokens.
const brackets = (token, tokenize) => {
  const [, , startLine, startChar, endLine, endChar] = token;
  // Strip the surrounding parens and tokenize what is inside them.
  const part = token[1].slice(1, token[1].length - 1);
  const subTokens = getTokens(part);

  // Shift the sub-token positions so they are relative to the original source
  // rather than to the extracted bracket contents.
  for (const sub of subTokens) {
    if (sub[0] !== 'space') {
      const length = sub[5] - sub[3];
      sub[2] = startLine;
      sub[3] += startChar;
      sub[4] += endLine - 1;
      sub[5] = sub[3] + length;
    }
  }

  const tokens = [['(', '(', startLine, startChar, startLine, startChar], ...subTokens];
  tokens.push([')', ')', startLine, endChar, endLine, endChar]);

  // Push back in reverse so the tokenizer replays them in original order.
  for (const tokn of tokens.reverse()) {
    tokenize.back(tokn);
  }
};

// Splits a word token containing commas (e.g. `10px,20px`) into alternating
// `word` and `comma` tokens, then pushes them back onto the tokenizer.
const comma = (token, tokenize) => {
  const bits = token[1].split(/([,])/);
  const tokens = [];
  const [, , startLine, , endLine] = token;
  let [, , , startChar, , endChar] = token;

  for (let bit of bits) {
    bit = bit || ',';
    const name = bit === ',' ? 'comma' : 'word';

    // Each subsequent bit starts one column after the previous bit ends.
    if (bit !== bits[0]) {
      startChar = endChar + 1;
    }

    endChar = startChar + bit.length - 1;

    tokens.push([name, bit, startLine, startChar, endLine, endChar]);
  }

  // Push back in reverse so the tokenizer replays them in original order.
  for (const tokn of tokens.reverse()) {
    tokenize.back(tokn);
  }
};

// Tokenizes an arbitrary string with the wrapped tokenizer and returns the
// full token list.
const getTokens = (what) => {
  const input = new Input(what, {});
  const tokenize = wrapTokenizer(input);
  const result = [];

  while (!tokenize.endOfFile()) {
    const token = tokenize.nextToken();
    result.push(token);
  }

  return result;
};

// Splits a word token containing `*` or `/` (e.g. `100px/2`) into `word` and
// `operator` tokens, then pushes them back onto the tokenizer.
const operator = (token, tokenize) => {
  const [, value, startLine, , endLine, endChar] = token;
  const parts = value.split(operRegex);
  let [, , , startChar] = token;

  const tokens = parts.map((part) => {
    const type = operators.includes(part) ? 'operator' : 'word';
    const newToken = [type, part, startLine, startChar, endLine, endChar];

    startChar += part.length;

    return newToken;
  });

  // Push back in reverse so the tokenizer replays them in original order.
  for (const tokn of tokens.reverse()) {
    tokenize.back(tokn);
  }
};

// Wraps a PostCSS tokenizer instance, intercepting `nextToken` to break
// brackets, operator-bearing words, and comma-separated words into
// finer-grained tokens.
const wrapTokenizer = (...args) => {
  const tokenize = tokenizer(...args);
  const ogNextToken = tokenize.nextToken;

  tokenize.nextToken = (...nextArgs) => {
    let token = ogNextToken(...nextArgs);

    if (!token) {
      return token;
    }

    const [type, value] = token;

    if (type === 'brackets') {
      // Replay the bracket contents as individual tokens, then re-read.
      brackets(token, tokenize);
      token = ogNextToken(...nextArgs);
    } else if (type === 'word') {
      if (operators.includes(value)) {
        // The whole word is a single operator (e.g. `+`).
        token[0] = 'operator';
      } else if (operRegex.test(value)) {
        // The word contains `*` or `/`; split it into operand/operator tokens.
        operator(token, tokenize);
        token = ogNextToken(...nextArgs);
      } else if (value.length > 1 && value.includes(',')) {
        // The word contains commas; split it into word/comma tokens.
        comma(token, tokenize);
        token = ogNextToken(...nextArgs);
      }
    }

    return token;
  };

  return tokenize;
};

module.exports = { getTokens, tokenizer: wrapTokenizer };
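
// Usage sketch (illustrative, not part of the original module): assuming a
// PostCSS install that exposes the `postcss/lib/input` and
// `postcss/lib/tokenize` internals required above, running this file directly
// prints the tokens for a sample value. The exact tuple shape (line/column vs.
// offset positions) depends on the installed PostCSS version's tokenizer.
if (require.main === module) {
  console.log(getTokens('calc(100% - 20px), red'));
}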