1 | ;
|
2 | Object.defineProperty(exports, "__esModule", { value: true });
|
3 | exports.init = exports.DictTokenizer = exports.DEFAULT_MAX_CHUNK_COUNT_MIN = exports.DEFAULT_MAX_CHUNK_COUNT = void 0;
|
4 | const mod_1 = require("../mod");
|
5 | const index_1 = require("../util/index");
|
6 | const CHS_NAMES_1 = require("../mod/CHS_NAMES");
|
7 | const const_1 = require("../mod/const");
|
// Default recursion/branching budget for DictTokenizer#getChunks (see
// this.MAX_CHUNK_COUNT below); overridable via segment option `maxChunkCount`.
exports.DEFAULT_MAX_CHUNK_COUNT = 40;
// Lower bound the budget may decay to while processing long unsegmented
// passages; overridable via segment option `minChunkCount`.
exports.DEFAULT_MAX_CHUNK_COUNT_MIN = 30;
|
/**
 * Dictionary-based recognition module (字典识别模块).
 *
 * Matches runs of still-unidentified characters against the segment's word
 * dictionaries and picks the most plausible segmentation with an MMSEG-like
 * chunk-scoring scheme.
 *
 * @author 老雷<leizongmin@gmail.com>
 */
class DictTokenizer extends mod_1.SubSModuleTokenizer {
    constructor() {
        super(...arguments);
        /**
         * Guards against analysis running too long (or exhausting memory)
         * when the input has no natural segment breaks. Higher values are
         * more precise, but processing time grows multiplicatively and can
         * exceed what memory can handle; smaller values are faster.
         *
         * Known failure mode this protects against:
         * FATAL ERROR: CALL_AND_RETRY_LAST Allocation failed - JavaScript heap out of memory
         *
         * @type {number}
         */
        this.MAX_CHUNK_COUNT = exports.DEFAULT_MAX_CHUNK_COUNT;
        /**
         * Newer mode: MAX_CHUNK_COUNT is decremented as getChunks recurses,
         * capping total work on long unsegmented passages; this value is the
         * lower bound of that decay.
         */
        this.DEFAULT_MAX_CHUNK_COUNT_MIN = exports.DEFAULT_MAX_CHUNK_COUNT_MIN;
    }
    /**
     * Caches the dictionary tables and POS-tag map, and applies the optional
     * `maxChunkCount` / `minChunkCount` segment options (each honored only
     * when it is a number greater than DEFAULT_MAX_CHUNK_COUNT_MIN).
     */
    _cache() {
        super._cache();
        this._TABLE = this.segment.getDict('TABLE');
        this._TABLE2 = this.segment.getDict('TABLE2');
        this._POSTAG = this.segment.POSTAG;
        if (typeof this.segment.options.maxChunkCount == 'number' && this.segment.options.maxChunkCount > exports.DEFAULT_MAX_CHUNK_COUNT_MIN) {
            this.MAX_CHUNK_COUNT = this.segment.options.maxChunkCount;
        }
        if (typeof this.segment.options.minChunkCount == 'number' && this.segment.options.minChunkCount > exports.DEFAULT_MAX_CHUNK_COUNT_MIN) {
            this.DEFAULT_MAX_CHUNK_COUNT_MIN = this.segment.options.minChunkCount;
        }
    }
    /**
     * Tokenizes words that are still unidentified (对未识别的单词进行分词).
     *
     * @param {array} words - token array; entries with p > 0 are already tagged
     * @return {array} new token array with dictionary matches spliced in
     */
    split(words) {
        //debug(words);
        const TABLE = this._TABLE;
        //const POSTAG = this._POSTAG;
        const self = this;
        let ret = [];
        for (let i = 0, word; word = words[i]; i++) {
            if (word.p > 0) {
                // Already POS-tagged: pass through untouched.
                ret.push(word);
                continue;
            }
            // Only attempt dictionary matching on unidentified words.
            let wordinfo = this.matchWord(word.w, 0, words[i - 1]);
            if (wordinfo.length < 1) {
                ret.push(word);
                continue;
            }
            // Splice the recognized words back in, keeping unmatched gaps
            // between matches as raw (untagged) tokens.
            let lastc = 0;
            wordinfo.forEach(function (bw, ui) {
                if (bw.c > lastc) {
                    // Unmatched gap before this match.
                    ret.push({
                        w: word.w.substr(lastc, bw.c - lastc),
                    });
                }
                let cw = self.createRawToken({
                    w: bw.w,
                    f: bw.f,
                }, TABLE[bw.w]);
                ret.push(cw);
                lastc = bw.c + bw.w.length;
            });
            // Trailing unmatched text after the last match.
            let lastword = wordinfo[wordinfo.length - 1];
            if (lastword.c + lastword.w.length < word.w.length) {
                let cw = self.createRawToken({
                    w: word.w.substr(lastword.c + lastword.w.length),
                });
                ret.push(cw);
            }
        }
        words = undefined;
        return ret;
    }
    // =================================================================
    /**
     * Collects every dictionary word that can start at each position of the
     * text, then defers to filterWord() to pick the best segmentation
     * (匹配单词, 返回相关信息).
     *
     * @param {string} text - text to match
     * @param {int} cur - start offset (NaN is treated as 0)
     * @param {object} preword - previous token, used by grammar scoring
     * @return {array} entries of the form {w: word, c: start offset, f: frequency}
     */
    matchWord(text, cur, preword) {
        if (isNaN(cur))
            cur = 0;
        let ret = [];
        let s = false; // NOTE(review): appears unused in this method.
        const TABLE2 = this._TABLE2;
        // Collect all candidate words. TABLE2 is indexed by word length
        // (string keys from for...in), so `i` is a length and TABLE2[i]
        // maps words of that length to their dictionary entries.
        while (cur < text.length) {
            for (let i in TABLE2) {
                let w = text.substr(cur, i);
                if (w in TABLE2[i]) {
                    ret.push({
                        w: w,
                        c: cur,
                        f: TABLE2[i][w].f,
                    });
                }
            }
            cur++;
        }
        return this.filterWord(ret, preword, text);
    }
    /**
     * Chooses the most plausible word sequence (选择最有可能匹配的单词).
     *
     * @param {array} words - candidate words from matchWord()
     * @param {object} preword - previous token (may be undefined)
     * @param {string} text - the text being segmented
     * @return {array}
     */
    filterWord(words, preword, text) {
        const TABLE = this._TABLE;
        const POSTAG = this._POSTAG;
        let ret = [];
        // Group candidate words by start position.
        let wordpos = this.getPosInfo(words, text);
        //debug(wordpos);
        /**
         * MMSEG-like disambiguation: enumerate all possible segmentations
         * ("chunks") and score each on:
         *   x. fewest words;
         *   a. highest average word frequency;
         *   b. smallest standard deviation of word lengths;
         *   c. fewest unrecognized words;
         *   d. grammar-structure score (e.g. classifier after numeral gains
         *      points);
         * then take the chunk with the best combined ranking.
         */
        let chunks = this.getChunks(wordpos, 0, text);
        //debug(chunks);
        let assess = []; // score table, one entry per chunk
        //console.log(chunks);
        // Evaluate each candidate chunk.
        for (let i = 0, chunk; chunk = chunks[i]; i++) {
            assess[i] = {
                x: chunk.length,
                a: 0,
                b: 0,
                c: 0,
                d: 0,
                index: i,
            };
            // Expected average word length for this chunk.
            let sp = text.length / chunk.length;
            // Whether the chunk contains a verb (common sentence structure).
            let has_D_V = false;
            // Walk the chunk's words, tracking the previous word in `prew`.
            let prew;
            if (preword) {
                prew = this.createRawToken(preword);
            }
            else {
                prew = null;
            }
            for (let j = 0, w; w = chunk[j]; j++) {
                if (w.w in TABLE) {
                    w.p = TABLE[w.w].p;
                    assess[i].a += w.f; // accumulate total frequency
                    if (j === 0 && !preword && (w.p & POSTAG.D_V)) {
                        // Count the very first word toward verb detection too.
                        has_D_V = true;
                    }
                    // ================ grammar-structure checks ===================
                    if (prew) {
                        // Numeral + classifier/unit (or datetime word): bonus.
                        if ((prew.p & POSTAG.A_M)
                            &&
                                ((w.p & POSTAG.A_Q)
                                    || w.w in const_1.DATETIME)) {
                            assess[i].d++;
                        }
                        // Current word is a verb.
                        if (w.p & POSTAG.D_V) {
                            has_D_V = true;
                            // Adverb + verb: bonus.
                            if (prew.p & POSTAG.D_D) {
                                assess[i].d++;
                            }
                        }
                        // Place name, organization name or adjective followed
                        // by a noun-like word: bonus.
                        if (((prew.p & POSTAG.A_NS)
                            || (prew.p & POSTAG.A_NT)
                            || (prew.p & POSTAG.D_A)) &&
                            ((w.p & POSTAG.D_N)
                                || (w.p & POSTAG.A_NR)
                                || (w.p & POSTAG.A_NS)
                                || (w.p & POSTAG.A_NZ)
                                || (w.p & POSTAG.A_NT))) {
                            assess[i].d++;
                        }
                        // Locative word + numeral/quantifier: bonus.
                        if ((prew.p & POSTAG.D_F)
                            &&
                                ((w.p & POSTAG.A_M)
                                    || (w.p & POSTAG.D_MQ))) {
                            //debug(prew, w);
                            assess[i].d++;
                        }
                        // Family name + noun-like word (likely a personal
                        // name): bonus.
                        if ((prew.w in CHS_NAMES_1.FAMILY_NAME_1
                            || prew.w in CHS_NAMES_1.FAMILY_NAME_2) &&
                            ((w.p & POSTAG.D_N)
                                || (w.p & POSTAG.A_NZ))) {
                            //debug(prew, w);
                            assess[i].d++;
                        }
                        /**
                         * Place/locale word followed by a locative: small bonus
                         * (地名/处所 + 方位).
                         */
                        if (index_1.hexAndAny(prew.p, POSTAG.D_S, POSTAG.A_NS) && index_1.hexAndAny(w.p, POSTAG.D_F)) {
                            assess[i].d += 0.5;
                        }
                        // Peek at the next word in the chunk (its POS is
                        // resolved lazily here, mutating the shared token).
                        let nextw = chunk[j + 1];
                        if (nextw) {
                            if (nextw.w in TABLE) {
                                nextw.p = TABLE[nextw.w].p;
                            }
                            let _temp_ok = true;
                            /**
                             * “的/之” followed by a noun-like word: strong bonus.
                             */
                            if ((w.w === '的' || w.w === '之')
                                && nextw.p && ((nextw.p & POSTAG.D_N)
                                || (nextw.p & POSTAG.D_V)
                                || (nextw.p & POSTAG.A_NR)
                                || (nextw.p & POSTAG.A_NS)
                                || (nextw.p & POSTAG.A_NZ)
                                || (nextw.p & POSTAG.A_NT))) {
                                assess[i].d += 1.5;
                                _temp_ok = false;
                            }
                            /**
                             * Conjunction: bonus when the surrounding words
                             * share a part of speech (前后两个词词性相同则加分).
                             */
                            else if (prew.p && (w.p & POSTAG.D_C)) {
                                let p = prew.p & nextw.p;
                                if (prew.p === nextw.p) {
                                    assess[i].d++;
                                    _temp_ok = false;
                                }
                                else if (p) {
                                    assess[i].d += 0.25;
                                    _temp_ok = false;
                                    if (p & POSTAG.D_N) {
                                        assess[i].d += 0.75;
                                    }
                                }
                            }
                            /**
                             * Pronoun + preposition: bonus.
                             * Example: 在感動的重逢中有余在的話就太過閃耀
                             */
                            if (_temp_ok && (w.p & POSTAG.D_R) && (nextw.p & POSTAG.D_P)) {
                                assess[i].d += 1;
                                _temp_ok = false;
                            }
                            if (_temp_ok && nextw.p && (w.p & POSTAG.D_P)) {
                                if (nextw.p & POSTAG.A_NR && (nextw.w.length > 1)) {
                                    assess[i].d++;
                                    if (prew.w === '的') {
                                        // “的” + preposition + personal name:
                                        // extra bonus.
                                        assess[i].d += 1;
                                        _temp_ok = false;
                                    }
                                }
                            }
                            if (_temp_ok && w.p & POSTAG.D_P) {
                                // noun + preposition + noun/verb: bonus.
                                if (index_1.hexAndAny(prew.p, POSTAG.D_N) && index_1.hexAndAny(nextw.p, POSTAG.D_N, POSTAG.D_V)) {
                                    assess[i].d++;
                                    _temp_ok = false;
                                }
                                // pronoun + preposition + pronoun: small bonus.
                                else if (index_1.hexAndAny(prew.p, POSTAG.D_R) && index_1.hexAndAny(nextw.p, POSTAG.D_R)) {
                                    assess[i].d += 0.5;
                                    _temp_ok = false;
                                }
                            }
                            // @FIXME brute-force fix for the “三天后” case:
                            // time word + 后 after a quantity.
                            if (nextw.w === '后' && w.p & POSTAG.D_T && index_1.hexAndAny(prew.p, POSTAG.D_MQ, POSTAG.A_M)) {
                                assess[i].d++;
                            }
                            // @FIXME e.g. 到湖中間后手終於能休息了 —
                            // locative word followed by 后/後.
                            else if ((nextw.w === '后'
                                || nextw.w === '後')
                                && index_1.hexAndAny(w.p, POSTAG.D_F)) {
                                assess[i].d++;
                            }
                            // 后/後 between a locative and a noun: bonus.
                            if ((w.w === '后'
                                || w.w === '後')
                                && index_1.hexAndAny(prew.p, POSTAG.D_F)
                                && index_1.hexAndAny(nextw.p, POSTAG.D_N)) {
                                assess[i].d++;
                            }
                        }
                        else {
                            let _temp_ok = true;
                            /**
                             * Chunk-final locative after a noun: bonus.
                             * Example: 她把荷包蛋摆在像是印度烤饼的面包上
                             */
                            if (_temp_ok && (w.p & POSTAG.D_F) && index_1.hexAndAny(prew.p, POSTAG.D_N)) {
                                assess[i].d += 1;
                                _temp_ok = false;
                            }
                        }
                    }
                    // ===========================================
                }
                else {
                    // Count unrecognized words.
                    assess[i].c++;
                }
                // Accumulate squared deviation from the average word length.
                assess[i].b += Math.pow(sp - w.w.length, 2);
                prew = chunk[j];
            }
            // Penalize chunks containing no verb at all.
            if (has_D_V === false)
                assess[i].d -= 0.5;
            assess[i].a = assess[i].a / chunk.length;
            assess[i].b = assess[i].b / chunk.length;
        }
        //console.dir(assess);
        // Rank the chunks and pick the winner.
        let top = this.getTops(assess);
        let currchunk = chunks[top];
        if (false) { // debug dump, intentionally disabled
            console.dir(Object.entries(chunks)
                .map(([i, chunk]) => { return { i, asses: assess[i], chunk }; }), { depth: 5 });
            console.dir({ i: top, asses: assess[top], currchunk });
        }
        // Drop words the dictionary cannot identify from the winning chunk.
        for (let i = 0, word; word = currchunk[i]; i++) {
            if (!(word.w in TABLE)) {
                currchunk.splice(i--, 1);
            }
        }
        ret = currchunk;
        // Best-effort hints to release memory early (試圖主動清除記憶體).
        assess = undefined;
        chunks = undefined;
        currchunk = undefined;
        top = undefined;
        wordpos = undefined;
        //debug(ret);
        return ret;
    }
    /**
     * Ranks the score table and returns the index of the winning chunk
     * (评价排名).
     *
     * @param {object} assess - per-chunk score entries {x, a, b, c, d}
     * @return {object} index into assess/chunks (a number, or a numeric
     *                  string when chosen inside the for...in loop)
     */
    getTops(assess) {
        //debug(assess);
        // Collect the extreme value of each metric across all chunks.
        let top = {
            x: assess[0].x,
            a: assess[0].a,
            b: assess[0].b,
            c: assess[0].c,
            d: assess[0].d,
        };
        for (let i = 1, ass; ass = assess[i]; i++) {
            if (ass.a > top.a)
                top.a = ass.a; // highest average frequency
            if (ass.b < top.b)
                top.b = ass.b; // smallest length std-dev
            if (ass.c > top.c)
                top.c = ass.c; // most unrecognized words
            if (ass.d < top.d)
                top.d = ass.d; // lowest grammar score
            if (ass.x > top.x)
                top.x = ass.x; // largest word count
        }
        //debug(top);
        // Fold the metrics into a single score per chunk.
        let tops = [];
        for (let i = 0, ass; ass = assess[i]; i++) {
            tops[i] = 0;
            // Word count: fewer is better.
            tops[i] += (top.x - ass.x) * 1.5;
            // Total frequency: higher is better.
            if (ass.a >= top.a)
                tops[i] += 1;
            // Length standard deviation: smaller is better.
            if (ass.b <= top.b)
                tops[i] += 1;
            // Unrecognized words: fewer is better.
            tops[i] += (top.c - ass.c); //debug(tops[i]);
            // Grammar-structure score: higher is better.
            tops[i] += (ass.d < 0 ? top.d + ass.d : ass.d - top.d) * 1;
            ass.score = tops[i];
            //debug(tops[i]);debug('---');
        }
        //debug(tops.join(' '));
        const old_method = false; // NOTE(review): appears unused in this method.
        // Take the highest-scoring chunk.
        let curri = 0;
        let maxs = tops[0];
        for (let i in tops) {
            let s = tops[i];
            if (s > maxs) {
                curri = i;
                maxs = s;
            }
            else if (s === maxs) {
                /**
                 * On a tie, prefer the chunk with fewer unrecognized words,
                 * higher average frequency and fewer words; if still tied,
                 * keep the current winner (如果依然同分, 則保持不變).
                 */
                let a = 0;
                let b = 0;
                if (assess[i].c < assess[curri].c) {
                    a++;
                }
                else if (assess[i].c !== assess[curri].c) {
                    b++;
                }
                if (assess[i].a > assess[curri].a) {
                    a++;
                }
                else if (assess[i].a !== assess[curri].a) {
                    b++;
                }
                if (assess[i].x < assess[curri].x) {
                    a++;
                }
                else if (assess[i].x !== assess[curri].x) {
                    b++;
                }
                if (a > b) {
                    curri = i;
                    maxs = s;
                }
            }
            //debug({ i, s, maxs, curri });
        }
        //debug('max: i=' + curri + ', s=' + tops[curri]);
        assess = undefined;
        top = undefined;
        return curri;
    }
    /**
     * Groups candidate words by start position (将单词按照位置排列).
     *
     * @param {array} words - candidate words ({w, c, f})
     * @param {string} text - the text being segmented
     * @return {object} start offset -> candidate words; positions with no
     *                  candidate are filled with a single-character word (f=0)
     */
    getPosInfo(words, text) {
        let wordpos = {};
        // Group the candidate words by their start position.
        for (let i = 0, word; word = words[i]; i++) {
            if (!wordpos[word.c]) {
                wordpos[word.c] = [];
            }
            wordpos[word.c].push(word);
        }
        // Fill uncovered positions with single-character filler words so
        // every index of the text has at least one candidate.
        for (let i = 0; i < text.length; i++) {
            if (!wordpos[i]) {
                wordpos[i] = [{ w: text.charAt(i), c: i, f: 0 }];
            }
        }
        return wordpos;
    }
    /**
     * Enumerates all candidate segmentations (取所有分支).
     *
     * @param {{[p: number]: Segment.IWord[]}} wordpos - words grouped by position
     * @param {number} pos - current absolute position
     * @param {string} text - remaining text to segment (assumed to start at pos)
     * @param {number} total_count - recursion depth so far
     * @param {number} MAX_CHUNK_COUNT - current (decaying) branching budget
     * @returns {Segment.IWord[][]}
     */
    getChunks(wordpos, pos, text, total_count = 0, MAX_CHUNK_COUNT) {
        /**
         * Newer mode: the budget starts at this.MAX_CHUNK_COUNT and is
         * decremented on each recursion step, bounded below by
         * DEFAULT_MAX_CHUNK_COUNT_MIN, so long unsegmented passages cannot
         * explode the total number of branches explored.
         */
        if (total_count === 0) {
            MAX_CHUNK_COUNT = this.MAX_CHUNK_COUNT;
            /**
             * Decay only applies when the text length reaches
             * MAX_CHUNK_COUNT; shorter texts get one extra unit of budget.
             */
            if (text.length < MAX_CHUNK_COUNT) {
                MAX_CHUNK_COUNT += 1;
            }
        }
        else if (MAX_CHUNK_COUNT <= this.MAX_CHUNK_COUNT) {
            MAX_CHUNK_COUNT = Math.max(MAX_CHUNK_COUNT - 1, this.DEFAULT_MAX_CHUNK_COUNT_MIN, exports.DEFAULT_MAX_CHUNK_COUNT_MIN);
        }
        else {
            //MAX_CHUNK_COUNT = Math.max(MAX_CHUNK_COUNT, this.DEFAULT_MAX_CHUNK_COUNT_MIN, DEFAULT_MAX_CHUNK_COUNT_MIN)
        }
        /**
         * Collapse a leading run of a repeated substring into one token
         * (忽略連字): the backreference regex matches a unit repeated six or
         * more times at the start of the text.
         *
         * Example: 啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊
         */
        let m;
        if (m = text.match(/^((.+)\2{5,})/)) {
            let s1 = text.slice(0, m[1].length);
            let s2 = text.slice(m[1].length);
            let word = {
                w: s1,
                c: pos,
                f: 0,
            };
            let _ret = [];
            if (s2 !== '') {
                // Prepend the collapsed run to every chunk of the remainder.
                let chunks = this.getChunks(wordpos, pos + s1.length, s2, total_count, MAX_CHUNK_COUNT);
                for (let ws of chunks) {
                    _ret.push([word].concat(ws));
                }
            }
            else {
                _ret.push([word]);
            }
            return _ret;
        }
        total_count++;
        let words = wordpos[pos] || [];
        //debug(total_count, MAX_CHUNK_COUNT);
        let ret = [];
        for (let word of words) {
            //debug(word);
            let nextcur = word.c + word.w.length;
            /**
             * @FIXME
             */
            if (!wordpos[nextcur]) {
                // No candidates start at nextcur (past the filled positions):
                // this word closes the chunk.
                ret.push([word]);
            }
            else if (total_count > MAX_CHUNK_COUNT) {
                // Budget exhausted: stop branching and greedily take the
                // first candidate at each position to finish the chunk.
                let w1 = [word];
                let j = nextcur;
                while (j in wordpos) {
                    let w2 = wordpos[j][0];
                    if (w2) {
                        w1.push(w2);
                        j += w2.w.length;
                    }
                    else {
                        break;
                    }
                }
                ret.push(w1);
            }
            else {
                // Branch: prepend this word to every chunk of the remainder.
                let t = text.slice(word.w.length);
                let chunks = this.getChunks(wordpos, nextcur, t, total_count, MAX_CHUNK_COUNT);
                for (let ws of chunks) {
                    ret.push([word].concat(ws));
                }
                chunks = null;
            }
        }
        words = undefined;
        wordpos = undefined;
        m = undefined;
        return ret;
    }
}
|
exports.DictTokenizer = DictTokenizer;
// Bound so `init` can be invoked detached from the class (the static `init`
// is declared on the tokenizer base class — not visible in this file).
exports.init = DictTokenizer.init.bind(DictTokenizer);
exports.default = DictTokenizer;
|
645 | //# sourceMappingURL=DictTokenizer.js.map |
\ | No newline at end of file |