1/**
2 * marked - a markdown parser
3 * Copyright (c) 2011-2021, Christopher Jeffrey. (MIT Licensed)
4 * https://github.com/markedjs/marked
5 */
6
7/**
8 * DO NOT EDIT THIS FILE
9 * The code in this file is generated from files in ./src/
10 */
11
12var defaults$5 = {exports: {}};
13
14function getDefaults$1() {
15 return {
16 baseUrl: null,
17 breaks: false,
18 extensions: null,
19 gfm: true,
20 headerIds: true,
21 headerPrefix: '',
22 highlight: null,
23 langPrefix: 'language-',
24 mangle: true,
25 pedantic: false,
26 renderer: null,
27 sanitize: false,
28 sanitizer: null,
29 silent: false,
30 smartLists: false,
31 smartypants: false,
32 tokenizer: null,
33 walkTokens: null,
34 xhtml: false
35 };
36}
37
38function changeDefaults$1(newDefaults) {
39 defaults$5.exports.defaults = newDefaults;
40}
41
42defaults$5.exports = {
43 defaults: getDefaults$1(),
44 getDefaults: getDefaults$1,
45 changeDefaults: changeDefaults$1
46};
47
48/**
49 * Helpers
50 */
51
52const escapeTest = /[&<>"']/;
53const escapeReplace = /[&<>"']/g;
54const escapeTestNoEncode = /[<>"']|&(?!#?\w+;)/;
55const escapeReplaceNoEncode = /[<>"']|&(?!#?\w+;)/g;
56const escapeReplacements = {
57 '&': '&amp;',
58 '<': '&lt;',
59 '>': '&gt;',
60 '"': '&quot;',
61 "'": '&#39;'
62};
63const getEscapeReplacement = (ch) => escapeReplacements[ch];
64function escape$3(html, encode) {
65 if (encode) {
66 if (escapeTest.test(html)) {
67 return html.replace(escapeReplace, getEscapeReplacement);
68 }
69 } else {
70 if (escapeTestNoEncode.test(html)) {
71 return html.replace(escapeReplaceNoEncode, getEscapeReplacement);
72 }
73 }
74
75 return html;
76}
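// Illustrative usage (not part of the generated bundle): with encode falsy,
// existing entities such as '&amp;' are left alone; with encode true every
// special character is re-encoded.
//   escape$3('5 > 3 & 2')      // => '5 &gt; 3 &amp; 2'
//   escape$3('&amp; stays')    // => '&amp; stays'
//   escape$3('&amp;', true)    // => '&amp;amp;'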
77
78const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig;
79
80function unescape$1(html) {
81 // explicitly match decimal, hex, and named HTML entities
82 return html.replace(unescapeTest, (_, n) => {
83 n = n.toLowerCase();
84 if (n === 'colon') return ':';
85 if (n.charAt(0) === '#') {
86 return n.charAt(1) === 'x'
87 ? String.fromCharCode(parseInt(n.substring(2), 16))
88 : String.fromCharCode(+n.substring(1));
89 }
90 return '';
91 });
92}
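// Illustrative usage (not part of the generated bundle): decimal and hex
// references are decoded, ':' is restored from '&colon;', and any other
// named entity is dropped.
//   unescape$1('&#65;&#x41;&colon;')   // => 'AA:'
//   unescape$1('&quot;')               // => ''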
93
94const caret = /(^|[^\[])\^/g;
95function edit$1(regex, opt) {
96 regex = regex.source || regex;
97 opt = opt || '';
98 const obj = {
99 replace: (name, val) => {
100 val = val.source || val;
101 val = val.replace(caret, '$1');
102 regex = regex.replace(name, val);
103 return obj;
104 },
105 getRegex: () => {
106 return new RegExp(regex, opt);
107 }
108 };
109 return obj;
110}
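// Illustrative usage (not part of the generated bundle): edit$1 is how the
// grammar below is assembled, substituting placeholder names with
// sub-patterns and stripping their leading '^' anchors.
//   edit$1(/^(#{1,3}) (text)/)
//     .replace('text', /^[a-z]+/)
//     .getRegex()                      // => /^(#{1,3}) ([a-z]+)/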
111
112const nonWordAndColonTest = /[^\w:]/g;
113const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;
114function cleanUrl$1(sanitize, base, href) {
115 if (sanitize) {
116 let prot;
117 try {
118 prot = decodeURIComponent(unescape$1(href))
119 .replace(nonWordAndColonTest, '')
120 .toLowerCase();
121 } catch (e) {
122 return null;
123 }
124 if (prot.indexOf('javascript:') === 0 || prot.indexOf('vbscript:') === 0 || prot.indexOf('data:') === 0) {
125 return null;
126 }
127 }
128 if (base && !originIndependentUrl.test(href)) {
129 href = resolveUrl(base, href);
130 }
131 try {
132 href = encodeURI(href).replace(/%25/g, '%');
133 } catch (e) {
134 return null;
135 }
136 return href;
137}
138
139const baseUrls = {};
140const justDomain = /^[^:]+:\/*[^/]*$/;
141const protocol = /^([^:]+:)[\s\S]*$/;
142const domain = /^([^:]+:\/*[^/]*)[\s\S]*$/;
143
144function resolveUrl(base, href) {
145 if (!baseUrls[' ' + base]) {
146 // we can ignore everything in base after the last slash of its path component,
147 // but we might need to add that trailing slash ourselves (e.g. when base is just a domain)
148 // https://tools.ietf.org/html/rfc3986#section-3
149 if (justDomain.test(base)) {
150 baseUrls[' ' + base] = base + '/';
151 } else {
152 baseUrls[' ' + base] = rtrim$1(base, '/', true);
153 }
154 }
155 base = baseUrls[' ' + base];
156 const relativeBase = base.indexOf(':') === -1;
157
158 if (href.substring(0, 2) === '//') {
159 if (relativeBase) {
160 return href;
161 }
162 return base.replace(protocol, '$1') + href;
163 } else if (href.charAt(0) === '/') {
164 if (relativeBase) {
165 return href;
166 }
167 return base.replace(domain, '$1') + href;
168 } else {
169 return base + href;
170 }
171}
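// Illustrative usage (not part of the generated bundle): everything after the
// last slash of the base path is dropped before joining.
//   resolveUrl('https://example.com/docs/guide.html', 'img/logo.png')
//     // => 'https://example.com/docs/img/logo.png'
//   resolveUrl('https://example.com', '/about')
//     // => 'https://example.com/about'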
172
173const noopTest$1 = { exec: function noopTest() {} };
174
175function merge$2(obj) {
176 let i = 1,
177 target,
178 key;
179
180 for (; i < arguments.length; i++) {
181 target = arguments[i];
182 for (key in target) {
183 if (Object.prototype.hasOwnProperty.call(target, key)) {
184 obj[key] = target[key];
185 }
186 }
187 }
188
189 return obj;
190}
191
192function splitCells$1(tableRow, count) {
193 // ensure that every cell-delimiting pipe has a space
194 // before it to distinguish it from an escaped pipe
195 const row = tableRow.replace(/\|/g, (match, offset, str) => {
196 let escaped = false,
197 curr = offset;
198 while (--curr >= 0 && str[curr] === '\\') escaped = !escaped;
199 if (escaped) {
200 // odd number of slashes means | is escaped
201 // so we leave it alone
202 return '|';
203 } else {
204 // add space before unescaped |
205 return ' |';
206 }
207 }),
208 cells = row.split(/ \|/);
209 let i = 0;
210
211 // First/last cell in a row cannot be empty if it has no leading/trailing pipe
212 if (!cells[0].trim()) { cells.shift(); }
213 if (!cells[cells.length - 1].trim()) { cells.pop(); }
214
215 if (cells.length > count) {
216 cells.splice(count);
217 } else {
218 while (cells.length < count) cells.push('');
219 }
220
221 for (; i < cells.length; i++) {
222 // leading or trailing whitespace is ignored per the gfm spec
223 cells[i] = cells[i].trim().replace(/\\\|/g, '|');
224 }
225 return cells;
226}
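// Illustrative usage (not part of the generated bundle): escaped pipes stay
// inside a cell, empty leading/trailing cells from outer pipes are dropped,
// and the result is padded or truncated to `count` cells.
//   splitCells$1('| a | b \\| c |', 2)   // => ['a', 'b | c']
//   splitCells$1('a | b', 3)             // => ['a', 'b', '']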
227
228// Remove trailing 'c's. Equivalent to str.replace(/c*$/, '').
229// /c*$/ is vulnerable to ReDoS.
230// invert: Remove the suffix of non-c chars instead. Defaults to falsy.
231function rtrim$1(str, c, invert) {
232 const l = str.length;
233 if (l === 0) {
234 return '';
235 }
236
237 // Length of suffix matching the invert condition.
238 let suffLen = 0;
239
240 // Step left until we fail to match the invert condition.
241 while (suffLen < l) {
242 const currChar = str.charAt(l - suffLen - 1);
243 if (currChar === c && !invert) {
244 suffLen++;
245 } else if (currChar !== c && invert) {
246 suffLen++;
247 } else {
248 break;
249 }
250 }
251
252 return str.substr(0, l - suffLen);
253}
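// Illustrative usage (not part of the generated bundle):
//   rtrim$1('text###', '#')                        // => 'text'
//   rtrim$1('https://x.test/a/b.html', '/', true)  // => 'https://x.test/a/'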
254
255function findClosingBracket$1(str, b) {
256 if (str.indexOf(b[1]) === -1) {
257 return -1;
258 }
259 const l = str.length;
260 let level = 0,
261 i = 0;
262 for (; i < l; i++) {
263 if (str[i] === '\\') {
264 i++;
265 } else if (str[i] === b[0]) {
266 level++;
267 } else if (str[i] === b[1]) {
268 level--;
269 if (level < 0) {
270 return i;
271 }
272 }
273 }
274 return -1;
275}
276
277function checkSanitizeDeprecation$1(opt) {
278 if (opt && opt.sanitize && !opt.silent) {
279 console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options');
280 }
281}
282
283// copied from https://stackoverflow.com/a/5450113/806777
284function repeatString$1(pattern, count) {
285 if (count < 1) {
286 return '';
287 }
288 let result = '';
289 while (count > 1) {
290 if (count & 1) {
291 result += pattern;
292 }
293 count >>= 1;
294 pattern += pattern;
295 }
296 return result + pattern;
297}
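// Illustrative usage (not part of the generated bundle): the pattern is
// doubled each pass, so only O(log count) string concatenations are needed.
//   repeatString$1('ab', 3)   // => 'ababab'
//   repeatString$1('-', 0)    // => ''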
298
299var helpers = {
300 escape: escape$3,
301 unescape: unescape$1,
302 edit: edit$1,
303 cleanUrl: cleanUrl$1,
304 resolveUrl,
305 noopTest: noopTest$1,
306 merge: merge$2,
307 splitCells: splitCells$1,
308 rtrim: rtrim$1,
309 findClosingBracket: findClosingBracket$1,
310 checkSanitizeDeprecation: checkSanitizeDeprecation$1,
311 repeatString: repeatString$1
312};
313
314const { defaults: defaults$4 } = defaults$5.exports;
315const {
316 rtrim,
317 splitCells,
318 escape: escape$2,
319 findClosingBracket
320} = helpers;
321
322function outputLink(cap, link, raw, lexer) {
323 const href = link.href;
324 const title = link.title ? escape$2(link.title) : null;
325 const text = cap[1].replace(/\\([\[\]])/g, '$1');
326
327 if (cap[0].charAt(0) !== '!') {
328 lexer.state.inLink = true;
329 return {
330 type: 'link',
331 raw,
332 href,
333 title,
334 text,
335 tokens: lexer.inlineTokens(text, [])
336 };
337 } else {
338 return {
339 type: 'image',
340 raw,
341 href,
342 title,
343 text: escape$2(text)
344 };
345 }
346}
347
348function indentCodeCompensation(raw, text) {
349 const matchIndentToCode = raw.match(/^(\s+)(?:```)/);
350
351 if (matchIndentToCode === null) {
352 return text;
353 }
354
355 const indentToCode = matchIndentToCode[1];
356
357 return text
358 .split('\n')
359 .map(node => {
360 const matchIndentInNode = node.match(/^\s+/);
361 if (matchIndentInNode === null) {
362 return node;
363 }
364
365 const [indentInNode] = matchIndentInNode;
366
367 if (indentInNode.length >= indentToCode.length) {
368 return node.slice(indentToCode.length);
369 }
370
371 return node;
372 })
373 .join('\n');
374}
375
376/**
377 * Tokenizer
378 */
379var Tokenizer_1 = class Tokenizer {
380 constructor(options) {
381 this.options = options || defaults$4;
382 }
383
384 space(src) {
385 const cap = this.rules.block.newline.exec(src);
386 if (cap) {
387 if (cap[0].length > 1) {
388 return {
389 type: 'space',
390 raw: cap[0]
391 };
392 }
393 return { raw: '\n' };
394 }
395 }
396
397 code(src) {
398 const cap = this.rules.block.code.exec(src);
399 if (cap) {
400 const text = cap[0].replace(/^ {1,4}/gm, '');
401 return {
402 type: 'code',
403 raw: cap[0],
404 codeBlockStyle: 'indented',
405 text: !this.options.pedantic
406 ? rtrim(text, '\n')
407 : text
408 };
409 }
410 }
411
412 fences(src) {
413 const cap = this.rules.block.fences.exec(src);
414 if (cap) {
415 const raw = cap[0];
416 const text = indentCodeCompensation(raw, cap[3] || '');
417
418 return {
419 type: 'code',
420 raw,
421 lang: cap[2] ? cap[2].trim() : cap[2],
422 text
423 };
424 }
425 }
426
427 heading(src) {
428 const cap = this.rules.block.heading.exec(src);
429 if (cap) {
430 let text = cap[2].trim();
431
432 // remove trailing #s
433 if (/#$/.test(text)) {
434 const trimmed = rtrim(text, '#');
435 if (this.options.pedantic) {
436 text = trimmed.trim();
437 } else if (!trimmed || / $/.test(trimmed)) {
438 // CommonMark requires space before trailing #s
439 text = trimmed.trim();
440 }
441 }
442
443 const token = {
444 type: 'heading',
445 raw: cap[0],
446 depth: cap[1].length,
447 text: text,
448 tokens: []
449 };
450 this.lexer.inline(token.text, token.tokens);
451 return token;
452 }
453 }
454
455 hr(src) {
456 const cap = this.rules.block.hr.exec(src);
457 if (cap) {
458 return {
459 type: 'hr',
460 raw: cap[0]
461 };
462 }
463 }
464
465 blockquote(src) {
466 const cap = this.rules.block.blockquote.exec(src);
467 if (cap) {
468 const text = cap[0].replace(/^ *> ?/gm, '');
469
470 return {
471 type: 'blockquote',
472 raw: cap[0],
473 tokens: this.lexer.blockTokens(text, []),
474 text
475 };
476 }
477 }
478
479 list(src) {
480 let cap = this.rules.block.list.exec(src);
481 if (cap) {
482 let raw, istask, ischecked, indent, i, blankLine, endsWithBlankLine,
483 line, lines, itemContents;
484
485 let bull = cap[1].trim();
486 const isordered = bull.length > 1;
487
488 const list = {
489 type: 'list',
490 raw: '',
491 ordered: isordered,
492 start: isordered ? +bull.slice(0, -1) : '',
493 loose: false,
494 items: []
495 };
496
497 bull = isordered ? `\\d{1,9}\\${bull.slice(-1)}` : `\\${bull}`;
498
499 if (this.options.pedantic) {
500 bull = isordered ? bull : '[*+-]';
501 }
502
503 // Get next list item
504 const itemRegex = new RegExp(`^( {0,3}${bull})((?: [^\\n]*| *)(?:\\n[^\\n]*)*(?:\\n|$))`);
505
506 // Get each top-level item
507 while (src) {
508 if (this.rules.block.hr.test(src)) { // End list if we encounter an HR (possibly move into itemRegex?)
509 break;
510 }
511
512 if (!(cap = itemRegex.exec(src))) {
513 break;
514 }
515
516 lines = cap[2].split('\n');
517
518 if (this.options.pedantic) {
519 indent = 2;
520 itemContents = lines[0].trimLeft();
521 } else {
522 indent = cap[2].search(/[^ ]/); // Find first non-space char
523 indent = cap[1].length + (indent > 4 ? 1 : indent); // indented code blocks after 4 spaces; in that case indent is always 1
524 itemContents = lines[0].slice(indent - cap[1].length);
525 }
526
527 blankLine = false;
528 raw = cap[0];
529
530 if (!lines[0] && /^ *$/.test(lines[1])) { // items begin with at most one blank line
531 raw = cap[1] + lines.slice(0, 2).join('\n') + '\n';
532 list.loose = true;
533 lines = [];
534 }
535
536 const nextBulletRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:[*+-]|\\d{1,9}[.)])`);
537
538 for (i = 1; i < lines.length; i++) {
539 line = lines[i];
540
541 if (this.options.pedantic) { // Re-align to follow commonmark nesting rules
542 line = line.replace(/^ {1,4}(?=( {4})*[^ ])/g, ' ');
543 }
544
545 // End list item if found start of new bullet
546 if (nextBulletRegex.test(line)) {
547 raw = cap[1] + lines.slice(0, i).join('\n') + '\n';
548 break;
549 }
550
551 // Until we encounter a blank line, item contents do not need indentation
552 if (!blankLine) {
553 if (!line.trim()) { // Check if current line is empty
554 blankLine = true;
555 }
556
557 // Dedent if possible
558 if (line.search(/[^ ]/) >= indent) {
559 itemContents += '\n' + line.slice(indent);
560 } else {
561 itemContents += '\n' + line;
562 }
563 continue;
564 }
565
566 // Dedent this line
567 if (line.search(/[^ ]/) >= indent || !line.trim()) {
568 itemContents += '\n' + line.slice(indent);
569 continue;
570 } else { // Line was not properly indented; end of this item
571 raw = cap[1] + lines.slice(0, i).join('\n') + '\n';
572 break;
573 }
574 }
575
576 if (!list.loose) {
577 // If the previous item ended with a blank line, the list is loose
578 if (endsWithBlankLine) {
579 list.loose = true;
580 } else if (/\n *\n *$/.test(raw)) {
581 endsWithBlankLine = true;
582 }
583 }
584
585 // Check for task list items
586 if (this.options.gfm) {
587 istask = /^\[[ xX]\] /.exec(itemContents);
588 if (istask) {
589 ischecked = istask[0] !== '[ ] ';
590 itemContents = itemContents.replace(/^\[[ xX]\] +/, '');
591 }
592 }
593
594 list.items.push({
595 type: 'list_item',
596 raw: raw,
597 task: !!istask,
598 checked: ischecked,
599 loose: false,
600 text: itemContents
601 });
602
603 list.raw += raw;
604 src = src.slice(raw.length);
605 }
606
607 // Do not consume newlines at end of final item. Alternatively, make itemRegex *start* with any newlines to simplify/speed up endsWithBlankLine logic
608 list.items[list.items.length - 1].raw = raw.trimRight();
609 list.items[list.items.length - 1].text = itemContents.trimRight();
610 list.raw = list.raw.trimRight();
611
612 const l = list.items.length;
613
614 // Item child tokens handled here at end because we needed to have the final item to trim it first
615 for (i = 0; i < l; i++) {
616 this.lexer.state.top = false;
617 list.items[i].tokens = this.lexer.blockTokens(list.items[i].text, []);
618 if (list.items[i].tokens.some(t => t.type === 'space')) {
619 list.loose = true;
620 list.items[i].loose = true;
621 }
622 }
623
624 return list;
625 }
626 }
627
628 html(src) {
629 const cap = this.rules.block.html.exec(src);
630 if (cap) {
631 const token = {
632 type: 'html',
633 raw: cap[0],
634 pre: !this.options.sanitizer
635 && (cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style'),
636 text: cap[0]
637 };
638 if (this.options.sanitize) {
639 token.type = 'paragraph';
640 token.text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape$2(cap[0]);
641 token.tokens = [];
642 this.lexer.inline(token.text, token.tokens);
643 }
644 return token;
645 }
646 }
647
648 def(src) {
649 const cap = this.rules.block.def.exec(src);
650 if (cap) {
651 if (cap[3]) cap[3] = cap[3].substring(1, cap[3].length - 1);
652 const tag = cap[1].toLowerCase().replace(/\s+/g, ' ');
653 return {
654 type: 'def',
655 tag,
656 raw: cap[0],
657 href: cap[2],
658 title: cap[3]
659 };
660 }
661 }
662
663 table(src) {
664 const cap = this.rules.block.table.exec(src);
665 if (cap) {
666 const item = {
667 type: 'table',
668 header: splitCells(cap[1]).map(c => { return { text: c }; }),
669 align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */),
670 rows: cap[3] ? cap[3].replace(/\n$/, '').split('\n') : []
671 };
672
673 if (item.header.length === item.align.length) {
674 item.raw = cap[0];
675
676 let l = item.align.length;
677 let i, j, k, row;
678 for (i = 0; i < l; i++) {
679 if (/^ *-+: *$/.test(item.align[i])) {
680 item.align[i] = 'right';
681 } else if (/^ *:-+: *$/.test(item.align[i])) {
682 item.align[i] = 'center';
683 } else if (/^ *:-+ *$/.test(item.align[i])) {
684 item.align[i] = 'left';
685 } else {
686 item.align[i] = null;
687 }
688 }
689
690 l = item.rows.length;
691 for (i = 0; i < l; i++) {
692 item.rows[i] = splitCells(item.rows[i], item.header.length).map(c => { return { text: c }; });
693 }
694
695 // parse child tokens inside headers and cells
696
697 // header child tokens
698 l = item.header.length;
699 for (j = 0; j < l; j++) {
700 item.header[j].tokens = [];
701 this.lexer.inlineTokens(item.header[j].text, item.header[j].tokens);
702 }
703
704 // cell child tokens
705 l = item.rows.length;
706 for (j = 0; j < l; j++) {
707 row = item.rows[j];
708 for (k = 0; k < row.length; k++) {
709 row[k].tokens = [];
710 this.lexer.inlineTokens(row[k].text, row[k].tokens);
711 }
712 }
713
714 return item;
715 }
716 }
717 }
718
719 lheading(src) {
720 const cap = this.rules.block.lheading.exec(src);
721 if (cap) {
722 const token = {
723 type: 'heading',
724 raw: cap[0],
725 depth: cap[2].charAt(0) === '=' ? 1 : 2,
726 text: cap[1],
727 tokens: []
728 };
729 this.lexer.inline(token.text, token.tokens);
730 return token;
731 }
732 }
733
734 paragraph(src) {
735 const cap = this.rules.block.paragraph.exec(src);
736 if (cap) {
737 const token = {
738 type: 'paragraph',
739 raw: cap[0],
740 text: cap[1].charAt(cap[1].length - 1) === '\n'
741 ? cap[1].slice(0, -1)
742 : cap[1],
743 tokens: []
744 };
745 this.lexer.inline(token.text, token.tokens);
746 return token;
747 }
748 }
749
750 text(src) {
751 const cap = this.rules.block.text.exec(src);
752 if (cap) {
753 const token = {
754 type: 'text',
755 raw: cap[0],
756 text: cap[0],
757 tokens: []
758 };
759 this.lexer.inline(token.text, token.tokens);
760 return token;
761 }
762 }
763
764 escape(src) {
765 const cap = this.rules.inline.escape.exec(src);
766 if (cap) {
767 return {
768 type: 'escape',
769 raw: cap[0],
770 text: escape$2(cap[1])
771 };
772 }
773 }
774
775 tag(src) {
776 const cap = this.rules.inline.tag.exec(src);
777 if (cap) {
778 if (!this.lexer.state.inLink && /^<a /i.test(cap[0])) {
779 this.lexer.state.inLink = true;
780 } else if (this.lexer.state.inLink && /^<\/a>/i.test(cap[0])) {
781 this.lexer.state.inLink = false;
782 }
783 if (!this.lexer.state.inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
784 this.lexer.state.inRawBlock = true;
785 } else if (this.lexer.state.inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
786 this.lexer.state.inRawBlock = false;
787 }
788
789 return {
790 type: this.options.sanitize
791 ? 'text'
792 : 'html',
793 raw: cap[0],
794 inLink: this.lexer.state.inLink,
795 inRawBlock: this.lexer.state.inRawBlock,
796 text: this.options.sanitize
797 ? (this.options.sanitizer
798 ? this.options.sanitizer(cap[0])
799 : escape$2(cap[0]))
800 : cap[0]
801 };
802 }
803 }
804
805 link(src) {
806 const cap = this.rules.inline.link.exec(src);
807 if (cap) {
808 const trimmedUrl = cap[2].trim();
809 if (!this.options.pedantic && /^</.test(trimmedUrl)) {
810 // commonmark requires matching angle brackets
811 if (!(/>$/.test(trimmedUrl))) {
812 return;
813 }
814
815 // ending angle bracket cannot be escaped
816 const rtrimSlash = rtrim(trimmedUrl.slice(0, -1), '\\');
817 if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) {
818 return;
819 }
820 } else {
821 // find closing parenthesis
822 const lastParenIndex = findClosingBracket(cap[2], '()');
823 if (lastParenIndex > -1) {
824 const start = cap[0].indexOf('!') === 0 ? 5 : 4;
825 const linkLen = start + cap[1].length + lastParenIndex;
826 cap[2] = cap[2].substring(0, lastParenIndex);
827 cap[0] = cap[0].substring(0, linkLen).trim();
828 cap[3] = '';
829 }
830 }
831 let href = cap[2];
832 let title = '';
833 if (this.options.pedantic) {
834 // split pedantic href and title
835 const link = /^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(href);
836
837 if (link) {
838 href = link[1];
839 title = link[3];
840 }
841 } else {
842 title = cap[3] ? cap[3].slice(1, -1) : '';
843 }
844
845 href = href.trim();
846 if (/^</.test(href)) {
847 if (this.options.pedantic && !(/>$/.test(trimmedUrl))) {
848 // pedantic allows starting angle bracket without ending angle bracket
849 href = href.slice(1);
850 } else {
851 href = href.slice(1, -1);
852 }
853 }
854 return outputLink(cap, {
855 href: href ? href.replace(this.rules.inline._escapes, '$1') : href,
856 title: title ? title.replace(this.rules.inline._escapes, '$1') : title
857 }, cap[0], this.lexer);
858 }
859 }
860
861 reflink(src, links) {
862 let cap;
863 if ((cap = this.rules.inline.reflink.exec(src))
864 || (cap = this.rules.inline.nolink.exec(src))) {
865 let link = (cap[2] || cap[1]).replace(/\s+/g, ' ');
866 link = links[link.toLowerCase()];
867 if (!link || !link.href) {
868 const text = cap[0].charAt(0);
869 return {
870 type: 'text',
871 raw: text,
872 text
873 };
874 }
875 return outputLink(cap, link, cap[0], this.lexer);
876 }
877 }
878
879 emStrong(src, maskedSrc, prevChar = '') {
880 let match = this.rules.inline.emStrong.lDelim.exec(src);
881 if (!match) return;
882
883 // _ can't be between two alphanumerics. \p{L}\p{N} includes non-English alphabets/numbers as well
884 if (match[3] && prevChar.match(/[\p{L}\p{N}]/u)) return;
885
886 const nextChar = match[1] || match[2] || '';
887
888 if (!nextChar || (nextChar && (prevChar === '' || this.rules.inline.punctuation.exec(prevChar)))) {
889 const lLength = match[0].length - 1;
890 let rDelim, rLength, delimTotal = lLength, midDelimTotal = 0;
891
892 const endReg = match[0][0] === '*' ? this.rules.inline.emStrong.rDelimAst : this.rules.inline.emStrong.rDelimUnd;
893 endReg.lastIndex = 0;
894
895 // Clip maskedSrc to same section of string as src (move to lexer?)
896 maskedSrc = maskedSrc.slice(-1 * src.length + lLength);
897
898 while ((match = endReg.exec(maskedSrc)) != null) {
899 rDelim = match[1] || match[2] || match[3] || match[4] || match[5] || match[6];
900
901 if (!rDelim) continue; // skip single * in __abc*abc__
902
903 rLength = rDelim.length;
904
905 if (match[3] || match[4]) { // found another Left Delim
906 delimTotal += rLength;
907 continue;
908 } else if (match[5] || match[6]) { // either Left or Right Delim
909 if (lLength % 3 && !((lLength + rLength) % 3)) {
910 midDelimTotal += rLength;
911 continue; // CommonMark Emphasis Rules 9-10
912 }
913 }
914
915 delimTotal -= rLength;
916
917 if (delimTotal > 0) continue; // Haven't found enough closing delimiters
918
919 // Remove extra characters. *a*** -> *a*
920 rLength = Math.min(rLength, rLength + delimTotal + midDelimTotal);
921
922 // Create `em` if smallest delimiter has odd char count. *a***
923 if (Math.min(lLength, rLength) % 2) {
924 const text = src.slice(1, lLength + match.index + rLength);
925 return {
926 type: 'em',
927 raw: src.slice(0, lLength + match.index + rLength + 1),
928 text,
929 tokens: this.lexer.inlineTokens(text, [])
930 };
931 }
932
933 // Create 'strong' if smallest delimiter has even char count. **a***
934 const text = src.slice(2, lLength + match.index + rLength - 1);
935 return {
936 type: 'strong',
937 raw: src.slice(0, lLength + match.index + rLength + 1),
938 text,
939 tokens: this.lexer.inlineTokens(text, [])
940 };
941 }
942 }
943 }
944
945 codespan(src) {
946 const cap = this.rules.inline.code.exec(src);
947 if (cap) {
948 let text = cap[2].replace(/\n/g, ' ');
949 const hasNonSpaceChars = /[^ ]/.test(text);
950 const hasSpaceCharsOnBothEnds = /^ /.test(text) && / $/.test(text);
951 if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
952 text = text.substring(1, text.length - 1);
953 }
954 text = escape$2(text, true);
955 return {
956 type: 'codespan',
957 raw: cap[0],
958 text
959 };
960 }
961 }
962
963 br(src) {
964 const cap = this.rules.inline.br.exec(src);
965 if (cap) {
966 return {
967 type: 'br',
968 raw: cap[0]
969 };
970 }
971 }
972
973 del(src) {
974 const cap = this.rules.inline.del.exec(src);
975 if (cap) {
976 return {
977 type: 'del',
978 raw: cap[0],
979 text: cap[2],
980 tokens: this.lexer.inlineTokens(cap[2], [])
981 };
982 }
983 }
984
985 autolink(src, mangle) {
986 const cap = this.rules.inline.autolink.exec(src);
987 if (cap) {
988 let text, href;
989 if (cap[2] === '@') {
990 text = escape$2(this.options.mangle ? mangle(cap[1]) : cap[1]);
991 href = 'mailto:' + text;
992 } else {
993 text = escape$2(cap[1]);
994 href = text;
995 }
996
997 return {
998 type: 'link',
999 raw: cap[0],
1000 text,
1001 href,
1002 tokens: [
1003 {
1004 type: 'text',
1005 raw: text,
1006 text
1007 }
1008 ]
1009 };
1010 }
1011 }
1012
1013 url(src, mangle) {
1014 let cap;
1015 if (cap = this.rules.inline.url.exec(src)) {
1016 let text, href;
1017 if (cap[2] === '@') {
1018 text = escape$2(this.options.mangle ? mangle(cap[0]) : cap[0]);
1019 href = 'mailto:' + text;
1020 } else {
1021 // do extended autolink path validation
1022 let prevCapZero;
1023 do {
1024 prevCapZero = cap[0];
1025 cap[0] = this.rules.inline._backpedal.exec(cap[0])[0];
1026 } while (prevCapZero !== cap[0]);
1027 text = escape$2(cap[0]);
1028 if (cap[1] === 'www.') {
1029 href = 'http://' + text;
1030 } else {
1031 href = text;
1032 }
1033 }
1034 return {
1035 type: 'link',
1036 raw: cap[0],
1037 text,
1038 href,
1039 tokens: [
1040 {
1041 type: 'text',
1042 raw: text,
1043 text
1044 }
1045 ]
1046 };
1047 }
1048 }
1049
1050 inlineText(src, smartypants) {
1051 const cap = this.rules.inline.text.exec(src);
1052 if (cap) {
1053 let text;
1054 if (this.lexer.state.inRawBlock) {
1055 text = this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape$2(cap[0])) : cap[0];
1056 } else {
1057 text = escape$2(this.options.smartypants ? smartypants(cap[0]) : cap[0]);
1058 }
1059 return {
1060 type: 'text',
1061 raw: cap[0],
1062 text
1063 };
1064 }
1065 }
1066};
1067
1068const {
1069 noopTest,
1070 edit,
1071 merge: merge$1
1072} = helpers;
1073
1074/**
1075 * Block-Level Grammar
1076 */
1077const block$1 = {
1078 newline: /^(?: *(?:\n|$))+/,
1079 code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,
1080 fences: /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?=\n|$)|$)/,
1081 hr: /^ {0,3}((?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$)/,
1082 heading: /^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,
1083 blockquote: /^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/,
1084 list: /^( {0,3}bull)( [^\n]+?)?(?:\n|$)/,
1085 html: '^ {0,3}(?:' // optional indentation
1086 + '<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)' // (1)
1087 + '|comment[^\\n]*(\\n+|$)' // (2)
1088 + '|<\\?[\\s\\S]*?(?:\\?>\\n*|$)' // (3)
1089 + '|<![A-Z][\\s\\S]*?(?:>\\n*|$)' // (4)
1090 + '|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)' // (5)
1091 + '|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (6)
1092 + '|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) open tag
1093 + '|</(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) closing tag
1094 + ')',
1095 def: /^ {0,3}\[(label)\]: *\n? *<?([^\s>]+)>?(?:(?: +\n? *| *\n *)(title))? *(?:\n+|$)/,
1096 table: noopTest,
1097 lheading: /^([^\n]+)\n {0,3}(=+|-+) *(?:\n+|$)/,
1098 // regex template, placeholders will be replaced according to different paragraph
1099 // interruption rules of commonmark and the original markdown spec:
1100 _paragraph: /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html| +\n)[^\n]+)*)/,
1101 text: /^[^\n]+/
1102};
1103
1104block$1._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/;
1105block$1._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;
1106block$1.def = edit(block$1.def)
1107 .replace('label', block$1._label)
1108 .replace('title', block$1._title)
1109 .getRegex();
1110
1111block$1.bullet = /(?:[*+-]|\d{1,9}[.)])/;
1112block$1.listItemStart = edit(/^( *)(bull) */)
1113 .replace('bull', block$1.bullet)
1114 .getRegex();
1115
1116block$1.list = edit(block$1.list)
1117 .replace(/bull/g, block$1.bullet)
1118 .replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))')
1119 .replace('def', '\\n+(?=' + block$1.def.source + ')')
1120 .getRegex();
1121
1122block$1._tag = 'address|article|aside|base|basefont|blockquote|body|caption'
1123 + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'
1124 + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'
1125 + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'
1126 + '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr'
1127 + '|track|ul';
1128block$1._comment = /<!--(?!-?>)[\s\S]*?(?:-->|$)/;
1129block$1.html = edit(block$1.html, 'i')
1130 .replace('comment', block$1._comment)
1131 .replace('tag', block$1._tag)
1132 .replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/)
1133 .getRegex();
1134
1135block$1.paragraph = edit(block$1._paragraph)
1136 .replace('hr', block$1.hr)
1137 .replace('heading', ' {0,3}#{1,6} ')
1138 .replace('|lheading', '') // setext headings don't interrupt commonmark paragraphs
1139 .replace('blockquote', ' {0,3}>')
1140 .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
1141 .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
1142 .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
1143 .replace('tag', block$1._tag) // pars can be interrupted by type (6) html blocks
1144 .getRegex();
1145
1146block$1.blockquote = edit(block$1.blockquote)
1147 .replace('paragraph', block$1.paragraph)
1148 .getRegex();
1149
1150/**
1151 * Normal Block Grammar
1152 */
1153
1154block$1.normal = merge$1({}, block$1);
1155
1156/**
1157 * GFM Block Grammar
1158 */
1159
1160block$1.gfm = merge$1({}, block$1.normal, {
1161 table: '^ *([^\\n ].*\\|.*)\\n' // Header
1162 + ' {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)\\|?' // Align
1163 + '(?:\\n *((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells
1164});
1165
1166block$1.gfm.table = edit(block$1.gfm.table)
1167 .replace('hr', block$1.hr)
1168 .replace('heading', ' {0,3}#{1,6} ')
1169 .replace('blockquote', ' {0,3}>')
1170 .replace('code', ' {4}[^\\n]')
1171 .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
1172 .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
1173 .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
1174 .replace('tag', block$1._tag) // tables can be interrupted by type (6) html blocks
1175 .getRegex();
1176
1177/**
1178 * Pedantic grammar (original John Gruber's loose markdown specification)
1179 */
1180
1181block$1.pedantic = merge$1({}, block$1.normal, {
1182 html: edit(
1183 '^ *(?:comment *(?:\\n|\\s*$)'
1184 + '|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)' // closed tag
1185 + '|<tag(?:"[^"]*"|\'[^\']*\'|\\s[^\'"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))')
1186 .replace('comment', block$1._comment)
1187 .replace(/tag/g, '(?!(?:'
1188 + 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub'
1189 + '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)'
1190 + '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b')
1191 .getRegex(),
1192 def: /^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/,
1193 heading: /^(#{1,6})(.*)(?:\n+|$)/,
1194 fences: noopTest, // fences not supported
1195 paragraph: edit(block$1.normal._paragraph)
1196 .replace('hr', block$1.hr)
1197 .replace('heading', ' *#{1,6} *[^\n]')
1198 .replace('lheading', block$1.lheading)
1199 .replace('blockquote', ' {0,3}>')
1200 .replace('|fences', '')
1201 .replace('|list', '')
1202 .replace('|html', '')
1203 .getRegex()
1204});
1205
1206/**
1207 * Inline-Level Grammar
1208 */
1209const inline$1 = {
1210 escape: /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,
1211 autolink: /^<(scheme:[^\s\x00-\x1f<>]*|email)>/,
1212 url: noopTest,
1213 tag: '^comment'
1214 + '|^</[a-zA-Z][\\w:-]*\\s*>' // closing tag
1215 + '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag
1216 + '|^<\\?[\\s\\S]*?\\?>' // processing instruction, e.g. <?php ?>
1217 + '|^<![a-zA-Z]+\\s[\\s\\S]*?>' // declaration, e.g. <!DOCTYPE html>
1218 + '|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>', // CDATA section
1219 link: /^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/,
1220 reflink: /^!?\[(label)\]\[(?!\s*\])((?:\\[\[\]]?|[^\[\]\\])+)\]/,
1221 nolink: /^!?\[(?!\s*\])((?:\[[^\[\]]*\]|\\[\[\]]|[^\[\]])*)\](?:\[\])?/,
1222 reflinkSearch: 'reflink|nolink(?!\\()',
1223 emStrong: {
1224 lDelim: /^(?:\*+(?:([punct_])|[^\s*]))|^_+(?:([punct*])|([^\s_]))/,
1225 // (1) and (2) can only be a Right Delimiter. (3) and (4) can only be Left. (5) and (6) can be either Left or Right.
1226 // () Skip other delimiter (1) #*** (2) a***#, a*** (3) #***a, ***a (4) ***# (5) #***# (6) a***a
1227 rDelimAst: /\_\_[^_*]*?\*[^_*]*?\_\_|[punct_](\*+)(?=[\s]|$)|[^punct*_\s](\*+)(?=[punct_\s]|$)|[punct_\s](\*+)(?=[^punct*_\s])|[\s](\*+)(?=[punct_])|[punct_](\*+)(?=[punct_])|[^punct*_\s](\*+)(?=[^punct*_\s])/,
1228 rDelimUnd: /\*\*[^_*]*?\_[^_*]*?\*\*|[punct*](\_+)(?=[\s]|$)|[^punct*_\s](\_+)(?=[punct*\s]|$)|[punct*\s](\_+)(?=[^punct*_\s])|[\s](\_+)(?=[punct*])|[punct*](\_+)(?=[punct*])/ // ^- Not allowed for _
1229 },
1230 code: /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,
1231 br: /^( {2,}|\\)\n(?!\s*$)/,
1232 del: noopTest,
1233 text: /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*_]|\b_|$)|[^ ](?= {2,}\n)))/,
1234 punctuation: /^([\spunctuation])/
1235};
1236
1237// list of punctuation marks from CommonMark spec
1238// without * and _ to handle the different emphasis markers * and _
1239inline$1._punctuation = '!"#$%&\'()+\\-.,/:;<=>?@\\[\\]`^{|}~';
1240inline$1.punctuation = edit(inline$1.punctuation).replace(/punctuation/g, inline$1._punctuation).getRegex();
1241
1242// sequences em should skip over [title](link), `code`, <html>
1243inline$1.blockSkip = /\[[^\]]*?\]\([^\)]*?\)|`[^`]*?`|<[^>]*?>/g;
1244inline$1.escapedEmSt = /\\\*|\\_/g;
1245
1246inline$1._comment = edit(block$1._comment).replace('(?:-->|$)', '-->').getRegex();
1247
1248inline$1.emStrong.lDelim = edit(inline$1.emStrong.lDelim)
1249 .replace(/punct/g, inline$1._punctuation)
1250 .getRegex();
1251
1252inline$1.emStrong.rDelimAst = edit(inline$1.emStrong.rDelimAst, 'g')
1253 .replace(/punct/g, inline$1._punctuation)
1254 .getRegex();
1255
1256inline$1.emStrong.rDelimUnd = edit(inline$1.emStrong.rDelimUnd, 'g')
1257 .replace(/punct/g, inline$1._punctuation)
1258 .getRegex();
1259
1260inline$1._escapes = /\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g;
1261
1262inline$1._scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/;
1263inline$1._email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/;
1264inline$1.autolink = edit(inline$1.autolink)
1265 .replace('scheme', inline$1._scheme)
1266 .replace('email', inline$1._email)
1267 .getRegex();
1268
1269inline$1._attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/;
1270
1271inline$1.tag = edit(inline$1.tag)
1272 .replace('comment', inline$1._comment)
1273 .replace('attribute', inline$1._attribute)
1274 .getRegex();
1275
1276inline$1._label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/;
1277inline$1._href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/;
1278inline$1._title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/;
1279
1280inline$1.link = edit(inline$1.link)
1281 .replace('label', inline$1._label)
1282 .replace('href', inline$1._href)
1283 .replace('title', inline$1._title)
1284 .getRegex();
1285
1286inline$1.reflink = edit(inline$1.reflink)
1287 .replace('label', inline$1._label)
1288 .getRegex();
1289
1290inline$1.reflinkSearch = edit(inline$1.reflinkSearch, 'g')
1291 .replace('reflink', inline$1.reflink)
1292 .replace('nolink', inline$1.nolink)
1293 .getRegex();
1294
1295/**
1296 * Normal Inline Grammar
1297 */
1298
1299inline$1.normal = merge$1({}, inline$1);
1300
1301/**
1302 * Pedantic Inline Grammar
1303 */
1304
1305inline$1.pedantic = merge$1({}, inline$1.normal, {
1306 strong: {
1307 start: /^__|\*\*/,
1308 middle: /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,
1309 endAst: /\*\*(?!\*)/g,
1310 endUnd: /__(?!_)/g
1311 },
1312 em: {
1313 start: /^_|\*/,
1314 middle: /^()\*(?=\S)([\s\S]*?\S)\*(?!\*)|^_(?=\S)([\s\S]*?\S)_(?!_)/,
1315 endAst: /\*(?!\*)/g,
1316 endUnd: /_(?!_)/g
1317 },
1318 link: edit(/^!?\[(label)\]\((.*?)\)/)
1319 .replace('label', inline$1._label)
1320 .getRegex(),
1321 reflink: edit(/^!?\[(label)\]\s*\[([^\]]*)\]/)
1322 .replace('label', inline$1._label)
1323 .getRegex()
1324});
1325
1326/**
1327 * GFM Inline Grammar
1328 */
1329
1330inline$1.gfm = merge$1({}, inline$1.normal, {
1331 escape: edit(inline$1.escape).replace('])', '~|])').getRegex(),
1332 _extended_email: /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/,
1333 url: /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,
1334 _backpedal: /(?:[^?!.,:;*_~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_~)]+(?!$))+/,
1335 del: /^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/,
1336 text: /^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\<!\[`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)))/
1337});
1338
1339inline$1.gfm.url = edit(inline$1.gfm.url, 'i')
1340 .replace('email', inline$1.gfm._extended_email)
1341 .getRegex();
1342/**
1343 * GFM + Line Breaks Inline Grammar
1344 */
1345
1346inline$1.breaks = merge$1({}, inline$1.gfm, {
1347 br: edit(inline$1.br).replace('{2,}', '*').getRegex(),
1348 text: edit(inline$1.gfm.text)
1349 .replace('\\b_', '\\b_| {2,}\\n')
1350 .replace(/\{2,\}/g, '*')
1351 .getRegex()
1352});
1353
1354var rules = {
1355 block: block$1,
1356 inline: inline$1
1357};
1358
1359const Tokenizer$1 = Tokenizer_1;
1360const { defaults: defaults$3 } = defaults$5.exports;
1361const { block, inline } = rules;
1362const { repeatString } = helpers;
1363
1364/**
1365 * smartypants text replacement
1366 */
1367function smartypants(text) {
1368 return text
1369 // em-dashes
1370 .replace(/---/g, '\u2014')
1371 // en-dashes
1372 .replace(/--/g, '\u2013')
1373 // opening singles
1374 .replace(/(^|[-\u2014/(\[{"\s])'/g, '$1\u2018')
1375 // closing singles & apostrophes
1376 .replace(/'/g, '\u2019')
1377 // opening doubles
1378 .replace(/(^|[-\u2014/(\[{\u2018\s])"/g, '$1\u201c')
1379 // closing doubles
1380 .replace(/"/g, '\u201d')
1381 // ellipses
1382 .replace(/\.{3}/g, '\u2026');
1383}
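// Illustrative usage (not part of the generated bundle), only applied when
// the `smartypants` option is enabled:
//   smartypants('"Hello" -- it\'s fine...')   // => '“Hello” – it’s fine…'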
1384
1385/**
1386 * mangle email addresses
1387 */
1388function mangle(text) {
1389 let out = '',
1390 i,
1391 ch;
1392
1393 const l = text.length;
1394 for (i = 0; i < l; i++) {
1395 ch = text.charCodeAt(i);
1396 if (Math.random() > 0.5) {
1397 ch = 'x' + ch.toString(16);
1398 }
1399 out += '&#' + ch + ';';
1400 }
1401
1402 return out;
1403}
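// Illustrative behavior (not part of the generated bundle): each character of
// an autolinked email is emitted as a numeric character reference, randomly in
// decimal or hex, e.g. mangle('a@b') may produce '&#97;&#x40;&#98;'.
// Only applied when the `mangle` option is enabled (the default).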
1404
1405/**
1406 * Block Lexer
1407 */
1408var Lexer_1 = class Lexer {
1409 constructor(options) {
1410 this.tokens = [];
1411 this.tokens.links = Object.create(null);
1412 this.options = options || defaults$3;
1413 this.options.tokenizer = this.options.tokenizer || new Tokenizer$1();
1414 this.tokenizer = this.options.tokenizer;
1415 this.tokenizer.options = this.options;
1416 this.tokenizer.lexer = this;
1417 this.inlineQueue = [];
1418 this.state = {
1419 inLink: false,
1420 inRawBlock: false,
1421 top: true
1422 };
1423
1424 const rules = {
1425 block: block.normal,
1426 inline: inline.normal
1427 };
1428
1429 if (this.options.pedantic) {
1430 rules.block = block.pedantic;
1431 rules.inline = inline.pedantic;
1432 } else if (this.options.gfm) {
1433 rules.block = block.gfm;
1434 if (this.options.breaks) {
1435 rules.inline = inline.breaks;
1436 } else {
1437 rules.inline = inline.gfm;
1438 }
1439 }
1440 this.tokenizer.rules = rules;
1441 }
1442
1443 /**
1444 * Expose Rules
1445 */
1446 static get rules() {
1447 return {
1448 block,
1449 inline
1450 };
1451 }
1452
1453 /**
1454 * Static Lex Method
1455 */
1456 static lex(src, options) {
1457 const lexer = new Lexer(options);
1458 return lexer.lex(src);
1459 }
1460
1461 /**
1462 * Static Lex Inline Method
1463 */
1464 static lexInline(src, options) {
1465 const lexer = new Lexer(options);
1466 return lexer.inlineTokens(src);
1467 }
1468
1469 /**
1470 * Preprocessing
1471 */
1472 lex(src) {
1473 src = src
1474 .replace(/\r\n|\r/g, '\n')
1475 .replace(/\t/g, ' ');
1476
1477 this.blockTokens(src, this.tokens);
1478
1479 let next;
1480 while (next = this.inlineQueue.shift()) {
1481 this.inlineTokens(next.src, next.tokens);
1482 }
1483
1484 return this.tokens;
1485 }
1486
1487 /**
1488 * Lexing
1489 */
1490 blockTokens(src, tokens = []) {
1491 if (this.options.pedantic) {
1492 src = src.replace(/^ +$/gm, '');
1493 }
1494 let token, lastToken, cutSrc, lastParagraphClipped;
1495
1496 while (src) {
1497 if (this.options.extensions
1498 && this.options.extensions.block
1499 && this.options.extensions.block.some((extTokenizer) => {
1500 if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
1501 src = src.substring(token.raw.length);
1502 tokens.push(token);
1503 return true;
1504 }
1505 return false;
1506 })) {
1507 continue;
1508 }
1509
1510 // newline
1511 if (token = this.tokenizer.space(src)) {
1512 src = src.substring(token.raw.length);
1513 if (token.type) {
1514 tokens.push(token);
1515 }
1516 continue;
1517 }
1518
1519 // code
1520 if (token = this.tokenizer.code(src)) {
1521 src = src.substring(token.raw.length);
1522 lastToken = tokens[tokens.length - 1];
1523 // An indented code block cannot interrupt a paragraph.
1524 if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
1525 lastToken.raw += '\n' + token.raw;
1526 lastToken.text += '\n' + token.text;
1527 this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
1528 } else {
1529 tokens.push(token);
1530 }
1531 continue;
1532 }
1533
1534 // fences
1535 if (token = this.tokenizer.fences(src)) {
1536 src = src.substring(token.raw.length);
1537 tokens.push(token);
1538 continue;
1539 }
1540
1541 // heading
1542 if (token = this.tokenizer.heading(src)) {
1543 src = src.substring(token.raw.length);
1544 tokens.push(token);
1545 continue;
1546 }
1547
1548 // hr
1549 if (token = this.tokenizer.hr(src)) {
1550 src = src.substring(token.raw.length);
1551 tokens.push(token);
1552 continue;
1553 }
1554
1555 // blockquote
1556 if (token = this.tokenizer.blockquote(src)) {
1557 src = src.substring(token.raw.length);
1558 tokens.push(token);
1559 continue;
1560 }
1561
1562 // list
1563 if (token = this.tokenizer.list(src)) {
1564 src = src.substring(token.raw.length);
1565 tokens.push(token);
1566 continue;
1567 }
1568
1569 // html
1570 if (token = this.tokenizer.html(src)) {
1571 src = src.substring(token.raw.length);
1572 tokens.push(token);
1573 continue;
1574 }
1575
1576 // def
1577 if (token = this.tokenizer.def(src)) {
1578 src = src.substring(token.raw.length);
1579 lastToken = tokens[tokens.length - 1];
1580 if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
1581 lastToken.raw += '\n' + token.raw;
1582 lastToken.text += '\n' + token.raw;
1583 this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
1584 } else if (!this.tokens.links[token.tag]) {
1585 this.tokens.links[token.tag] = {
1586 href: token.href,
1587 title: token.title
1588 };
1589 }
1590 continue;
1591 }
1592
1593 // table (gfm)
1594 if (token = this.tokenizer.table(src)) {
1595 src = src.substring(token.raw.length);
1596 tokens.push(token);
1597 continue;
1598 }
1599
1600 // lheading
1601 if (token = this.tokenizer.lheading(src)) {
1602 src = src.substring(token.raw.length);
1603 tokens.push(token);
1604 continue;
1605 }
1606
1607 // top-level paragraph
1608 // prevent paragraph consuming extensions by clipping 'src' to extension start
1609 cutSrc = src;
1610 if (this.options.extensions && this.options.extensions.startBlock) {
1611 let startIndex = Infinity;
1612 const tempSrc = src.slice(1);
1613 let tempStart;
1614 this.options.extensions.startBlock.forEach(function(getStartIndex) {
1615 tempStart = getStartIndex.call({ lexer: this }, tempSrc);
1616 if (typeof tempStart === 'number' && tempStart >= 0) { startIndex = Math.min(startIndex, tempStart); }
1617 });
1618 if (startIndex < Infinity && startIndex >= 0) {
1619 cutSrc = src.substring(0, startIndex + 1);
1620 }
1621 }
1622 if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) {
1623 lastToken = tokens[tokens.length - 1];
1624 if (lastParagraphClipped && lastToken.type === 'paragraph') {
1625 lastToken.raw += '\n' + token.raw;
1626 lastToken.text += '\n' + token.text;
1627 this.inlineQueue.pop();
1628 this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
1629 } else {
1630 tokens.push(token);
1631 }
1632 lastParagraphClipped = (cutSrc.length !== src.length);
1633 src = src.substring(token.raw.length);
1634 continue;
1635 }
1636
1637 // text
1638 if (token = this.tokenizer.text(src)) {
1639 src = src.substring(token.raw.length);
1640 lastToken = tokens[tokens.length - 1];
1641 if (lastToken && lastToken.type === 'text') {
1642 lastToken.raw += '\n' + token.raw;
1643 lastToken.text += '\n' + token.text;
1644 this.inlineQueue.pop();
1645 this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
1646 } else {
1647 tokens.push(token);
1648 }
1649 continue;
1650 }
1651
1652 if (src) {
1653 const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
1654 if (this.options.silent) {
1655 console.error(errMsg);
1656 break;
1657 } else {
1658 throw new Error(errMsg);
1659 }
1660 }
1661 }
1662
1663 this.state.top = true;
1664 return tokens;
1665 }
1666
1667 inline(src, tokens) {
1668 this.inlineQueue.push({ src, tokens });
1669 }
1670
1671 /**
1672 * Lexing/Compiling
1673 */
1674 inlineTokens(src, tokens = []) {
1675 let token, lastToken, cutSrc;
1676
1677 // String with links masked to avoid interference with em and strong
1678 let maskedSrc = src;
1679 let match;
1680 let keepPrevChar, prevChar;
1681
1682 // Mask out reflinks
1683 if (this.tokens.links) {
1684 const links = Object.keys(this.tokens.links);
1685 if (links.length > 0) {
1686 while ((match = this.tokenizer.rules.inline.reflinkSearch.exec(maskedSrc)) != null) {
1687 if (links.includes(match[0].slice(match[0].lastIndexOf('[') + 1, -1))) {
1688 maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex);
1689 }
1690 }
1691 }
1692 }
1693 // Mask out other blocks
1694 while ((match = this.tokenizer.rules.inline.blockSkip.exec(maskedSrc)) != null) {
1695 maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);
1696 }
1697
1698 // Mask out escaped em & strong delimiters
1699 while ((match = this.tokenizer.rules.inline.escapedEmSt.exec(maskedSrc)) != null) {
1700 maskedSrc = maskedSrc.slice(0, match.index) + '++' + maskedSrc.slice(this.tokenizer.rules.inline.escapedEmSt.lastIndex);
1701 }
1702
1703 while (src) {
1704 if (!keepPrevChar) {
1705 prevChar = '';
1706 }
1707 keepPrevChar = false;
1708
1709 // extensions
1710 if (this.options.extensions
1711 && this.options.extensions.inline
1712 && this.options.extensions.inline.some((extTokenizer) => {
1713 if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
1714 src = src.substring(token.raw.length);
1715 tokens.push(token);
1716 return true;
1717 }
1718 return false;
1719 })) {
1720 continue;
1721 }
1722
1723 // escape
1724 if (token = this.tokenizer.escape(src)) {
1725 src = src.substring(token.raw.length);
1726 tokens.push(token);
1727 continue;
1728 }
1729
1730 // tag
1731 if (token = this.tokenizer.tag(src)) {
1732 src = src.substring(token.raw.length);
1733 lastToken = tokens[tokens.length - 1];
1734 if (lastToken && token.type === 'text' && lastToken.type === 'text') {
1735 lastToken.raw += token.raw;
1736 lastToken.text += token.text;
1737 } else {
1738 tokens.push(token);
1739 }
1740 continue;
1741 }
1742
1743 // link
1744 if (token = this.tokenizer.link(src)) {
1745 src = src.substring(token.raw.length);
1746 tokens.push(token);
1747 continue;
1748 }
1749
1750 // reflink, nolink
1751 if (token = this.tokenizer.reflink(src, this.tokens.links)) {
1752 src = src.substring(token.raw.length);
1753 lastToken = tokens[tokens.length - 1];
1754 if (lastToken && token.type === 'text' && lastToken.type === 'text') {
1755 lastToken.raw += token.raw;
1756 lastToken.text += token.text;
1757 } else {
1758 tokens.push(token);
1759 }
1760 continue;
1761 }
1762
1763 // em & strong
1764 if (token = this.tokenizer.emStrong(src, maskedSrc, prevChar)) {
1765 src = src.substring(token.raw.length);
1766 tokens.push(token);
1767 continue;
1768 }
1769
1770 // code
1771 if (token = this.tokenizer.codespan(src)) {
1772 src = src.substring(token.raw.length);
1773 tokens.push(token);
1774 continue;
1775 }
1776
1777 // br
1778 if (token = this.tokenizer.br(src)) {
1779 src = src.substring(token.raw.length);
1780 tokens.push(token);
1781 continue;
1782 }
1783
1784 // del (gfm)
1785 if (token = this.tokenizer.del(src)) {
1786 src = src.substring(token.raw.length);
1787 tokens.push(token);
1788 continue;
1789 }
1790
1791 // autolink
1792 if (token = this.tokenizer.autolink(src, mangle)) {
1793 src = src.substring(token.raw.length);
1794 tokens.push(token);
1795 continue;
1796 }
1797
1798 // url (gfm)
1799 if (!this.state.inLink && (token = this.tokenizer.url(src, mangle))) {
1800 src = src.substring(token.raw.length);
1801 tokens.push(token);
1802 continue;
1803 }
1804
1805 // text
1806 // prevent inlineText consuming extensions by clipping 'src' to extension start
1807 cutSrc = src;
1808 if (this.options.extensions && this.options.extensions.startInline) {
1809 let startIndex = Infinity;
1810 const tempSrc = src.slice(1);
1811 let tempStart;
1812 this.options.extensions.startInline.forEach(function(getStartIndex) {
1813 tempStart = getStartIndex.call({ lexer: this }, tempSrc);
1814 if (typeof tempStart === 'number' && tempStart >= 0) { startIndex = Math.min(startIndex, tempStart); }
1815 });
1816 if (startIndex < Infinity && startIndex >= 0) {
1817 cutSrc = src.substring(0, startIndex + 1);
1818 }
1819 }
1820 if (token = this.tokenizer.inlineText(cutSrc, smartypants)) {
1821 src = src.substring(token.raw.length);
1822 if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started
1823 prevChar = token.raw.slice(-1);
1824 }
1825 keepPrevChar = true;
1826 lastToken = tokens[tokens.length - 1];
1827 if (lastToken && lastToken.type === 'text') {
1828 lastToken.raw += token.raw;
1829 lastToken.text += token.text;
1830 } else {
1831 tokens.push(token);
1832 }
1833 continue;
1834 }
1835
1836 if (src) {
1837 const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
1838 if (this.options.silent) {
1839 console.error(errMsg);
1840 break;
1841 } else {
1842 throw new Error(errMsg);
1843 }
1844 }
1845 }
1846
1847 return tokens;
1848 }
1849};
1850
1851const { defaults: defaults$2 } = defaults$5.exports;
1852const {
1853 cleanUrl,
1854 escape: escape$1
1855} = helpers;
1856
1857/**
1858 * Renderer
1859 */
1860var Renderer_1 = class Renderer {
1861 constructor(options) {
1862 this.options = options || defaults$2;
1863 }
1864
1865 code(code, infostring, escaped) {
1866 const lang = (infostring || '').match(/\S*/)[0];
1867 if (this.options.highlight) {
1868 const out = this.options.highlight(code, lang);
1869 if (out != null && out !== code) {
1870 escaped = true;
1871 code = out;
1872 }
1873 }
1874
1875 code = code.replace(/\n$/, '') + '\n';
1876
1877 if (!lang) {
1878 return '<pre><code>'
1879 + (escaped ? code : escape$1(code, true))
1880 + '</code></pre>\n';
1881 }
1882
1883 return '<pre><code class="'
1884 + this.options.langPrefix
1885 + escape$1(lang, true)
1886 + '">'
1887 + (escaped ? code : escape$1(code, true))
1888 + '</code></pre>\n';
1889 }
1890
1891 blockquote(quote) {
1892 return '<blockquote>\n' + quote + '</blockquote>\n';
1893 }
1894
1895 html(html) {
1896 return html;
1897 }
1898
1899 heading(text, level, raw, slugger) {
1900 if (this.options.headerIds) {
1901 return '<h'
1902 + level
1903 + ' id="'
1904 + this.options.headerPrefix
1905 + slugger.slug(raw)
1906 + '">'
1907 + text
1908 + '</h'
1909 + level
1910 + '>\n';
1911 }
1912 // ignore IDs
1913 return '<h' + level + '>' + text + '</h' + level + '>\n';
1914 }
1915
1916 hr() {
1917 return this.options.xhtml ? '<hr/>\n' : '<hr>\n';
1918 }
1919
1920 list(body, ordered, start) {
1921 const type = ordered ? 'ol' : 'ul',
1922 startatt = (ordered && start !== 1) ? (' start="' + start + '"') : '';
1923 return '<' + type + startatt + '>\n' + body + '</' + type + '>\n';
1924 }
1925
1926 listitem(text) {
1927 return '<li>' + text + '</li>\n';
1928 }
1929
1930 checkbox(checked) {
1931 return '<input '
1932 + (checked ? 'checked="" ' : '')
1933 + 'disabled="" type="checkbox"'
1934 + (this.options.xhtml ? ' /' : '')
1935 + '> ';
1936 }
1937
1938 paragraph(text) {
1939 return '<p>' + text + '</p>\n';
1940 }
1941
1942 table(header, body) {
1943 if (body) body = '<tbody>' + body + '</tbody>';
1944
1945 return '<table>\n'
1946 + '<thead>\n'
1947 + header
1948 + '</thead>\n'
1949 + body
1950 + '</table>\n';
1951 }
1952
1953 tablerow(content) {
1954 return '<tr>\n' + content + '</tr>\n';
1955 }
1956
1957 tablecell(content, flags) {
1958 const type = flags.header ? 'th' : 'td';
1959 const tag = flags.align
1960 ? '<' + type + ' align="' + flags.align + '">'
1961 : '<' + type + '>';
1962 return tag + content + '</' + type + '>\n';
1963 }
1964
1965 // span level renderer
1966 strong(text) {
1967 return '<strong>' + text + '</strong>';
1968 }
1969
1970 em(text) {
1971 return '<em>' + text + '</em>';
1972 }
1973
1974 codespan(text) {
1975 return '<code>' + text + '</code>';
1976 }
1977
1978 br() {
1979 return this.options.xhtml ? '<br/>' : '<br>';
1980 }
1981
1982 del(text) {
1983 return '<del>' + text + '</del>';
1984 }
1985
1986 link(href, title, text) {
1987 href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
1988 if (href === null) {
1989 return text;
1990 }
1991 let out = '<a href="' + escape$1(href) + '"';
1992 if (title) {
1993 out += ' title="' + title + '"';
1994 }
1995 out += '>' + text + '</a>';
1996 return out;
1997 }
1998
1999 image(href, title, text) {
2000 href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
2001 if (href === null) {
2002 return text;
2003 }
2004
2005 let out = '<img src="' + href + '" alt="' + text + '"';
2006 if (title) {
2007 out += ' title="' + title + '"';
2008 }
2009 out += this.options.xhtml ? '/>' : '>';
2010 return out;
2011 }
2012
2013 text(text) {
2014 return text;
2015 }
2016};
2017
2018/**
2019 * TextRenderer
2020 * returns only the textual part of the token
2021 */
2022
2023var TextRenderer_1 = class TextRenderer {
2024 // no need for block level renderers
2025 strong(text) {
2026 return text;
2027 }
2028
2029 em(text) {
2030 return text;
2031 }
2032
2033 codespan(text) {
2034 return text;
2035 }
2036
2037 del(text) {
2038 return text;
2039 }
2040
2041 html(text) {
2042 return text;
2043 }
2044
2045 text(text) {
2046 return text;
2047 }
2048
2049 link(href, title, text) {
2050 return '' + text;
2051 }
2052
2053 image(href, title, text) {
2054 return '' + text;
2055 }
2056
2057 br() {
2058 return '';
2059 }
2060};
2061
2062/**
2063 * Slugger generates header id
2064 */
2065
2066var Slugger_1 = class Slugger {
2067 constructor() {
2068 this.seen = {};
2069 }
2070
2071 serialize(value) {
2072 return value
2073 .toLowerCase()
2074 .trim()
2075 // remove html tags
2076 .replace(/<[!\/a-z].*?>/ig, '')
2077 // remove unwanted chars
2078 .replace(/[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,./:;<=>?@[\]^`{|}~]/g, '')
2079 .replace(/\s/g, '-');
2080 }
2081
2082 /**
2083 * Finds the next safe (unique) slug to use
2084 */
2085 getNextSafeSlug(originalSlug, isDryRun) {
2086 let slug = originalSlug;
2087 let occurrenceAccumulator = 0;
2088 if (this.seen.hasOwnProperty(slug)) {
2089 occurrenceAccumulator = this.seen[originalSlug];
2090 do {
2091 occurrenceAccumulator++;
2092 slug = originalSlug + '-' + occurrenceAccumulator;
2093 } while (this.seen.hasOwnProperty(slug));
2094 }
2095 if (!isDryRun) {
2096 this.seen[originalSlug] = occurrenceAccumulator;
2097 this.seen[slug] = 0;
2098 }
2099 return slug;
2100 }
2101
2102 /**
2103 * Convert string to unique id
2104 * @param {object} options
2105 * @param {boolean} options.dryrun Generates the next unique slug without updating the internal accumulator.
2106 */
2107 slug(value, options = {}) {
2108 const slug = this.serialize(value);
2109 return this.getNextSafeSlug(slug, options.dryrun);
2110 }
2111};
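/*
 * Usage sketch (illustrative, assuming the bundle is exposed as `marked`):
 *
 *   const slugger = new marked.Slugger();
 *   slugger.slug('Some Heading');                   // 'some-heading'
 *   slugger.slug('Some Heading');                   // 'some-heading-1'
 *   slugger.slug('Some Heading', { dryrun: true }); // 'some-heading-2' (not recorded)
 */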
2112
2113const Renderer$1 = Renderer_1;
2114const TextRenderer$1 = TextRenderer_1;
2115const Slugger$1 = Slugger_1;
2116const { defaults: defaults$1 } = defaults$5.exports;
2117const {
2118 unescape
2119} = helpers;
2120
2121/**
2122 * Parsing & Compiling
2123 */
2124var Parser_1 = class Parser {
2125 constructor(options) {
2126 this.options = options || defaults$1;
2127 this.options.renderer = this.options.renderer || new Renderer$1();
2128 this.renderer = this.options.renderer;
2129 this.renderer.options = this.options;
2130 this.textRenderer = new TextRenderer$1();
2131 this.slugger = new Slugger$1();
2132 }
2133
2134 /**
2135 * Static Parse Method
2136 */
2137 static parse(tokens, options) {
2138 const parser = new Parser(options);
2139 return parser.parse(tokens);
2140 }
2141
2142 /**
2143 * Static Parse Inline Method
2144 */
2145 static parseInline(tokens, options) {
2146 const parser = new Parser(options);
2147 return parser.parseInline(tokens);
2148 }
2149
2150 /**
2151 * Parse Loop
2152 */
2153 parse(tokens, top = true) {
2154 let out = '',
2155 i,
2156 j,
2157 k,
2158 l2,
2159 l3,
2160 row,
2161 cell,
2162 header,
2163 body,
2164 token,
2165 ordered,
2166 start,
2167 loose,
2168 itemBody,
2169 item,
2170 checked,
2171 task,
2172 checkbox,
2173 ret;
2174
2175 const l = tokens.length;
2176 for (i = 0; i < l; i++) {
2177 token = tokens[i];
2178
2179 // Run any renderer extensions
2180 if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) {
2181 ret = this.options.extensions.renderers[token.type].call({ parser: this }, token);
2182 if (ret !== false || !['space', 'hr', 'heading', 'code', 'table', 'blockquote', 'list', 'html', 'paragraph', 'text'].includes(token.type)) {
2183 out += ret || '';
2184 continue;
2185 }
2186 }
2187
2188 switch (token.type) {
2189 case 'space': {
2190 continue;
2191 }
2192 case 'hr': {
2193 out += this.renderer.hr();
2194 continue;
2195 }
2196 case 'heading': {
2197 out += this.renderer.heading(
2198 this.parseInline(token.tokens),
2199 token.depth,
2200 unescape(this.parseInline(token.tokens, this.textRenderer)),
2201 this.slugger);
2202 continue;
2203 }
2204 case 'code': {
2205 out += this.renderer.code(token.text,
2206 token.lang,
2207 token.escaped);
2208 continue;
2209 }
2210 case 'table': {
2211 header = '';
2212
2213 // header
2214 cell = '';
2215 l2 = token.header.length;
2216 for (j = 0; j < l2; j++) {
2217 cell += this.renderer.tablecell(
2218 this.parseInline(token.header[j].tokens),
2219 { header: true, align: token.align[j] }
2220 );
2221 }
2222 header += this.renderer.tablerow(cell);
2223
2224 body = '';
2225 l2 = token.rows.length;
2226 for (j = 0; j < l2; j++) {
2227 row = token.rows[j];
2228
2229 cell = '';
2230 l3 = row.length;
2231 for (k = 0; k < l3; k++) {
2232 cell += this.renderer.tablecell(
2233 this.parseInline(row[k].tokens),
2234 { header: false, align: token.align[k] }
2235 );
2236 }
2237
2238 body += this.renderer.tablerow(cell);
2239 }
2240 out += this.renderer.table(header, body);
2241 continue;
2242 }
2243 case 'blockquote': {
2244 body = this.parse(token.tokens);
2245 out += this.renderer.blockquote(body);
2246 continue;
2247 }
2248 case 'list': {
2249 ordered = token.ordered;
2250 start = token.start;
2251 loose = token.loose;
2252 l2 = token.items.length;
2253
2254 body = '';
2255 for (j = 0; j < l2; j++) {
2256 item = token.items[j];
2257 checked = item.checked;
2258 task = item.task;
2259
2260 itemBody = '';
2261 if (item.task) {
2262 checkbox = this.renderer.checkbox(checked);
2263 if (loose) {
2264 if (item.tokens.length > 0 && item.tokens[0].type === 'paragraph') {
2265 item.tokens[0].text = checkbox + ' ' + item.tokens[0].text;
2266 if (item.tokens[0].tokens && item.tokens[0].tokens.length > 0 && item.tokens[0].tokens[0].type === 'text') {
2267 item.tokens[0].tokens[0].text = checkbox + ' ' + item.tokens[0].tokens[0].text;
2268 }
2269 } else {
2270 item.tokens.unshift({
2271 type: 'text',
2272 text: checkbox
2273 });
2274 }
2275 } else {
2276 itemBody += checkbox;
2277 }
2278 }
2279
2280 itemBody += this.parse(item.tokens, loose);
2281 body += this.renderer.listitem(itemBody, task, checked);
2282 }
2283
2284 out += this.renderer.list(body, ordered, start);
2285 continue;
2286 }
2287 case 'html': {
2288 // TODO parse inline content if parameter markdown=1
2289 out += this.renderer.html(token.text);
2290 continue;
2291 }
2292 case 'paragraph': {
2293 out += this.renderer.paragraph(this.parseInline(token.tokens));
2294 continue;
2295 }
2296 case 'text': {
2297 body = token.tokens ? this.parseInline(token.tokens) : token.text;
2298 while (i + 1 < l && tokens[i + 1].type === 'text') {
2299 token = tokens[++i];
2300 body += '\n' + (token.tokens ? this.parseInline(token.tokens) : token.text);
2301 }
2302 out += top ? this.renderer.paragraph(body) : body;
2303 continue;
2304 }
2305
2306 default: {
2307 const errMsg = 'Token with "' + token.type + '" type was not found.';
2308 if (this.options.silent) {
2309 console.error(errMsg);
2310 return;
2311 } else {
2312 throw new Error(errMsg);
2313 }
2314 }
2315 }
2316 }
2317
2318 return out;
2319 }
2320
2321 /**
2322 * Parse Inline Tokens
2323 */
2324 parseInline(tokens, renderer) {
2325 renderer = renderer || this.renderer;
2326 let out = '',
2327 i,
2328 token,
2329 ret;
2330
2331 const l = tokens.length;
2332 for (i = 0; i < l; i++) {
2333 token = tokens[i];
2334
2335 // Run any renderer extensions
2336 if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) {
2337 ret = this.options.extensions.renderers[token.type].call({ parser: this }, token);
2338 if (ret !== false || !['escape', 'html', 'link', 'image', 'strong', 'em', 'codespan', 'br', 'del', 'text'].includes(token.type)) {
2339 out += ret || '';
2340 continue;
2341 }
2342 }
2343
2344 switch (token.type) {
2345 case 'escape': {
2346 out += renderer.text(token.text);
2347 break;
2348 }
2349 case 'html': {
2350 out += renderer.html(token.text);
2351 break;
2352 }
2353 case 'link': {
2354 out += renderer.link(token.href, token.title, this.parseInline(token.tokens, renderer));
2355 break;
2356 }
2357 case 'image': {
2358 out += renderer.image(token.href, token.title, token.text);
2359 break;
2360 }
2361 case 'strong': {
2362 out += renderer.strong(this.parseInline(token.tokens, renderer));
2363 break;
2364 }
2365 case 'em': {
2366 out += renderer.em(this.parseInline(token.tokens, renderer));
2367 break;
2368 }
2369 case 'codespan': {
2370 out += renderer.codespan(token.text);
2371 break;
2372 }
2373 case 'br': {
2374 out += renderer.br();
2375 break;
2376 }
2377 case 'del': {
2378 out += renderer.del(this.parseInline(token.tokens, renderer));
2379 break;
2380 }
2381 case 'text': {
2382 out += renderer.text(token.text);
2383 break;
2384 }
2385 default: {
2386 const errMsg = 'Token with "' + token.type + '" type was not found.';
2387 if (this.options.silent) {
2388 console.error(errMsg);
2389 return;
2390 } else {
2391 throw new Error(errMsg);
2392 }
2393 }
2394 }
2395 }
2396 return out;
2397 }
2398};
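/*
 * Usage sketch (illustrative): Parser.parse consumes the token list produced
 * by the Lexer, so the lower-level equivalent of calling marked(src) is
 * roughly:
 *
 *   const tokens = marked.lexer('# Hello');
 *   const html = marked.parser(tokens);
 *   // with default options: '<h1 id="hello">Hello</h1>\n'
 */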
2399
2400const Lexer = Lexer_1;
2401const Parser = Parser_1;
2402const Tokenizer = Tokenizer_1;
2403const Renderer = Renderer_1;
2404const TextRenderer = TextRenderer_1;
2405const Slugger = Slugger_1;
2406const {
2407 merge,
2408 checkSanitizeDeprecation,
2409 escape
2410} = helpers;
2411const {
2412 getDefaults,
2413 changeDefaults,
2414 defaults
2415} = defaults$5.exports;
2416
2417/**
2418 * Marked
2419 */
2420function marked(src, opt, callback) {
2421 // throw error in case of non-string input
2422 if (typeof src === 'undefined' || src === null) {
2423 throw new Error('marked(): input parameter is undefined or null');
2424 }
2425 if (typeof src !== 'string') {
2426 throw new Error('marked(): input parameter is of type '
2427 + Object.prototype.toString.call(src) + ', string expected');
2428 }
2429
2430 if (typeof opt === 'function') {
2431 callback = opt;
2432 opt = null;
2433 }
2434
2435 opt = merge({}, marked.defaults, opt || {});
2436 checkSanitizeDeprecation(opt);
2437
2438 if (callback) {
2439 const highlight = opt.highlight;
2440 let tokens;
2441
2442 try {
2443 tokens = Lexer.lex(src, opt);
2444 } catch (e) {
2445 return callback(e);
2446 }
2447
2448 const done = function(err) {
2449 let out;
2450
2451 if (!err) {
2452 try {
2453 if (opt.walkTokens) {
2454 marked.walkTokens(tokens, opt.walkTokens);
2455 }
2456 out = Parser.parse(tokens, opt);
2457 } catch (e) {
2458 err = e;
2459 }
2460 }
2461
2462 opt.highlight = highlight;
2463
2464 return err
2465 ? callback(err)
2466 : callback(null, out);
2467 };
2468
2469 if (!highlight || highlight.length < 3) {
2470 return done();
2471 }
2472
2473 delete opt.highlight;
2474
2475 if (!tokens.length) return done();
2476
2477 let pending = 0;
2478 marked.walkTokens(tokens, function(token) {
2479 if (token.type === 'code') {
2480 pending++;
2481 setTimeout(() => {
2482 highlight(token.text, token.lang, function(err, code) {
2483 if (err) {
2484 return done(err);
2485 }
2486 if (code != null && code !== token.text) {
2487 token.text = code;
2488 token.escaped = true;
2489 }
2490
2491 pending--;
2492 if (pending === 0) {
2493 done();
2494 }
2495 });
2496 }, 0);
2497 }
2498 });
2499
2500 if (pending === 0) {
2501 done();
2502 }
2503
2504 return;
2505 }
2506
2507 try {
2508 const tokens = Lexer.lex(src, opt);
2509 if (opt.walkTokens) {
2510 marked.walkTokens(tokens, opt.walkTokens);
2511 }
2512 return Parser.parse(tokens, opt);
2513 } catch (e) {
2514 e.message += '\nPlease report this to https://github.com/markedjs/marked.';
2515 if (opt.silent) {
2516 return '<p>An error occurred:</p><pre>'
2517 + escape(e.message + '', true)
2518 + '</pre>';
2519 }
2520 throw e;
2521 }
2522}
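/*
 * Usage sketch (illustrative): the synchronous form returns an HTML string;
 * the callback form is only needed when an asynchronous `highlight` function
 * (one that takes a callback, i.e. arity 3) is configured.
 *
 *   marked('# Marked\n\nRendered by **marked**.');
 *   // => '<h1 id="marked">Marked</h1>\n<p>Rendered by <strong>marked</strong>.</p>\n'
 *
 *   marked(src, { highlight: (code, lang, cb) => cb(null, code) }, (err, html) => {
 *     if (err) throw err;
 *     console.log(html);
 *   });
 */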
2523
2524/**
2525 * Options
2526 */
2527
2528marked.options =
2529marked.setOptions = function(opt) {
2530 merge(marked.defaults, opt);
2531 changeDefaults(marked.defaults);
2532 return marked;
2533};
2534
2535marked.getDefaults = getDefaults;
2536
2537marked.defaults = defaults;
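/*
 * Usage sketch (illustrative): setOptions merges into the shared defaults
 * used by every subsequent call, e.g.
 *
 *   marked.setOptions({ gfm: true, breaks: false, headerIds: false });
 */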
2538
2539/**
2540 * Use Extension
2541 */
2542
2543marked.use = function(...args) {
2544 const opts = merge({}, ...args);
2545 const extensions = marked.defaults.extensions || { renderers: {}, childTokens: {} };
2546 let hasExtensions;
2547
2548 args.forEach((pack) => {
2549 // ==-- Parse "addon" extensions --== //
2550 if (pack.extensions) {
2551 hasExtensions = true;
2552 pack.extensions.forEach((ext) => {
2553 if (!ext.name) {
2554 throw new Error('extension name required');
2555 }
2556 if (ext.renderer) { // Renderer extensions
2557 const prevRenderer = extensions.renderers ? extensions.renderers[ext.name] : null;
2558 if (prevRenderer) {
2559 // Replace extension with a function that runs the new extension but falls back to the previous one if it returns false
2560 extensions.renderers[ext.name] = function(...args) {
2561 let ret = ext.renderer.apply(this, args);
2562 if (ret === false) {
2563 ret = prevRenderer.apply(this, args);
2564 }
2565 return ret;
2566 };
2567 } else {
2568 extensions.renderers[ext.name] = ext.renderer;
2569 }
2570 }
2571 if (ext.tokenizer) { // Tokenizer Extensions
2572 if (!ext.level || (ext.level !== 'block' && ext.level !== 'inline')) {
2573 throw new Error("extension level must be 'block' or 'inline'");
2574 }
2575 if (extensions[ext.level]) {
2576 extensions[ext.level].unshift(ext.tokenizer);
2577 } else {
2578 extensions[ext.level] = [ext.tokenizer];
2579 }
2580 if (ext.start) { // Function to check for start of token
2581 if (ext.level === 'block') {
2582 if (extensions.startBlock) {
2583 extensions.startBlock.push(ext.start);
2584 } else {
2585 extensions.startBlock = [ext.start];
2586 }
2587 } else if (ext.level === 'inline') {
2588 if (extensions.startInline) {
2589 extensions.startInline.push(ext.start);
2590 } else {
2591 extensions.startInline = [ext.start];
2592 }
2593 }
2594 }
2595 }
2596 if (ext.childTokens) { // Child tokens to be visited by walkTokens
2597 extensions.childTokens[ext.name] = ext.childTokens;
2598 }
2599 });
2600 }
2601
2602 // ==-- Parse "overwrite" extensions --== //
2603 if (pack.renderer) {
2604 const renderer = marked.defaults.renderer || new Renderer();
2605 for (const prop in pack.renderer) {
2606 const prevRenderer = renderer[prop];
2607 // Replace renderer with func to run extension, but fall back if false
2608 renderer[prop] = (...args) => {
2609 let ret = pack.renderer[prop].apply(renderer, args);
2610 if (ret === false) {
2611 ret = prevRenderer.apply(renderer, args);
2612 }
2613 return ret;
2614 };
2615 }
2616 opts.renderer = renderer;
2617 }
2618 if (pack.tokenizer) {
2619 const tokenizer = marked.defaults.tokenizer || new Tokenizer();
2620 for (const prop in pack.tokenizer) {
2621 const prevTokenizer = tokenizer[prop];
2622 // Replace tokenizer with func to run extension, but fall back if false
2623 tokenizer[prop] = (...args) => {
2624 let ret = pack.tokenizer[prop].apply(tokenizer, args);
2625 if (ret === false) {
2626 ret = prevTokenizer.apply(tokenizer, args);
2627 }
2628 return ret;
2629 };
2630 }
2631 opts.tokenizer = tokenizer;
2632 }
2633
2634 // ==-- Parse WalkTokens extensions --== //
2635 if (pack.walkTokens) {
2636 const walkTokens = marked.defaults.walkTokens;
2637 opts.walkTokens = (token) => {
2638 pack.walkTokens.call(this, token);
2639 if (walkTokens) {
2640 walkTokens(token);
2641 }
2642 };
2643 }
2644
2645 if (hasExtensions) {
2646 opts.extensions = extensions;
2647 }
2648
2649 marked.setOptions(opts);
2650 });
2651};
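/*
 * Usage sketch (illustrative): a pack passed to marked.use may carry renderer
 * and tokenizer overrides, a walkTokens function, and an `extensions` array of
 * custom tokens. The "underline" syntax below is hypothetical:
 *
 *   marked.use({
 *     extensions: [{
 *       name: 'underline',
 *       level: 'inline',
 *       start(src) { return src.indexOf('~'); },
 *       tokenizer(src) {
 *         const match = /^~([^~\n]+)~/.exec(src);
 *         if (match) {
 *           return { type: 'underline', raw: match[0], text: match[1] };
 *         }
 *       },
 *       renderer(token) { return '<u>' + token.text + '</u>'; }
 *     }]
 *   });
 */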
2652
2653/**
2654 * Run callback for every token
2655 */
2656
2657marked.walkTokens = function(tokens, callback) {
2658 for (const token of tokens) {
2659 callback(token);
2660 switch (token.type) {
2661 case 'table': {
2662 for (const cell of token.header) {
2663 marked.walkTokens(cell.tokens, callback);
2664 }
2665 for (const row of token.rows) {
2666 for (const cell of row) {
2667 marked.walkTokens(cell.tokens, callback);
2668 }
2669 }
2670 break;
2671 }
2672 case 'list': {
2673 marked.walkTokens(token.items, callback);
2674 break;
2675 }
2676 default: {
2677 if (marked.defaults.extensions && marked.defaults.extensions.childTokens && marked.defaults.extensions.childTokens[token.type]) { // Walk any extensions
2678 marked.defaults.extensions.childTokens[token.type].forEach(function(childTokens) {
2679 marked.walkTokens(token[childTokens], callback);
2680 });
2681 } else if (token.tokens) {
2682 marked.walkTokens(token.tokens, callback);
2683 }
2684 }
2685 }
2686 }
2687};
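/*
 * Usage sketch (illustrative): walkTokens visits every token, including table
 * cells, list items and extension child tokens, so it can post-process the
 * token tree before rendering:
 *
 *   marked.use({
 *     walkTokens(token) {
 *       if (token.type === 'heading') {
 *         token.depth += 1; // demote every heading by one level
 *       }
 *     }
 *   });
 */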
2688
2689/**
2690 * Parse Inline
2691 */
2692marked.parseInline = function(src, opt) {
2693 // throw error in case of non-string input
2694 if (typeof src === 'undefined' || src === null) {
2695 throw new Error('marked.parseInline(): input parameter is undefined or null');
2696 }
2697 if (typeof src !== 'string') {
2698 throw new Error('marked.parseInline(): input parameter is of type '
2699 + Object.prototype.toString.call(src) + ', string expected');
2700 }
2701
2702 opt = merge({}, marked.defaults, opt || {});
2703 checkSanitizeDeprecation(opt);
2704
2705 try {
2706 const tokens = Lexer.lexInline(src, opt);
2707 if (opt.walkTokens) {
2708 marked.walkTokens(tokens, opt.walkTokens);
2709 }
2710 return Parser.parseInline(tokens, opt);
2711 } catch (e) {
2712 e.message += '\nPlease report this to https://github.com/markedjs/marked.';
2713 if (opt.silent) {
2714 return '<p>An error occurred:</p><pre>'
2715 + escape(e.message + '', true)
2716 + '</pre>';
2717 }
2718 throw e;
2719 }
2720};
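/*
 * Usage sketch (illustrative): parseInline runs only the inline rules, so the
 * result is not wrapped in a block element:
 *
 *   marked.parseInline('**strong** and `code`');
 *   // => '<strong>strong</strong> and <code>code</code>'
 */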
2721
2722/**
2723 * Expose
2724 */
2725
2726marked.Parser = Parser;
2727marked.parser = Parser.parse;
2728
2729marked.Renderer = Renderer;
2730marked.TextRenderer = TextRenderer;
2731
2732marked.Lexer = Lexer;
2733marked.lexer = Lexer.lex;
2734
2735marked.Tokenizer = Tokenizer;
2736
2737marked.Slugger = Slugger;
2738
2739marked.parse = marked;
2740
2741var marked_1 = marked;
2742
2743export { marked_1 as default };
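
/*
 * Usage sketch (illustrative; the module specifier is an assumption and
 * depends on how the package is resolved): this ESM build exposes marked as
 * the default export.
 *
 *   import marked from 'marked';
 *   document.body.innerHTML = marked('# hello');
 */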