/**
* marked - a markdown parser
* Copyright (c) 2011-2022, Christopher Jeffrey. (MIT Licensed)
* https://github.com/markedjs/marked
*/
/**
* DO NOT EDIT THIS FILE
* The code in this file is generated from files in ./src/
*/
/**
 * Build a fresh options object containing marked's factory defaults.
 * A new object is returned on every call so callers may mutate it freely
 * without affecting other consumers.
 * @returns {object} default option set
 */
function getDefaults() {
  const factoryDefaults = {
    baseUrl: null,
    breaks: false,
    extensions: null,
    gfm: true,
    headerIds: true,
    headerPrefix: "",
    highlight: null,
    langPrefix: "language-",
    mangle: true,
    pedantic: false,
    renderer: null,
    sanitize: false,
    sanitizer: null,
    silent: false,
    smartLists: false,
    smartypants: false,
    tokenizer: null,
    walkTokens: null,
    xhtml: false,
  }
  return factoryDefaults
}
// Mutable module-level options object shared by the lexer/parser/renderer.
let defaults = getDefaults()
// Replace the shared defaults object wholesale (called by marked.setOptions).
function changeDefaults(newDefaults) {
defaults = newDefaults
}
/**
* Helpers
*/
const escapeTest = /[&<>"']/
const escapeReplace = /[&<>"']/g
// "NoEncode" variants leave already-encoded entities (e.g. &amp;) untouched:
// `&` only matches when NOT followed by something that looks like an entity.
const escapeTestNoEncode = /[<>"']|&(?!#?\w+;)/
const escapeReplaceNoEncode = /[<>"']|&(?!#?\w+;)/g
// Character -> HTML entity table. NOTE(review): this table was corrupted by
// entity-stripping in this file (each character mapped to itself, which made
// escape() a no-op and reintroduced an XSS vector, and the `"` entry was a
// syntax error); restored to the upstream marked values.
const escapeReplacements = {
  "&": "&amp;",
  "<": "&lt;",
  ">": "&gt;",
  '"': "&quot;",
  "'": "&#39;",
}
const getEscapeReplacement = ch => escapeReplacements[ch]
/**
 * Escape HTML special characters for safe embedding in markup.
 * @param {string} html - raw text
 * @param {boolean} [encode] - when true, escape `&` unconditionally; when
 *   falsy, leave existing entities (e.g. `&amp;`) as-is.
 * @returns {string} escaped text (the original string if nothing to escape)
 */
function escape(html, encode) {
  if (encode) {
    if (escapeTest.test(html)) {
      return html.replace(escapeReplace, getEscapeReplacement)
    }
  } else {
    if (escapeTestNoEncode.test(html)) {
      return html.replace(escapeReplaceNoEncode, getEscapeReplacement)
    }
  }
  return html
}
const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/gi
/**
 * Reverse HTML entity encoding: decimal (&#65;) and hex (&#x41;) references
 * become their characters, `&colon;` becomes ":", and any other named entity
 * is dropped entirely.
 * @param {string} html
 * @returns {string}
 */
function unescape(html) {
  // explicitly match decimal, hex, and named HTML entities
  return html.replace(unescapeTest, (_, entity) => {
    const name = entity.toLowerCase()
    if (name === "colon") {
      return ":"
    }
    if (name.charAt(0) !== "#") {
      // unrecognized named entity: strip it
      return ""
    }
    const isHex = name.charAt(1) === "x"
    const codePoint = isHex
      ? parseInt(name.substring(2), 16)
      : +name.substring(1)
    return String.fromCharCode(codePoint)
  })
}
// Matches a `^` anchor that does not follow `[` (i.e. not a negated class).
const caret = /(^|[^\[])\^/g
/**
 * Fluent builder for composing regexes from templates. `replace` substitutes
 * a placeholder with a sub-pattern (stripping its `^` anchor so it can be
 * embedded mid-pattern); `getRegex` compiles the final source.
 * @param {string | RegExp} regex - template pattern
 * @param {string} [opt] - regex flags for the compiled result
 */
function edit(regex, opt) {
  let source = typeof regex === "string" ? regex : regex.source
  const flags = opt || ""
  const builder = {
    replace: (name, val) => {
      let sub = val.source || val
      sub = sub.replace(caret, "$1")
      source = source.replace(name, sub)
      return builder
    },
    getRegex: () => new RegExp(source, flags),
  }
  return builder
}
const nonWordAndColonTest = /[^\w:]/g
const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i
/**
 * Sanitize, resolve, and percent-encode a link destination.
 * @param {boolean} sanitize - when true, reject javascript:/vbscript:/data: protocols
 * @param {string} base - base URL to resolve relative hrefs against (may be falsy)
 * @param {string} href - raw destination
 * @returns {string|null} encoded URL, or null when rejected/unencodable
 */
function cleanUrl(sanitize, base, href) {
  if (sanitize) {
    let prot
    try {
      // Normalize: decode entities/percent-escapes, drop non-word chars,
      // lowercase — defeats obfuscated protocols like "java\tscript:".
      prot = decodeURIComponent(unescape(href))
        .replace(nonWordAndColonTest, "")
        .toLowerCase()
    } catch (e) {
      return null
    }
    const banned = ["javascript:", "vbscript:", "data:"]
    if (banned.some(p => prot.indexOf(p) === 0)) {
      return null
    }
  }
  if (base && !originIndependentUrl.test(href)) {
    href = resolveUrl(base, href)
  }
  try {
    // encodeURI double-encodes existing % signs; undo that.
    return encodeURI(href).replace(/%25/g, "%")
  } catch (e) {
    return null
  }
}
// Cache of normalized bases, keyed by " " + base to dodge prototype keys.
const baseUrls = {}
const justDomain = /^[^:]+:\/*[^/]*$/
const protocol = /^([^:]+:)[\s\S]*$/
const domain = /^([^:]+:\/*[^/]*)[\s\S]*$/
/**
 * Resolve `href` relative to `base`, handling protocol-relative (`//…`) and
 * root-relative (`/…`) destinations.
 * @param {string} base
 * @param {string} href
 * @returns {string}
 */
function resolveUrl(base, href) {
  const cacheKey = " " + base
  if (!baseUrls[cacheKey]) {
    // Everything after the last slash of the path component can be ignored
    // (RFC 3986 §3), but a bare domain needs a trailing slash added.
    baseUrls[cacheKey] = justDomain.test(base)
      ? base + "/"
      : rtrim(base, "/", true)
  }
  const resolvedBase = baseUrls[cacheKey]
  const relativeBase = resolvedBase.indexOf(":") === -1
  if (href.substring(0, 2) === "//") {
    // Protocol-relative href: keep it as-is for a relative base,
    // otherwise prepend the base's protocol.
    return relativeBase ? href : resolvedBase.replace(protocol, "$1") + href
  }
  if (href.charAt(0) === "/") {
    // Root-relative href: keep for relative base, else prepend protocol+domain.
    return relativeBase ? href : resolvedBase.replace(domain, "$1") + href
  }
  return resolvedBase + href
}
// A rule that never matches: exec() always yields undefined.
const noopTest = { exec: function noopTest() {} }
/**
 * Shallow-copy own enumerable properties of each source object onto `obj`
 * (later sources win) and return `obj`.
 * @param {object} obj - mutated target
 * @param {...object} sources
 * @returns {object} the target object
 */
function merge(obj, ...sources) {
  for (const source of sources) {
    for (const key in source) {
      if (Object.prototype.hasOwnProperty.call(source, key)) {
        obj[key] = source[key]
      }
    }
  }
  return obj
}
/**
 * Split a table row into its cells, honoring escaped pipes (`\|`) and
 * padding/truncating to `count` cells when given.
 * @param {string} tableRow
 * @param {number} [count] - expected number of cells
 * @returns {string[]}
 */
function splitCells(tableRow, count) {
  // Insert a marker space before every unescaped pipe so the split below can
  // tell cell delimiters apart from escaped pipes.
  const row = tableRow.replace(/\|/g, (match, offset, str) => {
    let backslashes = 0
    let pos = offset
    while (--pos >= 0 && str[pos] === "\\") backslashes++
    // An odd number of preceding backslashes means this pipe is escaped:
    // leave it untouched so it survives the split.
    return backslashes % 2 === 1 ? "|" : " |"
  })
  const cells = row.split(/ \|/)
  // A row without a leading/trailing pipe must not yield empty edge cells.
  if (!cells[0].trim()) {
    cells.shift()
  }
  if (cells.length > 0 && !cells[cells.length - 1].trim()) {
    cells.pop()
  }
  if (cells.length > count) {
    cells.splice(count)
  } else {
    while (cells.length < count) cells.push("")
  }
  // Leading/trailing whitespace is ignored per the GFM spec; un-escape pipes.
  return cells.map(cell => cell.trim().replace(/\\\|/g, "|"))
}
/**
 * Remove trailing 'c's. Equivalent to str.replace(/c*$/, ''), but written as
 * a scan because /c*$/ is vulnerable to ReDoS.
 *
 * @param {string} str
 * @param {string} c - single character to strip
 * @param {boolean} [invert] - strip the suffix of NON-c chars instead
 * @returns {string}
 */
function rtrim(str, c, invert) {
  if (str.length === 0) {
    return ""
  }
  // Walk backwards from the end while characters match the strip condition.
  let end = str.length
  while (end > 0) {
    const ch = str.charAt(end - 1)
    const matches = ch === c
    if (invert ? matches : !matches) {
      break
    }
    end--
  }
  return str.slice(0, end)
}
/**
 * Find the index of the first unmatched closing bracket in `str`.
 * Backslash-escaped characters are skipped; nested bracket pairs are balanced.
 * @param {string} str
 * @param {string} b - two-character string: opening then closing bracket
 * @returns {number} index of the unmatched closer, or -1 if none
 */
function findClosingBracket(str, b) {
  const [open, close] = b
  if (str.indexOf(close) === -1) {
    return -1
  }
  let depth = 0
  for (let i = 0; i < str.length; i++) {
    const ch = str[i]
    if (ch === "\\") {
      i++ // skip the escaped character
    } else if (ch === open) {
      depth++
    } else if (ch === close) {
      depth--
      if (depth < 0) {
        return i
      }
    }
  }
  return -1
}
/**
 * Warn (once per call) when the deprecated `sanitize` option is enabled,
 * unless `silent` suppresses output.
 * @param {object} opt - merged marked options
 */
function checkSanitizeDeprecation(opt) {
  if (!opt || !opt.sanitize || opt.silent) {
    return
  }
  console.warn(
    "marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options"
  )
}
// originally copied from https://stackoverflow.com/a/5450113/806777;
// simplified to the built-in String.prototype.repeat, which implements the
// same O(log n) doubling strategy natively.
/**
 * Repeat `pattern` `count` times.
 * @param {string} pattern
 * @param {number} count - assumed to be an integer; values < 1 yield ""
 * @returns {string}
 */
function repeatString(pattern, count) {
  if (count < 1) {
    return ""
  }
  return pattern.repeat(count)
}
/**
 * Build a `link` or `image` token from a regex capture and resolved link data.
 * A leading "!" in the match means an image; otherwise a link whose label is
 * inline-tokenized (with inLink set so nested links are rejected).
 * @param {Array} cap - regex capture: cap[0] full match, cap[1] label
 * @param {{href: string, title: ?string}} link
 * @param {string} raw - raw matched source
 * @param {object} lexer - lexer providing state and inlineTokens()
 */
function outputLink(cap, link, raw, lexer) {
  const href = link.href
  const title = link.title ? escape(link.title) : null
  // Un-escape bracket characters in the label.
  const text = cap[1].replace(/\\([\[\]])/g, "$1")
  const isImage = cap[0].charAt(0) === "!"
  if (isImage) {
    return {
      type: "image",
      raw,
      href,
      title,
      text: escape(text),
    }
  }
  lexer.state.inLink = true
  const token = {
    type: "link",
    raw,
    href,
    title,
    text,
    tokens: lexer.inlineTokens(text, []),
  }
  lexer.state.inLink = false
  return token
}
/**
 * Strip the fence's leading indentation from each line of a fenced code
 * block's body, so indented fences don't leave their indent in the code.
 * Lines indented less than the fence are left untouched.
 * @param {string} raw - the raw fenced block (used to measure the indent)
 * @param {string} text - the code body
 * @returns {string}
 */
function indentCodeCompensation(raw, text) {
  const fenceIndentMatch = raw.match(/^(\s+)(?:```)/)
  if (fenceIndentMatch === null) {
    // Fence was not indented: nothing to compensate.
    return text
  }
  const fenceIndent = fenceIndentMatch[1]
  const adjusted = text.split("\n").map(line => {
    const lineIndentMatch = line.match(/^\s+/)
    if (lineIndentMatch === null) {
      return line
    }
    const [lineIndent] = lineIndentMatch
    return lineIndent.length >= fenceIndent.length
      ? line.slice(fenceIndent.length)
      : line
  })
  return adjusted.join("\n")
}
/**
* Tokenizer
*/
class Tokenizer {
constructor(options) {
this.options = options || defaults
}
space(src) {
const cap = this.rules.block.newline.exec(src)
if (cap && cap[0].length > 0) {
return {
type: "space",
raw: cap[0],
}
}
}
code(src) {
const cap = this.rules.block.code.exec(src)
if (cap) {
const text = cap[0].replace(/^ {1,4}/gm, "")
return {
type: "code",
raw: cap[0],
codeBlockStyle: "indented",
text: !this.options.pedantic ? rtrim(text, "\n") : text,
}
}
}
fences(src) {
const cap = this.rules.block.fences.exec(src)
if (cap) {
const raw = cap[0]
const text = indentCodeCompensation(raw, cap[3] || "")
return {
type: "code",
raw,
lang: cap[2] ? cap[2].trim() : cap[2],
text,
}
}
}
heading(src) {
const cap = this.rules.block.heading.exec(src)
if (cap) {
let text = cap[2].trim()
// remove trailing #s
if (/#$/.test(text)) {
const trimmed = rtrim(text, "#")
if (this.options.pedantic) {
text = trimmed.trim()
} else if (!trimmed || / $/.test(trimmed)) {
// CommonMark requires space before trailing #s
text = trimmed.trim()
}
}
const token = {
type: "heading",
raw: cap[0],
depth: cap[1].length,
text,
tokens: [],
}
this.lexer.inline(token.text, token.tokens)
return token
}
}
hr(src) {
const cap = this.rules.block.hr.exec(src)
if (cap) {
return {
type: "hr",
raw: cap[0],
}
}
}
blockquote(src) {
const cap = this.rules.block.blockquote.exec(src)
if (cap) {
const text = cap[0].replace(/^ *>[ \t]?/gm, "")
return {
type: "blockquote",
raw: cap[0],
tokens: this.lexer.blockTokens(text, []),
text,
}
}
}
list(src) {
let cap = this.rules.block.list.exec(src)
if (cap) {
let raw,
istask,
ischecked,
indent,
i,
blankLine,
endsWithBlankLine,
line,
nextLine,
rawLine,
itemContents,
endEarly
let bull = cap[1].trim()
const isordered = bull.length > 1
const list = {
type: "list",
raw: "",
ordered: isordered,
start: isordered ? +bull.slice(0, -1) : "",
loose: false,
items: [],
}
bull = isordered ? `\\d{1,9}\\${bull.slice(-1)}` : `\\${bull}`
if (this.options.pedantic) {
bull = isordered ? bull : "[*+-]"
}
// Get next list item
const itemRegex = new RegExp(
`^( {0,3}${bull})((?:[\t ][^\\n]*)?(?:\\n|$))`
)
// Check if current bullet point can start a new List Item
while (src) {
endEarly = false
if (!(cap = itemRegex.exec(src))) {
break
}
if (this.rules.block.hr.test(src)) {
// End list if bullet was actually HR (possibly move into itemRegex?)
break
}
raw = cap[0]
src = src.substring(raw.length)
line = cap[2].split("\n", 1)[0]
nextLine = src.split("\n", 1)[0]
if (this.options.pedantic) {
indent = 2
itemContents = line.trimLeft()
} else {
indent = cap[2].search(/[^ ]/) // Find first non-space char
indent = indent > 4 ? 1 : indent // Treat indented code blocks (> 4 spaces) as having only 1 indent
itemContents = line.slice(indent)
indent += cap[1].length
}
blankLine = false
if (!line && /^ *$/.test(nextLine)) {
// Items begin with at most one blank line
raw += nextLine + "\n"
src = src.substring(nextLine.length + 1)
endEarly = true
}
if (!endEarly) {
const nextBulletRegex = new RegExp(
`^ {0,${Math.min(
3,
indent - 1
)}}(?:[*+-]|\\d{1,9}[.)])((?: [^\\n]*)?(?:\\n|$))`
)
const hrRegex = new RegExp(
`^ {0,${Math.min(
3,
indent - 1
)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`
)
// Check if following lines should be included in List Item
while (src) {
rawLine = src.split("\n", 1)[0]
line = rawLine
// Re-align to follow commonmark nesting rules
if (this.options.pedantic) {
line = line.replace(/^ {1,4}(?=( {4})*[^ ])/g, " ")
}
// End list item if found start of new bullet
if (nextBulletRegex.test(line)) {
break
}
// Horizontal rule found
if (hrRegex.test(src)) {
break
}
if (line.search(/[^ ]/) >= indent || !line.trim()) {
// Dedent if possible
itemContents += "\n" + line.slice(indent)
} else if (!blankLine) {
// Until blank line, item doesn't need indentation
itemContents += "\n" + line
} else {
// Otherwise, improper indentation ends this item
break
}
if (!blankLine && !line.trim()) {
// Check if current line is blank
blankLine = true
}
raw += rawLine + "\n"
src = src.substring(rawLine.length + 1)
}
}
if (!list.loose) {
// If the previous item ended with a blank line, the list is loose
if (endsWithBlankLine) {
list.loose = true
} else if (/\n *\n *$/.test(raw)) {
endsWithBlankLine = true
}
}
// Check for task list items
if (this.options.gfm) {
istask = /^\[[ xX]\] /.exec(itemContents)
if (istask) {
ischecked = istask[0] !== "[ ] "
itemContents = itemContents.replace(/^\[[ xX]\] +/, "")
}
}
list.items.push({
type: "list_item",
raw,
task: !!istask,
checked: ischecked,
loose: false,
text: itemContents,
})
list.raw += raw
}
// Do not consume newlines at end of final item. Alternatively, make itemRegex *start* with any newlines to simplify/speed up endsWithBlankLine logic
list.items[list.items.length - 1].raw = raw.trimRight()
list.items[list.items.length - 1].text = itemContents.trimRight()
list.raw = list.raw.trimRight()
const l = list.items.length
// Item child tokens handled here at end because we needed to have the final item to trim it first
for (i = 0; i < l; i++) {
this.lexer.state.top = false
list.items[i].tokens = this.lexer.blockTokens(list.items[i].text, [])
const spacers = list.items[i].tokens.filter(t => t.type === "space")
const hasMultipleLineBreaks = spacers.every(t => {
const chars = t.raw.split("")
let lineBreaks = 0
for (const char of chars) {
if (char === "\n") {
lineBreaks += 1
}
if (lineBreaks > 1) {
return true
}
}
return false
})
if (!list.loose && spacers.length && hasMultipleLineBreaks) {
// Having a single line break doesn't mean a list is loose. A single line break is terminating the last list item
list.loose = true
list.items[i].loose = true
}
}
return list
}
}
html(src) {
const cap = this.rules.block.html.exec(src)
if (cap) {
const token = {
type: "html",
raw: cap[0],
pre:
!this.options.sanitizer &&
(cap[1] === "pre" || cap[1] === "script" || cap[1] === "style"),
text: cap[0],
}
if (this.options.sanitize) {
token.type = "paragraph"
token.text = this.options.sanitizer
? this.options.sanitizer(cap[0])
: escape(cap[0])
token.tokens = []
this.lexer.inline(token.text, token.tokens)
}
return token
}
}
def(src) {
const cap = this.rules.block.def.exec(src)
if (cap) {
if (cap[3]) cap[3] = cap[3].substring(1, cap[3].length - 1)
const tag = cap[1].toLowerCase().replace(/\s+/g, " ")
return {
type: "def",
tag,
raw: cap[0],
href: cap[2],
title: cap[3],
}
}
}
table(src) {
const cap = this.rules.block.table.exec(src)
if (cap) {
const item = {
type: "table",
header: splitCells(cap[1]).map(c => {
return { text: c }
}),
align: cap[2].replace(/^ *|\| *$/g, "").split(/ *\| */),
rows:
cap[3] && cap[3].trim()
? cap[3].replace(/\n[ \t]*$/, "").split("\n")
: [],
}
if (item.header.length === item.align.length) {
item.raw = cap[0]
let l = item.align.length
let i, j, k, row
for (i = 0; i < l; i++) {
if (/^ *-+: *$/.test(item.align[i])) {
item.align[i] = "right"
} else if (/^ *:-+: *$/.test(item.align[i])) {
item.align[i] = "center"
} else if (/^ *:-+ *$/.test(item.align[i])) {
item.align[i] = "left"
} else {
item.align[i] = null
}
}
l = item.rows.length
for (i = 0; i < l; i++) {
item.rows[i] = splitCells(item.rows[i], item.header.length).map(c => {
return { text: c }
})
}
// parse child tokens inside headers and cells
// header child tokens
l = item.header.length
for (j = 0; j < l; j++) {
item.header[j].tokens = []
this.lexer.inline(item.header[j].text, item.header[j].tokens)
}
// cell child tokens
l = item.rows.length
for (j = 0; j < l; j++) {
row = item.rows[j]
for (k = 0; k < row.length; k++) {
row[k].tokens = []
this.lexer.inline(row[k].text, row[k].tokens)
}
}
return item
}
}
}
lheading(src) {
const cap = this.rules.block.lheading.exec(src)
if (cap) {
const token = {
type: "heading",
raw: cap[0],
depth: cap[2].charAt(0) === "=" ? 1 : 2,
text: cap[1],
tokens: [],
}
this.lexer.inline(token.text, token.tokens)
return token
}
}
paragraph(src) {
const cap = this.rules.block.paragraph.exec(src)
if (cap) {
const token = {
type: "paragraph",
raw: cap[0],
text:
cap[1].charAt(cap[1].length - 1) === "\n"
? cap[1].slice(0, -1)
: cap[1],
tokens: [],
}
this.lexer.inline(token.text, token.tokens)
return token
}
}
text(src) {
const cap = this.rules.block.text.exec(src)
if (cap) {
const token = {
type: "text",
raw: cap[0],
text: cap[0],
tokens: [],
}
this.lexer.inline(token.text, token.tokens)
return token
}
}
escape(src) {
const cap = this.rules.inline.escape.exec(src)
if (cap) {
return {
type: "escape",
raw: cap[0],
text: escape(cap[1]),
}
}
}
tag(src) {
const cap = this.rules.inline.tag.exec(src)
if (cap) {
if (!this.lexer.state.inLink && /^/i.test(cap[0])) {
this.lexer.state.inLink = false
}
if (
!this.lexer.state.inRawBlock &&
/^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])
) {
this.lexer.state.inRawBlock = true
} else if (
this.lexer.state.inRawBlock &&
/^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])
) {
this.lexer.state.inRawBlock = false
}
return {
type: this.options.sanitize ? "text" : "html",
raw: cap[0],
inLink: this.lexer.state.inLink,
inRawBlock: this.lexer.state.inRawBlock,
text: this.options.sanitize
? this.options.sanitizer
? this.options.sanitizer(cap[0])
: escape(cap[0])
: cap[0],
}
}
}
link(src) {
const cap = this.rules.inline.link.exec(src)
if (cap) {
const trimmedUrl = cap[2].trim()
if (!this.options.pedantic && /^$/.test(trimmedUrl)) {
return
}
// ending angle bracket cannot be escaped
const rtrimSlash = rtrim(trimmedUrl.slice(0, -1), "\\")
if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) {
return
}
} else {
// find closing parenthesis
const lastParenIndex = findClosingBracket(cap[2], "()")
if (lastParenIndex > -1) {
const start = cap[0].indexOf("!") === 0 ? 5 : 4
const linkLen = start + cap[1].length + lastParenIndex
cap[2] = cap[2].substring(0, lastParenIndex)
cap[0] = cap[0].substring(0, linkLen).trim()
cap[3] = ""
}
}
let href = cap[2]
let title = ""
if (this.options.pedantic) {
// split pedantic href and title
const link = /^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(href)
if (link) {
href = link[1]
title = link[3]
}
} else {
title = cap[3] ? cap[3].slice(1, -1) : ""
}
href = href.trim()
if (/^$/.test(trimmedUrl)) {
// pedantic allows starting angle bracket without ending angle bracket
href = href.slice(1)
} else {
href = href.slice(1, -1)
}
}
return outputLink(
cap,
{
href: href ? href.replace(this.rules.inline._escapes, "$1") : href,
title: title
? title.replace(this.rules.inline._escapes, "$1")
: title,
},
cap[0],
this.lexer
)
}
}
reflink(src, links) {
let cap
if (
(cap = this.rules.inline.reflink.exec(src)) ||
(cap = this.rules.inline.nolink.exec(src))
) {
let link = (cap[2] || cap[1]).replace(/\s+/g, " ")
link = links[link.toLowerCase()]
if (!link || !link.href) {
const text = cap[0].charAt(0)
return {
type: "text",
raw: text,
text,
}
}
return outputLink(cap, link, cap[0], this.lexer)
}
}
emStrong(src, maskedSrc, prevChar = "") {
let match = this.rules.inline.emStrong.lDelim.exec(src)
if (!match) return
// _ can't be between two alphanumerics. \p{L}\p{N} includes non-english alphabet/numbers as well
if (match[3] && prevChar.match(/[\p{L}\p{N}]/u)) return
const nextChar = match[1] || match[2] || ""
if (
!nextChar ||
(nextChar &&
(prevChar === "" || this.rules.inline.punctuation.exec(prevChar)))
) {
const lLength = match[0].length - 1
let rDelim,
rLength,
delimTotal = lLength,
midDelimTotal = 0
const endReg =
match[0][0] === "*"
? this.rules.inline.emStrong.rDelimAst
: this.rules.inline.emStrong.rDelimUnd
endReg.lastIndex = 0
// Clip maskedSrc to same section of string as src (move to lexer?)
maskedSrc = maskedSrc.slice(-1 * src.length + lLength)
while ((match = endReg.exec(maskedSrc)) != null) {
rDelim =
match[1] || match[2] || match[3] || match[4] || match[5] || match[6]
if (!rDelim) continue // skip single * in __abc*abc__
rLength = rDelim.length
if (match[3] || match[4]) {
// found another Left Delim
delimTotal += rLength
continue
} else if (match[5] || match[6]) {
// either Left or Right Delim
if (lLength % 3 && !((lLength + rLength) % 3)) {
midDelimTotal += rLength
continue // CommonMark Emphasis Rules 9-10
}
}
delimTotal -= rLength
if (delimTotal > 0) continue // Haven't found enough closing delimiters
// Remove extra characters. *a*** -> *a*
rLength = Math.min(rLength, rLength + delimTotal + midDelimTotal)
// Create `em` if smallest delimiter has odd char count. *a***
if (Math.min(lLength, rLength) % 2) {
const text = src.slice(1, lLength + match.index + rLength)
return {
type: "em",
raw: src.slice(0, lLength + match.index + rLength + 1),
text,
tokens: this.lexer.inlineTokens(text, []),
}
}
// Create 'strong' if smallest delimiter has even char count. **a***
const text = src.slice(2, lLength + match.index + rLength - 1)
return {
type: "strong",
raw: src.slice(0, lLength + match.index + rLength + 1),
text,
tokens: this.lexer.inlineTokens(text, []),
}
}
}
}
codespan(src) {
const cap = this.rules.inline.code.exec(src)
if (cap) {
let text = cap[2].replace(/\n/g, " ")
const hasNonSpaceChars = /[^ ]/.test(text)
const hasSpaceCharsOnBothEnds = /^ /.test(text) && / $/.test(text)
if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
text = text.substring(1, text.length - 1)
}
text = escape(text, true)
return {
type: "codespan",
raw: cap[0],
text,
}
}
}
br(src) {
const cap = this.rules.inline.br.exec(src)
if (cap) {
return {
type: "br",
raw: cap[0],
}
}
}
del(src) {
const cap = this.rules.inline.del.exec(src)
if (cap) {
return {
type: "del",
raw: cap[0],
text: cap[2],
tokens: this.lexer.inlineTokens(cap[2], []),
}
}
}
autolink(src, mangle) {
const cap = this.rules.inline.autolink.exec(src)
if (cap) {
let text, href
if (cap[2] === "@") {
text = escape(this.options.mangle ? mangle(cap[1]) : cap[1])
href = "mailto:" + text
} else {
text = escape(cap[1])
href = text
}
return {
type: "link",
raw: cap[0],
text,
href,
tokens: [
{
type: "text",
raw: text,
text,
},
],
}
}
}
url(src, mangle) {
let cap
if ((cap = this.rules.inline.url.exec(src))) {
let text, href
if (cap[2] === "@") {
text = escape(this.options.mangle ? mangle(cap[0]) : cap[0])
href = "mailto:" + text
} else {
// do extended autolink path validation
let prevCapZero
do {
prevCapZero = cap[0]
cap[0] = this.rules.inline._backpedal.exec(cap[0])[0]
} while (prevCapZero !== cap[0])
text = escape(cap[0])
if (cap[1] === "www.") {
href = "http://" + text
} else {
href = text
}
}
return {
type: "link",
raw: cap[0],
text,
href,
tokens: [
{
type: "text",
raw: text,
text,
},
],
}
}
}
inlineText(src, smartypants) {
const cap = this.rules.inline.text.exec(src)
if (cap) {
let text
if (this.lexer.state.inRawBlock) {
text = this.options.sanitize
? this.options.sanitizer
? this.options.sanitizer(cap[0])
: escape(cap[0])
: cap[0]
} else {
text = escape(this.options.smartypants ? smartypants(cap[0]) : cap[0])
}
return {
type: "text",
raw: cap[0],
text,
}
}
}
}
/**
* Block-Level Grammar
*/
/**
 * Block-Level Grammar
 * NOTE(review): the `html` template string below had several alternatives
 * destroyed by entity-stripping in this file (the declaration and CDATA
 * branches had collapsed to "|\\n*|$)", and the "</" prefixes were missing);
 * restored from upstream marked v4.
 */
const block = {
  newline: /^(?: *(?:\n|$))+/,
  code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,
  fences:
    /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?=\n|$)|$)/,
  hr: /^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,
  heading: /^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,
  blockquote: /^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/,
  list: /^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/,
  html:
    "^ {0,3}(?:" + // optional indentation
    "<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)" + // (1)
    "|comment[^\\n]*(\\n+|$)" + // (2)
    "|<\\?[\\s\\S]*?(?:\\?>\\n*|$)" + // (3)
    "|<![A-Z][\\s\\S]*?(?:>\\n*|$)" + // (4)
    "|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)" + // (5)
    "|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)" + // (6)
    "|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)" + // (7) open tag
    "|</(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)" + // (7) closing tag
    ")",
  def: /^ {0,3}\[(label)\]: *(?:\n *)?([^\s>]+)>?(?:(?: +(?:\n *)?| *\n *)(title))? *(?:\n+|$)/,
  table: noopTest,
  lheading: /^([^\n]+)\n {0,3}(=+|-+) *(?:\n+|$)/,
  // regex template, placeholders will be replaced according to different paragraph
  // interruption rules of commonmark and the original markdown spec:
  _paragraph:
    /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,
  text: /^[^\n]+/,
}
block._label = /(?!\s*\])(?:\\.|[^\[\]\\])+/
block._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/
block.def = edit(block.def)
.replace("label", block._label)
.replace("title", block._title)
.getRegex()
block.bullet = /(?:[*+-]|\d{1,9}[.)])/
block.listItemStart = edit(/^( *)(bull) */)
.replace("bull", block.bullet)
.getRegex()
block.list = edit(block.list)
.replace(/bull/g, block.bullet)
.replace(
"hr",
"\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))"
)
.replace("def", "\\n+(?=" + block.def.source + ")")
.getRegex()
block._tag =
"address|article|aside|base|basefont|blockquote|body|caption" +
"|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption" +
"|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe" +
"|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option" +
"|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr" +
"|track|ul"
block._comment = /|$)/
block.html = edit(block.html, "i")
.replace("comment", block._comment)
.replace("tag", block._tag)
.replace(
"attribute",
/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/
)
.getRegex()
// Compile the paragraph rule from the template, substituting the constructs
// that may interrupt a paragraph. NOTE(review): the html interruption pattern
// below had lost its leading "</" to entity-stripping; restored from upstream.
block.paragraph = edit(block._paragraph)
  .replace("hr", block.hr)
  .replace("heading", " {0,3}#{1,6} ")
  .replace("|lheading", "") // setext headings don't interrupt commonmark paragraphs
  .replace("|table", "")
  .replace("blockquote", " {0,3}>")
  .replace("fences", " {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n")
  .replace("list", " {0,3}(?:[*+-]|1[.)]) ") // only lists starting from 1 can interrupt
  .replace(
    "html",
    "</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)"
  )
  .replace("tag", block._tag) // pars can be interrupted by type (6) html blocks
  .getRegex()
block.blockquote = edit(block.blockquote)
.replace("paragraph", block.paragraph)
.getRegex()
/**
* Normal Block Grammar
*/
block.normal = merge({}, block)
/**
* GFM Block Grammar
*/
block.gfm = merge({}, block.normal, {
table:
"^ *([^\\n ].*\\|.*)\\n" + // Header
" {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)(?:\\| *)?" + // Align
"(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)", // Cells
})
// Compile the GFM table rule, substituting constructs that end a table body.
// NOTE(review): the html pattern below had lost its leading "</" to
// entity-stripping; restored from upstream marked v4.
block.gfm.table = edit(block.gfm.table)
  .replace("hr", block.hr)
  .replace("heading", " {0,3}#{1,6} ")
  .replace("blockquote", " {0,3}>")
  .replace("code", " {4}[^\\n]")
  .replace("fences", " {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n")
  .replace("list", " {0,3}(?:[*+-]|1[.)]) ") // only lists starting from 1 can interrupt
  .replace(
    "html",
    "</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)"
  )
  .replace("tag", block._tag) // tables can be interrupted by type (6) html blocks
  .getRegex()
// Compile the GFM paragraph rule (tables may interrupt paragraphs here).
// NOTE(review): the html pattern below had lost its leading "</" to
// entity-stripping; restored from upstream marked v4.
block.gfm.paragraph = edit(block._paragraph)
  .replace("hr", block.hr)
  .replace("heading", " {0,3}#{1,6} ")
  .replace("|lheading", "") // setext headings don't interrupt commonmark paragraphs
  .replace("table", block.gfm.table) // interrupt paragraphs with table
  .replace("blockquote", " {0,3}>")
  .replace("fences", " {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n")
  .replace("list", " {0,3}(?:[*+-]|1[.)]) ") // only lists starting from 1 can interrupt
  .replace(
    "html",
    "</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)"
  )
  .replace("tag", block._tag) // pars can be interrupted by type (6) html blocks
  .getRegex()
/**
* Pedantic grammar (original John Gruber's loose markdown specification)
*/
block.pedantic = merge({}, block.normal, {
html: edit(
"^ *(?:comment *(?:\\n|\\s*$)" +
"|<(tag)[\\s\\S]+?\\1> *(?:\\n{2,}|\\s*$)" + // closed tag
"| ${text}
\n"
)
}
return (
'" +
(escaped ? code : escape(code, true)) +
"
\n"
)
}
/**
* @param {string} quote
*/
blockquote(quote) {
return `' +
(escaped ? code : escape(code, true)) +
"\n${quote}
\n`
}
html(html) {
return html
}
/**
* @param {string} text
* @param {string} level
* @param {string} raw
* @param {any} slugger
*/
heading(text, level, raw, slugger) {
if (this.options.headerIds) {
const id = this.options.headerPrefix + slugger.slug(raw)
return `
\n" : "
\n"
}
list(body, ordered, start) {
const type = ordered ? "ol" : "ul",
startatt = ordered && start !== 1 ? ' start="' + start + '"' : ""
return "<" + type + startatt + ">\n" + body + "" + type + ">\n"
}
/**
* @param {string} text
*/
listitem(text) {
return `
${text}`
}
br() {
return this.options.xhtml ? "An error occurred:
" +
escape(e.message + "", true) +
""
)
}
throw e
}
}
/**
* Options
*/
marked.options = marked.setOptions = function (opt) {
merge(marked.defaults, opt)
changeDefaults(marked.defaults)
return marked
}
marked.getDefaults = getDefaults
marked.defaults = defaults
/**
* Use Extension
*/
marked.use = function (...args) {
const opts = merge({}, ...args)
const extensions = marked.defaults.extensions || {
renderers: {},
childTokens: {},
}
let hasExtensions
args.forEach(pack => {
// ==-- Parse "addon" extensions --== //
if (pack.extensions) {
hasExtensions = true
pack.extensions.forEach(ext => {
if (!ext.name) {
throw new Error("extension name required")
}
if (ext.renderer) {
// Renderer extensions
const prevRenderer = extensions.renderers
? extensions.renderers[ext.name]
: null
if (prevRenderer) {
// Replace extension with func to run new extension but fall back if false
extensions.renderers[ext.name] = function (...args) {
let ret = ext.renderer.apply(this, args)
if (ret === false) {
ret = prevRenderer.apply(this, args)
}
return ret
}
} else {
extensions.renderers[ext.name] = ext.renderer
}
}
if (ext.tokenizer) {
// Tokenizer Extensions
if (!ext.level || (ext.level !== "block" && ext.level !== "inline")) {
throw new Error("extension level must be 'block' or 'inline'")
}
if (extensions[ext.level]) {
extensions[ext.level].unshift(ext.tokenizer)
} else {
extensions[ext.level] = [ext.tokenizer]
}
if (ext.start) {
// Function to check for start of token
if (ext.level === "block") {
if (extensions.startBlock) {
extensions.startBlock.push(ext.start)
} else {
extensions.startBlock = [ext.start]
}
} else if (ext.level === "inline") {
if (extensions.startInline) {
extensions.startInline.push(ext.start)
} else {
extensions.startInline = [ext.start]
}
}
}
}
if (ext.childTokens) {
// Child tokens to be visited by walkTokens
extensions.childTokens[ext.name] = ext.childTokens
}
})
}
// ==-- Parse "overwrite" extensions --== //
if (pack.renderer) {
const renderer = marked.defaults.renderer || new Renderer()
for (const prop in pack.renderer) {
const prevRenderer = renderer[prop]
// Replace renderer with func to run extension, but fall back if false
renderer[prop] = (...args) => {
let ret = pack.renderer[prop].apply(renderer, args)
if (ret === false) {
ret = prevRenderer.apply(renderer, args)
}
return ret
}
}
opts.renderer = renderer
}
if (pack.tokenizer) {
const tokenizer = marked.defaults.tokenizer || new Tokenizer()
for (const prop in pack.tokenizer) {
const prevTokenizer = tokenizer[prop]
// Replace tokenizer with func to run extension, but fall back if false
tokenizer[prop] = (...args) => {
let ret = pack.tokenizer[prop].apply(tokenizer, args)
if (ret === false) {
ret = prevTokenizer.apply(tokenizer, args)
}
return ret
}
}
opts.tokenizer = tokenizer
}
// ==-- Parse WalkTokens extensions --== //
if (pack.walkTokens) {
const walkTokens = marked.defaults.walkTokens
opts.walkTokens = function (token) {
pack.walkTokens.call(this, token)
if (walkTokens) {
walkTokens.call(this, token)
}
}
}
if (hasExtensions) {
opts.extensions = extensions
}
marked.setOptions(opts)
})
}
/**
 * Run callback for every token
 *
 * Depth-first walk over a token tree: invokes `callback` (with `marked` as
 * `this`) on each token, then recurses into its children. Table headers and
 * cells, list items, extension-declared childTokens, and plain `tokens`
 * arrays are all visited.
 */
marked.walkTokens = function (tokens, callback) {
for (const token of tokens) {
callback.call(marked, token)
switch (token.type) {
case "table": {
// Visit header cell tokens, then every body cell's tokens.
for (const cell of token.header) {
marked.walkTokens(cell.tokens, callback)
}
for (const row of token.rows) {
for (const cell of row) {
marked.walkTokens(cell.tokens, callback)
}
}
break
}
case "list": {
// List items are themselves tokens; recurse on the items array.
marked.walkTokens(token.items, callback)
break
}
default: {
if (
marked.defaults.extensions &&
marked.defaults.extensions.childTokens &&
marked.defaults.extensions.childTokens[token.type]
) {
// Walk any extensions
marked.defaults.extensions.childTokens[token.type].forEach(function (
childTokens
) {
marked.walkTokens(token[childTokens], callback)
})
} else if (token.tokens) {
// Generic case: recurse into the token's inline/block children.
marked.walkTokens(token.tokens, callback)
}
}
}
}
}
/**
* Parse Inline
* @param {string} src
*/
/**
 * Parse Inline
 * Tokenize and render a markdown string as inline content only (no block
 * constructs). Throws on non-string input; with `silent` set, rendering
 * errors are returned as an HTML error message instead of thrown.
 * @param {string} src
 * @param {object} [opt] - options merged over marked.defaults
 * @returns {string} rendered HTML
 */
marked.parseInline = function (src, opt) {
  // throw error in case of non string input
  if (typeof src === "undefined" || src === null) {
    throw new Error(
      "marked.parseInline(): input parameter is undefined or null"
    )
  }
  if (typeof src !== "string") {
    throw new Error(
      "marked.parseInline(): input parameter is of type " +
        Object.prototype.toString.call(src) +
        ", string expected"
    )
  }
  opt = merge({}, marked.defaults, opt || {})
  checkSanitizeDeprecation(opt)
  try {
    const tokens = Lexer.lexInline(src, opt)
    if (opt.walkTokens) {
      marked.walkTokens(tokens, opt.walkTokens)
    }
    return Parser.parseInline(tokens, opt)
  } catch (e) {
    e.message += "\nPlease report this to https://github.com/markedjs/marked."
    if (opt.silent) {
      // NOTE(review): this fallback markup was corrupted by entity-stripping
      // (an unterminated string literal spanning lines); restored from
      // upstream marked v4.
      return (
        "<p>An error occurred:</p><pre>" +
        escape(e.message + "", true) +
        "</pre>"
      )
    }
    throw e
  }
}
/**
* Expose
*/
marked.Parser = Parser
marked.parser = Parser.parse
marked.Renderer = Renderer
marked.TextRenderer = TextRenderer
marked.Lexer = Lexer
marked.lexer = Lexer.lex
marked.Tokenizer = Tokenizer
marked.Slugger = Slugger
marked.parse = marked
const options = marked.options
const setOptions = marked.setOptions
const use = marked.use
const walkTokens = marked.walkTokens
const parseInline = marked.parseInline
const parse = marked
const parser = Parser.parse
const lexer = Lexer.lex
// NOTE(review): automation-tool snippet appended after the bundled marked
// source. The top-level `return` implies this whole file runs inside a
// host-provided function wrapper, and `trigger` is presumably the host's
// injected event object — verify against the hosting tool.
const email = trigger.row
// Render the row's Message field (markdown) to HTML with marked.
return marked(email.Message)