/**
 * marked - a markdown parser
 * Copyright (c) 2011-2020, Christopher Jeffrey. (MIT Licensed)
 * https://github.com/markedjs/marked
 */
function createCommonjsModule ( fn , module ) {
return module = { exports : { } } , fn ( module , module . exports ) , module . exports ;
}
var defaults = createCommonjsModule ( function ( module ) {
function getDefaults ( ) {
return {
baseUrl : null ,
breaks : false ,
gfm : true ,
headerIds : true ,
headerPrefix : '' ,
highlight : null ,
langPrefix : 'language-' ,
mangle : true ,
pedantic : false ,
renderer : null ,
sanitize : false ,
sanitizer : null ,
silent : false ,
smartLists : false ,
smartypants : false ,
xhtml : false
} ;
}
function changeDefaults ( newDefaults ) {
module . exports . defaults = newDefaults ;
}
module . exports = {
defaults : getDefaults ( ) ,
getDefaults ,
changeDefaults
} ;
} ) ;
var defaults _1 = defaults . defaults ;
var defaults _2 = defaults . getDefaults ;
var defaults _3 = defaults . changeDefaults ;
/**
 * Helpers
 */
const escapeTest = /[&<>"']/;
const escapeReplace = /[&<>"']/g;
const escapeTestNoEncode = /[<>"']|&(?!#?\w+;)/;
const escapeReplaceNoEncode = /[<>"']|&(?!#?\w+;)/g;
// Character -> HTML entity map. (The mangled source had these values
// HTML-decoded — e.g. '&': '&' — which made escaping a no-op and left the
// single-quote entry as an unterminated string literal ''' .)
const escapeReplacements = {
  '&': '&amp;',
  '<': '&lt;',
  '>': '&gt;',
  '"': '&quot;',
  "'": '&#39;'
};
const getEscapeReplacement = (ch) => escapeReplacements[ch];

/**
 * Escape HTML-significant characters in `html`.
 *
 * @param {string} html - text to escape
 * @param {boolean} [encode] - when true, also escape `&` even if it already
 *   looks like the start of an entity; when falsy, existing entities such as
 *   `&amp;` are left intact.
 * @returns {string} escaped text (the original string if nothing matched)
 */
function escape(html, encode) {
  if (encode) {
    if (escapeTest.test(html)) {
      return html.replace(escapeReplace, getEscapeReplacement);
    }
  } else {
    if (escapeTestNoEncode.test(html)) {
      return html.replace(escapeReplaceNoEncode, getEscapeReplacement);
    }
  }
  return html;
}
const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig;

/**
 * Decode numeric HTML entities (decimal and hex) and `&colon;`.
 * Any other named entity is dropped (replaced with '').
 *
 * @param {string} html
 * @returns {string}
 */
function unescape(html) {
  // explicitly match decimal, hex, and named HTML entities
  return html.replace(unescapeTest, (_, n) => {
    n = n.toLowerCase();
    if (n === 'colon') return ':';
    if (n.charAt(0) === '#') {
      const code = n.charAt(1) === 'x'
        ? parseInt(n.substring(2), 16)
        : +n.substring(1);
      // Use fromCodePoint (not fromCharCode) so astral-plane entities such
      // as &#x1F600; decode to the correct character; out-of-range values
      // are dropped instead of throwing.
      return code > 0x10FFFF ? '' : String.fromCodePoint(code);
    }
    return '';
  });
}
const caret = /(^|[^\[])\^/g;

/**
 * Build a regex from a template. `replace(name, val)` substitutes the first
 * occurrence of `name` in the accumulated source with `val` (stripping
 * anchoring carets from the inserted fragment); `getRegex()` compiles the
 * result with the flags given at construction time.
 *
 * @param {RegExp|string} regex - template pattern
 * @param {string} [opt] - regex flags for the compiled result
 * @returns {{replace: Function, getRegex: Function}} chainable builder
 */
function edit(regex, opt) {
  let source = regex.source || regex;
  const flags = opt || '';
  const obj = {
    replace(name, val) {
      let inserted = val.source || val;
      // Drop `^` anchors from the inserted fragment so it composes cleanly.
      inserted = inserted.replace(caret, '$1');
      source = source.replace(name, inserted);
      return obj;
    },
    getRegex() {
      return new RegExp(source, flags);
    }
  };
  return obj;
}
const nonWordAndColonTest = /[^\w:]/g;
const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;

/**
 * Sanitize and normalize a URL for HTML output.
 *
 * @param {boolean} sanitize - when true, reject script-injection protocols
 * @param {string|null} base - base URL to resolve relative hrefs against
 * @param {string} href - the raw URL
 * @returns {string|null} the encoded URL, or null when rejected/unencodable
 */
function cleanUrl(sanitize, base, href) {
  if (sanitize) {
    let prot;
    try {
      prot = decodeURIComponent(unescape(href))
        .replace(nonWordAndColonTest, '')
        .toLowerCase();
    } catch (e) {
      return null;
    }
    // Reject dangerous protocols after stripping obfuscating characters.
    const banned = ['javascript:', 'vbscript:', 'data:'];
    if (banned.some((p) => prot.indexOf(p) === 0)) {
      return null;
    }
  }
  if (base && !originIndependentUrl.test(href)) {
    href = resolveUrl(base, href);
  }
  try {
    // encodeURI double-encodes existing % signs; undo that.
    return encodeURI(href).replace(/%25/g, '%');
  } catch (e) {
    return null;
  }
}
const baseUrls = {};
const justDomain = /^[^:]+:\/*[^/]*$/;
const protocol = /^([^:]+:)[\s\S]*$/;
const domain = /^([^:]+:\/*[^/]*)[\s\S]*$/;

/**
 * Resolve `href` against `base`, caching the normalized form of each base.
 *
 * @param {string} base - base URL
 * @param {string} href - possibly-relative URL
 * @returns {string} resolved URL
 */
function resolveUrl(base, href) {
  const cacheKey = ' ' + base;
  if (!baseUrls[cacheKey]) {
    // we can ignore everything in base after the last slash of its path component,
    // but we might need to add _that_
    // https://tools.ietf.org/html/rfc3986#section-3
    baseUrls[cacheKey] = justDomain.test(base)
      ? base + '/'
      : rtrim(base, '/', true);
  }
  base = baseUrls[cacheKey];
  const relativeBase = !base.includes(':');
  if (href.slice(0, 2) === '//') {
    // Protocol-relative href: keep the base's scheme only.
    return relativeBase ? href : base.replace(protocol, '$1') + href;
  }
  if (href.charAt(0) === '/') {
    // Root-relative href: keep the base's scheme + authority only.
    return relativeBase ? href : base.replace(domain, '$1') + href;
  }
  return base + href;
}
// A regex stand-in whose exec() always fails (returns undefined); used to
// disable individual grammar rules.
const noopTest = { exec: function noopTest() {} };

/**
 * Shallow-copy the own enumerable properties of each source object onto
 * `obj`, later sources winning. Modernized from the original
 * `arguments`-based loop to rest parameters.
 *
 * @param {Object} obj - target, mutated in place
 * @param {...Object} sources - objects to copy from
 * @returns {Object} obj
 */
function merge(obj, ...sources) {
  for (const source of sources) {
    if (source == null) continue; // match for-in's tolerance of null/undefined
    for (const key of Object.keys(source)) {
      obj[key] = source[key];
    }
  }
  return obj;
}
/**
 * Split a table row on unescaped pipes into exactly `count` cells
 * (truncating extras, padding with '' when short). Escaped pipes (`\|`)
 * are kept literal and unescaped in the output.
 *
 * @param {string} tableRow - raw row text
 * @param {number} [count] - expected number of cells; omitted = no padding
 * @returns {string[]} trimmed cell contents
 */
function splitCells(tableRow, count) {
  // ensure that every cell-delimiting pipe has a space
  // before it to distinguish it from an escaped pipe
  const row = tableRow.replace(/\|/g, (match, offset, str) => {
    let escaped = false;
    let pos = offset;
    // An odd number of preceding backslashes means this pipe is escaped.
    while (--pos >= 0 && str[pos] === '\\') {
      escaped = !escaped;
    }
    return escaped ? '|' : ' |';
  });
  const cells = row.split(/ \|/);
  if (cells.length > count) {
    cells.splice(count);
  } else {
    while (cells.length < count) {
      cells.push('');
    }
  }
  // leading or trailing whitespace is ignored per the gfm spec
  return cells.map((cell) => cell.trim().replace(/\\\|/g, '|'));
}
// Remove trailing 'c's. Equivalent to str.replace(/c*$/, '').
// /c*$/ is vulnerable to REDOS.
// invert: Remove suffix of non-c chars instead. Default falsey.
function rtrim(str, c, invert) {
  const len = str.length;
  if (len === 0) {
    return '';
  }
  // Count how many trailing characters satisfy the trim condition.
  let suffLen = 0;
  for (let i = len - 1; i >= 0; i--) {
    const ch = str.charAt(i);
    const matches = invert ? ch !== c : ch === c;
    if (!matches) {
      break;
    }
    suffLen++;
  }
  return str.slice(0, len - suffLen);
}
/**
 * Find the index in `str` of the closing bracket `b[1]` that balances an
 * implicit opening bracket before position 0; `b[0]`/`b[1]` nest, and a
 * backslash escapes the following character.
 *
 * @param {string} str - text to scan
 * @param {string} b - two-character string: open bracket then close bracket
 * @returns {number} index of the unbalanced closing bracket, or -1
 */
function findClosingBracket(str, b) {
  if (str.indexOf(b[1]) === -1) {
    return -1;
  }
  const [open, close] = b;
  let depth = 0;
  for (let i = 0; i < str.length; i++) {
    const ch = str[i];
    if (ch === '\\') {
      i++; // skip escaped character
    } else if (ch === open) {
      depth++;
    } else if (ch === close) {
      depth--;
      if (depth < 0) {
        return i;
      }
    }
  }
  return -1;
}
/**
 * Emit a deprecation warning when the removed-in-future `sanitize` option is
 * in use, unless `silent` suppresses it.
 *
 * @param {Object|null|undefined} opt - marked options
 */
function checkSanitizeDeprecation(opt) {
  if (!opt || !opt.sanitize || opt.silent) {
    return;
  }
  console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options');
}
// Aggregate export of the helper utilities defined in this section.
var helpers = {
escape ,
unescape ,
edit ,
cleanUrl ,
resolveUrl ,
noopTest ,
merge ,
splitCells ,
rtrim ,
findClosingBracket ,
checkSanitizeDeprecation
} ;
// Local $1-suffixed aliases used by the grammar definitions below
// (bundler convention to avoid name collisions between rolled-up modules).
const {
noopTest : noopTest$1 ,
edit : edit$1 ,
merge : merge$1
} = helpers ;
/**
 * Block-Level Grammar
 */
// Block-level grammar: one rule per block construct. Placeholder words
// inside these patterns (paragraph, bull, hr, def, comment, tag, attribute,
// label, title, heading, lheading, fences, list, html, code) are NOT
// literal — they are substituted with sub-patterns further below via
// edit$1(...).replace(...).
const block = {
newline : /^\n+/ ,
code : /^( {4}[^\n]+\n*)+/ ,
fences : /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?:\n+|$)|$)/ ,
hr : /^ {0,3}((?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$)/ ,
heading : /^ {0,3}(#{1,6}) +([^\n]*?)(?: +#+)? *(?:\n+|$)/ ,
blockquote : /^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/ ,
list : /^( {0,3})(bull) [\s\S]+?(?:hr|def|\n{2,}(?! )(?!\1bull )\n*|\s*$)/ ,
// HTML block rule built as a string (compiled below with the 'i' flag);
// numbered alternatives correspond to the CommonMark HTML block types.
html : '^ {0,3}(?:' // optional indentation
+ '<(script|pre|style)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)' // (1)
+ '|comment[^\\n]*(\\n+|$)' // (2)
+ '|<\\?[\\s\\S]*?\\?>\\n*' // (3)
+ '|<![A-Z][\\s\\S]*?>\\n*' // (4)
+ '|<!\\[CDATA\\[[\\s\\S]*?\\]\\]>\\n*' // (5)
+ '|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:\\n{2,}|$)' // (6)
+ '|<(?!script|pre|style)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:\\n{2,}|$)' // (7) open tag
+ '|</(?!script|pre|style)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:\\n{2,}|$)' // (7) closing tag
+ ')' ,
def : /^ {0,3}\[(label)\]: *\n? *<?([^\s>]+)>?(?:(?: +\n? *| *\n *)(title))? *(?:\n+|$)/ ,
// GFM-only rules; disabled (noop) in the base grammar, enabled in block.gfm.
nptable : noopTest$1 ,
table : noopTest$1 ,
lheading : /^([^\n]+)\n {0,3}(=+|-+) *(?:\n+|$)/ ,
// regex template, placeholders will be replaced according to different paragraph
// interruption rules of commonmark and the original markdown spec:
_paragraph : /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html)[^\n]+)*)/ ,
text : /^[^\n]+/
} ;
// Sub-patterns for link reference definitions ([label]: url "title").
block . _label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/ ;
block . _title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/ ;
block . def = edit$1 ( block . def )
. replace ( 'label' , block . _label )
. replace ( 'title' , block . _title )
. getRegex ( ) ;
// List bullets (-, +, * or an ordered "1.") and per-item pattern.
block . bullet = /(?:[*+-]|\d{1,9}\.)/ ;
block . item = /^( *)(bull) ?[^\n]*(?:\n(?!\1bull ?)[^\n]*)*/ ;
block . item = edit$1 ( block . item , 'gm' )
. replace ( /bull/g , block . bullet )
. getRegex ( ) ;
// A list ends at an hr, a link definition, or a blank-line boundary.
block . list = edit$1 ( block . list )
. replace ( /bull/g , block . bullet )
. replace ( 'hr' , '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))' )
. replace ( 'def' , '\\n+(?=' + block . def . source + ')' )
. getRegex ( ) ;
// Tag names that form type-(6) HTML blocks.
block . _tag = 'address|article|aside|base|basefont|blockquote|body|caption'
+ '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'
+ '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'
+ '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'
+ '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr'
+ '|track|ul' ;
block . _comment = /<!--(?!-?>)[\s\S]*?-->/ ;
block . html = edit$1 ( block . html , 'i' )
. replace ( 'comment' , block . _comment )
. replace ( 'tag' , block . _tag )
. replace ( 'attribute' , / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/ )
. getRegex ( ) ;
// Paragraph rule: substitute the constructs that may interrupt a paragraph.
block . paragraph = edit$1 ( block . _paragraph )
. replace ( 'hr' , block . hr )
. replace ( 'heading' , ' {0,3}#{1,6} ' )
. replace ( '|lheading' , '' ) // setex headings don't interrupt commonmark paragraphs
. replace ( 'blockquote' , ' {0,3}>' )
. replace ( 'fences' , ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n' )
. replace ( 'list' , ' {0,3}(?:[*+-]|1[.)]) ' ) // only lists starting from 1 can interrupt
. replace ( 'html' , '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|!--)' )
. replace ( 'tag' , block . _tag ) // pars can be interrupted by type (6) html blocks
. getRegex ( ) ;
block . blockquote = edit$1 ( block . blockquote )
. replace ( 'paragraph' , block . paragraph )
. getRegex ( ) ;
/**
 * Normal Block Grammar
 */
block . normal = merge$1 ( { } , block ) ;
/**
 * GFM Block Grammar (adds tables with and without leading pipes)
 */
block . gfm = merge$1 ( { } , block . normal , {
nptable : '^ *([^|\\n ].*\\|.*)\\n' // Header
+ ' *([-:]+ *\\|[-| :]*)' // Align
+ '(?:\\n((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' , // Cells
table : '^ *\\|(.+)\\n' // Header
+ ' *\\|?( *[-:]+[-| :]*)' // Align
+ '(?:\\n *((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells
} ) ;
// Compile the table rules: placeholders name the constructs that terminate
// a table's cell section.
block . gfm . nptable = edit$1 ( block . gfm . nptable )
. replace ( 'hr' , block . hr )
. replace ( 'heading' , ' {0,3}#{1,6} ' )
. replace ( 'blockquote' , ' {0,3}>' )
. replace ( 'code' , ' {4}[^\\n]' )
. replace ( 'fences' , ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n' )
. replace ( 'list' , ' {0,3}(?:[*+-]|1[.)]) ' ) // only lists starting from 1 can interrupt
. replace ( 'html' , '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|!--)' )
. replace ( 'tag' , block . _tag ) // tables can be interrupted by type (6) html blocks
. getRegex ( ) ;
block . gfm . table = edit$1 ( block . gfm . table )
. replace ( 'hr' , block . hr )
. replace ( 'heading' , ' {0,3}#{1,6} ' )
. replace ( 'blockquote' , ' {0,3}>' )
. replace ( 'code' , ' {4}[^\\n]' )
. replace ( 'fences' , ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n' )
. replace ( 'list' , ' {0,3}(?:[*+-]|1[.)]) ' ) // only lists starting from 1 can interrupt
. replace ( 'html' , '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|!--)' )
. replace ( 'tag' , block . _tag ) // tables can be interrupted by type (6) html blocks
. getRegex ( ) ;
/**
 * Pedantic grammar (original John Gruber's loose markdown specification)
 */
block . pedantic = merge$1 ( { } , block . normal , {
html : edit$1 (
'^ *(?:comment *(?:\\n|\\s*$)'
+ '|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)' // closed tag
+ '|<tag(?:"[^"]*"|\'[^\']*\'|\\s[^\'"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))' )
. replace ( 'comment' , block . _comment )
. replace ( /tag/g , '(?!(?:'
+ 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub'
+ '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)'
+ '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b' )
. getRegex ( ) ,
def : /^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/ ,
heading : /^ *(#{1,6}) *([^\n]+?) *(?:#+ *)?(?:\n+|$)/ ,
fences : noopTest$1 , // fences not supported
paragraph : edit$1 ( block . normal . _paragraph )
. replace ( 'hr' , block . hr )
. replace ( 'heading' , ' *#{1,6} *[^\n]' )
. replace ( 'lheading' , block . lheading )
. replace ( 'blockquote' , ' {0,3}>' )
. replace ( '|fences' , '' )
. replace ( '|list' , '' )
. replace ( '|html' , '' )
. getRegex ( )
} ) ;
/**
 * Inline-Level Grammar. As with the block grammar, placeholder words
 * (scheme, email, comment, attribute, label, href, title, punctuation)
 * are substituted below via edit$1(...).replace(...).
 */
const inline = {
escape : /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/ ,
autolink : /^<(scheme:[^\s\x00-\x1f<>]*|email)>/ ,
url : noopTest$1 ,
tag : '^comment'
+ '|^</[a-zA-Z][\\w:-]*\\s*>' // self-closing tag
+ '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag
+ '|^<\\?[\\s\\S]*?\\?>' // processing instruction, e.g. <?php ?>
+ '|^<![a-zA-Z]+\\s[\\s\\S]*?>' // declaration, e.g. <!DOCTYPE html>
+ '|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>' , // CDATA section
link : /^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/ ,
reflink : /^!?\[(label)\]\[(?!\s*\])((?:\\[\[\]]?|[^\[\]\\])+)\]/ ,
nolink : /^!?\[(?!\s*\])((?:\[[^\[\]]*\]|\\[\[\]]|[^\[\]])*)\](?:\[\])?/ ,
strong : /^__([^\s_])__(?!_)|^\*\*([^\s*])\*\*(?!\*)|^__([^\s][\s\S]*?[^\s])__(?!_)|^\*\*([^\s][\s\S]*?[^\s])\*\*(?!\*)/ ,
em : /^_([^\s_])_(?!_)|^_([^\s_<][\s\S]*?[^\s_])_(?!_|[^\spunctuation])|^_([^\s_<][\s\S]*?[^\s])_(?!_|[^\spunctuation])|^\*([^\s*<\[])\*(?!\*)|^\*([^\s<"][\s\S]*?[^\s\[\*])\*(?![\]`punctuation])|^\*([^\s*"<\[][\s\S]*[^\s])\*(?!\*)/ ,
code : /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/ ,
br : /^( {2,}|\\)\n(?!\s*$)/ ,
del : noopTest$1 ,
text : /^(`+|[^`])(?:[\s\S]*?(?:(?=[\\<!\[`*]|\b_|$)|[^ ](?= {2,}\n))|(?= {2,}\n))/
} ;
// list of punctuation marks from common mark spec
// without ` and ] to workaround Rule 17 (inline code blocks/links)
inline . _punctuation = '!"#$%&\'()*+\\-./:;<=>?@\\[^_{|}~' ;
inline . em = edit$1 ( inline . em ) . replace ( /punctuation/g , inline . _punctuation ) . getRegex ( ) ;
inline . _escapes = /\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g ;
// Autolink components: <scheme:...> and <user@host>.
inline . _scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/ ;
inline . _email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/ ;
inline . autolink = edit$1 ( inline . autolink )
. replace ( 'scheme' , inline . _scheme )
. replace ( 'email' , inline . _email )
. getRegex ( ) ;
inline . _attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/ ;
inline . tag = edit$1 ( inline . tag )
. replace ( 'comment' , block . _comment )
. replace ( 'attribute' , inline . _attribute )
. getRegex ( ) ;
// Link components: [label](<href> "title").
inline . _label = /(?:\[[^\[\]]*\]|\\.|`[^`]*`|[^\[\]\\`])*?/ ;
inline . _href = /<(?:\\[<>]?|[^\s<>\\])*>|[^\s\x00-\x1f]*/ ;
inline . _title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/ ;
inline . link = edit$1 ( inline . link )
. replace ( 'label' , inline . _label )
. replace ( 'href' , inline . _href )
. replace ( 'title' , inline . _title )
. getRegex ( ) ;
inline . reflink = edit$1 ( inline . reflink )
. replace ( 'label' , inline . _label )
. getRegex ( ) ;
/**
 * Normal Inline Grammar
 */
inline . normal = merge$1 ( { } , inline ) ;
/**
 * Pedantic Inline Grammar (looser strong/em and link rules)
 */
inline . pedantic = merge$1 ( { } , inline . normal , {
strong : /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/ ,
em : /^_(?=\S)([\s\S]*?\S)_(?!_)|^\*(?=\S)([\s\S]*?\S)\*(?!\*)/ ,
link : edit$1 ( /^!?\[(label)\]\((.*?)\)/ )
. replace ( 'label' , inline . _label )
. getRegex ( ) ,
reflink : edit$1 ( /^!?\[(label)\]\s*\[([^\]]*)\]/ )
. replace ( 'label' , inline . _label )
. getRegex ( )
} ) ;
/**
 * GFM Inline Grammar: extends the normal grammar with autolinked URLs,
 * strikethrough (`del`) and `~` in the escape set.
 * (Restores `_extended_email`, which the mangled source had split into the
 * invalid identifier `_extended _email` — a syntax error.)
 */
inline.gfm = merge$1({}, inline.normal, {
  escape: edit$1(inline.escape).replace('])', '~|])').getRegex(),
  _extended_email: /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/,
  url: /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,
  _backpedal: /(?:[^?!.,:;*_~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_~)]+(?!$))+/,
  del: /^~+(?=\S)([\s\S]*?\S)~+/,
  text: /^(`+|[^`])(?:[\s\S]*?(?:(?=[\\<!\[`*~]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@))|(?= {2,}\n|[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@))/
});
// Substitute the extended email pattern into the url rule (case-insensitive).
inline.gfm.url = edit$1(inline.gfm.url, 'i')
  .replace('email', inline.gfm._extended_email)
  .getRegex();
/**
 * GFM + Line Breaks Inline Grammar (single trailing space forces a <br>)
 */
inline . breaks = merge$1 ( { } , inline . gfm , {
br : edit$1 ( inline . br ) . replace ( '{2,}' , '*' ) . getRegex ( ) ,
text : edit$1 ( inline . gfm . text )
. replace ( '\\b_' , '\\b_| {2,}\\n' )
. replace ( /\{2,\}/g , '*' )
. getRegex ( )
} ) ;
// Combined export of the block- and inline-level grammars.
var rules = {
block ,
inline
} ;
// Local aliases for the lexer section below.
const { defaults : defaults$1 } = defaults ;
const { block : block$1 , inline : inline$1 } = rules ;
const {
rtrim : rtrim$1 ,
splitCells : splitCells$1 ,
escape : escape$1 ,
findClosingBracket : findClosingBracket$1
} = helpers ;
/**
 * Block Lexer
 */
var Lexer _1 = class Lexer {
constructor ( options ) {
this . tokens = [ ] ;
this . tokens . links = Object . create ( null ) ;
this . options = options || defaults$1 ;
this . rules = {
block : block$1 . normal ,
inline : inline$1 . normal
} ;
if ( this . options . pedantic ) {
this . rules . block = block$1 . pedantic ;
this . rules . inline = inline$1 . pedantic ;
} else if ( this . options . gfm ) {
this . rules . block = block$1 . gfm ;
if ( this . options . breaks ) {
this . rules . inline = inline$1 . breaks ;
} else {
this . rules . inline = inline$1 . gfm ;
}
}
}
/ * *
* Expose Block Rules
* /
static get rules ( ) {
return {
block : block$1 ,
inline : inline$1
} ;
}
/ * *
* Static Lex Method
* /
static lex ( src , options ) {
const lexer = new Lexer ( options ) ;
return lexer . lex ( src ) ;
}
/ * *
* Preprocessing
* /
lex ( src ) {
src = src
. replace ( /\r\n|\r/g , '\n' )
. replace ( /\t/g , ' ' ) ;
this . blockTokens ( src , this . tokens ) ;
this . inline ( this . tokens ) ;
return this . tokens ;
}
/ * *
* Lexing
* /
blockTokens ( src , tokens , top = true ) {
src = src . replace ( /^ +$/gm , '' ) ;
let next ,
loose ,
cap ,
bull ,
b ,
item ,
list ,
space ,
i ,
tag ,
l ,
isordered ,
istask ,
ischecked ,
lastToken ,
addBack ,
raw ;
while ( src ) {
// newline
if ( cap = this . rules . block . newline . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
if ( cap [ 0 ] . length > 1 ) {
tokens . push ( {
type : 'space' ,
raw
} ) ;
}
}
// code
if ( cap = this . rules . block . code . exec ( src ) ) {
lastToken = tokens [ tokens . length - 1 ] ;
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
// An indented code block cannot interrupt a paragraph.
if ( lastToken && lastToken . type === 'paragraph' ) {
lastToken . text += '\n' + cap [ 0 ] . trimRight ( ) ;
lastToken . raw += '\n' + raw ;
} else {
cap = cap [ 0 ] . replace ( /^ {4}/gm , '' ) ;
tokens . push ( {
type : 'code' ,
raw ,
codeBlockStyle : 'indented' ,
text : ! this . options . pedantic
? rtrim$1 ( cap , '\n' )
: cap
} ) ;
}
continue ;
}
// fences
if ( cap = this . rules . block . fences . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
tokens . push ( {
type : 'code' ,
raw ,
lang : cap [ 2 ] ? cap [ 2 ] . trim ( ) : cap [ 2 ] ,
text : cap [ 3 ] || ''
} ) ;
continue ;
}
// heading
if ( cap = this . rules . block . heading . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
tokens . push ( {
type : 'heading' ,
raw ,
depth : cap [ 1 ] . length ,
text : cap [ 2 ]
} ) ;
continue ;
}
// table no leading pipe (gfm)
if ( cap = this . rules . block . nptable . exec ( src ) ) {
item = {
type : 'table' ,
header : splitCells$1 ( cap [ 1 ] . replace ( /^ *| *\| *$/g , '' ) ) ,
align : cap [ 2 ] . replace ( /^ *|\| *$/g , '' ) . split ( / *\| */ ) ,
cells : cap [ 3 ] ? cap [ 3 ] . replace ( /\n$/ , '' ) . split ( '\n' ) : [ ]
} ;
if ( item . header . length === item . align . length ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
item . raw = raw ;
l = item . align . length ;
for ( i = 0 ; i < l ; i ++ ) {
if ( /^ *-+: *$/ . test ( item . align [ i ] ) ) {
item . align [ i ] = 'right' ;
} else if ( /^ *:-+: *$/ . test ( item . align [ i ] ) ) {
item . align [ i ] = 'center' ;
} else if ( /^ *:-+ *$/ . test ( item . align [ i ] ) ) {
item . align [ i ] = 'left' ;
} else {
item . align [ i ] = null ;
}
}
l = item . cells . length ;
for ( i = 0 ; i < l ; i ++ ) {
item . cells [ i ] = splitCells$1 ( item . cells [ i ] , item . header . length ) ;
}
tokens . push ( item ) ;
continue ;
}
}
// hr
if ( cap = this . rules . block . hr . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
tokens . push ( {
type : 'hr' ,
raw
} ) ;
continue ;
}
// blockquote
if ( cap = this . rules . block . blockquote . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
cap = cap [ 0 ] . replace ( /^ *> ?/gm , '' ) ;
tokens . push ( {
type : 'blockquote' ,
raw ,
tokens : this . blockTokens ( cap , [ ] , top )
} ) ;
continue ;
}
// list
if ( cap = this . rules . block . list . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
bull = cap [ 2 ] ;
isordered = bull . length > 1 ;
list = {
type : 'list' ,
raw ,
ordered : isordered ,
start : isordered ? + bull : '' ,
loose : false ,
items : [ ]
} ;
tokens . push ( list ) ;
// Get each top-level item.
cap = cap [ 0 ] . match ( this . rules . block . item ) ;
next = false ;
l = cap . length ;
for ( i = 0 ; i < l ; i ++ ) {
item = cap [ i ] ;
raw = item . trim ( ) ;
// Remove the list item's bullet
// so it is seen as the next token.
space = item . length ;
item = item . replace ( /^ *([*+-]|\d+\.) */ , '' ) ;
// Outdent whatever the
// list item contains. Hacky.
if ( ~ item . indexOf ( '\n ' ) ) {
space -= item . length ;
item = ! this . options . pedantic
? item . replace ( new RegExp ( '^ {1,' + space + '}' , 'gm' ) , '' )
: item . replace ( /^ {1,4}/gm , '' ) ;
}
// Determine whether the next list item belongs here.
// Backpedal if it does not belong in this list.
if ( i !== l - 1 ) {
b = block$1 . bullet . exec ( cap [ i + 1 ] ) [ 0 ] ;
if ( bull . length > 1 ? b . length === 1
: ( b . length > 1 || ( this . options . smartLists && b !== bull ) ) ) {
addBack = cap . slice ( i + 1 ) . join ( '\n' ) ;
src = addBack + src ;
list . raw = list . raw . substring ( list . raw . length - addBack . length ) ;
i = l - 1 ;
}
}
// Determine whether item is loose or not.
// Use: /(^|\n)(?! )[^\n]+\n\n(?!\s*$)/
// for discount behavior.
loose = next || /\n\n(?!\s*$)/ . test ( item ) ;
if ( i !== l - 1 ) {
next = item . charAt ( item . length - 1 ) === '\n' ;
if ( ! loose ) loose = next ;
}
if ( loose ) {
list . loose = true ;
}
// Check for task list items
istask = /^\[[ xX]\] / . test ( item ) ;
ischecked = undefined ;
if ( istask ) {
ischecked = item [ 1 ] !== ' ' ;
item = item . replace ( /^\[[ xX]\] +/ , '' ) ;
}
list . items . push ( {
raw ,
task : istask ,
checked : ischecked ,
loose : loose ,
tokens : this . blockTokens ( item , [ ] , false )
} ) ;
}
continue ;
}
// html
if ( cap = this . rules . block . html . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
tokens . push ( {
type : this . options . sanitize
? 'paragraph'
: 'html' ,
raw ,
pre : ! this . options . sanitizer
&& ( cap [ 1 ] === 'pre' || cap [ 1 ] === 'script' || cap [ 1 ] === 'style' ) ,
text : this . options . sanitize ? ( this . options . sanitizer ? this . options . sanitizer ( cap [ 0 ] ) : escape$1 ( cap [ 0 ] ) ) : cap [ 0 ]
} ) ;
continue ;
}
// def
if ( top && ( cap = this . rules . block . def . exec ( src ) ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
if ( cap [ 3 ] ) cap [ 3 ] = cap [ 3 ] . substring ( 1 , cap [ 3 ] . length - 1 ) ;
tag = cap [ 1 ] . toLowerCase ( ) . replace ( /\s+/g , ' ' ) ;
if ( ! this . tokens . links [ tag ] ) {
this . tokens . links [ tag ] = {
href : cap [ 2 ] ,
title : cap [ 3 ]
} ;
}
continue ;
}
// table (gfm)
if ( cap = this . rules . block . table . exec ( src ) ) {
item = {
type : 'table' ,
header : splitCells$1 ( cap [ 1 ] . replace ( /^ *| *\| *$/g , '' ) ) ,
align : cap [ 2 ] . replace ( /^ *|\| *$/g , '' ) . split ( / *\| */ ) ,
cells : cap [ 3 ] ? cap [ 3 ] . replace ( /\n$/ , '' ) . split ( '\n' ) : [ ]
} ;
if ( item . header . length === item . align . length ) {
src = src . substring ( cap [ 0 ] . length ) ;
item . raw = cap [ 0 ] ;
l = item . align . length ;
for ( i = 0 ; i < l ; i ++ ) {
if ( /^ *-+: *$/ . test ( item . align [ i ] ) ) {
item . align [ i ] = 'right' ;
} else if ( /^ *:-+: *$/ . test ( item . align [ i ] ) ) {
item . align [ i ] = 'center' ;
} else if ( /^ *:-+ *$/ . test ( item . align [ i ] ) ) {
item . align [ i ] = 'left' ;
} else {
item . align [ i ] = null ;
}
}
l = item . cells . length ;
for ( i = 0 ; i < l ; i ++ ) {
item . cells [ i ] = splitCells$1 (
item . cells [ i ] . replace ( /^ *\| *| *\| *$/g , '' ) ,
item . header . length ) ;
}
tokens . push ( item ) ;
continue ;
}
}
// lheading
if ( cap = this . rules . block . lheading . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
tokens . push ( {
type : 'heading' ,
raw ,
depth : cap [ 2 ] . charAt ( 0 ) === '=' ? 1 : 2 ,
text : cap [ 1 ]
} ) ;
continue ;
}
// top-level paragraph
if ( top && ( cap = this . rules . block . paragraph . exec ( src ) ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
tokens . push ( {
type : 'paragraph' ,
raw ,
text : cap [ 1 ] . charAt ( cap [ 1 ] . length - 1 ) === '\n'
? cap [ 1 ] . slice ( 0 , - 1 )
: cap [ 1 ]
} ) ;
continue ;
}
// text
if ( cap = this . rules . block . text . exec ( src ) ) {
// Top-level should never reach here.
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
tokens . push ( {
type : 'text' ,
raw ,
text : cap [ 0 ]
} ) ;
continue ;
}
if ( src ) {
const errMsg = 'Infinite loop on byte: ' + src . charCodeAt ( 0 ) ;
if ( this . options . silent ) {
console . error ( errMsg ) ;
} else {
throw new Error ( errMsg ) ;
}
}
}
return tokens ;
}
inline ( tokens ) {
let i ,
j ,
k ,
l2 ,
row ,
token ;
const l = tokens . length ;
for ( i = 0 ; i < l ; i ++ ) {
token = tokens [ i ] ;
switch ( token . type ) {
case 'paragraph' :
case 'text' :
case 'heading' : {
token . tokens = [ ] ;
this . inlineTokens ( token . text , token . tokens ) ;
break ;
}
case 'table' : {
token . tokens = {
header : [ ] ,
cells : [ ]
} ;
// header
l2 = token . header . length ;
for ( j = 0 ; j < l2 ; j ++ ) {
token . tokens . header [ j ] = [ ] ;
this . inlineTokens ( token . header [ j ] , token . tokens . header [ j ] ) ;
}
// cells
l2 = token . cells . length ;
for ( j = 0 ; j < l2 ; j ++ ) {
row = token . cells [ j ] ;
token . tokens . cells [ j ] = [ ] ;
for ( k = 0 ; k < row . length ; k ++ ) {
token . tokens . cells [ j ] [ k ] = [ ] ;
this . inlineTokens ( row [ k ] , token . tokens . cells [ j ] [ k ] ) ;
}
}
break ;
}
case 'blockquote' : {
this . inline ( token . tokens ) ;
break ;
}
case 'list' : {
l2 = token . items . length ;
for ( j = 0 ; j < l2 ; j ++ ) {
this . inline ( token . items [ j ] . tokens ) ;
}
break ;
}
}
}
return tokens ;
}
/ * *
* Lexing / Compiling
* /
inlineTokens ( src , tokens ) {
let out = '' ,
link ,
text ,
newTokens ,
href ,
title ,
cap ,
prevCapZero ,
lastParenIndex ,
start ,
linkLen ,
raw ;
while ( src ) {
// escape
if ( cap = this . rules . inline . escape . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
text = escape$1 ( cap [ 1 ] ) ;
out += text ;
tokens . push ( {
type : 'escape' ,
raw ,
text
} ) ;
continue ;
}
// tag
if ( cap = this . rules . inline . tag . exec ( src ) ) {
if ( ! this . inLink && /^<a /i . test ( cap [ 0 ] ) ) {
this . inLink = true ;
} else if ( this . inLink && /^<\/a>/i . test ( cap [ 0 ] ) ) {
this . inLink = false ;
}
if ( ! this . inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i . test ( cap [ 0 ] ) ) {
this . inRawBlock = true ;
} else if ( this . inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i . test ( cap [ 0 ] ) ) {
this . inRawBlock = false ;
}
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
text = this . options . sanitize
? ( this . options . sanitizer
? this . options . sanitizer ( cap [ 0 ] )
: escape$1 ( cap [ 0 ] ) )
: cap [ 0 ] ;
tokens . push ( {
type : this . options . sanitize
? 'text'
: 'html' ,
raw ,
text
} ) ;
out += text ;
continue ;
}
// link
if ( cap = this . rules . inline . link . exec ( src ) ) {
lastParenIndex = findClosingBracket$1 ( cap [ 2 ] , '()' ) ;
if ( lastParenIndex > - 1 ) {
start = cap [ 0 ] . indexOf ( '!' ) === 0 ? 5 : 4 ;
linkLen = start + cap [ 1 ] . length + lastParenIndex ;
cap [ 2 ] = cap [ 2 ] . substring ( 0 , lastParenIndex ) ;
cap [ 0 ] = cap [ 0 ] . substring ( 0 , linkLen ) . trim ( ) ;
cap [ 3 ] = '' ;
}
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
this . inLink = true ;
href = cap [ 2 ] ;
if ( this . options . pedantic ) {
link = /^([^'"]*[^\s])\s+(['"])(.*)\2/ . exec ( href ) ;
if ( link ) {
href = link [ 1 ] ;
title = link [ 3 ] ;
} else {
title = '' ;
}
} else {
title = cap [ 3 ] ? cap [ 3 ] . slice ( 1 , - 1 ) : '' ;
}
href = href . trim ( ) . replace ( /^<([\s\S]*)>$/ , '$1' ) ;
out += this . outputLink ( cap , {
href : this . escapes ( href ) ,
title : this . escapes ( title )
} , tokens , raw ) ;
this . inLink = false ;
continue ;
}
// reflink, nolink
if ( ( cap = this . rules . inline . reflink . exec ( src ) )
|| ( cap = this . rules . inline . nolink . exec ( src ) ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
link = ( cap [ 2 ] || cap [ 1 ] ) . replace ( /\s+/g , ' ' ) ;
link = this . tokens . links [ link . toLowerCase ( ) ] ;
if ( ! link || ! link . href ) {
text = cap [ 0 ] . charAt ( 0 ) ;
out += text ;
tokens . push ( {
type : 'text' ,
raw : text ,
text
} ) ;
src = cap [ 0 ] . substring ( 1 ) + src ;
continue ;
}
this . inLink = true ;
out += this . outputLink ( cap , link , tokens , raw ) ;
this . inLink = false ;
continue ;
}
// strong
if ( cap = this . rules . inline . strong . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
newTokens = tokens ? [ ] : null ;
text = this . inlineTokens ( cap [ 4 ] || cap [ 3 ] || cap [ 2 ] || cap [ 1 ] , newTokens ) ;
tokens . push ( {
type : 'strong' ,
raw ,
text ,
tokens : newTokens
} ) ;
out += text ;
continue ;
}
// em
if ( cap = this . rules . inline . em . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
newTokens = tokens ? [ ] : null ;
text = this . inlineTokens ( cap [ 6 ] || cap [ 5 ] || cap [ 4 ] || cap [ 3 ] || cap [ 2 ] || cap [ 1 ] , newTokens ) ;
tokens . push ( {
type : 'em' ,
raw ,
text ,
tokens : newTokens
} ) ;
out += text ;
continue ;
}
// code
if ( cap = this . rules . inline . code . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
text = escape$1 ( cap [ 2 ] . trim ( ) , true ) ;
tokens . push ( {
type : 'codespan' ,
raw ,
text
} ) ;
out += text ;
continue ;
}
// br
if ( cap = this . rules . inline . br . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
tokens . push ( {
type : 'br' ,
raw
} ) ;
out += '\n' ;
continue ;
}
// del (gfm)
if ( cap = this . rules . inline . del . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
newTokens = tokens ? [ ] : null ;
text = this . inlineTokens ( cap [ 1 ] , newTokens ) ;
tokens . push ( {
type : 'del' ,
raw ,
text ,
tokens : newTokens
} ) ;
out += text ;
continue ;
}
// autolink
if ( cap = this . rules . inline . autolink . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
if ( cap [ 2 ] === '@' ) {
text = escape$1 ( this . options . mangle ? this . mangle ( cap [ 1 ] ) : cap [ 1 ] ) ;
href = 'mailto:' + text ;
} else {
text = escape$1 ( cap [ 1 ] ) ;
href = text ;
}
tokens . push ( {
type : 'link' ,
raw ,
text ,
href ,
tokens : [
{
type : 'text' ,
raw : text ,
text
}
]
} ) ;
out += text ;
continue ;
}
// url (gfm)
if ( ! this . inLink && ( cap = this . rules . inline . url . exec ( src ) ) ) {
if ( cap [ 2 ] === '@' ) {
text = escape$1 ( this . options . mangle ? this . mangle ( cap [ 0 ] ) : cap [ 0 ] ) ;
href = 'mailto:' + text ;
} else {
// do extended autolink path validation
do {
prevCapZero = cap [ 0 ] ;
cap [ 0 ] = this . rules . inline . _backpedal . exec ( cap [ 0 ] ) [ 0 ] ;
} while ( prevCapZero !== cap [ 0 ] ) ;
text = escape$1 ( cap [ 0 ] ) ;
if ( cap [ 1 ] === 'www.' ) {
href = 'http://' + text ;
} else {
href = text ;
}
}
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
tokens . push ( {
type : 'link' ,
raw ,
text ,
href ,
tokens : [
{
type : 'text' ,
raw : text ,
text
}
]
} ) ;
out += text ;
continue ;
}
// text
if ( cap = this . rules . inline . text . exec ( src ) ) {
src = src . substring ( cap [ 0 ] . length ) ;
raw = cap [ 0 ] ;
if ( this . inRawBlock ) {
text = this . options . sanitize ? ( this . options . sanitizer ? this . options . sanitizer ( cap [ 0 ] ) : escape$1 ( cap [ 0 ] ) ) : cap [ 0 ] ;
} else {
text = escape$1 ( this . options . smartypants ? this . smartypants ( cap [ 0 ] ) : cap [ 0 ] ) ;
}
tokens . push ( {
type : 'text' ,
raw ,
text
} ) ;
out += text ;
continue ;
}
if ( src ) {
const errMsg = 'Infinite loop on byte: ' + src . charCodeAt ( 0 ) ;
if ( this . options . silent ) {
console . error ( errMsg ) ;
} else {
throw new Error ( errMsg ) ;
}
}
}
return out ;
}
escapes ( text ) {
return text ? text . replace ( inline$1 . _escapes , '$1' ) : text ;
}
/ * *
* tokenize Link
* /
outputLink ( cap , link , tokens , raw ) {
const href = link . href ;
const title = link . title ? escape$1 ( link . title ) : null ;
const newTokens = tokens ? [ ] : null ;
if ( cap [ 0 ] . charAt ( 0 ) !== '!' ) {
const text = this . inlineTokens ( cap [ 1 ] , newTokens ) ;
tokens . push ( {
type : 'link' ,
raw ,
text ,
href ,
title ,
tokens : newTokens
} ) ;
return text ;
} else {
const text = escape$1 ( cap [ 1 ] ) ;
tokens . push ( {
type : 'image' ,
raw ,
text ,
href ,
title
} ) ;
return text ;
}
}
/ * *
* Smartypants Transformations
* /
smartypants ( text ) {
return text
// em-dashes
. replace ( /---/g , '\u2014' )
// en-dashes
. replace ( /--/g , '\u2013' )
// opening singles
. replace ( /(^|[-\u2014/(\[{"\s])'/g , '$1\u2018' )
// closing singles & apostrophes
. replace ( /'/g , '\u2019' )
// opening doubles
. replace ( /(^|[-\u2014/(\[{\u2018\s])"/g , '$1\u201c' )
// closing doubles
. replace ( /"/g , '\u201d' )
// ellipses
. replace ( /\.{3}/g , '\u2026' ) ;
}
/ * *
* Mangle Links
* /
mangle ( text ) {
let out = '' ,
i ,
ch ;
const l = text . length ;
for ( i = 0 ; i < l ; i ++ ) {
ch = text . charCodeAt ( i ) ;
if ( Math . random ( ) > 0.5 ) {
ch = 'x' + ch . toString ( 16 ) ;
}
out += '&#' + ch + ';' ;
}
return out ;
}
} ;
const { defaults : defaults$2 } = defaults ;
const {
cleanUrl : cleanUrl$1 ,
escape : escape$2
} = helpers ;
/**
 * Renderer
 *
 * Converts parsed tokens into HTML fragments. Methods receive text that
 * has already been inline-rendered and return markup strings; block-level
 * fragments end with '\n', span-level ones do not.
 */
var Renderer_1 = class Renderer {
  constructor(options) {
    // fall back to the shared defaults when no options are given
    this.options = options || defaults$2;
  }

  /**
   * Fenced/indented code block. `infostring`'s first word is the language;
   * an options.highlight hook may replace the code (marking it escaped).
   */
  code(code, infostring, escaped) {
    const lang = (infostring || '').match(/\S*/)[0];
    if (this.options.highlight) {
      const highlighted = this.options.highlight(code, lang);
      if (highlighted != null && highlighted !== code) {
        escaped = true;
        code = highlighted;
      }
    }

    const body = escaped ? code : escape$2(code, true);
    if (!lang) {
      return `<pre><code>${body}</code></pre>`;
    }
    const className = this.options.langPrefix + escape$2(lang, true);
    return `<pre><code class="${className}">${body}</code></pre>\n`;
  }

  blockquote(quote) {
    return `<blockquote>\n${quote}</blockquote>\n`;
  }

  html(html) {
    // raw HTML passes through untouched
    return html;
  }

  /**
   * Heading; when options.headerIds is set, `raw` is slugified into an
   * id attribute (prefixed with options.headerPrefix).
   */
  heading(text, level, raw, slugger) {
    if (this.options.headerIds) {
      const id = this.options.headerPrefix + slugger.slug(raw);
      return `<h${level} id="${id}">${text}</h${level}>\n`;
    }
    // ignore IDs
    return `<h${level}>${text}</h${level}>\n`;
  }

  hr() {
    return this.options.xhtml ? '<hr/>\n' : '<hr>\n';
  }

  /**
   * Ordered/unordered list; a start attribute is emitted only for
   * ordered lists not starting at 1.
   */
  list(body, ordered, start) {
    const tag = ordered ? 'ol' : 'ul';
    const startAttr = (ordered && start !== 1) ? ` start="${start}"` : '';
    return `<${tag}${startAttr}>\n${body}</${tag}>\n`;
  }

  listitem(text) {
    return `<li>${text}</li>\n`;
  }

  /** Task-list checkbox; always rendered disabled. */
  checkbox(checked) {
    const checkedAttr = checked ? 'checked="" ' : '';
    const closer = this.options.xhtml ? ' /' : '';
    return `<input ${checkedAttr}disabled="" type="checkbox"${closer}> `;
  }

  paragraph(text) {
    return `<p>${text}</p>\n`;
  }

  table(header, body) {
    if (body) body = `<tbody>${body}</tbody>`;
    return `<table>\n<thead>\n${header}</thead>\n${body}</table>\n`;
  }

  tablerow(content) {
    return `<tr>\n${content}</tr>\n`;
  }

  tablecell(content, flags) {
    const tag = flags.header ? 'th' : 'td';
    const open = flags.align
      ? `<${tag} align="${flags.align}">`
      : `<${tag}>`;
    return `${open}${content}</${tag}>\n`;
  }

  // span level renderer
  strong(text) {
    return `<strong>${text}</strong>`;
  }

  em(text) {
    return `<em>${text}</em>`;
  }

  codespan(text) {
    return `<code>${text}</code>`;
  }

  br() {
    return this.options.xhtml ? '<br/>' : '<br>';
  }

  del(text) {
    return `<del>${text}</del>`;
  }

  /**
   * Anchor element; href is sanitized/resolved by cleanUrl, and a null
   * result (rejected URL) degrades to plain text.
   */
  link(href, title, text) {
    href = cleanUrl$1(this.options.sanitize, this.options.baseUrl, href);
    if (href === null) {
      return text;
    }
    let out = `<a href="${escape$2(href)}"${title ? ` title="${title}"` : ''}`;
    out += `>${text}</a>`;
    return out;
  }

  /** Image element; same URL sanitation/fallback as link(). */
  image(href, title, text) {
    href = cleanUrl$1(this.options.sanitize, this.options.baseUrl, href);
    if (href === null) {
      return text;
    }
    let out = `<img src="${href}" alt="${text}"${title ? ` title="${title}"` : ''}`;
    out += this.options.xhtml ? '/>' : '>';
    return out;
  }

  text(text) {
    return text;
  }
};
/**
 * TextRenderer
 * returns only the textual part of the token
 *
 * Used where plain text is wanted (e.g. heading slugs): all span-level
 * styling is dropped and only the contained text survives.
 */
var TextRenderer_1 = class TextRenderer {
  // no need for block level renderers

  // styling wrappers collapse to their contents
  strong(text) { return text; }
  em(text) { return text; }
  codespan(text) { return text; }
  del(text) { return text; }
  html(text) { return text; }
  text(text) { return text; }

  // links and images reduce to their label text (coerced to string)
  link(href, title, text) { return '' + text; }
  image(href, title, text) { return '' + text; }

  // line breaks vanish entirely
  br() { return ''; }
};
/**
 * Slugger generates header id
 *
 * Slugs are lowercased, tag- and punctuation-stripped, with whitespace
 * replaced by '-'. Repeated inputs get a '-N' suffix so ids stay unique
 * within one Slugger instance.
 */
var Slugger_1 = class Slugger {
  constructor() {
    // maps each issued slug to the count of suffixes handed out for it
    this.seen = {};
  }

  /**
   * Convert string to unique id
   */
  slug(value) {
    const base = value
      .toLowerCase()
      .trim()
      // remove html tags
      .replace(/<[!\/a-z].*?>/ig, '')
      // remove unwanted chars
      .replace(/[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,./:;<=>?@[\]^`{|}~]/g, '')
      .replace(/\s/g, '-');

    let slug = base;
    if (this.seen.hasOwnProperty(slug)) {
      // bump the counter until we find an unused '-N' variant
      do {
        this.seen[base]++;
        slug = `${base}-${this.seen[base]}`;
      } while (this.seen.hasOwnProperty(slug));
    }
    this.seen[slug] = 0;
    return slug;
  }
};
const { defaults : defaults$3 } = defaults ;
const {
unescape : unescape$1
} = helpers ;
/**
 * Parsing & Compiling
 *
 * Walks a lexed token tree and emits HTML through the configured
 * renderer. Holds per-run state (renderer, text renderer, slugger).
 */
var Parser_1 = class Parser {
  constructor(options) {
    this.options = options || defaults$3;
    // fall back to the stock HTML renderer and keep its options in sync
    this.options.renderer = this.options.renderer || new Renderer_1();
    this.renderer = this.options.renderer;
    this.renderer.options = this.options;
    this.textRenderer = new TextRenderer_1();
    this.slugger = new Slugger_1();
  }

  /**
   * Static Parse Method
   *
   * Convenience wrapper: one-shot parse with a fresh Parser.
   */
  static parse(tokens, options) {
    const parser = new Parser(options);
    return parser.parse(tokens);
  }

  /**
   * Parse Loop
   *
   * Renders block-level tokens. `top` is false when parsing nested
   * content (blockquotes, list items): it controls whether merged
   * 'text' tokens get wrapped in a paragraph.
   */
  parse(tokens, top = true) {
    let out = '',
      i,
      j,
      k,
      l2,
      l3,
      row,
      cell,
      header,
      body,
      token,
      ordered,
      start,
      loose,
      itemBody,
      item,
      checked,
      task,
      checkbox;

    const l = tokens.length;
    for (i = 0; i < l; i++) {
      token = tokens[i];
      switch (token.type) {
        case 'space': {
          // blank lines produce no output
          continue;
        }
        case 'hr': {
          out += this.renderer.hr();
          continue;
        }
        case 'heading': {
          out += this.renderer.heading(
            this.parseInline(token.tokens),
            token.depth,
            // plain (text-rendered, entity-unescaped) form feeds the slugger
            unescape$1(this.parseInline(token.tokens, this.textRenderer)),
            this.slugger);
          continue;
        }
        case 'code': {
          out += this.renderer.code(token.text,
            token.lang,
            token.escaped);
          continue;
        }
        case 'table': {
          header = '';

          // header row: one tablecell per header entry, aligned per column
          cell = '';
          l2 = token.header.length;
          for (j = 0; j < l2; j++) {
            cell += this.renderer.tablecell(
              this.parseInline(token.tokens.header[j]),
              { header: true, align: token.align[j] }
            );
          }
          header += this.renderer.tablerow(cell);

          // body rows
          body = '';
          l2 = token.cells.length;
          for (j = 0; j < l2; j++) {
            row = token.tokens.cells[j];

            cell = '';
            l3 = row.length;
            for (k = 0; k < l3; k++) {
              cell += this.renderer.tablecell(
                this.parseInline(row[k]),
                { header: false, align: token.align[k] }
              );
            }

            body += this.renderer.tablerow(cell);
          }
          out += this.renderer.table(header, body);
          continue;
        }
        case 'blockquote': {
          // recurse with top=true: quoted text keeps paragraph wrapping
          body = this.parse(token.tokens);
          out += this.renderer.blockquote(body);
          continue;
        }
        case 'list': {
          ordered = token.ordered;
          start = token.start;
          loose = token.loose;
          l2 = token.items.length;

          body = '';
          for (j = 0; j < l2; j++) {
            item = token.items[j];
            checked = item.checked;
            task = item.task;

            itemBody = '';
            if (item.task) {
              checkbox = this.renderer.checkbox(checked);
              if (loose) {
                // loose item: splice the checkbox into the first text token
                // (mutates the token tree) so it renders inside the paragraph
                if (item.tokens[0].type === 'text') {
                  item.tokens[0].text = checkbox + ' ' + item.tokens[0].text;
                  if (item.tokens[0].tokens && item.tokens[0].tokens.length > 0 && item.tokens[0].tokens[0].type === 'text') {
                    item.tokens[0].tokens[0].text = checkbox + ' ' + item.tokens[0].tokens[0].text;
                  }
                } else {
                  item.tokens.unshift({
                    type: 'text',
                    text: checkbox
                  });
                }
              } else {
                // tight item: checkbox goes directly before the content
                itemBody += checkbox;
              }
            }

            // nested parse; `loose` as `top` decides paragraph wrapping
            itemBody += this.parse(item.tokens, loose);
            body += this.renderer.listitem(itemBody, task, checked);
          }
          out += this.renderer.list(body, ordered, start);
          continue;
        }
        case 'html': {
          // TODO parse inline content if parameter markdown=1
          out += this.renderer.html(token.text);
          continue;
        }
        case 'paragraph': {
          out += this.renderer.paragraph(this.parseInline(token.tokens));
          continue;
        }
        case 'text': {
          body = token.tokens ? this.parseInline(token.tokens) : token.text;
          // merge consecutive 'text' tokens into one body (advances i)
          while (i + 1 < l && tokens[i + 1].type === 'text') {
            token = tokens[++i];
            body += '\n' + (token.tokens ? this.parseInline(token.tokens) : token.text);
          }
          out += top ? this.renderer.paragraph(body) : body;
          continue;
        }
        default: {
          const errMsg = 'Token with "' + token.type + '" type was not found.';
          if (this.options.silent) {
            // silent mode: log and abort (returns undefined, not partial output)
            console.error(errMsg);
            return;
          } else {
            throw new Error(errMsg);
          }
        }
      }
    }

    return out;
  }

  /**
   * Parse Inline Tokens
   *
   * Renders span-level tokens with the given renderer (defaults to the
   * HTML renderer; the text renderer is passed for plain-text output).
   */
  parseInline(tokens, renderer) {
    renderer = renderer || this.renderer;
    let out = '',
      i,
      token;

    const l = tokens.length;
    for (i = 0; i < l; i++) {
      token = tokens[i];
      switch (token.type) {
        case 'escape': {
          out += renderer.text(token.text);
          break;
        }
        case 'html': {
          out += renderer.html(token.text);
          break;
        }
        case 'link': {
          // label may itself contain inline tokens
          out += renderer.link(token.href, token.title, this.parseInline(token.tokens, renderer));
          break;
        }
        case 'image': {
          out += renderer.image(token.href, token.title, token.text);
          break;
        }
        case 'strong': {
          out += renderer.strong(this.parseInline(token.tokens, renderer));
          break;
        }
        case 'em': {
          out += renderer.em(this.parseInline(token.tokens, renderer));
          break;
        }
        case 'codespan': {
          out += renderer.codespan(token.text);
          break;
        }
        case 'br': {
          out += renderer.br();
          break;
        }
        case 'del': {
          out += renderer.del(this.parseInline(token.tokens, renderer));
          break;
        }
        case 'text': {
          out += renderer.text(token.text);
          break;
        }
        default: {
          const errMsg = 'Token with "' + token.type + '" type was not found.';
          if (this.options.silent) {
            // silent mode: log and abort (returns undefined)
            console.error(errMsg);
            return;
          } else {
            throw new Error(errMsg);
          }
        }
      }
    }
    return out;
  }
};
const {
merge : merge$2 ,
checkSanitizeDeprecation : checkSanitizeDeprecation$1 ,
escape : escape$3
} = helpers ;
const {
getDefaults ,
changeDefaults ,
defaults : defaults$4
} = defaults ;
/**
 * Marked
 *
 * Main entry point: convert a markdown string to HTML.
 *
 * Synchronous form: marked(src[, opt]) returns an HTML string.
 * Callback form: marked(src, [opt,] callback) — used when options.highlight
 * takes a callback (arity >= 3); highlighting runs asynchronously and the
 * result is delivered via callback(err, html).
 *
 * @param {string} src markdown source (throws on non-string input)
 * @param {object|function} [opt] options, or the callback itself
 * @param {function} [callback] node-style callback(err, html)
 */
function marked(src, opt, callback) {
  // throw error in case of non string input
  if (typeof src === 'undefined' || src === null) {
    throw new Error('marked(): input parameter is undefined or null');
  }
  if (typeof src !== 'string') {
    throw new Error('marked(): input parameter is of type '
      + Object.prototype.toString.call(src) + ', string expected');
  }

  if (callback || typeof opt === 'function') {
    if (!callback) {
      // marked(src, callback) form: shift arguments
      callback = opt;
      opt = null;
    }

    opt = merge$2({}, marked.defaults, opt || {});
    checkSanitizeDeprecation$1(opt);
    const highlight = opt.highlight;
    let tokens,
      pending,
      i = 0;

    try {
      tokens = Lexer_1.lex(src, opt);
    } catch (e) {
      return callback(e);
    }

    // count of top-level tokens still awaiting highlight callbacks
    pending = tokens.length;

    // invoked once per settled token (or directly on the fast paths);
    // parses and delivers the result when the last one completes
    const done = function(err) {
      if (err) {
        opt.highlight = highlight;
        return callback(err);
      }

      let out;
      try {
        out = Parser_1.parse(tokens, opt);
      } catch (e) {
        err = e;
      }

      // restore the user's highlighter (removed below so the parser
      // does not invoke it again synchronously)
      opt.highlight = highlight;

      return err
        ? callback(err)
        : callback(null, out);
    };

    // a sync highlighter (arity < 3: no callback parameter) needs no
    // async orchestration — finish immediately
    if (!highlight || highlight.length < 3) {
      return done();
    }

    delete opt.highlight;

    if (!pending) return done();

    for (; i < tokens.length; i++) {
      // IIFE captures the current token for the async callback
      (function(token) {
        if (token.type !== 'code') {
          // non-code tokens settle immediately
          return --pending || done();
        }
        return highlight(token.text, token.lang, function(err, code) {
          if (err) return done(err);
          if (code == null || code === token.text) {
            // highlighter declined; leave the token unchanged
            return --pending || done();
          }
          token.text = code;
          // mark so the renderer will not escape the highlighted output
          token.escaped = true;
          --pending || done();
        });
      })(tokens[i]);
    }

    return;
  }

  // synchronous path
  try {
    opt = merge$2({}, marked.defaults, opt || {});
    checkSanitizeDeprecation$1(opt);
    return Parser_1.parse(Lexer_1.lex(src, opt), opt);
  } catch (e) {
    e.message += '\nPlease report this to https://github.com/markedjs/marked.';
    if ((opt || marked.defaults).silent) {
      // silent mode: render the error itself instead of throwing
      return '<p>An error occurred:</p><pre>'
        + escape$3(e.message + '', true)
        + '</pre>';
    }
    throw e;
  }
}
/**
 * Options
 *
 * Merge user options into the shared defaults (mutating marked.defaults
 * and the module-level defaults store). Chainable; `marked.options` is
 * an alias for `marked.setOptions`.
 */
marked.setOptions = function(opt) {
  merge$2(marked.defaults, opt);
  changeDefaults(marked.defaults);
  return marked;
};
marked.options = marked.setOptions;
marked.getDefaults = getDefaults;
marked.defaults = defaults$4;

/**
 * Expose
 */
// public classes
marked.Parser = Parser_1;
marked.parser = Parser_1.parse;
marked.Renderer = Renderer_1;
marked.TextRenderer = TextRenderer_1;
marked.Lexer = Lexer_1;
marked.lexer = Lexer_1.lex;
marked.Slugger = Slugger_1;
// marked.parse is an alias for calling marked itself
marked.parse = marked;
var marked_1 = marked;

export default marked_1;