blackfriday-2.1.0/.gitignore
*.out
*.swp
*.8
*.6
_obj
_test*
markdown
tags
blackfriday-2.1.0/.travis.yml
sudo: false
language: go
go:
- "1.10.x"
- "1.11.x"
- tip
matrix:
fast_finish: true
allow_failures:
- go: tip
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go tool vet .
- go test -v ./...
blackfriday-2.1.0/LICENSE.txt
Blackfriday is distributed under the Simplified BSD License:
> Copyright © 2011 Russ Ross
> All rights reserved.
>
> Redistribution and use in source and binary forms, with or without
> modification, are permitted provided that the following conditions
> are met:
>
> 1. Redistributions of source code must retain the above copyright
> notice, this list of conditions and the following disclaimer.
>
> 2. Redistributions in binary form must reproduce the above
> copyright notice, this list of conditions and the following
> disclaimer in the documentation and/or other materials provided with
> the distribution.
>
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> POSSIBILITY OF SUCH DAMAGE.
blackfriday-2.1.0/README.md
Blackfriday
[![Build Status][BuildV2SVG]][BuildV2URL]
[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL]
===========
Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
is paranoid about its input (so you can safely feed it user-supplied
data), it is fast, it supports common extensions (tables, smart
punctuation substitutions, etc.), and it is safe for all utf-8
(unicode) input.
HTML output is currently supported, along with Smartypants
extensions.
It started as a translation from C of [Sundown][3].
Installation
------------
Blackfriday is compatible with modern Go releases in module mode.
With Go installed:
go get github.com/russross/blackfriday/v2
will resolve and add the package to the current development module,
then build and install it. Alternatively, you can achieve the same
if you import it in a package:
import "github.com/russross/blackfriday/v2"
and then run `go get` without parameters.
Legacy GOPATH mode is unsupported.
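For reference, a minimal `go.mod` for a project that depends on Blackfriday might
look like the following sketch (the module path `example.com/mysite` is a
placeholder, not something Blackfriday requires):

```
module example.com/mysite

go 1.11

require github.com/russross/blackfriday/v2 v2.1.0
```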
Versions
--------
The currently maintained and recommended version of Blackfriday is `v2`. It is
developed on its own branch, https://github.com/russross/blackfriday/tree/v2, and its
documentation is available at
https://pkg.go.dev/github.com/russross/blackfriday/v2.
It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`.
Version 2 offers a number of improvements over v1:
* Cleaned up API
* A separate call to [`Parse`][4], which produces an abstract syntax tree for
  the document (see the sketch below)
* Latest bug fixes
* Flexibility to easily add your own rendering extensions
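For illustration, here is a minimal sketch of that two-step flow: parse the input
into an AST, then walk the tree yourself. The heading-printing body (and the `fmt`
import it implies) is just an example, not something the library requires:

```go
md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
ast := md.Parse(input)
ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
	if entering && node.Type == blackfriday.Heading {
		fmt.Printf("heading, level %d\n", node.Level)
	}
	return blackfriday.GoToNext
})
```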
Potential drawbacks:
* Our benchmarks show v2 to be slightly slower than v1, currently in the
  ballpark of 15%.
* API breakage. If you can't afford to modify your code to adhere to the new API
  and don't care much about the new features, v2 is probably not for you.
* Several bug fixes are trailing behind and still need to be forward-ported to
v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
tracking.
If you are still interested in the legacy `v1`, you can import it from
`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
here: https://pkg.go.dev/github.com/russross/blackfriday.
Usage
-----
For the most sensible markdown processing, it is as simple as getting your input
into a byte slice and calling:
```go
output := blackfriday.Run(input)
```
Your input will be parsed and the output rendered with a set of the most popular
extensions enabled. If you want the most basic feature set, corresponding to the
bare Markdown specification, use:
```go
output := blackfriday.Run(input, blackfriday.WithNoExtensions())
```
### Sanitize untrusted content
Blackfriday itself does nothing to protect against malicious content. If you are
dealing with user-supplied markdown, we recommend running Blackfriday's output
through an HTML sanitizer such as [Bluemonday][5].
Here's an example of simple usage of Blackfriday together with Bluemonday:
```go
import (
"github.com/microcosm-cc/bluemonday"
"github.com/russross/blackfriday/v2"
)
// ...
unsafe := blackfriday.Run(input)
html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
```
### Custom options
If you want to customize the set of options, use `blackfriday.WithExtensions`,
`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
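For example, here is a hedged sketch that combines a custom extension set with an
explicitly configured HTML renderer (the particular flags chosen are arbitrary,
not a recommendation):

```go
renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
	Flags: blackfriday.CommonHTMLFlags | blackfriday.TOC,
})
output := blackfriday.Run(input,
	blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.Footnotes),
	blackfriday.WithRenderer(renderer),
)
```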
### `blackfriday-tool`
You can also check out `blackfriday-tool` for a more complete example
of how to use it. Download and install it using:
go get github.com/russross/blackfriday-tool
This is a simple command-line tool that lets you process a markdown file
with a standalone program. You can also browse the source directly on
GitHub if you are just looking for example code.
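The real tool has more features; what follows is only a minimal sketch of a
standalone program in the same spirit (the file-path argument and error handling
are illustrative):

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Read the markdown file named on the command line and write the
	// rendered HTML to stdout.
	input, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(blackfriday.Run(input))
}
```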
blackfriday-2.1.0/block.go
// check for an <hr> tag
if size := p.htmlHr(data, doRender); size > 0 {
return size
}
// no special case recognized
return 0
}
// look for an unindented matching closing tag
// followed by a blank line
found := false
/*
closetag := []byte("\n</" + curtag + ">")
j = len(curtag) + 1
for !found {
// scan for a closing tag at the beginning of a line
if skip := bytes.Index(data[j:], closetag); skip >= 0 {
j += skip + len(closetag)
} else {
break
}
// see if it is the only thing on the line
if skip := p.isEmpty(data[j:]); skip > 0 {
// see if it is followed by a blank line/eof
j += skip
if j >= len(data) {
found = true
i = j
} else {
if skip := p.isEmpty(data[j:]); skip > 0 {
j += skip
found = true
i = j
}
}
}
}
*/
// if not found, try a second pass looking for indented match
// but not if tag is "ins" or "del" (following original Markdown.pl)
if !found && curtag != "ins" && curtag != "del" {
i = 1
for i < len(data) {
i++
for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
i++
}
if i+2+len(curtag) >= len(data) {
break
}
j = p.htmlFindEnd(curtag, data[i-1:])
if j > 0 {
i += j - 1
found = true
break
}
}
}
if !found {
return 0
}
// the end of the block has been found
if doRender {
// trim newlines
end := i
for end > 0 && data[end-1] == '\n' {
end--
}
finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
}
return i
}
func finalizeHTMLBlock(block *Node) {
block.Literal = block.content
block.content = nil
}
// HTML comment, lax form
func (p *Markdown) htmlComment(data []byte, doRender bool) int {
i := p.inlineHTMLComment(data)
// needs to end with a blank line
if j := p.isEmpty(data[i:]); j > 0 {
size := i + j
if doRender {
// trim trailing newlines
end := size
for end > 0 && data[end-1] == '\n' {
end--
}
block := p.addBlock(HTMLBlock, data[:end])
finalizeHTMLBlock(block)
}
return size
}
return 0
}
// HR, which is the only self-closing block tag considered
func (p *Markdown) htmlHr(data []byte, doRender bool) int {
if len(data) < 4 {
return 0
}
if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
return 0
}
if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
// not an <hr> tag after all; at least not a valid one
return 0
}
i := 3
for i < len(data) && data[i] != '>' && data[i] != '\n' {
i++
}
if i < len(data) && data[i] == '>' {
i++
if j := p.isEmpty(data[i:]); j > 0 {
size := i + j
if doRender {
// trim newlines
end := size
for end > 0 && data[end-1] == '\n' {
end--
}
finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
}
return size
}
}
return 0
}
func (p *Markdown) htmlFindTag(data []byte) (string, bool) {
i := 0
for i < len(data) && isalnum(data[i]) {
i++
}
key := string(data[:i])
if _, ok := blockTags[key]; ok {
return key, true
}
return "", false
}
func (p *Markdown) htmlFindEnd(tag string, data []byte) int {
// assume data[0] == '<' && data[1] == '/' already tested
if tag == "hr" {
return 2
}
// check if tag is a match
closetag := []byte("</" + tag + ">")
if !bytes.HasPrefix(data, closetag) {
return 0
}
i := len(closetag)
// check that the rest of the line is blank
skip := 0
if skip = p.isEmpty(data[i:]); skip == 0 {
return 0
}
i += skip
skip = 0
if i >= len(data) {
return i
}
if p.extensions&LaxHTMLBlocks != 0 {
return i
}
if skip = p.isEmpty(data[i:]); skip == 0 {
// following line must be blank
return 0
}
return i + skip
}
func (*Markdown) isEmpty(data []byte) int {
// it is okay to call isEmpty on an empty buffer
if len(data) == 0 {
return 0
}
var i int
for i = 0; i < len(data) && data[i] != '\n'; i++ {
if data[i] != ' ' && data[i] != '\t' {
return 0
}
}
if i < len(data) && data[i] == '\n' {
i++
}
return i
}
func (*Markdown) isHRule(data []byte) bool {
i := 0
// skip up to three spaces
for i < 3 && data[i] == ' ' {
i++
}
// look at the hrule char
if data[i] != '*' && data[i] != '-' && data[i] != '_' {
return false
}
c := data[i]
// the whole line must be the char or whitespace
n := 0
for i < len(data) && data[i] != '\n' {
switch {
case data[i] == c:
n++
case data[i] != ' ':
return false
}
i++
}
return n >= 3
}
// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
// and returns the end index if so, or 0 otherwise. It also returns the marker found.
// If info is not nil, it gets set to the syntax specified in the fence line.
func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) {
i, size := 0, 0
// skip up to three spaces
for i < len(data) && i < 3 && data[i] == ' ' {
i++
}
// check for the marker characters: ~ or `
if i >= len(data) {
return 0, ""
}
if data[i] != '~' && data[i] != '`' {
return 0, ""
}
c := data[i]
// the whole line must be the same char or whitespace
for i < len(data) && data[i] == c {
size++
i++
}
// the marker char must occur at least 3 times
if size < 3 {
return 0, ""
}
marker = string(data[i-size : i])
// if this is the end marker, it must match the beginning marker
if oldmarker != "" && marker != oldmarker {
return 0, ""
}
// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
// into one, always get the info string, and discard it if the caller doesn't care.
if info != nil {
infoLength := 0
i = skipChar(data, i, ' ')
if i >= len(data) {
if i == len(data) {
return i, marker
}
return 0, ""
}
infoStart := i
if data[i] == '{' {
i++
infoStart++
for i < len(data) && data[i] != '}' && data[i] != '\n' {
infoLength++
i++
}
if i >= len(data) || data[i] != '}' {
return 0, ""
}
// strip all whitespace at the beginning and the end
// of the {} block
for infoLength > 0 && isspace(data[infoStart]) {
infoStart++
infoLength--
}
for infoLength > 0 && isspace(data[infoStart+infoLength-1]) {
infoLength--
}
i++
i = skipChar(data, i, ' ')
} else {
for i < len(data) && !isverticalspace(data[i]) {
infoLength++
i++
}
}
*info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength]))
}
if i == len(data) {
return i, marker
}
if i > len(data) || data[i] != '\n' {
return 0, ""
}
return i + 1, marker // Take newline into account.
}
// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
// If doRender is true, a final newline is mandatory to recognize the fenced code block.
func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int {
var info string
beg, marker := isFenceLine(data, &info, "")
if beg == 0 || beg >= len(data) {
return 0
}
fenceLength := beg - 1
var work bytes.Buffer
work.Write([]byte(info))
work.WriteByte('\n')
for {
// safe to assume beg < len(data)
// check for the end of the code block
fenceEnd, _ := isFenceLine(data[beg:], nil, marker)
if fenceEnd != 0 {
beg += fenceEnd
break
}
// copy the current line
end := skipUntilChar(data, beg, '\n') + 1
// did we reach the end of the buffer without a closing marker?
if end >= len(data) {
return 0
}
// verbatim copy to the working buffer
if doRender {
work.Write(data[beg:end])
}
beg = end
}
if doRender {
block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
block.IsFenced = true
block.FenceLength = fenceLength
finalizeCodeBlock(block)
}
return beg
}
func unescapeChar(str []byte) []byte {
if str[0] == '\\' {
return []byte{str[1]}
}
return []byte(html.UnescapeString(string(str)))
}
func unescapeString(str []byte) []byte {
if reBackslashOrAmp.Match(str) {
return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar)
}
return str
}
func finalizeCodeBlock(block *Node) {
if block.IsFenced {
newlinePos := bytes.IndexByte(block.content, '\n')
firstLine := block.content[:newlinePos]
rest := block.content[newlinePos+1:]
block.Info = unescapeString(bytes.Trim(firstLine, "\n"))
block.Literal = rest
} else {
block.Literal = block.content
}
block.content = nil
}
func (p *Markdown) table(data []byte) int {
table := p.addBlock(Table, nil)
i, columns := p.tableHeader(data)
if i == 0 {
p.tip = table.Parent
table.Unlink()
return 0
}
p.addBlock(TableBody, nil)
for i < len(data) {
pipes, rowStart := 0, i
for ; i < len(data) && data[i] != '\n'; i++ {
if data[i] == '|' {
pipes++
}
}
if pipes == 0 {
i = rowStart
break
}
// include the newline in data sent to tableRow
if i < len(data) && data[i] == '\n' {
i++
}
p.tableRow(data[rowStart:i], columns, false)
}
return i
}
// check if the specified position is preceded by an odd number of backslashes
func isBackslashEscaped(data []byte, i int) bool {
backslashes := 0
for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
backslashes++
}
return backslashes&1 == 1
}
func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) {
i := 0
colCount := 1
for i = 0; i < len(data) && data[i] != '\n'; i++ {
if data[i] == '|' && !isBackslashEscaped(data, i) {
colCount++
}
}
// doesn't look like a table header
if colCount == 1 {
return
}
// include the newline in the data sent to tableRow
j := i
if j < len(data) && data[j] == '\n' {
j++
}
header := data[:j]
// column count ignores pipes at beginning or end of line
if data[0] == '|' {
colCount--
}
if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
colCount--
}
columns = make([]CellAlignFlags, colCount)
// move on to the header underline
i++
if i >= len(data) {
return
}
if data[i] == '|' && !isBackslashEscaped(data, i) {
i++
}
i = skipChar(data, i, ' ')
// each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
// and trailing | optional on last column
col := 0
for i < len(data) && data[i] != '\n' {
dashes := 0
if data[i] == ':' {
i++
columns[col] |= TableAlignmentLeft
dashes++
}
for i < len(data) && data[i] == '-' {
i++
dashes++
}
if i < len(data) && data[i] == ':' {
i++
columns[col] |= TableAlignmentRight
dashes++
}
for i < len(data) && data[i] == ' ' {
i++
}
if i == len(data) {
return
}
// end of column test is messy
switch {
case dashes < 3:
// not a valid column
return
case data[i] == '|' && !isBackslashEscaped(data, i):
// marker found, now skip past trailing whitespace
col++
i++
for i < len(data) && data[i] == ' ' {
i++
}
// trailing junk found after last column
if col >= colCount && i < len(data) && data[i] != '\n' {
return
}
case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
// something else found where marker was required
return
case data[i] == '\n':
// marker is optional for the last column
col++
default:
// trailing junk found after last column
return
}
}
if col != colCount {
return
}
p.addBlock(TableHead, nil)
p.tableRow(header, columns, true)
size = i
if size < len(data) && data[size] == '\n' {
size++
}
return
}
func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) {
p.addBlock(TableRow, nil)
i, col := 0, 0
if data[i] == '|' && !isBackslashEscaped(data, i) {
i++
}
for col = 0; col < len(columns) && i < len(data); col++ {
for i < len(data) && data[i] == ' ' {
i++
}
cellStart := i
for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
i++
}
cellEnd := i
// skip the end-of-cell marker, possibly taking us past end of buffer
i++
for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' {
cellEnd--
}
cell := p.addBlock(TableCell, data[cellStart:cellEnd])
cell.IsHeader = header
cell.Align = columns[col]
}
// pad it out with empty columns to get the right number
for ; col < len(columns); col++ {
cell := p.addBlock(TableCell, nil)
cell.IsHeader = header
cell.Align = columns[col]
}
// silently ignore rows with too many cells
}
// returns blockquote prefix length
func (p *Markdown) quotePrefix(data []byte) int {
i := 0
for i < 3 && i < len(data) && data[i] == ' ' {
i++
}
if i < len(data) && data[i] == '>' {
if i+1 < len(data) && data[i+1] == ' ' {
return i + 2
}
return i + 1
}
return 0
}
// blockquote ends with at least one blank line
// followed by something without a blockquote prefix
func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool {
if p.isEmpty(data[beg:]) <= 0 {
return false
}
if end >= len(data) {
return true
}
return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
}
// parse a blockquote fragment
func (p *Markdown) quote(data []byte) int {
block := p.addBlock(BlockQuote, nil)
var raw bytes.Buffer
beg, end := 0, 0
for beg < len(data) {
end = beg
// Step over whole lines, collecting them. While doing that, check for
// fenced code and if one's found, incorporate it altogether,
// regardless of any contents inside it
for end < len(data) && data[end] != '\n' {
if p.extensions&FencedCode != 0 {
if i := p.fencedCodeBlock(data[end:], false); i > 0 {
// -1 to compensate for the extra end++ after the loop:
end += i - 1
break
}
}
end++
}
if end < len(data) && data[end] == '\n' {
end++
}
if pre := p.quotePrefix(data[beg:]); pre > 0 {
// skip the prefix
beg += pre
} else if p.terminateBlockquote(data, beg, end) {
break
}
// this line is part of the blockquote
raw.Write(data[beg:end])
beg = end
}
p.block(raw.Bytes())
p.finalize(block)
return end
}
// returns prefix length for block code
func (p *Markdown) codePrefix(data []byte) int {
if len(data) >= 1 && data[0] == '\t' {
return 1
}
if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
return 4
}
return 0
}
func (p *Markdown) code(data []byte) int {
var work bytes.Buffer
i := 0
for i < len(data) {
beg := i
for i < len(data) && data[i] != '\n' {
i++
}
if i < len(data) && data[i] == '\n' {
i++
}
blankline := p.isEmpty(data[beg:i]) > 0
if pre := p.codePrefix(data[beg:i]); pre > 0 {
beg += pre
} else if !blankline {
// non-empty, non-prefixed line breaks the pre
i = beg
break
}
// verbatim copy to the working buffer
if blankline {
work.WriteByte('\n')
} else {
work.Write(data[beg:i])
}
}
// trim all the \n off the end of work
workbytes := work.Bytes()
eol := len(workbytes)
for eol > 0 && workbytes[eol-1] == '\n' {
eol--
}
if eol != len(workbytes) {
work.Truncate(eol)
}
work.WriteByte('\n')
block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
block.IsFenced = false
finalizeCodeBlock(block)
return i
}
// returns unordered list item prefix
func (p *Markdown) uliPrefix(data []byte) int {
i := 0
// start with up to 3 spaces
for i < len(data) && i < 3 && data[i] == ' ' {
i++
}
if i >= len(data)-1 {
return 0
}
// need one of {'*', '+', '-'} followed by a space or a tab
if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
(data[i+1] != ' ' && data[i+1] != '\t') {
return 0
}
return i + 2
}
// returns ordered list item prefix
func (p *Markdown) oliPrefix(data []byte) int {
i := 0
// start with up to 3 spaces
for i < 3 && i < len(data) && data[i] == ' ' {
i++
}
// count the digits
start := i
for i < len(data) && data[i] >= '0' && data[i] <= '9' {
i++
}
if start == i || i >= len(data)-1 {
return 0
}
// we need at least one digit followed by a dot and a space or a tab
if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
return 0
}
return i + 2
}
// returns definition list item prefix
func (p *Markdown) dliPrefix(data []byte) int {
if len(data) < 2 {
return 0
}
i := 0
// need a ':' followed by a space or a tab
if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') {
return 0
}
for i < len(data) && data[i] == ' ' {
i++
}
return i + 2
}
// parse ordered or unordered list block
func (p *Markdown) list(data []byte, flags ListType) int {
i := 0
flags |= ListItemBeginningOfList
block := p.addBlock(List, nil)
block.ListFlags = flags
block.Tight = true
for i < len(data) {
skip := p.listItem(data[i:], &flags)
if flags&ListItemContainsBlock != 0 {
block.ListData.Tight = false
}
i += skip
if skip == 0 || flags&ListItemEndOfList != 0 {
break
}
flags &= ^ListItemBeginningOfList
}
above := block.Parent
finalizeList(block)
p.tip = above
return i
}
// Returns true if the list item is not the same type as its parent list
func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool {
if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 {
return true
} else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 {
return true
} else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) {
return true
}
return false
}
// Returns true if block ends with a blank line, descending if needed
// into lists and sublists.
func endsWithBlankLine(block *Node) bool {
// TODO: figure this out. Always false now.
for block != nil {
//if block.lastLineBlank {
//return true
//}
t := block.Type
if t == List || t == Item {
block = block.LastChild
} else {
break
}
}
return false
}
func finalizeList(block *Node) {
block.open = false
item := block.FirstChild
for item != nil {
// check for non-final list item ending with blank line:
if endsWithBlankLine(item) && item.Next != nil {
block.ListData.Tight = false
break
}
// recurse into children of list item, to see if there are spaces
// between any of them:
subItem := item.FirstChild
for subItem != nil {
if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) {
block.ListData.Tight = false
break
}
subItem = subItem.Next
}
item = item.Next
}
}
// Parse a single list item.
// Assumes initial prefix is already removed if this is a sublist.
func (p *Markdown) listItem(data []byte, flags *ListType) int {
// keep track of the indentation of the first line
itemIndent := 0
if data[0] == '\t' {
itemIndent += 4
} else {
for itemIndent < 3 && data[itemIndent] == ' ' {
itemIndent++
}
}
var bulletChar byte = '*'
i := p.uliPrefix(data)
if i == 0 {
i = p.oliPrefix(data)
} else {
bulletChar = data[i-2]
}
if i == 0 {
i = p.dliPrefix(data)
// reset definition term flag
if i > 0 {
*flags &= ^ListTypeTerm
}
}
if i == 0 {
// if in definition list, set term flag and continue
if *flags&ListTypeDefinition != 0 {
*flags |= ListTypeTerm
} else {
return 0
}
}
// skip leading whitespace on first line
for i < len(data) && data[i] == ' ' {
i++
}
// find the end of the line
line := i
for i > 0 && i < len(data) && data[i-1] != '\n' {
i++
}
// get working buffer
var raw bytes.Buffer
// put the first line into the working buffer
raw.Write(data[line:i])
line = i
// process the following lines
containsBlankLine := false
sublist := 0
codeBlockMarker := ""
gatherlines:
for line < len(data) {
i++
// find the end of this line
for i < len(data) && data[i-1] != '\n' {
i++
}
// if it is an empty line, guess that it is part of this item
// and move on to the next line
if p.isEmpty(data[line:i]) > 0 {
containsBlankLine = true
line = i
continue
}
// calculate the indentation
indent := 0
indentIndex := 0
if data[line] == '\t' {
indentIndex++
indent += 4
} else {
for indent < 4 && line+indent < i && data[line+indent] == ' ' {
indent++
indentIndex++
}
}
chunk := data[line+indentIndex : i]
if p.extensions&FencedCode != 0 {
// determine if in or out of codeblock
// if in codeblock, ignore normal list processing
_, marker := isFenceLine(chunk, nil, codeBlockMarker)
if marker != "" {
if codeBlockMarker == "" {
// start of codeblock
codeBlockMarker = marker
} else {
// end of codeblock.
codeBlockMarker = ""
}
}
// we are in a codeblock, write line, and continue
if codeBlockMarker != "" || marker != "" {
raw.Write(data[line+indentIndex : i])
line = i
continue gatherlines
}
}
// evaluate how this line fits in
switch {
// is this a nested list item?
case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
p.oliPrefix(chunk) > 0 ||
p.dliPrefix(chunk) > 0:
// to be a nested list, it must be indented more
// if not, it is either a different kind of list
// or the next item in the same list
if indent <= itemIndent {
if p.listTypeChanged(chunk, flags) {
*flags |= ListItemEndOfList
} else if containsBlankLine {
*flags |= ListItemContainsBlock
}
break gatherlines
}
if containsBlankLine {
*flags |= ListItemContainsBlock
}
// is this the first item in the nested list?
if sublist == 0 {
sublist = raw.Len()
}
// is this a nested prefix heading?
case p.isPrefixHeading(chunk):
// if the heading is not indented, it is not nested in the list
// and thus ends the list
if containsBlankLine && indent < 4 {
*flags |= ListItemEndOfList
break gatherlines
}
*flags |= ListItemContainsBlock
// anything following an empty line is only part
// of this item if it is indented 4 spaces
// (regardless of the indentation of the beginning of the item)
case containsBlankLine && indent < 4:
if *flags&ListTypeDefinition != 0 && i < len(data)-1 {
// is the next item still a part of this list?
next := i
for next < len(data) && data[next] != '\n' {
next++
}
for next < len(data)-1 && data[next] == '\n' {
next++
}
if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
*flags |= ListItemEndOfList
}
} else {
*flags |= ListItemEndOfList
}
break gatherlines
// a blank line means this should be parsed as a block
case containsBlankLine:
raw.WriteByte('\n')
*flags |= ListItemContainsBlock
}
// if this line was preceded by one or more blanks,
// re-introduce the blank into the buffer
if containsBlankLine {
containsBlankLine = false
raw.WriteByte('\n')
}
// add the line into the working buffer without prefix
raw.Write(data[line+indentIndex : i])
line = i
}
rawBytes := raw.Bytes()
block := p.addBlock(Item, nil)
block.ListFlags = *flags
block.Tight = false
block.BulletChar = bulletChar
block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark
// render the contents of the list item
if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 {
// intermediate render of block item, except for definition term
if sublist > 0 {
p.block(rawBytes[:sublist])
p.block(rawBytes[sublist:])
} else {
p.block(rawBytes)
}
} else {
// intermediate render of inline item
if sublist > 0 {
child := p.addChild(Paragraph, 0)
child.content = rawBytes[:sublist]
p.block(rawBytes[sublist:])
} else {
child := p.addChild(Paragraph, 0)
child.content = rawBytes
}
}
return line
}
// render a single paragraph that has already been parsed out
func (p *Markdown) renderParagraph(data []byte) {
if len(data) == 0 {
return
}
// trim leading spaces
beg := 0
for data[beg] == ' ' {
beg++
}
end := len(data)
// trim trailing newline
if data[len(data)-1] == '\n' {
end--
}
// trim trailing spaces
for end > beg && data[end-1] == ' ' {
end--
}
p.addBlock(Paragraph, data[beg:end])
}
func (p *Markdown) paragraph(data []byte) int {
// prev: index of 1st char of previous line
// line: index of 1st char of current line
// i: index of cursor/end of current line
var prev, line, i int
tabSize := TabSizeDefault
if p.extensions&TabSizeEight != 0 {
tabSize = TabSizeDouble
}
// keep going until we find something to mark the end of the paragraph
for i < len(data) {
// mark the beginning of the current line
prev = line
current := data[i:]
line = i
// did we find a reference or a footnote? If so, end a paragraph
// preceding it and report that we have consumed up to the end of that
// reference:
if refEnd := isReference(p, current, tabSize); refEnd > 0 {
p.renderParagraph(data[:i])
return i + refEnd
}
// did we find a blank line marking the end of the paragraph?
if n := p.isEmpty(current); n > 0 {
// is this blank line followed by a definition list item?
if p.extensions&DefinitionLists != 0 {
if i < len(data)-1 && data[i+1] == ':' {
return p.list(data[prev:], ListTypeDefinition)
}
}
p.renderParagraph(data[:i])
return i + n
}
// an underline under some text marks a heading, so our paragraph ended on prev line
if i > 0 {
if level := p.isUnderlinedHeading(current); level > 0 {
// render the paragraph
p.renderParagraph(data[:prev])
// ignore leading and trailing whitespace
eol := i - 1
for prev < eol && data[prev] == ' ' {
prev++
}
for eol > prev && data[eol-1] == ' ' {
eol--
}
id := ""
if p.extensions&AutoHeadingIDs != 0 {
id = SanitizedAnchorName(string(data[prev:eol]))
}
block := p.addBlock(Heading, data[prev:eol])
block.Level = level
block.HeadingID = id
// find the end of the underline
for i < len(data) && data[i] != '\n' {
i++
}
return i
}
}
// if the next line starts a block of HTML, then the paragraph ends here
if p.extensions&LaxHTMLBlocks != 0 {
if data[i] == '<' && p.html(current, false) > 0 {
// rewind to before the HTML block
p.renderParagraph(data[:i])
return i
}
}
// if there's a prefixed heading or a horizontal rule after this, paragraph is over
if p.isPrefixHeading(current) || p.isHRule(current) {
p.renderParagraph(data[:i])
return i
}
// if there's a fenced code block, paragraph is over
if p.extensions&FencedCode != 0 {
if p.fencedCodeBlock(current, false) > 0 {
p.renderParagraph(data[:i])
return i
}
}
// if there's a definition list item, prev line is a definition term
if p.extensions&DefinitionLists != 0 {
if p.dliPrefix(current) != 0 {
ret := p.list(data[prev:], ListTypeDefinition)
return ret
}
}
// if there's a list after this, paragraph is over
if p.extensions&NoEmptyLineBeforeBlock != 0 {
if p.uliPrefix(current) != 0 ||
p.oliPrefix(current) != 0 ||
p.quotePrefix(current) != 0 ||
p.codePrefix(current) != 0 {
p.renderParagraph(data[:i])
return i
}
}
// otherwise, scan to the beginning of the next line
nl := bytes.IndexByte(data[i:], '\n')
if nl >= 0 {
i += nl + 1
} else {
i += len(data[i:])
}
}
p.renderParagraph(data[:i])
return i
}
func skipChar(data []byte, start int, char byte) int {
i := start
for i < len(data) && data[i] == char {
i++
}
return i
}
func skipUntilChar(text []byte, start int, char byte) int {
i := start
for i < len(text) && text[i] != char {
i++
}
return i
}
// SanitizedAnchorName returns a sanitized anchor name for the given text.
//
// It implements the algorithm specified in the package comment.
func SanitizedAnchorName(text string) string {
var anchorName []rune
futureDash := false
for _, r := range text {
switch {
case unicode.IsLetter(r) || unicode.IsNumber(r):
if futureDash && len(anchorName) > 0 {
anchorName = append(anchorName, '-')
}
futureDash = false
anchorName = append(anchorName, unicode.ToLower(r))
default:
futureDash = true
}
}
return string(anchorName)
}
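
// Illustrative note (not part of the upstream source): typical inputs and the
// anchor names SanitizedAnchorName produces for them, consistent with the
// algorithm described in doc.go and with the tests in block_test.go below:
//
//	SanitizedAnchorName("This is a header") // "this-is-a-header"
//	SanitizedAnchorName("Hello, 世界")        // "hello-世界"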
blackfriday-2.1.0/block_test.go
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross
\n",
"## Header 2\n",
"Header 2
\n",
"### Header 3\n",
"Header 3
\n",
"#### Header 4\n",
"Header 4
\n",
"##### Header 5\n",
"Header 5
\n",
"###### Header 6\n",
"Header 6
\n",
"####### Header 7\n",
"# Header 7
\n",
"#Header 1\n",
"Header 1
\n",
"##Header 2\n",
"Header 2
\n",
"###Header 3\n",
"Header 3
\n",
"####Header 4\n",
"Header 4
\n",
"#####Header 5\n",
"Header 5
\n",
"######Header 6\n",
"Header 6
\n",
"#######Header 7\n",
"#Header 7
\n",
"Hello\n# Header 1\nGoodbye\n",
"Header 1
\n\n\n
\n",
"* List\n#Header\n* List\n",
"Header
\n
\n",
"* List\n * Nested list\n # Nested header\n",
"Header
\n
\n",
"#Header 1 \\#\n",
"\n
Nested header
Header 1 #
\n",
"#Header 1 \\# foo\n",
"Header 1 # foo
\n",
"#Header 1 #\\##\n",
"Header 1 ##
\n",
}
doTestsBlock(t, tests, 0)
}
func TestPrefixHeaderSpaceExtension(t *testing.T) {
t.Parallel()
var tests = []string{
"# Header 1\n",
"Header 1
\n",
"## Header 2\n",
"Header 2
\n",
"### Header 3\n",
"Header 3
\n",
"#### Header 4\n",
"Header 4
\n",
"##### Header 5\n",
"Header 5
\n",
"###### Header 6\n",
"Header 6
\n",
"####### Header 7\n",
"Header 1
\n\n\n
\n",
"* List\n#Header\n* List\n",
"Header
\n
\n",
"* List\n * Nested list\n # Nested header\n",
"\n
\n",
}
doTestsBlock(t, tests, SpaceHeadings)
}
func TestPrefixHeaderIdExtension(t *testing.T) {
t.Parallel()
var tests = []string{
"# Header 1 {#someid}\n",
"\n
Nested header
Header 1
\n",
"# Header 1 {#someid} \n",
"Header 1
\n",
"# Header 1 {#someid}\n",
"Header 1
\n",
"# Header 1 {#someid\n",
"Header 1 {#someid
\n",
"# Header 1 {#someid\n",
"Header 1 {#someid
\n",
"# Header 1 {#someid}}\n",
"Header 1
\n\nHeader 2
\n",
"### Header 3 {#someid}\n",
"Header 3
\n",
"#### Header 4 {#someid}\n",
"Header 4
\n",
"##### Header 5 {#someid}\n",
"Header 5
\n",
"###### Header 6 {#someid}\n",
"Header 6
\n",
"####### Header 7 {#someid}\n",
"# Header 7
\n",
"# Header 1 # {#someid}\n",
"Header 1
\n",
"## Header 2 ## {#someid}\n",
"Header 2
\n",
"Hello\n# Header 1\nGoodbye\n",
"Header 1
\n\n\n
\n",
"* List\n#Header {#someid}\n* List\n",
"Header
\n
\n",
"* List\n * Nested list\n # Nested header {#someid}\n",
"Header
\n
\n",
}
doTestsBlock(t, tests, HeadingIDs)
}
func TestPrefixHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
t.Parallel()
var tests = []string{
"# header 1 {#someid}\n",
"\n
Nested header
header 1
\n",
"## header 2 {#someid}\n",
"header 2
\n",
"### header 3 {#someid}\n",
"header 3
\n",
"#### header 4 {#someid}\n",
"header 4
\n",
"##### header 5 {#someid}\n",
"header 5
\n",
"###### header 6 {#someid}\n",
"header 6
\n",
"####### header 7 {#someid}\n",
"# header 7
\n",
"# header 1 # {#someid}\n",
"header 1
\n",
"## header 2 ## {#someid}\n",
"header 2
\n",
"* List\n# Header {#someid}\n* List\n",
"\n
\n",
"* List\n#Header {#someid}\n* List\n",
"Header
\n
\n",
"* List\n * Nested list\n # Nested header {#someid}\n",
"Header
\n
\n",
}
parameters := HTMLRendererParameters{
HeadingIDPrefix: "PRE:",
HeadingIDSuffix: ":POST",
}
doTestsParam(t, tests, TestParams{
extensions: HeadingIDs,
HTMLFlags: UseXHTML,
HTMLRendererParameters: parameters,
})
}
func TestPrefixAutoHeaderIdExtension(t *testing.T) {
t.Parallel()
var tests = []string{
"# Header 1\n",
"\n
Nested header
Header 1
\n",
"# Header 1 \n",
"Header 1
\n",
"## Header 2\n",
"Header 2
\n",
"### Header 3\n",
"Header 3
\n",
"#### Header 4\n",
"Header 4
\n",
"##### Header 5\n",
"Header 5
\n",
"###### Header 6\n",
"Header 6
\n",
"####### Header 7\n",
"# Header 7
\n",
"Hello\n# Header 1\nGoodbye\n",
"Header 1
\n\n\n
\n",
"* List\n#Header\n* List\n",
"Header
\n
\n",
"* List\n * Nested list\n # Nested header\n",
"Header
\n
\n",
"# Header\n\n# Header\n",
"\n
Nested header
Header
\n\nHeader
\n",
"# Header 1\n\n# Header 1",
"Header 1
\n\nHeader 1
\n",
"# Header\n\n# Header 1\n\n# Header\n\n# Header",
"Header
\n\nHeader 1
\n\nHeader
\n\nHeader
\n",
}
doTestsBlock(t, tests, AutoHeadingIDs)
}
func TestPrefixAutoHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
t.Parallel()
var tests = []string{
"# Header 1\n",
"Header 1
\n",
"# Header 1 \n",
"Header 1
\n",
"## Header 2\n",
"Header 2
\n",
"### Header 3\n",
"Header 3
\n",
"#### Header 4\n",
"Header 4
\n",
"##### Header 5\n",
"Header 5
\n",
"###### Header 6\n",
"Header 6
\n",
"####### Header 7\n",
"# Header 7
\n",
"Hello\n# Header 1\nGoodbye\n",
"Header 1
\n\n\n
\n",
"* List\n#Header\n* List\n",
"Header
\n
\n",
"* List\n * Nested list\n # Nested header\n",
"Header
\n
\n",
"# Header\n\n# Header\n",
"\n
Nested header
Header
\n\nHeader
\n",
"# Header 1\n\n# Header 1",
"Header 1
\n\nHeader 1
\n",
"# Header\n\n# Header 1\n\n# Header\n\n# Header",
"Header
\n\nHeader 1
\n\nHeader
\n\nHeader
\n",
}
parameters := HTMLRendererParameters{
HeadingIDPrefix: "PRE:",
HeadingIDSuffix: ":POST",
}
doTestsParam(t, tests, TestParams{
extensions: AutoHeadingIDs,
HTMLFlags: UseXHTML,
HTMLRendererParameters: parameters,
})
}
func TestPrefixHeaderLevelOffset(t *testing.T) {
t.Parallel()
var offsetTests = []struct {
offset int
tests []string
}{{
offset: 0,
tests: []string{
"# Header 1\n",
"Header 1
\n",
"## Header 2\n",
"Header 2
\n",
"### Header 3\n",
"Header 3
\n",
"#### Header 4\n",
"Header 4
\n",
"##### Header 5\n",
"Header 5
\n",
"###### Header 6\n",
"Header 6
\n",
"####### Header 7\n",
"# Header 7
\n",
},
}, {
offset: 1,
tests: []string{
"# Header 1\n",
"Header 1
\n",
"## Header 2\n",
"Header 2
\n",
"### Header 3\n",
"Header 3
\n",
"#### Header 4\n",
"Header 4
\n",
"##### Header 5\n",
"Header 5
\n",
"###### Header 6\n",
"Header 6
\n",
"####### Header 7\n",
"# Header 7
\n",
},
}, {
offset: -1,
tests: []string{
"# Header 1\n",
"Header 1
\n",
"## Header 2\n",
"Header 2
\n",
"### Header 3\n",
"Header 3
\n",
"#### Header 4\n",
"Header 4
\n",
"##### Header 5\n",
"Header 5
\n",
"###### Header 6\n",
"Header 6
\n",
"####### Header 7\n",
"# Header 7
\n",
},
}}
for _, offsetTest := range offsetTests {
offset := offsetTest.offset
tests := offsetTest.tests
doTestsParam(t, tests, TestParams{
HTMLRendererParameters: HTMLRendererParameters{HeadingLevelOffset: offset},
})
}
}
func TestPrefixMultipleHeaderExtensions(t *testing.T) {
t.Parallel()
var tests = []string{
"# Header\n\n# Header {#header}\n\n# Header 1",
"Header
\n\nHeader
\n\nHeader 1
\n",
}
doTestsBlock(t, tests, AutoHeadingIDs|HeadingIDs)
}
func TestUnderlineHeaders(t *testing.T) {
t.Parallel()
var tests = []string{
"Header 1\n========\n",
"Header 1
\n",
"Header 2\n--------\n",
"Header 2
\n",
"A\n=\n",
"A
\n",
"B\n-\n",
"B
\n",
"Paragraph\nHeader\n=\n",
"Header
\n",
"Header\n===\nParagraph\n",
"Header
\n\nHeader
\n\nAnother header
\n",
" Header\n======\n",
"Header
\n",
" Code\n========\n",
"
\n\nCode\n
Header with inline
\n",
"* List\n * Sublist\n Not a header\n ------\n",
"\n
\n",
"Paragraph\n\n\n\n\nHeader\n===\n",
"\n
Header
\n",
"Trailing space \n==== \n\n",
"Trailing space
\n",
"Trailing spaces\n==== \n\n",
"Trailing spaces
\n",
"Double underline\n=====\n=====\n",
"Double underline
\n\nHeader 1
\n",
"Header 2\n--------\n",
"Header 2
\n",
"A\n=\n",
"A
\n",
"B\n-\n",
"B
\n",
"Paragraph\nHeader\n=\n",
"Header
\n",
"Header\n===\nParagraph\n",
"Header
\n\nHeader
\n\nAnother header
\n",
" Header\n======\n",
"Header
\n",
"Header with *inline*\n=====\n",
"Header with inline
\n",
"Paragraph\n\n\n\n\nHeader\n===\n",
"Header
\n",
"Trailing space \n==== \n\n",
"Trailing space
\n",
"Trailing spaces\n==== \n\n",
"Trailing spaces
\n",
"Double underline\n=====\n=====\n",
"Double underline
\n\nHeader
\n\nHeader
\n",
"Header 1\n========\n\nHeader 1\n========\n",
"Header 1
\n\nHeader 1
\n",
}
doTestsBlock(t, tests, AutoHeadingIDs)
}
func TestHorizontalRule(t *testing.T) {
t.Parallel()
var tests = []string{
"-\n",
"
\n",
"----\n",
"
\n",
"*\n",
"
\n",
"****\n",
"
\n",
"_\n",
"
\n",
"____\n",
"
\n",
"-*-\n",
"
\n",
"* * *\n",
"
\n",
"_ _ _\n",
"
\n",
"-----*\n",
"
\n",
"Hello\n***\n",
"
\n",
"---\n***\n___\n",
"
\n\n
\n\n
\n",
}
doTestsBlock(t, tests, 0)
}
func TestUnorderedList(t *testing.T) {
t.Parallel()
var tests = []string{
"* Hello\n",
"\n
\n",
"* Yin\n* Yang\n",
"\n
\n",
"* Ting\n* Bong\n* Goo\n",
"\n
\n",
"* Yin\n\n* Yang\n",
"\n
\n",
"* Ting\n\n* Bong\n* Goo\n",
"\n
\n",
"+ Hello\n",
"\n
\n",
"+ Yin\n+ Yang\n",
"\n
\n",
"+ Ting\n+ Bong\n+ Goo\n",
"\n
\n",
"+ Yin\n\n+ Yang\n",
"\n
\n",
"+ Ting\n\n+ Bong\n+ Goo\n",
"\n
\n",
"- Hello\n",
"\n
\n",
"- Yin\n- Yang\n",
"\n
\n",
"- Ting\n- Bong\n- Goo\n",
"\n
\n",
"- Yin\n\n- Yang\n",
"\n
\n",
"- Ting\n\n- Bong\n- Goo\n",
"\n
\n",
"*Hello\n",
"\n
\n",
"* Hello \n Next line \n",
"\n
\n",
"Paragraph\n* No linebreak\n",
"\n
\n",
"* List\n * Nested list\n",
"\n
\n",
"* List\n\n * Nested list\n",
"\n
\n
\n",
"* List\n Second line\n\n + Nested\n",
"\n
\n
\n",
"* List\n + Nested\n\n Continued\n",
"\n
\n
\n",
"* List\n * shallow indent\n",
"\n
\n\n\n
\n",
"* List\n" +
" * shallow indent\n" +
" * part of second list\n" +
" * still second\n" +
" * almost there\n" +
" * third level\n",
"\n
\n" +
"
\n",
"* List\n extra indent, same paragraph\n",
"\n" +
"
\n" +
"
\n
\n",
"* List\n\n code block\n",
"\n
\n",
"* List\n\n code block with spaces\n",
"code block\n
\n
\n",
"* List\n\n * sublist\n\n normal text\n\n * another sublist\n",
" code block with spaces\n
\n
\n",
}
doTestsBlock(t, tests, 0)
}
func TestOrderedList(t *testing.T) {
t.Parallel()
var tests = []string{
"1. Hello\n",
"\n
\n\n\n
\n
\n",
"1. Yin\n2. Yang\n",
"\n
\n",
"1. Ting\n2. Bong\n3. Goo\n",
"\n
\n",
"1. Yin\n\n2. Yang\n",
"\n
\n",
"1. Ting\n\n2. Bong\n3. Goo\n",
"\n
\n",
"1 Hello\n",
"\n
\n",
"1. Hello \n Next line \n",
"\n
\n",
"Paragraph\n1. No linebreak\n",
"\n
\n",
"1. List\n 1. Nested list\n",
"\n
\n",
"1. List\n\n 1. Nested list\n",
"\n
\n
\n",
"1. List\n Second line\n\n 1. Nested\n",
"\n
\n
\n",
"1. List\n 1. Nested\n\n Continued\n",
"\n
\n
\n",
"1. List\n 1. shallow indent\n",
"\n
\n\n\n
\n",
"1. List\n" +
" 1. shallow indent\n" +
" 2. part of second list\n" +
" 3. still second\n" +
" 4. almost there\n" +
" 1. third level\n",
"\n
\n" +
"
\n",
"1. List\n extra indent, same paragraph\n",
"\n" +
"
\n" +
"
\n
\n",
"1. List\n\n code block\n",
"\n
\n",
"1. List\n\n code block with spaces\n",
"code block\n
\n
\n",
"1. List\n * Mixted list\n",
" code block with spaces\n
\n
\n",
"1. List\n * Mixed list\n",
"\n
\n
\n",
"* Start with unordered\n 1. Ordered\n",
"\n
\n
\n",
"* Start with unordered\n 1. Ordered\n",
"\n
\n
\n",
"1. numbers\n1. are ignored\n",
"\n
\n
\n",
}
doTestsBlock(t, tests, 0)
}
func TestDefinitionList(t *testing.T) {
t.Parallel()
var tests = []string{
"Term 1\n: Definition a\n",
"\n
\n",
"Term 1\n: Definition a \n",
"\n
\n",
"Term 1\n: Definition a\n: Definition b\n",
"\n
\n",
"Term 1\n: Definition a\n\nTerm 2\n: Definition b\n",
"\n" +
"
\n",
"Term 1\n: Definition a\n\nTerm 2\n: Definition b\n\nTerm 3\n: Definition c\n",
"\n" +
"
\n",
"Term 1\n: Definition a\n: Definition b\n\nTerm 2\n: Definition c\n",
"\n" +
"
\n",
"Term 1\n\n: Definition a\n\nTerm 2\n\n: Definition b\n",
"\n" +
"
\n",
"Term 1\n\n: Definition a\n\n: Definition b\n\nTerm 2\n\n: Definition c\n",
"\n" +
"
\n",
"Term 1\n: Definition a\nNext line\n",
"\n
\n",
"Term 1\n: Definition a\n Next line\n",
"\n
\n",
"Term 1\n: Definition a \n Next line \n",
"\n
\n",
"Term 1\n: Definition a\nNext line\n\nTerm 2\n: Definition b",
"\n" +
"
\n",
"Term 1\n: Definition a\n",
"\n
\n",
"Term 1\n:Definition a\n",
"\n" +
"
\n" +
"\n\n" +
"
\n" +
"\n\n" +
"
\n" +
"\n\n
\n\n\n
\n\n\n
\n",
"1. Not nested\n2. ordered list\n\n\t1. nested\n\t2. ordered list\n\n\t* nested\n\t* unordered list\n* Not nested\n* unordered list",
"\n
\n\n\n
\n\n\n
\n
\n",
}
doTestsBlock(t, tests, DefinitionLists)
}
func TestPreformattedHtml(t *testing.T) {
t.Parallel()
var tests = []string{
"\n",
"\n",
"
Paragraph\n
Paragraph
\n\nParagraph\n
Paragraph
\n\nParagraph\n
And here?
\n", "Paragraph\n\nParagraph
\n\nAnd here?
\n", } doTestsBlock(t, tests, 0) } func TestPreformattedHtmlLax(t *testing.T) { t.Parallel() var tests = []string{ "Paragraph\nParagraph
\n\nParagraph
\n\nParagraph
\n\nAnd here?
\n", "Paragraph\n\nParagraph
\n\nAnd here?
\n", "Paragraph\nParagraph
\n\nAnd here?
\n", "Paragraph\n\nParagraph
\n\nAnd here?
\n", } doTestsBlock(t, tests, LaxHTMLBlocks) } func TestFencedCodeBlock(t *testing.T) { t.Parallel() var tests = []string{ "``` go\nfunc foo() bool {\n\treturn true;\n}\n```\n", "func foo() bool {\n\treturn true;\n}\n
\n",
"``` go foo bar\nfunc foo() bool {\n\treturn true;\n}\n```\n",
"func foo() bool {\n\treturn true;\n}\n
\n",
"``` c\n/* special & char < > \" escaping */\n```\n",
"/* special & char < > " escaping */\n
\n",
"``` c\nno *inline* processing ~~of text~~\n```\n",
"no *inline* processing ~~of text~~\n
\n",
"```\nNo language\n```\n",
"No language\n
\n",
"``` {ocaml}\nlanguage in braces\n```\n",
"language in braces\n
\n",
"``` {ocaml} \nwith extra whitespace\n```\n",
"with extra whitespace\n
\n",
"```{ ocaml }\nwith extra whitespace\n```\n",
"with extra whitespace\n
\n",
"~ ~~ java\nWith whitespace\n~~~\n",
"~ ~~ java\nWith whitespace\n~~~
\n", "~~\nonly two\n~~\n", "~~\nonly two\n~~
\n", "```` python\nextra\n````\n", "extra\n
\n",
"~~~ perl\nthree to start, four to end\n~~~~\n",
"~~~ perl\nthree to start, four to end\n~~~~
\n", "~~~~ perl\nfour to start, three to end\n~~~\n", "~~~~ perl\nfour to start, three to end\n~~~
\n", "~~~ bash\ntildes\n~~~\n", "tildes\n
\n",
"``` lisp\nno ending\n",
"``` lisp\nno ending
\n", "~~~ lisp\nend with language\n~~~ lisp\n", "~~~ lisp\nend with language\n~~~ lisp
\n", "```\nmismatched begin and end\n~~~\n", "```\nmismatched begin and end\n~~~
\n", "~~~\nmismatched begin and end\n```\n", "~~~\nmismatched begin and end\n```
\n", " ``` oz\nleading spaces\n```\n", "leading spaces\n
\n",
" ``` oz\nleading spaces\n ```\n",
"leading spaces\n
\n",
" ``` oz\nleading spaces\n ```\n",
"leading spaces\n
\n",
"``` oz\nleading spaces\n ```\n",
"leading spaces\n
\n",
" ``` oz\nleading spaces\n ```\n",
"``` oz\n
\n\nleading spaces\n ```
\n", "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n", "Bla bla
\n\ncode blocks breakup paragraphs\n
\n\nBla Bla
\n", "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nAnd some text after a fenced code block", "Some text before a fenced code block
\n\ncode blocks breakup paragraphs\n
\n\nAnd some text after a fenced code block
\n", "`", "`
\n", "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n\n``` oz\nmultiple code blocks work okay\n```\n\nBla Bla\n", "Bla bla
\n\ncode blocks breakup paragraphs\n
\n\nBla Bla
\n\nmultiple code blocks work okay\n
\n\nBla Bla
\n", "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nSome text in between\n``` oz\nmultiple code blocks work okay\n```\nAnd some text after a fenced code block", "Some text before a fenced code block
\n\ncode blocks breakup paragraphs\n
\n\nSome text in between
\n\nmultiple code blocks work okay\n
\n\nAnd some text after a fenced code block
\n", "```\n[]:()\n```\n", "[]:()\n
\n",
"```\n[]:()\n[]:)\n[]:(\n[]:x\n[]:testing\n[:testing\n\n[]:\nlinebreak\n[]()\n\n[]:\n[]()\n```",
"[]:()\n[]:)\n[]:(\n[]:x\n[]:testing\n[:testing\n\n[]:\nlinebreak\n[]()\n\n[]:\n[]()\n
\n",
}
doTestsBlock(t, tests, FencedCode)
}
func TestFencedCodeInsideBlockquotes(t *testing.T) {
t.Parallel()
cat := func(s ...string) string { return strings.Join(s, "\n") }
var tests = []string{
cat("> ```go",
"package moo",
"",
"```",
""),
``, // ------------------------------------------- cat("> foo", "> ", "> ```go", "package moo", "```", "> ", "> goo.", ""), `package moo
`, // ------------------------------------------- cat("> foo", "> ", "> quote", "continues", "```", ""), `foo
package moo
goo.
`, // ------------------------------------------- cat("> foo", "> ", "> ```go", "package moo", "```", "> ", "> goo.", "> ", "> ```go", "package zoo", "```", "> ", "> woo.", ""), `foo
quote continues ` + "```" + `
`, } // These 2 alternative forms of blockquoted fenced code blocks should produce same output. forms := [2]string{ cat("> plain quoted text", "> ```fenced", "code", " with leading single space correctly preserved", "okay", "```", "> rest of quoted text"), cat("> plain quoted text", "> ```fenced", "> code", "> with leading single space correctly preserved", "> okay", "> ```", "> rest of quoted text"), } want := `foo
package moo
goo.
package zoo
woo.
` tests = append(tests, forms[0], want) tests = append(tests, forms[1], want) doTestsBlock(t, tests, FencedCode) } func TestTable(t *testing.T) { t.Parallel() var tests = []string{ "a | b\n---|---\nc | d\n", "plain quoted text
code with leading single space correctly preserved okay
rest of quoted text
a | \nb | \n
---|---|
c | \nd | \n
a | b\n---|--\nc | d
\n", "|a|b|c|d|\n|----|----|----|---|\n|e|f|g|h|\n", "a | \nb | \nc | \nd | \n
---|---|---|---|
e | \nf | \ng | \nh | \n
a | \nb | \nc | \nd | \n
---|---|---|---|
e | \nf | \ng | \nh | \n
a | \nb | \nc | \n
---|---|---|
d | \ne | \nf | \n
g | \nh | \n\n |
i | \nj | \nk | \n
n | \no | \np | \n
a | \nb | \nc | \n
---|---|---|
d | \ne | \nf | \n
a | \nb | \n" + "c | \nd | \n
---|---|---|---|
e | \nf | \n" + "g | \nh | \n
a | \nb | \nc | \n
---|
a | \nb | \nc | \nd | \ne | \n
---|---|---|---|---|
f | \ng | \nh | \ni | \nj | \n
a | \nb|c | \nd | \n
---|---|---|
f | \ng|h | \ni | \n
Yin
Yang
Ting
Bong
Goo
Yin
Yang
Ting
Bong
Goo
Yin
Yang
Ting
Bong
Goo
*Hello
\n", "* Hello \n", "Paragraph
\n\nParagraph
\n\nList
\n\nList\nSecond line
\n\nList
\n\nContinued
List
\n\ncode block\n
List
\n\n code block with spaces\n
List
\n\nnormal text
\n\nYin
Yang
Ting
Bong
Goo
1 Hello
\n", "1.Hello\n", "1.Hello
\n", "1. Hello \n", "Paragraph
\n\nParagraph
\n\nList
\n\nList\nSecond line
\n\nList
\n\nContinued
List
\n\ncode block\n
List
\n\n code block with spaces\n
func foo() bool {\n\treturn true;\n}\n
\n",
"``` go foo bar\nfunc foo() bool {\n\treturn true;\n}\n```\n",
"func foo() bool {\n\treturn true;\n}\n
\n",
"``` c\n/* special & char < > \" escaping */\n```\n",
"/* special & char < > " escaping */\n
\n",
"``` c\nno *inline* processing ~~of text~~\n```\n",
"no *inline* processing ~~of text~~\n
\n",
"```\nNo language\n```\n",
"No language\n
\n",
"``` {ocaml}\nlanguage in braces\n```\n",
"language in braces\n
\n",
"``` {ocaml} \nwith extra whitespace\n```\n",
"with extra whitespace\n
\n",
"```{ ocaml }\nwith extra whitespace\n```\n",
"with extra whitespace\n
\n",
"~ ~~ java\nWith whitespace\n~~~\n",
"~ ~~ java\nWith whitespace\n~~~
\n", "~~\nonly two\n~~\n", "~~\nonly two\n~~
\n", "```` python\nextra\n````\n", "extra\n
\n",
"~~~ perl\nthree to start, four to end\n~~~~\n",
"~~~ perl\nthree to start, four to end\n~~~~
\n", "~~~~ perl\nfour to start, three to end\n~~~\n", "~~~~ perl\nfour to start, three to end\n~~~
\n", "~~~ bash\ntildes\n~~~\n", "tildes\n
\n",
"``` lisp\nno ending\n",
"``` lisp\nno ending
\n", "~~~ lisp\nend with language\n~~~ lisp\n", "~~~ lisp\nend with language\n~~~ lisp
\n", "```\nmismatched begin and end\n~~~\n", "```\nmismatched begin and end\n~~~
\n", "~~~\nmismatched begin and end\n```\n", "~~~\nmismatched begin and end\n```
\n", " ``` oz\nleading spaces\n```\n", "leading spaces\n
\n",
" ``` oz\nleading spaces\n ```\n",
"leading spaces\n
\n",
" ``` oz\nleading spaces\n ```\n",
"leading spaces\n
\n",
"``` oz\nleading spaces\n ```\n",
"leading spaces\n
\n",
" ``` oz\nleading spaces\n ```\n",
"``` oz\n
\n\nleading spaces
\n\n```\n
\n",
}
doTestsBlock(t, tests, FencedCode|NoEmptyLineBeforeBlock)
}
func TestListWithFencedCodeBlock(t *testing.T) {
t.Parallel()
var tests = []string{
"1. one\n\n ```\n code\n ```\n\n2. two\n",
"one
\n\ncode\n
two
one
\n\n- code\n
two
one
\n\n\ncode\n
two
one
\n\n```
\n\ntwo
Some text
\n\n\n", "Some text\n\n\n", "Some text
\n\n\n", "Some text\n\n\n", "Some text
\n\n\n", } doTestsBlock(t, tests, 0) } func TestTOC(t *testing.T) { t.Parallel() var tests = []string{ "# Title\n\n##Subtitle1\n\n##Subtitle2", //"\n\ncode
foo
`, } doTestsParam(t, tests, TestParams{HTMLFlags: UseXHTML | CompletePage}) } func TestIsFenceLine(t *testing.T) { t.Parallel() tests := []struct { data []byte infoRequested bool wantEnd int wantMarker string wantInfo string }{ { data: []byte("```"), wantEnd: 3, wantMarker: "```", }, { data: []byte("```\nstuff here\n"), wantEnd: 4, wantMarker: "```", }, { data: []byte("```\nstuff here\n"), infoRequested: true, wantEnd: 4, wantMarker: "```", }, { data: []byte("stuff here\n```\n"), wantEnd: 0, }, { data: []byte("```"), infoRequested: true, wantEnd: 3, wantMarker: "```", }, { data: []byte("``` go"), infoRequested: true, wantEnd: 6, wantMarker: "```", wantInfo: "go", }, { data: []byte("``` go foo bar"), infoRequested: true, wantEnd: 14, wantMarker: "```", wantInfo: "go foo bar", }, { data: []byte("``` go foo bar "), infoRequested: true, wantEnd: 16, wantMarker: "```", wantInfo: "go foo bar", }, } for _, test := range tests { var info *string if test.infoRequested { info = new(string) } end, marker := isFenceLine(test.data, info, "```") if got, want := end, test.wantEnd; got != want { t.Errorf("got end %v, want %v", got, want) } if got, want := marker, test.wantMarker; got != want { t.Errorf("got marker %q, want %q", got, want) } if test.infoRequested { if got, want := *info, test.wantInfo; got != want { t.Errorf("got info string %q, want %q", got, want) } } } } func TestSanitizedAnchorName(t *testing.T) { tests := []struct { text string want string }{ { text: "This is a header", want: "this-is-a-header", }, { text: "This is also a header", want: "this-is-also-a-header", }, { text: "main.go", want: "main-go", }, { text: "Article 123", want: "article-123", }, { text: "<- Let's try this, shall we?", want: "let-s-try-this-shall-we", }, { text: " ", want: "", }, { text: "Hello, 世界", want: "hello-世界", }, } for _, test := range tests { if got := SanitizedAnchorName(test.text); got != test.want { t.Errorf("SanitizedAnchorName(%q):\ngot %q\nwant %q", test.text, got, test.want) } } } blackfriday-2.1.0/doc.go 0000664 0000000 0000000 00000004711 13745714152 0015075 0 ustar 00root root 0000000 0000000 // Package blackfriday is a markdown processor. // // It translates plain text with simple formatting rules into an AST, which can // then be further processed to HTML (provided by Blackfriday itself) or other // formats (provided by the community). // // The simplest way to invoke Blackfriday is to call the Run function. It will // take a text input and produce a text output in HTML (or other format). // // A slightly more sophisticated way to use Blackfriday is to create a Markdown // processor and to call Parse, which returns a syntax tree for the input // document. You can leverage Blackfriday's parsing for content extraction from // markdown documents. You can assign a custom renderer and set various options // to the Markdown processor. // // If you're interested in calling Blackfriday from command line, see // https://github.com/russross/blackfriday-tool. // // Sanitized Anchor Names // // Blackfriday includes an algorithm for creating sanitized anchor names // corresponding to a given input text. This algorithm is used to create // anchors for headings when AutoHeadingIDs extension is enabled. The // algorithm is specified below, so that other packages can create // compatible anchor names and links to those anchors. // // The algorithm iterates over the input text, interpreted as UTF-8, // one Unicode code point (rune) at a time. 
All runes that are letters (category L) // or numbers (category N) are considered valid characters. They are mapped to // lower case, and included in the output. All other runes are considered // invalid characters. Invalid characters that precede the first valid character, // as well as invalid character that follow the last valid character // are dropped completely. All other sequences of invalid characters // between two valid characters are replaced with a single dash character '-'. // // SanitizedAnchorName exposes this functionality, and can be used to // create compatible links to the anchor names generated by blackfriday. // This algorithm is also implemented in a small standalone package at // github.com/shurcooL/sanitized_anchor_name. It can be useful for clients // that want a small package and don't need full functionality of blackfriday. package blackfriday // NOTE: Keep Sanitized Anchor Name algorithm in sync with package // github.com/shurcooL/sanitized_anchor_name. // Otherwise, users of sanitized_anchor_name will get anchor names // that are incompatible with those generated by blackfriday. blackfriday-2.1.0/entities.go 0000664 0000000 0000000 00000277762 13745714152 0016176 0 ustar 00root root 0000000 0000000 package blackfriday // Extracted from https://html.spec.whatwg.org/multipage/entities.json var entities = map[string]bool{ "Æ": true, "Æ": true, "&": true, "&": true, "Á": true, "Á": true, "Ă": true, "Â": true, "Â": true, "А": true, "𝔄": true, "À": true, "À": true, "Α": true, "Ā": true, "⩓": true, "Ą": true, "𝔸": true, "⁡": true, "Å": true, "Å": true, "𝒜": true, "≔": true, "Ã": true, "Ã": true, "Ä": true, "Ä": true, "∖": true, "⫧": true, "⌆": true, "Б": true, "∵": true, "ℬ": true, "Β": true, "𝔅": true, "𝔹": true, "˘": true, "ℬ": true, "≎": true, "Ч": true, "©": true, "©": true, "Ć": true, "⋒": true, "ⅅ": true, "ℭ": true, "Č": true, "Ç": true, "Ç": true, "Ĉ": true, "∰": true, "Ċ": true, "¸": true, "·": true, "ℭ": true, "Χ": true, "⊙": true, "⊖": true, "⊕": true, "⊗": true, "∲": true, "”": true, "’": true, "∷": true, "⩴": true, "≡": true, "∯": true, "∮": true, "ℂ": true, "∐": true, "∳": true, "⨯": true, "𝒞": true, "⋓": true, "≍": true, "ⅅ": true, "⤑": true, "Ђ": true, "Ѕ": true, "Џ": true, "‡": true, "↡": true, "⫤": true, "Ď": true, "Д": true, "∇": true, "Δ": true, "𝔇": true, "´": true, "˙": true, "˝": true, "`": true, "˜": true, "⋄": true, "ⅆ": true, "𝔻": true, "¨": true, "⃜": true, "≐": true, "∯": true, "¨": true, "⇓": true, "⇐": true, "⇔": true, "⫤": true, "⟸": true, "⟺": true, "⟹": true, "⇒": true, "⊨": true, "⇑": true, "⇕": true, "∥": true, "↓": true, "⤓": true, "⇵": true, "̑": true, "⥐": true, "⥞": true, "↽": true, "⥖": true, "⥟": true, "⇁": true, "⥗": true, "⊤": true, "↧": true, "⇓": true, "𝒟": true, "Đ": true, "Ŋ": true, "Ð": true, "Ð": true, "É": true, "É": true, "Ě": true, "Ê": true, "Ê": true, "Э": true, "Ė": true, "𝔈": true, "È": true, "È": true, "∈": true, "Ē": true, "◻": true, "▫": true, "Ę": true, "𝔼": true, "Ε": true, "⩵": true, "≂": true, "⇌": true, "ℰ": true, "⩳": true, "Η": true, "Ë": true, "Ë": true, "∃": true, "ⅇ": true, "Ф": true, "𝔉": true, "◼": true, "▪": true, "𝔽": true, "∀": true, "ℱ": true, "ℱ": true, "Ѓ": true, ">": true, ">": true, "Γ": true, "Ϝ": true, "Ğ": true, "Ģ": true, "Ĝ": true, "Г": true, "Ġ": true, "𝔊": true, "⋙": true, "𝔾": true, "≥": true, "⋛": true, "≧": true, "⪢": true, "≷": true, "⩾": true, "≳": true, "𝒢": true, "≫": true, "Ъ": true, "ˇ": true, "^": true, "Ĥ": true, "ℌ": true, "ℋ": true, "ℍ": true, "─": true, "ℋ": true, "Ħ": 
true, "≎": true, "≏": true, "Е": true, "IJ": true, "Ё": true, "Í": true, "Í": true, "Î": true, "Î": true, "И": true, "İ": true, "ℑ": true, "Ì": true, "Ì": true, "ℑ": true, "Ī": true, "ⅈ": true, "⇒": true, "∬": true, "∫": true, "⋂": true, "⁣": true, "⁢": true, "Į": true, "𝕀": true, "Ι": true, "ℐ": true, "Ĩ": true, "І": true, "Ï": true, "Ï": true, "Ĵ": true, "Й": true, "𝔍": true, "𝕁": true, "𝒥": true, "Ј": true, "Є": true, "Х": true, "Ќ": true, "Κ": true, "Ķ": true, "К": true, "𝔎": true, "𝕂": true, "𝒦": true, "Љ": true, "<": true, "<": true, "Ĺ": true, "Λ": true, "⟪": true, "ℒ": true, "↞": true, "Ľ": true, "Ļ": true, "Л": true, "⟨": true, "←": true, "⇤": true, "⇆": true, "⌈": true, "⟦": true, "⥡": true, "⇃": true, "⥙": true, "⌊": true, "↔": true, "⥎": true, "⊣": true, "↤": true, "⥚": true, "⊲": true, "⧏": true, "⊴": true, "⥑": true, "⥠": true, "↿": true, "⥘": true, "↼": true, "⥒": true, "⇐": true, "⇔": true, "⋚": true, "≦": true, "≶": true, "⪡": true, "⩽": true, "≲": true, "𝔏": true, "⋘": true, "⇚": true, "Ŀ": true, "⟵": true, "⟷": true, "⟶": true, "⟸": true, "⟺": true, "⟹": true, "𝕃": true, "↙": true, "↘": true, "ℒ": true, "↰": true, "Ł": true, "≪": true, "⤅": true, "М": true, " ": true, "ℳ": true, "𝔐": true, "∓": true, "𝕄": true, "ℳ": true, "Μ": true, "Њ": true, "Ń": true, "Ň": true, "Ņ": true, "Н": true, "​": true, "​": true, "​": true, "​": true, "≫": true, "≪": true, "
": true, "𝔑": true, "⁠": true, " ": true, "ℕ": true, "⫬": true, "≢": true, "≭": true, "∦": true, "∉": true, "≠": true, "≂̸": true, "∄": true, "≯": true, "≱": true, "≧̸": true, "≫̸": true, "≹": true, "⩾̸": true, "≵": true, "≎̸": true, "≏̸": true, "⋪": true, "⧏̸": true, "⋬": true, "≮": true, "≰": true, "≸": true, "≪̸": true, "⩽̸": true, "≴": true, "⪢̸": true, "⪡̸": true, "⊀": true, "⪯̸": true, "⋠": true, "∌": true, "⋫": true, "⧐̸": true, "⋭": true, "⊏̸": true, "⋢": true, "⊐̸": true, "⋣": true, "⊂⃒": true, "⊈": true, "⊁": true, "⪰̸": true, "⋡": true, "≿̸": true, "⊃⃒": true, "⊉": true, "≁": true, "≄": true, "≇": true, "≉": true, "∤": true, "𝒩": true, "Ñ": true, "Ñ": true, "Ν": true, "Œ": true, "Ó": true, "Ó": true, "Ô": true, "Ô": true, "О": true, "Ő": true, "𝔒": true, "Ò": true, "Ò": true, "Ō": true, "Ω": true, "Ο": true, "𝕆": true, "“": true, "‘": true, "⩔": true, "𝒪": true, "Ø": true, "Ø": true, "Õ": true, "Õ": true, "⨷": true, "Ö": true, "Ö": true, "‾": true, "⏞": true, "⎴": true, "⏜": true, "∂": true, "П": true, "𝔓": true, "Φ": true, "Π": true, "±": true, "ℌ": true, "ℙ": true, "⪻": true, "≺": true, "⪯": true, "≼": true, "≾": true, "″": true, "∏": true, "∷": true, "∝": true, "𝒫": true, "Ψ": true, """: true, """: true, "𝔔": true, "ℚ": true, "𝒬": true, "⤐": true, "®": true, "®": true, "Ŕ": true, "⟫": true, "↠": true, "⤖": true, "Ř": true, "Ŗ": true, "Р": true, "ℜ": true, "∋": true, "⇋": true, "⥯": true, "ℜ": true, "Ρ": true, "⟩": true, "→": true, "⇥": true, "⇄": true, "⌉": true, "⟧": true, "⥝": true, "⇂": true, "⥕": true, "⌋": true, "⊢": true, "↦": true, "⥛": true, "⊳": true, "⧐": true, "⊵": true, "⥏": true, "⥜": true, "↾": true, "⥔": true, "⇀": true, "⥓": true, "⇒": true, "ℝ": true, "⥰": true, "⇛": true, "ℛ": true, "↱": true, "⧴": true, "Щ": true, "Ш": true, "Ь": true, "Ś": true, "⪼": true, "Š": true, "Ş": true, "Ŝ": true, "С": true, "𝔖": true, "↓": true, "←": true, "→": true, "↑": true, "Σ": true, "∘": true, "𝕊": true, "√": true, "□": true, "⊓": true, "⊏": true, "⊑": true, "⊐": true, "⊒": true, "⊔": true, "𝒮": true, "⋆": true, "⋐": true, "⋐": true, "⊆": true, "≻": true, "⪰": true, "≽": true, "≿": true, "∋": true, "∑": true, "⋑": true, "⊃": true, "⊇": true, "⋑": true, "Þ": true, "Þ": true, "™": true, "Ћ": true, "Ц": true, "	": true, "Τ": true, "Ť": true, "Ţ": true, "Т": true, "𝔗": true, "∴": true, "Θ": true, "  ": true, " ": true, "∼": true, "≃": true, "≅": true, "≈": true, "𝕋": true, "⃛": true, "𝒯": true, "Ŧ": true, "Ú": true, "Ú": true, "↟": true, "⥉": true, "Ў": true, "Ŭ": true, "Û": true, "Û": true, "У": true, "Ű": true, "𝔘": true, "Ù": true, "Ù": true, "Ū": true, "_": true, "⏟": true, "⎵": true, "⏝": true, "⋃": true, "⊎": true, "Ų": true, "𝕌": true, "↑": true, "⤒": true, "⇅": true, "↕": true, "⥮": true, "⊥": true, "↥": true, "⇑": true, "⇕": true, "↖": true, "↗": true, "ϒ": true, "Υ": true, "Ů": true, "𝒰": true, "Ũ": true, "Ü": true, "Ü": true, "⊫": true, "⫫": true, "В": true, "⊩": true, "⫦": true, "⋁": true, "‖": true, "‖": true, "∣": true, "|": true, "❘": true, "≀": true, " ": true, "𝔙": true, "𝕍": true, "𝒱": true, "⊪": true, "Ŵ": true, "⋀": true, "𝔚": true, "𝕎": true, "𝒲": true, "𝔛": true, "Ξ": true, "𝕏": true, "𝒳": true, "Я": true, "Ї": true, "Ю": true, "Ý": true, "Ý": true, "Ŷ": true, "Ы": true, "𝔜": true, "𝕐": true, "𝒴": true, "Ÿ": true, "Ж": true, "Ź": true, "Ž": true, "З": true, "Ż": true, "​": true, "Ζ": true, "ℨ": true, "ℤ": true, "𝒵": true, "á": true, "á": true, "ă": true, "∾": true, "∾̳": true, "∿": true, "â": true, "â": true, "´": true, "´": true, "а": true, "æ": true, "æ": 
true, "⁡": true, "𝔞": true, "à": true, "à": true, "ℵ": true, "ℵ": true, "α": true, "ā": true, "⨿": true, "&": true, "&": true, "∧": true, "⩕": true, "⩜": true, "⩘": true, "⩚": true, "∠": true, "⦤": true, "∠": true, "∡": true, "⦨": true, "⦩": true, "⦪": true, "⦫": true, "⦬": true, "⦭": true, "⦮": true, "⦯": true, "∟": true, "⊾": true, "⦝": true, "∢": true, "Å": true, "⍼": true, "ą": true, "𝕒": true, "≈": true, "⩰": true, "⩯": true, "≊": true, "≋": true, "'": true, "≈": true, "≊": true, "å": true, "å": true, "𝒶": true, "*": true, "≈": true, "≍": true, "ã": true, "ã": true, "ä": true, "ä": true, "∳": true, "⨑": true, "⫭": true, "≌": true, "϶": true, "‵": true, "∽": true, "⋍": true, "⊽": true, "⌅": true, "⌅": true, "⎵": true, "⎶": true, "≌": true, "б": true, "„": true, "∵": true, "∵": true, "⦰": true, "϶": true, "ℬ": true, "β": true, "ℶ": true, "≬": true, "𝔟": true, "⋂": true, "◯": true, "⋃": true, "⨀": true, "⨁": true, "⨂": true, "⨆": true, "★": true, "▽": true, "△": true, "⨄": true, "⋁": true, "⋀": true, "⤍": true, "⧫": true, "▪": true, "▴": true, "▾": true, "◂": true, "▸": true, "␣": true, "▒": true, "░": true, "▓": true, "█": true, "=⃥": true, "≡⃥": true, "⌐": true, "𝕓": true, "⊥": true, "⊥": true, "⋈": true, "╗": true, "╔": true, "╖": true, "╓": true, "═": true, "╦": true, "╩": true, "╤": true, "╧": true, "╝": true, "╚": true, "╜": true, "╙": true, "║": true, "╬": true, "╣": true, "╠": true, "╫": true, "╢": true, "╟": true, "⧉": true, "╕": true, "╒": true, "┐": true, "┌": true, "─": true, "╥": true, "╨": true, "┬": true, "┴": true, "⊟": true, "⊞": true, "⊠": true, "╛": true, "╘": true, "┘": true, "└": true, "│": true, "╪": true, "╡": true, "╞": true, "┼": true, "┤": true, "├": true, "‵": true, "˘": true, "¦": true, "¦": true, "𝒷": true, "⁏": true, "∽": true, "⋍": true, "\": true, "⧅": true, "⟈": true, "•": true, "•": true, "≎": true, "⪮": true, "≏": true, "≏": true, "ć": true, "∩": true, "⩄": true, "⩉": true, "⩋": true, "⩇": true, "⩀": true, "∩︀": true, "⁁": true, "ˇ": true, "⩍": true, "č": true, "ç": true, "ç": true, "ĉ": true, "⩌": true, "⩐": true, "ċ": true, "¸": true, "¸": true, "⦲": true, "¢": true, "¢": true, "·": true, "𝔠": true, "ч": true, "✓": true, "✓": true, "χ": true, "○": true, "⧃": true, "ˆ": true, "≗": true, "↺": true, "↻": true, "®": true, "Ⓢ": true, "⊛": true, "⊚": true, "⊝": true, "≗": true, "⨐": true, "⫯": true, "⧂": true, "♣": true, "♣": true, ":": true, "≔": true, "≔": true, ",": true, "@": true, "∁": true, "∘": true, "∁": true, "ℂ": true, "≅": true, "⩭": true, "∮": true, "𝕔": true, "∐": true, "©": true, "©": true, "℗": true, "↵": true, "✗": true, "𝒸": true, "⫏": true, "⫑": true, "⫐": true, "⫒": true, "⋯": true, "⤸": true, "⤵": true, "⋞": true, "⋟": true, "↶": true, "⤽": true, "∪": true, "⩈": true, "⩆": true, "⩊": true, "⊍": true, "⩅": true, "∪︀": true, "↷": true, "⤼": true, "⋞": true, "⋟": true, "⋎": true, "⋏": true, "¤": true, "¤": true, "↶": true, "↷": true, "⋎": true, "⋏": true, "∲": true, "∱": true, "⌭": true, "⇓": true, "⥥": true, "†": true, "ℸ": true, "↓": true, "‐": true, "⊣": true, "⤏": true, "˝": true, "ď": true, "д": true, "ⅆ": true, "‡": true, "⇊": true, "⩷": true, "°": true, "°": true, "δ": true, "⦱": true, "⥿": true, "𝔡": true, "⇃": true, "⇂": true, "⋄": true, "⋄": true, "♦": true, "♦": true, "¨": true, "ϝ": true, "⋲": true, "÷": true, "÷": true, "÷": true, "⋇": true, "⋇": true, "ђ": true, "⌞": true, "⌍": true, "$": true, "𝕕": true, "˙": true, "≐": true, "≑": true, "∸": true, "∔": true, "⊡": true, "⌆": true, "↓": true, "⇊": true, "⇃": true, "⇂": true, 
"⤐": true, "⌟": true, "⌌": true, "𝒹": true, "ѕ": true, "⧶": true, "đ": true, "⋱": true, "▿": true, "▾": true, "⇵": true, "⥯": true, "⦦": true, "џ": true, "⟿": true, "⩷": true, "≑": true, "é": true, "é": true, "⩮": true, "ě": true, "≖": true, "ê": true, "ê": true, "≕": true, "э": true, "ė": true, "ⅇ": true, "≒": true, "𝔢": true, "⪚": true, "è": true, "è": true, "⪖": true, "⪘": true, "⪙": true, "⏧": true, "ℓ": true, "⪕": true, "⪗": true, "ē": true, "∅": true, "∅": true, "∅": true, " ": true, " ": true, " ": true, "ŋ": true, " ": true, "ę": true, "𝕖": true, "⋕": true, "⧣": true, "⩱": true, "ε": true, "ε": true, "ϵ": true, "≖": true, "≕": true, "≂": true, "⪖": true, "⪕": true, "=": true, "≟": true, "≡": true, "⩸": true, "⧥": true, "≓": true, "⥱": true, "ℯ": true, "≐": true, "≂": true, "η": true, "ð": true, "ð": true, "ë": true, "ë": true, "€": true, "!": true, "∃": true, "ℰ": true, "ⅇ": true, "≒": true, "ф": true, "♀": true, "ffi": true, "ff": true, "ffl": true, "𝔣": true, "fi": true, "fj": true, "♭": true, "fl": true, "▱": true, "ƒ": true, "𝕗": true, "∀": true, "⋔": true, "⫙": true, "⨍": true, "½": true, "½": true, "⅓": true, "¼": true, "¼": true, "⅕": true, "⅙": true, "⅛": true, "⅔": true, "⅖": true, "¾": true, "¾": true, "⅗": true, "⅜": true, "⅘": true, "⅚": true, "⅝": true, "⅞": true, "⁄": true, "⌢": true, "𝒻": true, "≧": true, "⪌": true, "ǵ": true, "γ": true, "ϝ": true, "⪆": true, "ğ": true, "ĝ": true, "г": true, "ġ": true, "≥": true, "⋛": true, "≥": true, "≧": true, "⩾": true, "⩾": true, "⪩": true, "⪀": true, "⪂": true, "⪄": true, "⋛︀": true, "⪔": true, "𝔤": true, "≫": true, "⋙": true, "ℷ": true, "ѓ": true, "≷": true, "⪒": true, "⪥": true, "⪤": true, "≩": true, "⪊": true, "⪊": true, "⪈": true, "⪈": true, "≩": true, "⋧": true, "𝕘": true, "`": true, "ℊ": true, "≳": true, "⪎": true, "⪐": true, ">": true, ">": true, "⪧": true, "⩺": true, "⋗": true, "⦕": true, "⩼": true, "⪆": true, "⥸": true, "⋗": true, "⋛": true, "⪌": true, "≷": true, "≳": true, "≩︀": true, "≩︀": true, "⇔": true, " ": true, "½": true, "ℋ": true, "ъ": true, "↔": true, "⥈": true, "↭": true, "ℏ": true, "ĥ": true, "♥": true, "♥": true, "…": true, "⊹": true, "𝔥": true, "⤥": true, "⤦": true, "⇿": true, "∻": true, "↩": true, "↪": true, "𝕙": true, "―": true, "𝒽": true, "ℏ": true, "ħ": true, "⁃": true, "‐": true, "í": true, "í": true, "⁣": true, "î": true, "î": true, "и": true, "е": true, "¡": true, "¡": true, "⇔": true, "𝔦": true, "ì": true, "ì": true, "ⅈ": true, "⨌": true, "∭": true, "⧜": true, "℩": true, "ij": true, "ī": true, "ℑ": true, "ℐ": true, "ℑ": true, "ı": true, "⊷": true, "Ƶ": true, "∈": true, "℅": true, "∞": true, "⧝": true, "ı": true, "∫": true, "⊺": true, "ℤ": true, "⊺": true, "⨗": true, "⨼": true, "ё": true, "į": true, "𝕚": true, "ι": true, "⨼": true, "¿": true, "¿": true, "𝒾": true, "∈": true, "⋹": true, "⋵": true, "⋴": true, "⋳": true, "∈": true, "⁢": true, "ĩ": true, "і": true, "ï": true, "ï": true, "ĵ": true, "й": true, "𝔧": true, "ȷ": true, "𝕛": true, "𝒿": true, "ј": true, "є": true, "κ": true, "ϰ": true, "ķ": true, "к": true, "𝔨": true, "ĸ": true, "х": true, "ќ": true, "𝕜": true, "𝓀": true, "⇚": true, "⇐": true, "⤛": true, "⤎": true, "≦": true, "⪋": true, "⥢": true, "ĺ": true, "⦴": true, "ℒ": true, "λ": true, "〈": true, "⦑": true, "⟨": true, "⪅": true, "«": true, "«": true, "←": true, "⇤": true, "⤟": true, "⤝": true, "↩": true, "↫": true, "⤹": true, "⥳": true, "↢": true, "⪫": true, "⤙": true, "⪭": true, "⪭︀": true, "⤌": true, "❲": true, "{": true, "[": true, "⦋": true, "⦏": true, "⦍": true, "ľ": true, "ļ": true, 
"⌈": true, "{": true, "л": true, "⤶": true, "“": true, "„": true, "⥧": true, "⥋": true, "↲": true, "≤": true, "←": true, "↢": true, "↽": true, "↼": true, "⇇": true, "↔": true, "⇆": true, "⇋": true, "↭": true, "⋋": true, "⋚": true, "≤": true, "≦": true, "⩽": true, "⩽": true, "⪨": true, "⩿": true, "⪁": true, "⪃": true, "⋚︀": true, "⪓": true, "⪅": true, "⋖": true, "⋚": true, "⪋": true, "≶": true, "≲": true, "⥼": true, "⌊": true, "𝔩": true, "≶": true, "⪑": true, "↽": true, "↼": true, "⥪": true, "▄": true, "љ": true, "≪": true, "⇇": true, "⌞": true, "⥫": true, "◺": true, "ŀ": true, "⎰": true, "⎰": true, "≨": true, "⪉": true, "⪉": true, "⪇": true, "⪇": true, "≨": true, "⋦": true, "⟬": true, "⇽": true, "⟦": true, "⟵": true, "⟷": true, "⟼": true, "⟶": true, "↫": true, "↬": true, "⦅": true, "𝕝": true, "⨭": true, "⨴": true, "∗": true, "_": true, "◊": true, "◊": true, "⧫": true, "(": true, "⦓": true, "⇆": true, "⌟": true, "⇋": true, "⥭": true, "": true, "⊿": true, "‹": true, "𝓁": true, "↰": true, "≲": true, "⪍": true, "⪏": true, "[": true, "‘": true, "‚": true, "ł": true, "<": true, "<": true, "⪦": true, "⩹": true, "⋖": true, "⋋": true, "⋉": true, "⥶": true, "⩻": true, "⦖": true, "◃": true, "⊴": true, "◂": true, "⥊": true, "⥦": true, "≨︀": true, "≨︀": true, "∺": true, "¯": true, "¯": true, "♂": true, "✠": true, "✠": true, "↦": true, "↦": true, "↧": true, "↤": true, "↥": true, "▮": true, "⨩": true, "м": true, "—": true, "∡": true, "𝔪": true, "℧": true, "µ": true, "µ": true, "∣": true, "*": true, "⫰": true, "·": true, "·": true, "−": true, "⊟": true, "∸": true, "⨪": true, "⫛": true, "…": true, "∓": true, "⊧": true, "𝕞": true, "∓": true, "𝓂": true, "∾": true, "μ": true, "⊸": true, "⊸": true, "⋙̸": true, "≫⃒": true, "≫̸": true, "⇍": true, "⇎": true, "⋘̸": true, "≪⃒": true, "≪̸": true, "⇏": true, "⊯": true, "⊮": true, "∇": true, "ń": true, "∠⃒": true, "≉": true, "⩰̸": true, "≋̸": true, "ʼn": true, "≉": true, "♮": true, "♮": true, "ℕ": true, " ": true, " ": true, "≎̸": true, "≏̸": true, "⩃": true, "ň": true, "ņ": true, "≇": true, "⩭̸": true, "⩂": true, "н": true, "–": true, "≠": true, "⇗": true, "⤤": true, "↗": true, "↗": true, "≐̸": true, "≢": true, "⤨": true, "≂̸": true, "∄": true, "∄": true, "𝔫": true, "≧̸": true, "≱": true, "≱": true, "≧̸": true, "⩾̸": true, "⩾̸": true, "≵": true, "≯": true, "≯": true, "⇎": true, "↮": true, "⫲": true, "∋": true, "⋼": true, "⋺": true, "∋": true, "њ": true, "⇍": true, "≦̸": true, "↚": true, "‥": true, "≰": true, "↚": true, "↮": true, "≰": true, "≦̸": true, "⩽̸": true, "⩽̸": true, "≮": true, "≴": true, "≮": true, "⋪": true, "⋬": true, "∤": true, "𝕟": true, "¬": true, "¬": true, "∉": true, "⋹̸": true, "⋵̸": true, "∉": true, "⋷": true, "⋶": true, "∌": true, "∌": true, "⋾": true, "⋽": true, "∦": true, "∦": true, "⫽⃥": true, "∂̸": true, "⨔": true, "⊀": true, "⋠": true, "⪯̸": true, "⊀": true, "⪯̸": true, "⇏": true, "↛": true, "⤳̸": true, "↝̸": true, "↛": true, "⋫": true, "⋭": true, "⊁": true, "⋡": true, "⪰̸": true, "𝓃": true, "∤": true, "∦": true, "≁": true, "≄": true, "≄": true, "∤": true, "∦": true, "⋢": true, "⋣": true, "⊄": true, "⫅̸": true, "⊈": true, "⊂⃒": true, "⊈": true, "⫅̸": true, "⊁": true, "⪰̸": true, "⊅": true, "⫆̸": true, "⊉": true, "⊃⃒": true, "⊉": true, "⫆̸": true, "≹": true, "ñ": true, "ñ": true, "≸": true, "⋪": true, "⋬": true, "⋫": true, "⋭": true, "ν": true, "#": true, "№": true, " ": true, "⊭": true, "⤄": true, "≍⃒": true, "⊬": true, "≥⃒": true, ">⃒": true, "⧞": true, "⤂": true, "≤⃒": true, "<⃒": true, "⊴⃒": true, "⤃": true, "⊵⃒": true, "∼⃒": true, "⇖": 
true, "⤣": true, "↖": true, "↖": true, "⤧": true, "Ⓢ": true, "ó": true, "ó": true, "⊛": true, "⊚": true, "ô": true, "ô": true, "о": true, "⊝": true, "ő": true, "⨸": true, "⊙": true, "⦼": true, "œ": true, "⦿": true, "𝔬": true, "˛": true, "ò": true, "ò": true, "⧁": true, "⦵": true, "Ω": true, "∮": true, "↺": true, "⦾": true, "⦻": true, "‾": true, "⧀": true, "ō": true, "ω": true, "ο": true, "⦶": true, "⊖": true, "𝕠": true, "⦷": true, "⦹": true, "⊕": true, "∨": true, "↻": true, "⩝": true, "ℴ": true, "ℴ": true, "ª": true, "ª": true, "º": true, "º": true, "⊶": true, "⩖": true, "⩗": true, "⩛": true, "ℴ": true, "ø": true, "ø": true, "⊘": true, "õ": true, "õ": true, "⊗": true, "⨶": true, "ö": true, "ö": true, "⌽": true, "∥": true, "¶": true, "¶": true, "∥": true, "⫳": true, "⫽": true, "∂": true, "п": true, "%": true, ".": true, "‰": true, "⊥": true, "‱": true, "𝔭": true, "φ": true, "ϕ": true, "ℳ": true, "☎": true, "π": true, "⋔": true, "ϖ": true, "ℏ": true, "ℎ": true, "ℏ": true, "+": true, "⨣": true, "⊞": true, "⨢": true, "∔": true, "⨥": true, "⩲": true, "±": true, "±": true, "⨦": true, "⨧": true, "±": true, "⨕": true, "𝕡": true, "£": true, "£": true, "≺": true, "⪳": true, "⪷": true, "≼": true, "⪯": true, "≺": true, "⪷": true, "≼": true, "⪯": true, "⪹": true, "⪵": true, "⋨": true, "≾": true, "′": true, "ℙ": true, "⪵": true, "⪹": true, "⋨": true, "∏": true, "⌮": true, "⌒": true, "⌓": true, "∝": true, "∝": true, "≾": true, "⊰": true, "𝓅": true, "ψ": true, " ": true, "𝔮": true, "⨌": true, "𝕢": true, "⁗": true, "𝓆": true, "ℍ": true, "⨖": true, "?": true, "≟": true, """: true, """: true, "⇛": true, "⇒": true, "⤜": true, "⤏": true, "⥤": true, "∽̱": true, "ŕ": true, "√": true, "⦳": true, "〉": true, "⦒": true, "⦥": true, "⟩": true, "»": true, "»": true, "→": true, "⥵": true, "⇥": true, "⤠": true, "⤳": true, "⤞": true, "↪": true, "↬": true, "⥅": true, "⥴": true, "↣": true, "↝": true, "⤚": true, "∶": true, "ℚ": true, "⤍": true, "❳": true, "}": true, "]": true, "⦌": true, "⦎": true, "⦐": true, "ř": true, "ŗ": true, "⌉": true, "}": true, "р": true, "⤷": true, "⥩": true, "”": true, "”": true, "↳": true, "ℜ": true, "ℛ": true, "ℜ": true, "ℝ": true, "▭": true, "®": true, "®": true, "⥽": true, "⌋": true, "𝔯": true, "⇁": true, "⇀": true, "⥬": true, "ρ": true, "ϱ": true, "→": true, "↣": true, "⇁": true, "⇀": true, "⇄": true, "⇌": true, "⇉": true, "↝": true, "⋌": true, "˚": true, "≓": true, "⇄": true, "⇌": true, "": true, "⎱": true, "⎱": true, "⫮": true, "⟭": true, "⇾": true, "⟧": true, "⦆": true, "𝕣": true, "⨮": true, "⨵": true, ")": true, "⦔": true, "⨒": true, "⇉": true, "›": true, "𝓇": true, "↱": true, "]": true, "’": true, "’": true, "⋌": true, "⋊": true, "▹": true, "⊵": true, "▸": true, "⧎": true, "⥨": true, "℞": true, "ś": true, "‚": true, "≻": true, "⪴": true, "⪸": true, "š": true, "≽": true, "⪰": true, "ş": true, "ŝ": true, "⪶": true, "⪺": true, "⋩": true, "⨓": true, "≿": true, "с": true, "⋅": true, "⊡": true, "⩦": true, "⇘": true, "⤥": true, "↘": true, "↘": true, "§": true, "§": true, ";": true, "⤩": true, "∖": true, "∖": true, "✶": true, "𝔰": true, "⌢": true, "♯": true, "щ": true, "ш": true, "∣": true, "∥": true, "­": true, "": true, "σ": true, "ς": true, "ς": true, "∼": true, "⩪": true, "≃": true, "≃": true, "⪞": true, "⪠": true, "⪝": true, "⪟": true, "≆": true, "⨤": true, "⥲": true, "←": true, "∖": true, "⨳": true, "⧤": true, "∣": true, "⌣": true, "⪪": true, "⪬": true, "⪬︀": true, "ь": true, "/": true, "⧄": true, "⌿": true, "𝕤": true, "♠": true, "♠": true, "∥": true, "⊓": true, "⊓︀": true, "⊔": true, "⊔︀": 
true, "⊏": true, "⊑": true, "⊏": true, "⊑": true, "⊐": true, "⊒": true, "⊐": true, "⊒": true, "□": true, "□": true, "▪": true, "▪": true, "→": true, "𝓈": true, "∖": true, "⌣": true, "⋆": true, "☆": true, "★": true, "ϵ": true, "ϕ": true, "¯": true, "⊂": true, "⫅": true, "⪽": true, "⊆": true, "⫃": true, "⫁": true, "⫋": true, "⊊": true, "⪿": true, "⥹": true, "⊂": true, "⊆": true, "⫅": true, "⊊": true, "⫋": true, "⫇": true, "⫕": true, "⫓": true, "≻": true, "⪸": true, "≽": true, "⪰": true, "⪺": true, "⪶": true, "⋩": true, "≿": true, "∑": true, "♪": true, "¹": true, "¹": true, "²": true, "²": true, "³": true, "³": true, "⊃": true, "⫆": true, "⪾": true, "⫘": true, "⊇": true, "⫄": true, "⟉": true, "⫗": true, "⥻": true, "⫂": true, "⫌": true, "⊋": true, "⫀": true, "⊃": true, "⊇": true, "⫆": true, "⊋": true, "⫌": true, "⫈": true, "⫔": true, "⫖": true, "⇙": true, "⤦": true, "↙": true, "↙": true, "⤪": true, "ß": true, "ß": true, "⌖": true, "τ": true, "⎴": true, "ť": true, "ţ": true, "т": true, "⃛": true, "⌕": true, "𝔱": true, "∴": true, "∴": true, "θ": true, "ϑ": true, "ϑ": true, "≈": true, "∼": true, " ": true, "≈": true, "∼": true, "þ": true, "þ": true, "˜": true, "×": true, "×": true, "⊠": true, "⨱": true, "⨰": true, "∭": true, "⤨": true, "⊤": true, "⌶": true, "⫱": true, "𝕥": true, "⫚": true, "⤩": true, "‴": true, "™": true, "▵": true, "▿": true, "◃": true, "⊴": true, "≜": true, "▹": true, "⊵": true, "◬": true, "≜": true, "⨺": true, "⨹": true, "⧍": true, "⨻": true, "⏢": true, "𝓉": true, "ц": true, "ћ": true, "ŧ": true, "≬": true, "↞": true, "↠": true, "⇑": true, "⥣": true, "ú": true, "ú": true, "↑": true, "ў": true, "ŭ": true, "û": true, "û": true, "у": true, "⇅": true, "ű": true, "⥮": true, "⥾": true, "𝔲": true, "ù": true, "ù": true, "↿": true, "↾": true, "▀": true, "⌜": true, "⌜": true, "⌏": true, "◸": true, "ū": true, "¨": true, "¨": true, "ų": true, "𝕦": true, "↑": true, "↕": true, "↿": true, "↾": true, "⊎": true, "υ": true, "ϒ": true, "υ": true, "⇈": true, "⌝": true, "⌝": true, "⌎": true, "ů": true, "◹": true, "𝓊": true, "⋰": true, "ũ": true, "▵": true, "▴": true, "⇈": true, "ü": true, "ü": true, "⦧": true, "⇕": true, "⫨": true, "⫩": true, "⊨": true, "⦜": true, "ϵ": true, "ϰ": true, "∅": true, "ϕ": true, "ϖ": true, "∝": true, "↕": true, "ϱ": true, "ς": true, "⊊︀": true, "⫋︀": true, "⊋︀": true, "⫌︀": true, "ϑ": true, "⊲": true, "⊳": true, "в": true, "⊢": true, "∨": true, "⊻": true, "≚": true, "⋮": true, "|": true, "|": true, "𝔳": true, "⊲": true, "⊂⃒": true, "⊃⃒": true, "𝕧": true, "∝": true, "⊳": true, "𝓋": true, "⫋︀": true, "⊊︀": true, "⫌︀": true, "⊋︀": true, "⦚": true, "ŵ": true, "⩟": true, "∧": true, "≙": true, "℘": true, "𝔴": true, "𝕨": true, "℘": true, "≀": true, "≀": true, "𝓌": true, "⋂": true, "◯": true, "⋃": true, "▽": true, "𝔵": true, "⟺": true, "⟷": true, "ξ": true, "⟸": true, "⟵": true, "⟼": true, "⋻": true, "⨀": true, "𝕩": true, "⨁": true, "⨂": true, "⟹": true, "⟶": true, "𝓍": true, "⨆": true, "⨄": true, "△": true, "⋁": true, "⋀": true, "ý": true, "ý": true, "я": true, "ŷ": true, "ы": true, "¥": true, "¥": true, "𝔶": true, "ї": true, "𝕪": true, "𝓎": true, "ю": true, "ÿ": true, "ÿ": true, "ź": true, "ž": true, "з": true, "ż": true, "ℨ": true, "ζ": true, "𝔷": true, "ж": true, "⇝": true, "𝕫": true, "𝓏": true, "": true, "": true, } blackfriday-2.1.0/esc.go 0000664 0000000 0000000 00000002527 13745714152 0015105 0 ustar 00root root 0000000 0000000 package blackfriday import ( "html" "io" ) var htmlEscaper = [256][]byte{ '&': []byte("&"), '<': []byte("<"), '>': []byte(">"), '"': 
[]byte("&quot;"),
}

// escapeHTML writes s to w, escaping &, <, > and ", but leaving any valid,
// already-encoded HTML entity untouched.
func escapeHTML(w io.Writer, s []byte) {
	escapeEntities(w, s, false)
}

// escapeAllHTML writes s to w, escaping &, <, > and " even when the ampersand
// starts a valid HTML entity.
func escapeAllHTML(w io.Writer, s []byte) {
	escapeEntities(w, s, true)
}

// escapeEntities writes s to w, replacing every byte that has an entry in
// htmlEscaper with its escape sequence. When escapeValidEntities is false,
// ampersands that already start a valid HTML entity are copied through as-is.
func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) {
	var start, end int
	for end < len(s) {
		escSeq := htmlEscaper[s[end]]
		if escSeq != nil {
			isEntity, entityEnd := nodeIsEntity(s, end)
			if isEntity && !escapeValidEntities {
				w.Write(s[start : entityEnd+1])
				start = entityEnd + 1
			} else {
				w.Write(s[start:end])
				w.Write(escSeq)
				start = end + 1
			}
		}
		end++
	}
	if start < len(s) && end <= len(s) {
		w.Write(s[start:end])
	}
}

// nodeIsEntity reports whether s[end:] starts with a valid HTML entity and,
// if so, the position at which that entity ends.
func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) {
	isEntity = false
	endEntityPos = end + 1
	if s[end] == '&' {
		for endEntityPos < len(s) {
			if s[endEntityPos] == ';' {
				if entities[string(s[end:endEntityPos+1])] {
					isEntity = true
					break
				}
			}
			if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' {
				break
			}
			endEntityPos++
		}
	}
	return isEntity, endEntityPos
}

// escLink decodes any entities already present in text and then re-escapes
// the result, so that link destinations end up escaped exactly once.
func escLink(w io.Writer, text []byte) {
	unesc := html.UnescapeString(string(text))
	escapeHTML(w, []byte(unesc))
}
blackfriday-2.1.0/esc_test.go 0000664 0000000 0000000 00000002257 13745714152 0016144 0 ustar 00root root 0000000 0000000 package blackfriday

import (
	"bytes"
	"testing"
)

func TestEsc(t *testing.T) {
	t.Parallel()
	tests := []string{
		"abc", "abc",
		"a&c", "a&amp;c",
		"<", "&lt;",
		"[]:<", "[]:&lt;",
		"Hello |"
	processingInstruction = "[<][?].*?[?][>]"
	singleQuotedValue     = "'[^']*'"
	tagName               = "[A-Za-z][A-Za-z0-9-]*"
	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
)

// HTMLRendererParameters is a collection of supplementary parameters tweaking
// the behavior of various parts of HTML renderer.
type HTMLRendererParameters struct {
	// Prepend this text to each relative URL.
	AbsolutePrefix string
	// Add this text to each footnote anchor, to ensure uniqueness.
	FootnoteAnchorPrefix string
	// Show this text inside the <a> tag for a footnote return link, if the
	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
	// <sup>[return]</sup> is used.
	FootnoteReturnLinkContents string
	// If set, add this text to the front of each Heading ID, to ensure
	// uniqueness.
	HeadingIDPrefix string
	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
	HeadingIDSuffix string
	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
	HeadingLevelOffset int

	preTag             = []byte("<pre>")
	preCloseTag        = []byte("</pre>")
	codeTag            = []byte("<code>")
	codeCloseTag       = []byte("</code>")
	pTag               = []byte("<p>")
	pCloseTag          = []byte("</p>")
	blockquoteTag      = []byte("<blockquote>")
	blockquoteCloseTag = []byte("</blockquote>")
	hrTag              = []byte("