cmd/stdiscosrv: New discovery server (fixes #4618)
This is a new revision of the discovery server. Relevant changes and non-changes: - Protocol towards clients is unchanged. - Recommended large scale design is still to be deployed behind nginx (I tested, and it's still a lot faster at terminating TLS). - Database backend is leveldb again, only. It scales enough, is easy to setup, and we don't need any backend to take care of. - Server supports replication. This is a simple TCP channel - protect it with a firewall when deploying over the internet. (We deploy this within the same datacenter, and with firewall.) Any incoming client announces are sent over the replication channel(s) to other peer discosrvs. Incoming replication changes are applied to the database as if they came from clients, but without the TLS/certificate overhead. - Metrics are exposed using the prometheus library, when enabled. - The database values and replication protocol is protobuf, because JSON was quite CPU intensive when I tried that and benchmarked it. - The "Retry-After" value for failed lookups gets slowly increased from a default of 120 seconds, by 5 seconds for each failed lookup, independently by each discosrv. This lowers the query load over time for clients that are never seen. The Retry-After maxes out at 3600 after a couple of weeks of this increase. The number of failed lookups is stored in the database, now and then (avoiding making each lookup a database put). All in all this means clients can be pointed towards a cluster using just multiple A / AAAA records to gain both load sharing and redundancy (if one is down, clients will talk to the remaining ones). GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4648
This commit is contained in:
9
vendor/github.com/a8m/mark/LICENSE
generated
vendored
Normal file
9
vendor/github.com/a8m/mark/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
The MIT License
|
||||
|
||||
Copyright (c) 2015 Ariel Mashraki
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
94
vendor/github.com/a8m/mark/cmd/mark/main.go
generated
vendored
Normal file
94
vendor/github.com/a8m/mark/cmd/mark/main.go
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
// mark command line tool. available at https://github.com/a8m/mark
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/a8m/mark"
|
||||
)
|
||||
|
||||
// Command-line flags. The usage strings are empty because help text is
// printed from the usage template instead of flag's default output.
var (
	input     = flag.String("i", "", "")            // input file path; stdin when empty
	output    = flag.String("o", "", "")            // output file path; stdout when empty
	smarty    = flag.Bool("smartypants", false, "") // enable "smart" typographic punctuation
	fractions = flag.Bool("fractions", false, "")   // render fractions as HTML elements
)
|
||||
|
||||
// usage is the help text printed by flag.Usage. It fixes two typos in the
// user-facing text ("typograhic" -> "typographic", "Traslate fraction like"
// -> "Translate fractions"). NOTE(review): the column layout below is
// reconstructed; the original raw-string indentation was lost in extraction.
var usage = `Usage: mark [options...] <input>

Options:
  -i            Specify file input, otherwise use last argument as input file.
                If no input file is specified, read from stdin.
  -o            Specify file output. If none is specified, write to stdout.

  -smartypants  Use "smart" typographic punctuation for things like
                quotes and dashes.
  -fractions    Translate fractions to suitable HTML elements
`
|
||||
|
||||
func main() {
|
||||
flag.Usage = func() {
|
||||
fmt.Fprint(os.Stderr, fmt.Sprintf(usage))
|
||||
}
|
||||
flag.Parse()
|
||||
// read
|
||||
var reader *bufio.Reader
|
||||
if *input != "" {
|
||||
file, err := os.Open(*input)
|
||||
if err != nil {
|
||||
usageAndExit(fmt.Sprintf("Error to open file input: %s.", *input))
|
||||
}
|
||||
defer file.Close()
|
||||
reader = bufio.NewReader(file)
|
||||
} else {
|
||||
stat, err := os.Stdin.Stat()
|
||||
if err != nil || (stat.Mode()&os.ModeCharDevice) != 0 {
|
||||
usageAndExit("")
|
||||
}
|
||||
reader = bufio.NewReader(os.Stdin)
|
||||
}
|
||||
// collect data
|
||||
var data string
|
||||
for {
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
usageAndExit("failed to reading input.")
|
||||
}
|
||||
data += line
|
||||
}
|
||||
// write
|
||||
var (
|
||||
err error
|
||||
file = os.Stdout
|
||||
)
|
||||
if *output != "" {
|
||||
if file, err = os.Create(*output); err != nil {
|
||||
usageAndExit("error to create the wanted output file.")
|
||||
}
|
||||
}
|
||||
// mark rendering
|
||||
opts := mark.DefaultOptions()
|
||||
opts.Smartypants = *smarty
|
||||
opts.Fractions = *fractions
|
||||
m := mark.New(data, opts)
|
||||
if _, err := file.WriteString(m.Render()); err != nil {
|
||||
usageAndExit(fmt.Sprintf("error writing output to: %s.", file.Name()))
|
||||
}
|
||||
}
|
||||
|
||||
// usageAndExit prints msg (when non-empty) followed by the usage text to
// stderr, then exits with status 1.
func usageAndExit(msg string) {
	if msg != "" {
		// msg is not a format string; the original Fprintf would corrupt
		// output whenever msg contained a '%' (e.g. a filename).
		fmt.Fprint(os.Stderr, msg)
		fmt.Fprint(os.Stderr, "\n\n")
	}
	flag.Usage()
	fmt.Fprint(os.Stderr, "\n")
	os.Exit(1)
}
|
||||
92
vendor/github.com/a8m/mark/grammar.go
generated
vendored
Normal file
92
vendor/github.com/a8m/mark/grammar.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
package mark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// Block Grammar: regexps used by the block-level lexer. Each is compiled
// once at package init via MustCompile.
var (
	reHr         = regexp.MustCompile(`^(?:(?:\* *){3,}|(?:_ *){3,}|(?:- *){3,}) *(?:\n+|$)`)
	reHeading    = regexp.MustCompile(`^ *(#{1,6})(?: +#*| +([^\n]*?)|)(?: +#*|) *(?:\n|$)`)
	reLHeading   = regexp.MustCompile(`^([^\n]+?) *\n {0,3}(=|-){1,} *(?:\n+|$)`)
	reBlockQuote = regexp.MustCompile(`^ *>[^\n]*(\n[^\n]+)*\n*`)
	reDefLink    = regexp.MustCompile(`(?s)^ *\[([^\]]+)\]: *\n? *<?([^\s>]+)>?(?: *\n? *["'(](.+?)['")])? *(?:\n+|$)`)
	// reSpaceGen builds a per-line regexp matching 1..i leading spaces;
	// used to strip indentation from list items and fenced code blocks.
	reSpaceGen = func(i int) *regexp.Regexp {
		return regexp.MustCompile(fmt.Sprintf(`(?m)^ {1,%d}`, i))
	}
)

// reList bundles the regexps and pre-bound scanners used for list lexing.
var reList = struct {
	item, marker, loose   *regexp.Regexp
	scanLine, scanNewLine func(src string) string
}{
	regexp.MustCompile(`^( *)(?:[*+-]|\d{1,9}\.) (.*)(?:\n|)`),
	regexp.MustCompile(`^ *([*+-]|\d+\.) +`),
	regexp.MustCompile(`(?m)\n\n(.*)`),
	regexp.MustCompile(`^(.*)(?:\n|)`).FindString,
	regexp.MustCompile(`^\n{1,}`).FindString,
}

// reCodeBlock matches indented (4-space) code blocks; trim is a pre-bound
// ReplaceAllLiteralString that strips up to 4 leading spaces per line.
var reCodeBlock = struct {
	*regexp.Regexp
	trim func(src, repl string) string
}{
	regexp.MustCompile(`^( {4}[^\n]+(?: *\n)*)+`),
	regexp.MustCompile("(?m)^( {0,4})").ReplaceAllLiteralString,
}

// reGfmCode matches the opening fence of a GFM code block; endGen builds
// the matching closing-fence regexp from the fence character and length.
var reGfmCode = struct {
	*regexp.Regexp
	endGen func(end string, i int) *regexp.Regexp
}{
	regexp.MustCompile("^( {0,3})([`~]{3,}) *(\\S*)?(?:.*)"),
	func(end string, i int) *regexp.Regexp {
		return regexp.MustCompile(fmt.Sprintf(`(?s)(.*?)(?:((?m)^ {0,3}%s{%d,} *$)|$)`, end, i))
	},
}

// reTable matches pipe tables: item is the regular form, itemLp the
// leading-pipe form; split/trim are pre-bound cell helpers.
var reTable = struct {
	item, itemLp *regexp.Regexp
	split        func(s string, n int) []string
	trim         func(src, repl string) string
}{
	regexp.MustCompile(`^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*`),
	regexp.MustCompile(`(^ *\|.+)\n( *\| *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*`),
	regexp.MustCompile(` *\| *`).Split,
	regexp.MustCompile(`^ *\| *| *\| *$`).ReplaceAllString,
}

// reHTML bundles the patterns used for (block-level) HTML recognition.
var reHTML = struct {
	CDATA_OPEN, CDATA_CLOSE  string
	item, comment, tag, span *regexp.Regexp
	endTagGen                func(tag string) *regexp.Regexp
}{
	`![CDATA[`,
	"?\\]\\]",
	regexp.MustCompile(`^<(\w+|!\[CDATA\[)(?:"[^"]*"|'[^']*'|[^'">])*?>`),
	regexp.MustCompile(`(?sm)<!--.*?-->`),
	regexp.MustCompile(`^<!--.*?-->|^<\/?\w+(?:"[^"]*"|'[^']*'|[^'">])*?>`),
	// TODO: Add all span-tags and move to config.
	regexp.MustCompile(`^(a|em|strong|small|s|q|data|time|code|sub|sup|i|b|u|span|br|del|img)$`),
	func(tag string) *regexp.Regexp {
		return regexp.MustCompile(fmt.Sprintf(`(?s)(.+?)<\/%s> *`, tag))
	},
}

// Inline Grammar. reLinkText/reLinkHref/reEmphasise are pattern fragments
// (plain strings) interpolated into the compiled regexps below.
var (
	reBr        = regexp.MustCompile(`^(?: {2,}|\\)\n`)
	reLinkText  = `(?:\[[^\]]*\]|[^\[\]]|\])*`
	reLinkHref  = `\s*<?(.*?)>?(?:\s+['"\(](.*?)['"\)])?\s*`
	reGfmLink   = regexp.MustCompile(`^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])`)
	reLink      = regexp.MustCompile(fmt.Sprintf(`(?s)^!?\[(%s)\]\(%s\)`, reLinkText, reLinkHref))
	reAutoLink  = regexp.MustCompile(`^<([^ >]+(@|:\/)[^ >]+)>`)
	reRefLink   = regexp.MustCompile(`^!?\[((?:\[[^\]]*\]|[^\[\]]|\])*)\](?:\s*\[([^\]]*)\])?`)
	reImage     = regexp.MustCompile(fmt.Sprintf(`(?s)^!?\[(%s)\]\(%s\)`, reLinkText, reLinkHref))
	reCode      = regexp.MustCompile("(?s)^`{1,2}\\s*(.*?[^`])\\s*`{1,2}")
	reStrike    = regexp.MustCompile(`(?s)^~{2}(.+?)~{2}`)
	reEmphasise = `(?s)^_{%[1]d}(\S.*?_*)_{%[1]d}|^\*{%[1]d}(\S.*?\**)\*{%[1]d}`
	reItalic    = regexp.MustCompile(fmt.Sprintf(reEmphasise, 1))
	reStrong    = regexp.MustCompile(fmt.Sprintf(reEmphasise, 2))
)
|
||||
568
vendor/github.com/a8m/mark/lexer.go
generated
vendored
Normal file
568
vendor/github.com/a8m/mark/lexer.go
generated
vendored
Normal file
@@ -0,0 +1,568 @@
|
||||
package mark
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Pos is a byte offset into the input string.
type Pos int

// itemType identifies the type of lex items.
type itemType int

// item represents a token or text string returned from the scanner.
type item struct {
	typ itemType // The type of this item.
	pos Pos      // The starting position, in bytes, of this item in the input string.
	val string   // The value of this item.
}

// eof is the sentinel rune returned by next() when the input is exhausted.
const eof = -1

// Item types emitted by the lexer.
const (
	itemError itemType = iota // Error occurred; value is text of error
	itemEOF
	itemNewLine
	itemHTML
	itemHeading
	itemLHeading
	itemBlockQuote
	itemList
	itemListItem
	itemLooseItem
	itemCodeBlock
	itemGfmCodeBlock
	itemHr
	itemTable
	itemLpTable
	itemTableRow
	itemTableCell
	itemStrong
	itemItalic
	itemStrike
	itemCode
	itemLink
	itemDefLink
	itemRefLink
	itemAutoLink
	itemGfmLink
	itemImage
	itemRefImage
	itemText
	itemBr
	itemPipe
	itemIndent
)

// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn

// Lexer is the interface the parser consumes; it is satisfied by *lexer.
type Lexer interface {
	nextItem() item
}

// lexer holds the state of the scanner.
type lexer struct {
	input   string    // the string being scanned
	state   stateFn   // the next lexing function to enter
	pos     Pos       // current position in the input
	start   Pos       // start position of this item
	width   Pos       // width of last rune read from input
	lastPos Pos       // position of most recent item returned by nextItem
	items   chan item // channel of scanned items
}
|
||||
|
||||
// lex creates a new lexer for the input string.
|
||||
func lex(input string) *lexer {
|
||||
l := &lexer{
|
||||
input: input,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.run()
|
||||
return l
|
||||
}
|
||||
|
||||
// lexInline create a new lexer for one phase lexing(inline blocks).
|
||||
func lexInline(input string) *lexer {
|
||||
l := &lexer{
|
||||
input: input,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.lexInline()
|
||||
return l
|
||||
}
|
||||
|
||||
// run runs the state machine for the lexer.
|
||||
func (l *lexer) run() {
|
||||
for l.state = lexAny; l.state != nil; {
|
||||
l.state = l.state(l)
|
||||
}
|
||||
close(l.items)
|
||||
}
|
||||
|
||||
// next return the next rune in the input
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.width = Pos(w)
|
||||
l.pos += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
// lexAny is the top-level dispatcher: it peeks at the current character
// and forwards control to the scanner responsible for that construct.
func lexAny(l *lexer) stateFn {
	switch r := l.peek(); r {
	case '*', '-', '_':
		// Horizontal rule candidates; lexHr falls through to lexList
		// when the rule pattern does not match.
		return lexHr
	case '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		return lexList
	case '<':
		return lexHTML
	case '>':
		return lexBlockQuote
	case '[':
		return lexDefLink
	case '#':
		return lexHeading
	case '`', '~':
		return lexGfmCode
	case ' ':
		if reCodeBlock.MatchString(l.input[l.pos:]) {
			return lexCode
		} else if reGfmCode.MatchString(l.input[l.pos:]) {
			return lexGfmCode
		}
		// Keep moving forward until we get all the indentation size
		for ; r == l.peek(); r = l.next() {
		}
		l.emit(itemIndent)
		return lexAny
	case '|':
		// Leading-pipe table; otherwise fall through to the generic
		// table / text handling below.
		if m := reTable.itemLp.MatchString(l.input[l.pos:]); m {
			l.emit(itemLpTable)
			return lexTable
		}
		fallthrough
	default:
		if m := reTable.item.MatchString(l.input[l.pos:]); m {
			l.emit(itemTable)
			return lexTable
		}
		return lexText
	}
}
|
||||
|
||||
// lexHeading test if the current text position is an heading item.
|
||||
// is so, it will emit an item and return back to lenAny function
|
||||
// else, lex it as a simple text value
|
||||
func lexHeading(l *lexer) stateFn {
|
||||
if m := reHeading.FindString(l.input[l.pos:]); m != "" {
|
||||
l.pos += Pos(len(m))
|
||||
l.emit(itemHeading)
|
||||
return lexAny
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexHr test if the current text position is an horizontal rules item.
|
||||
// is so, it will emit an horizontal rule item and return back to lenAny function
|
||||
// else, forward it to lexList function
|
||||
func lexHr(l *lexer) stateFn {
|
||||
if match := reHr.FindString(l.input[l.pos:]); match != "" {
|
||||
l.pos += Pos(len(match))
|
||||
l.emit(itemHr)
|
||||
return lexAny
|
||||
}
|
||||
return lexList
|
||||
}
|
||||
|
||||
// lexGfmCode tests whether the current position starts a GFM fenced code
// block. If so, it builds an end-fence regexp from the fence character
// ([`~]) and its length, scans to the closing fence, emits an
// itemGfmCodeBlock and returns to lexAny. Otherwise the input is lexed
// as plain inline text.
func lexGfmCode(l *lexer) stateFn {
	if match := reGfmCode.FindStringSubmatch(l.input[l.pos:]); len(match) != 0 {
		l.pos += Pos(len(match[0]))
		fence := match[2]
		// Generate Regexp based on fence type[`~] and length
		reGfmEnd := reGfmCode.endGen(fence[0:1], len(fence))
		infoContainer := reGfmEnd.FindStringSubmatch(l.input[l.pos:])
		l.pos += Pos(len(infoContainer[0]))
		infoString := infoContainer[1]
		// Strip the opening fence's indentation from each body line.
		if indent := len(match[1]); indent > 0 {
			reSpace := reSpaceGen(indent)
			infoString = reSpace.ReplaceAllString(infoString, "")
		}
		l.emit(itemGfmCodeBlock, match[0]+infoString)
		return lexAny
	}
	return lexText
}
|
||||
|
||||
// lexCode scans code block.
|
||||
func lexCode(l *lexer) stateFn {
|
||||
match := reCodeBlock.FindString(l.input[l.pos:])
|
||||
l.pos += Pos(len(match))
|
||||
l.emit(itemCodeBlock)
|
||||
return lexAny
|
||||
}
|
||||
|
||||
// lexText scans plain text until end-of-line ('\n') or eof, also
// detecting Setext-style (underlined) headings along the way.
func lexText(l *lexer) stateFn {
	// Drain any accumulated text before emitting the terminating item.
	emit := func(item itemType, pos Pos) {
		if l.pos > l.start {
			l.emit(itemText)
		}
		l.pos += pos
		l.emit(item)
	}
Loop:
	for {
		switch r := l.peek(); r {
		case eof:
			emit(itemEOF, Pos(0))
			break Loop
		case '\n':
			// CM 4.4: An indented code block cannot interrupt a paragraph.
			// NOTE(review): this prefix looks like it should be four spaces
			// (an indented code block); the single space here may be an
			// extraction artifact — confirm against upstream.
			if l.pos > l.start && strings.HasPrefix(l.input[l.pos+1:], " ") {
				l.next()
				continue
			}
			emit(itemNewLine, l.width)
			break Loop
		default:
			// Test for Setext-style headers
			if m := reLHeading.FindString(l.input[l.pos:]); m != "" {
				emit(itemLHeading, Pos(len(m)))
				break Loop
			}
			l.next()
		}
	}
	return lexAny
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (l *lexer) backup() {
|
||||
l.pos -= l.width
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// emit passes an item back to the client.
|
||||
func (l *lexer) emit(t itemType, s ...string) {
|
||||
if len(s) == 0 {
|
||||
s = append(s, l.input[l.start:l.pos])
|
||||
}
|
||||
l.items <- item{t, l.start, s[0]}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// lexItem return the next item token, called by the parser.
|
||||
func (l *lexer) nextItem() item {
|
||||
item := <-l.items
|
||||
l.lastPos = l.pos
|
||||
return item
|
||||
}
|
||||
|
||||
// lexInline performs single-phase lexing of inline constructs (emphasis,
// links, inline code, line breaks, inline HTML, escapes), emitting items
// until the input is exhausted, then closes the items channel.
func (l *lexer) lexInline() {
	escape := regexp.MustCompile("^\\\\([\\`*{}\\[\\]()#+\\-.!_>~|])")
	// Drain any accumulated plain text before emitting the matched item.
	emit := func(item itemType, pos int) {
		if l.pos > l.start {
			l.emit(itemText)
		}
		l.pos += Pos(pos)
		l.emit(item)
	}
Loop:
	for {
		switch r := l.peek(); r {
		case eof:
			if l.pos > l.start {
				l.emit(itemText)
			}
			break Loop
		// backslash escaping: the escaped character is emitted as text
		case '\\':
			if m := escape.FindStringSubmatch(l.input[l.pos:]); len(m) != 0 {
				if l.pos > l.start {
					l.emit(itemText)
				}
				l.pos += Pos(len(m[0]))
				l.emit(itemText, m[1])
				break
			}
			fallthrough
		case ' ':
			if m := reBr.FindString(l.input[l.pos:]); m != "" {
				// pos - length of new-line
				emit(itemBr, len(m))
				break
			}
			l.next()
		case '_', '*', '~', '`':
			input := l.input[l.pos:]
			// Strong
			if m := reStrong.FindString(input); m != "" {
				emit(itemStrong, len(m))
				break
			}
			// Italic
			if m := reItalic.FindString(input); m != "" {
				emit(itemItalic, len(m))
				break
			}
			// Strike
			if m := reStrike.FindString(input); m != "" {
				emit(itemStrike, len(m))
				break
			}
			// InlineCode
			if m := reCode.FindString(input); m != "" {
				emit(itemCode, len(m))
				break
			}
			l.next()
		// itemLink, itemImage, itemRefLink, itemRefImage
		case '[', '!':
			input := l.input[l.pos:]
			if m := reLink.FindString(input); m != "" {
				pos := len(m)
				if r == '[' {
					emit(itemLink, pos)
				} else {
					emit(itemImage, pos)
				}
				break
			}
			if m := reRefLink.FindString(input); m != "" {
				pos := len(m)
				if r == '[' {
					emit(itemRefLink, pos)
				} else {
					emit(itemRefImage, pos)
				}
				break
			}
			l.next()
		// itemAutoLink, htmlBlock
		case '<':
			if m := reAutoLink.FindString(l.input[l.pos:]); m != "" {
				emit(itemAutoLink, len(m))
				break
			}
			if match, res := l.matchHTML(l.input[l.pos:]); match {
				emit(itemHTML, len(res))
				break
			}
			l.next()
		default:
			if m := reGfmLink.FindString(l.input[l.pos:]); m != "" {
				emit(itemGfmLink, len(m))
				break
			}
			l.next()
		}
	}
	close(l.items)
}
|
||||
|
||||
// lexHTML.
|
||||
func lexHTML(l *lexer) stateFn {
|
||||
if match, res := l.matchHTML(l.input[l.pos:]); match {
|
||||
l.pos += Pos(len(res))
|
||||
l.emit(itemHTML)
|
||||
return lexAny
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// matchHTML tests whether input starts with a block-level HTML pattern
// (comment, self-closed element, or an open/close tag pair) and returns
// the matched source text on success.
func (l *lexer) matchHTML(input string) (bool, string) {
	if m := reHTML.comment.FindString(input); m != "" {
		return true, m
	}
	if m := reHTML.item.FindStringSubmatch(input); len(m) != 0 {
		el, name := m[0], m[1]
		// Span-level tags are left to the inline lexer (treated as text).
		if reHTML.span.MatchString(name) {
			return false, ""
		}
		// if it's a self-closed html element, but not an itemAutoLink
		if strings.HasSuffix(el, "/>") && !reAutoLink.MatchString(el) {
			return true, el
		}
		// CDATA sections close with a different token than they open.
		if name == reHTML.CDATA_OPEN {
			name = reHTML.CDATA_CLOSE
		}
		reEndTag := reHTML.endTagGen(name)
		if m := reEndTag.FindString(input); m != "" {
			return true, m
		}
	}
	return false, ""
}
|
||||
|
||||
// lexDefLink scans link definition
|
||||
func lexDefLink(l *lexer) stateFn {
|
||||
if m := reDefLink.FindString(l.input[l.pos:]); m != "" {
|
||||
l.pos += Pos(len(m))
|
||||
l.emit(itemDefLink)
|
||||
return lexAny
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexList scans ordered and unordered lists: it collects the raw items
// via matchList, then emits an itemList (carrying the indent) followed by
// one itemListItem/itemLooseItem per entry.
func lexList(l *lexer) stateFn {
	match, items := l.matchList(l.input[l.pos:])
	if !match {
		return lexText
	}
	var space int
	var typ itemType
	for i, item := range items {
		// Emit itemList on the first loop
		if i == 0 {
			l.emit(itemList, reList.marker.FindStringSubmatch(item)[1])
		}
		// Initialize each loop
		typ = itemListItem
		space = len(item)
		l.pos += Pos(space)
		item = reList.marker.ReplaceAllString(item, "")
		// Indented item: strip the marker-width indentation from each line.
		if strings.Contains(item, "\n ") {
			space -= len(item)
			reSpace := reSpaceGen(space)
			item = reSpace.ReplaceAllString(item, "")
		}
		// If the current item is loose (contains a blank line)...
		// NOTE: the loop variable l here shadows the lexer receiver.
		for _, l := range reList.loose.FindAllString(item, -1) {
			if len(strings.TrimSpace(l)) > 0 || i != len(items)-1 {
				typ = itemLooseItem
				break
			}
		}
		// ...or the previous item ended with a blank line.
		if typ != itemLooseItem && i > 0 && strings.HasSuffix(items[i-1], "\n\n") {
			typ = itemLooseItem
		}
		l.emit(typ, strings.TrimSpace(item))
	}
	return lexAny
}
|
||||
|
||||
// matchList tests whether input starts a list and, if so, splits it into
// its raw item strings (one per same-depth marker), consuming continuation
// lines until a blank-line break, a link definition, or a horizontal rule.
func (l *lexer) matchList(input string) (bool, []string) {
	var res []string
	reItem := reList.item
	if !reItem.MatchString(input) {
		return false, res
	}
	// First item
	m := reItem.FindStringSubmatch(input)
	item, depth := m[0], len(m[1])
	input = input[len(item):]
	// Loop over the input
	for len(input) > 0 {
		// Count new-lines('\n')
		if m := reList.scanNewLine(input); m != "" {
			item += m
			input = input[len(m):]
			// Two or more newlines, or no further item/indentation, ends the list.
			if len(m) >= 2 || !reItem.MatchString(input) && !strings.HasPrefix(input, " ") {
				break
			}
		}
		// A link definition or horizontal rule terminates the list.
		if reDefLink.MatchString(input) || reHr.MatchString(input) {
			break
		}
		// A marker at the same depth starts the next item.
		if m := reItem.FindStringSubmatch(input); len(m) > 0 && len(m[1]) == depth {
			if item != "" {
				res = append(res, item)
			}
			item = m[0]
			input = input[len(item):]
		} else {
			// Otherwise the line continues the current item.
			m := reList.scanLine(input)
			item += m
			input = input[len(m):]
		}
	}
	// Drain the final item.
	if item != "" {
		res = append(res, item)
	}
	return true, res
}
|
||||
|
||||
// matchBlockQuote tests whether input starts a blockquote and returns the
// matched text, truncated before any embedded link definition or
// horizontal rule.
func (l *lexer) matchBlockQuote(input string) (bool, string) {
	match := reBlockQuote.FindString(input)
	if match == "" {
		return false, match
	}
	lines := strings.Split(match, "\n")
	for i, line := range lines {
		// A link definition or horizontal rule cuts the match at this point.
		if reDefLink.MatchString(line) || reHr.MatchString(line) {
			match = strings.Join(lines[0:i], "\n")
			break
		}
	}
	return true, match
}
|
||||
|
||||
// lexBlockQuote
|
||||
func lexBlockQuote(l *lexer) stateFn {
|
||||
if match, res := l.matchBlockQuote(l.input[l.pos:]); match {
|
||||
l.pos += Pos(len(res))
|
||||
l.emit(itemBlockQuote)
|
||||
return lexAny
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexTable scans a table (regular or leading-pipe form) and, for each
// non-empty row, emits an itemTableRow followed by one itemTableCell per
// cell. lexAny has already emitted itemTable/itemLpTable before entry.
func lexTable(l *lexer) stateFn {
	re := reTable.item
	if l.peek() == '|' {
		re = reTable.itemLp
	}
	table := re.FindStringSubmatch(l.input[l.pos:])
	l.pos += Pos(len(table[0]))
	l.start = l.pos
	// Ignore the first match, and flatten all rows (by splitting on \n).
	rows := append(table[1:3], strings.Split(table[3], "\n")...)
	for _, row := range rows {
		if row == "" {
			continue
		}
		l.emit(itemTableRow)
		rawCells := reTable.trim(row, "")
		cells := reTable.split(rawCells, -1)
		// Emit cells in the current row
		for _, cell := range cells {
			l.emit(itemTableCell, cell)
		}
	}
	return lexAny
}
|
||||
60
vendor/github.com/a8m/mark/mark.go
generated
vendored
Normal file
60
vendor/github.com/a8m/mark/mark.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
package mark
|
||||
|
||||
import "strings"
|
||||
|
||||
// Mark bundles the markdown input with its embedded parser state.
type Mark struct {
	*parse
	Input string
}

// Options configures a Mark object.
// Set `Smartypants` and `Fractions` to true to enable
// smartypants and smartfractions rendering.
type Options struct {
	Gfm         bool
	Tables      bool
	Smartypants bool
	Fractions   bool
}
|
||||
|
||||
// DefaultOptions return an options struct with default configuration
|
||||
// it's means that only Gfm, and Tables set to true.
|
||||
func DefaultOptions() *Options {
|
||||
return &Options{
|
||||
Gfm: true,
|
||||
Tables: true,
|
||||
}
|
||||
}
|
||||
|
||||
// New return a new Mark
|
||||
func New(input string, opts *Options) *Mark {
|
||||
// Preprocessing
|
||||
input = strings.Replace(input, "\t", " ", -1)
|
||||
if opts == nil {
|
||||
opts = DefaultOptions()
|
||||
}
|
||||
return &Mark{
|
||||
Input: input,
|
||||
parse: newParse(input, opts),
|
||||
}
|
||||
}
|
||||
|
||||
// Render parses the input and returns the rendered output.
func (m *Mark) Render() string {
	m.parse.parse()
	m.render()
	return m.output
}
|
||||
|
||||
// AddRenderFn registers fn as the rendering function for the given
// NodeType, overriding the default Node rendering.
func (m *Mark) AddRenderFn(typ NodeType, fn RenderFn) {
	m.renderFn[typ] = fn
}
|
||||
|
||||
// Staic render function
|
||||
func Render(input string) string {
|
||||
m := New(input, nil)
|
||||
return m.Render()
|
||||
}
|
||||
614
vendor/github.com/a8m/mark/node.go
generated
vendored
Normal file
614
vendor/github.com/a8m/mark/node.go
generated
vendored
Normal file
@@ -0,0 +1,614 @@
|
||||
package mark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A Node is an element in the parse tree.
type Node interface {
	Type() NodeType
	Render() string
}

// NodeType identifies the type of a parse tree node.
type NodeType int

// Type returns itself and provides an easy default implementation
// for embedding in a Node. Embedded in all non-trivial Nodes.
func (t NodeType) Type() NodeType {
	return t
}

// RenderFn is a render function, used for overriding default rendering.
type RenderFn func(Node) string

const (
	NodeText       NodeType = iota // A plain text
	NodeParagraph                  // A Paragraph
	NodeEmphasis                   // An emphasis(strong, em, ...)
	NodeHeading                    // A heading (h1, h2, ...)
	NodeBr                         // A line break
	NodeHr                         // A horizontal rule
	NodeImage                      // An image
	NodeRefImage                   // An image reference
	NodeList                       // A list of ListItems
	NodeListItem                   // A list item node
	NodeLink                       // A link(href)
	NodeRefLink                    // A link reference
	NodeDefLink                    // A link definition
	NodeTable                      // A table of NodeRows
	NodeRow                        // A row of NodeCells
	NodeCell                       // A table-cell(td)
	NodeCode                       // A code block(wrapped with pre)
	NodeBlockQuote                 // A blockquote
	NodeHTML                       // An inline HTML
	NodeCheckbox                   // A checkbox
)
|
||||
|
||||
// ParagraphNode holds a simple paragraph node containing text
// that may carry emphasis.
type ParagraphNode struct {
	NodeType
	Pos
	Nodes []Node
}

// Render returns the html representation of ParagraphNode.
func (n *ParagraphNode) Render() (s string) {
	for _, node := range n.Nodes {
		s += node.Render()
	}
	return wrap("p", s)
}

// newParagraph returns an empty paragraph node at pos.
func (p *parse) newParagraph(pos Pos) *ParagraphNode {
	return &ParagraphNode{NodeType: NodeParagraph, Pos: pos}
}
|
||||
|
||||
// TextNode holds plain text.
type TextNode struct {
	NodeType
	Pos
	Text string
}

// Render returns the string representation of TextNode.
func (n *TextNode) Render() string {
	return n.Text
}

// newText returns a text node at pos; text is filtered through p.text.
func (p *parse) newText(pos Pos, text string) *TextNode {
	return &TextNode{NodeType: NodeText, Pos: pos, Text: p.text(text)}
}
|
||||
|
||||
// HTMLNode holds the raw html source.
type HTMLNode struct {
	NodeType
	Pos
	Src string
}

// Render returns the src of the HTMLNode unchanged.
func (n *HTMLNode) Render() string {
	return n.Src
}

// newHTML returns an HTML node at pos holding the raw source src.
func (p *parse) newHTML(pos Pos, src string) *HTMLNode {
	return &HTMLNode{NodeType: NodeHTML, Pos: pos, Src: src}
}
|
||||
|
||||
// HrNode represents horizontal rule
|
||||
type HrNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
}
|
||||
|
||||
// Render returns the html representation of hr.
|
||||
func (n *HrNode) Render() string {
|
||||
return "<hr>"
|
||||
}
|
||||
|
||||
func (p *parse) newHr(pos Pos) *HrNode {
|
||||
return &HrNode{NodeType: NodeHr, Pos: pos}
|
||||
}
|
||||
|
||||
// BrNode represents a line-break element.
type BrNode struct {
	NodeType
	Pos
}

// Render returns the html representation of a line break.
func (n *BrNode) Render() string {
	return "<br>"
}

// newBr returns a line-break node at pos.
func (p *parse) newBr(pos Pos) *BrNode {
	return &BrNode{NodeType: NodeBr, Pos: pos}
}
|
||||
|
||||
// EmphasisNode holds plain-text wrapped with style.
// (strong, em, del, code)
type EmphasisNode struct {
	NodeType
	Pos
	Style itemType
	Nodes []Node
}

// Tag returns the tag name derived from the Style field.
func (n *EmphasisNode) Tag() (s string) {
	switch n.Style {
	case itemStrong:
		s = "strong"
	case itemItalic:
		s = "em"
	case itemStrike:
		s = "del"
	case itemCode:
		s = "code"
	}
	return
}

// Render returns the html representation of the emphasized text.
func (n *EmphasisNode) Render() string {
	var s string
	for _, node := range n.Nodes {
		s += node.Render()
	}
	return wrap(n.Tag(), s)
}

// newEmphasis returns an emphasis node at pos with the given style.
func (p *parse) newEmphasis(pos Pos, style itemType) *EmphasisNode {
	return &EmphasisNode{NodeType: NodeEmphasis, Pos: pos, Style: style}
}
|
||||
|
||||
// HeadingNode holds heaing element with specific level(1-6).
|
||||
type HeadingNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Level int
|
||||
Text string
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Render returns the html representation based on heading level.
|
||||
func (n *HeadingNode) Render() (s string) {
|
||||
for _, node := range n.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
re := regexp.MustCompile(`[^\w]+`)
|
||||
id := re.ReplaceAllString(n.Text, "-")
|
||||
// ToLowerCase
|
||||
id = strings.ToLower(id)
|
||||
return fmt.Sprintf("<%[1]s id=\"%s\">%s</%[1]s>", "h"+strconv.Itoa(n.Level), id, s)
|
||||
}
|
||||
|
||||
func (p *parse) newHeading(pos Pos, level int, text string) *HeadingNode {
|
||||
return &HeadingNode{NodeType: NodeHeading, Pos: pos, Level: level, Text: p.text(text)}
|
||||
}
|
||||
|
||||
// Code holds CodeBlock node with specific lang field.
|
||||
type CodeNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Lang, Text string
|
||||
}
|
||||
|
||||
// Return the html representation of codeBlock
|
||||
func (n *CodeNode) Render() string {
|
||||
var attr string
|
||||
if n.Lang != "" {
|
||||
attr = fmt.Sprintf(" class=\"lang-%s\"", n.Lang)
|
||||
}
|
||||
code := fmt.Sprintf("<%[1]s%s>%s</%[1]s>", "code", attr, n.Text)
|
||||
return wrap("pre", code)
|
||||
}
|
||||
|
||||
func (p *parse) newCode(pos Pos, lang, text string) *CodeNode {
|
||||
// DRY: see `escape()` below
|
||||
text = strings.NewReplacer("<", "<", ">", ">", "\"", """, "&", "&").Replace(text)
|
||||
return &CodeNode{NodeType: NodeCode, Pos: pos, Lang: lang, Text: text}
|
||||
}
|
||||
|
||||
// Link holds a tag with optional title
|
||||
type LinkNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Title, Href string
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Return the html representation of link node
|
||||
func (n *LinkNode) Render() (s string) {
|
||||
for _, node := range n.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
attrs := fmt.Sprintf("href=\"%s\"", n.Href)
|
||||
if n.Title != "" {
|
||||
attrs += fmt.Sprintf(" title=\"%s\"", n.Title)
|
||||
}
|
||||
return fmt.Sprintf("<a %s>%s</a>", attrs, s)
|
||||
}
|
||||
|
||||
func (p *parse) newLink(pos Pos, title, href string, nodes ...Node) *LinkNode {
|
||||
return &LinkNode{NodeType: NodeLink, Pos: pos, Title: p.text(title), Href: p.text(href), Nodes: nodes}
|
||||
}
|
||||
|
||||
// RefLink holds link with refrence to link definition
|
||||
type RefNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *parse
|
||||
Text, Ref, Raw string
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// rendering based type
|
||||
func (n *RefNode) Render() string {
|
||||
var node Node
|
||||
ref := strings.ToLower(n.Ref)
|
||||
if l, ok := n.tr.links[ref]; ok {
|
||||
if n.Type() == NodeRefLink {
|
||||
node = n.tr.newLink(n.Pos, l.Title, l.Href, n.Nodes...)
|
||||
} else {
|
||||
node = n.tr.newImage(n.Pos, l.Title, l.Href, n.Text)
|
||||
}
|
||||
} else {
|
||||
node = n.tr.newText(n.Pos, n.Raw)
|
||||
}
|
||||
return node.Render()
|
||||
}
|
||||
|
||||
// newRefLink create new RefLink that suitable for link
|
||||
func (p *parse) newRefLink(typ itemType, pos Pos, raw, ref string, text []Node) *RefNode {
|
||||
return &RefNode{NodeType: NodeRefLink, Pos: pos, tr: p.root(), Raw: raw, Ref: ref, Nodes: text}
|
||||
}
|
||||
|
||||
// newRefImage create new RefLink that suitable for image
|
||||
func (p *parse) newRefImage(typ itemType, pos Pos, raw, ref, text string) *RefNode {
|
||||
return &RefNode{NodeType: NodeRefImage, Pos: pos, tr: p.root(), Raw: raw, Ref: ref, Text: text}
|
||||
}
|
||||
|
||||
// DefLinkNode represents a single link definition ("[name]: href title"),
// collected during parsing and looked up later by reference links/images.
type DefLinkNode struct {
	NodeType
	Pos
	Name, Href, Title string
}

// Render returns the empty string: a definition is a transparent node
// that produces no output of its own.
func (n *DefLinkNode) Render() string {
	return ""
}

// newDefLink builds a DefLinkNode.
// NOTE(review): the node is tagged NodeLink rather than a dedicated
// def-link type — confirm this is intentional before changing it, since
// renderFn dispatch keys off Type().
func (p *parse) newDefLink(pos Pos, name, href, title string) *DefLinkNode {
	return &DefLinkNode{NodeType: NodeLink, Pos: pos, Name: name, Href: href, Title: title}
}
|
||||
|
||||
// ImageNode represents an image element with optional alt and title attributes.
|
||||
type ImageNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Title, Src, Alt string
|
||||
}
|
||||
|
||||
// Render returns the html representation on image node
|
||||
func (n *ImageNode) Render() string {
|
||||
attrs := fmt.Sprintf("src=\"%s\" alt=\"%s\"", n.Src, n.Alt)
|
||||
if n.Title != "" {
|
||||
attrs += fmt.Sprintf(" title=\"%s\"", n.Title)
|
||||
}
|
||||
return fmt.Sprintf("<img %s>", attrs)
|
||||
}
|
||||
|
||||
func (p *parse) newImage(pos Pos, title, src, alt string) *ImageNode {
|
||||
return &ImageNode{NodeType: NodeImage, Pos: pos, Title: p.text(title), Src: p.text(src), Alt: p.text(alt)}
|
||||
}
|
||||
|
||||
// ListNode holds list items nodes in ordered or unordered states.
|
||||
type ListNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Ordered bool
|
||||
Items []*ListItemNode
|
||||
}
|
||||
|
||||
func (n *ListNode) append(item *ListItemNode) {
|
||||
n.Items = append(n.Items, item)
|
||||
}
|
||||
|
||||
// Render returns the html representation of orderd(ol) or unordered(ul) list.
|
||||
func (n *ListNode) Render() (s string) {
|
||||
tag := "ul"
|
||||
if n.Ordered {
|
||||
tag = "ol"
|
||||
}
|
||||
for _, item := range n.Items {
|
||||
s += "\n" + item.Render()
|
||||
}
|
||||
s += "\n"
|
||||
return wrap(tag, s)
|
||||
}
|
||||
|
||||
func (p *parse) newList(pos Pos, ordered bool) *ListNode {
|
||||
return &ListNode{NodeType: NodeList, Pos: pos, Ordered: ordered}
|
||||
}
|
||||
|
||||
// ListItem represents single item in ListNode that may contains nested nodes.
|
||||
type ListItemNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
func (l *ListItemNode) append(n Node) {
|
||||
l.Nodes = append(l.Nodes, n)
|
||||
}
|
||||
|
||||
// Render returns the html representation of list-item
|
||||
func (l *ListItemNode) Render() (s string) {
|
||||
for _, node := range l.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
return wrap("li", s)
|
||||
}
|
||||
|
||||
func (p *parse) newListItem(pos Pos) *ListItemNode {
|
||||
return &ListItemNode{NodeType: NodeListItem, Pos: pos}
|
||||
}
|
||||
|
||||
// TableNode represents table element contains head and body
|
||||
type TableNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Rows []*RowNode
|
||||
}
|
||||
|
||||
func (n *TableNode) append(row *RowNode) {
|
||||
n.Rows = append(n.Rows, row)
|
||||
}
|
||||
|
||||
// Render returns the html representation of a table
|
||||
func (n *TableNode) Render() string {
|
||||
var s string
|
||||
for i, row := range n.Rows {
|
||||
s += "\n"
|
||||
switch i {
|
||||
case 0:
|
||||
s += wrap("thead", "\n"+row.Render()+"\n")
|
||||
case 1:
|
||||
s += "<tbody>\n"
|
||||
fallthrough
|
||||
default:
|
||||
s += row.Render()
|
||||
}
|
||||
}
|
||||
s += "\n</tbody>\n"
|
||||
return wrap("table", s)
|
||||
}
|
||||
|
||||
func (p *parse) newTable(pos Pos) *TableNode {
|
||||
return &TableNode{NodeType: NodeTable, Pos: pos}
|
||||
}
|
||||
|
||||
// RowNode represnt tr that holds list of cell-nodes
|
||||
type RowNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Cells []*CellNode
|
||||
}
|
||||
|
||||
func (r *RowNode) append(cell *CellNode) {
|
||||
r.Cells = append(r.Cells, cell)
|
||||
}
|
||||
|
||||
// Render returns the html representation of table-row
|
||||
func (r *RowNode) Render() string {
|
||||
var s string
|
||||
for _, cell := range r.Cells {
|
||||
s += "\n" + cell.Render()
|
||||
}
|
||||
s += "\n"
|
||||
return wrap("tr", s)
|
||||
}
|
||||
|
||||
func (p *parse) newRow(pos Pos) *RowNode {
|
||||
return &RowNode{NodeType: NodeRow, Pos: pos}
|
||||
}
|
||||
|
||||
// AlignType identifies the alignment-type of a specific table cell.
type AlignType int

// Align returns itself and provides an easy default implementation
// for embedding in a Node.
func (t AlignType) Align() AlignType {
	return t
}

// Alignment values. The order matters: None is the zero value returned
// by parseAlign when no alignment marker is present.
const (
	None AlignType = iota
	Right
	Left
	Center
)

// Cell kinds: Header renders as <th>, Data as <td> (see CellNode.Render).
const (
	Header = iota
	Data
)
|
||||
|
||||
// CellNode represents table-data/cell that holds simple text(may be emphasis)
|
||||
// Note: the text in <th> elements are bold and centered by default.
|
||||
type CellNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
AlignType
|
||||
Kind int
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Render returns the html reprenestation of table-cell
|
||||
func (c *CellNode) Render() string {
|
||||
var s string
|
||||
tag := "td"
|
||||
if c.Kind == Header {
|
||||
tag = "th"
|
||||
}
|
||||
for _, node := range c.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
return fmt.Sprintf("<%[1]s%s>%s</%[1]s>", tag, c.Style(), s)
|
||||
}
|
||||
|
||||
// Style return the cell-style based on alignment field
|
||||
func (c *CellNode) Style() string {
|
||||
s := " style=\"text-align:"
|
||||
switch c.Align() {
|
||||
case Right:
|
||||
s += "right\""
|
||||
case Left:
|
||||
s += "left\""
|
||||
case Center:
|
||||
s += "center\""
|
||||
default:
|
||||
s = ""
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *parse) newCell(pos Pos, kind int, align AlignType) *CellNode {
|
||||
return &CellNode{NodeType: NodeCell, Pos: pos, Kind: kind, AlignType: align}
|
||||
}
|
||||
|
||||
// BlockQuote represents block-quote tag.
|
||||
type BlockQuoteNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Render returns the html representation of BlockQuote
|
||||
func (n *BlockQuoteNode) Render() string {
|
||||
var s string
|
||||
for _, node := range n.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
return wrap("blockquote", s)
|
||||
}
|
||||
|
||||
func (p *parse) newBlockQuote(pos Pos) *BlockQuoteNode {
|
||||
return &BlockQuoteNode{NodeType: NodeBlockQuote, Pos: pos}
|
||||
}
|
||||
|
||||
// CheckboxNode represents checked and unchecked checkbox tag.
|
||||
// Used in task lists.
|
||||
type CheckboxNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Checked bool
|
||||
}
|
||||
|
||||
// Render returns the html representation of checked and unchecked CheckBox.
|
||||
func (n *CheckboxNode) Render() string {
|
||||
s := "<input type=\"checkbox\""
|
||||
if n.Checked {
|
||||
s += " checked"
|
||||
}
|
||||
return s + ">"
|
||||
}
|
||||
|
||||
func (p *parse) newCheckbox(pos Pos, checked bool) *CheckboxNode {
|
||||
return &CheckboxNode{NodeType: NodeCheckbox, Pos: pos, Checked: checked}
|
||||
}
|
||||
|
||||
// wrap surrounds body with an opening and closing tag of the given name.
func wrap(tag, body string) string {
	return fmt.Sprintf("<%s>%s</%s>", tag, body, tag)
}
|
||||
|
||||
// Group all text configuration in one place(escaping, smartypants, etc..)
|
||||
func (p *parse) text(input string) string {
|
||||
opts := p.root().options
|
||||
if opts.Smartypants {
|
||||
input = smartypants(input)
|
||||
}
|
||||
if opts.Fractions {
|
||||
input = smartyfractions(input)
|
||||
}
|
||||
return escape(input)
|
||||
}
|
||||
|
||||
// Helper escaper
|
||||
func escape(str string) (cpy string) {
|
||||
emp := regexp.MustCompile(`&\w+;`)
|
||||
for i := 0; i < len(str); i++ {
|
||||
switch s := str[i]; s {
|
||||
case '>':
|
||||
cpy += ">"
|
||||
case '"':
|
||||
cpy += """
|
||||
case '\'':
|
||||
cpy += "'"
|
||||
case '<':
|
||||
if res := reHTML.tag.FindString(str[i:]); res != "" {
|
||||
cpy += res
|
||||
i += len(res) - 1
|
||||
} else {
|
||||
cpy += "<"
|
||||
}
|
||||
case '&':
|
||||
if res := emp.FindString(str[i:]); res != "" {
|
||||
cpy += res
|
||||
i += len(res) - 1
|
||||
} else {
|
||||
cpy += "&"
|
||||
}
|
||||
default:
|
||||
cpy += str[i : i+1]
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Replacer and patterns for smartypants, built once at package scope
// instead of on every call (both are safe for concurrent use).
var (
	// em-dashes, en-dashes, ellipses
	smartyDashes = strings.NewReplacer("---", "\u2014", "--", "\u2013", "...", "\u2026")
	// opening single quote: start of text or after dash/slash/bracket/quote/space
	reOpenSingle = regexp.MustCompile("(^|[-\u2014/(\\[{\"\\s])'")
	// opening double quote: same contexts, plus after an opening single
	reOpenDouble = regexp.MustCompile("(^|[-\u2014/(\\[{\u2018\\s])\"")
)

// smartypants translates plain ASCII punctuation into typographic
// punctuation (curly quotes, dashes, ellipses); translated from marked.js.
func smartypants(text string) string {
	text = smartyDashes.Replace(text)
	// opening singles
	text = reOpenSingle.ReplaceAllString(text, "$1\u2018")
	// closing singles & apostrophes
	text = strings.Replace(text, "'", "\u2019", -1)
	// opening doubles
	text = reOpenDouble.ReplaceAllString(text, "$1\u201c")
	// closing doubles
	text = strings.Replace(text, "\"", "\u201d", -1)
	return text
}
|
||||
|
||||
// reFraction matches "n/m", with an optional trailing "/k" used to spot
// date-like input. Compiled once at package scope.
var reFraction = regexp.MustCompile(`(\d+)(/\d+)(/\d+|)`)

// smartyfractions replaces common fractions with their html entities
// (e.g. "1/2" -> "&frac12;") and renders any other fraction with
// sup/sub markup and the "&frasl;" fraction-slash entity. Date-like
// input ("1/2/2006") is left untouched.
func smartyfractions(text string) string {
	return reFraction.ReplaceAllStringFunc(text, func(str string) string {
		var match []string
		// If it's date like (a second slash is present), keep it as-is.
		if match = reFraction.FindStringSubmatch(str); match[3] != "" {
			return str
		}
		switch n := match[1] + match[2]; n {
		case "1/2", "1/3", "2/3", "1/4", "3/4", "1/5", "2/5", "3/5", "4/5",
			"1/6", "5/6", "1/7", "1/8", "3/8", "5/8", "7/8":
			return fmt.Sprintf("&frac%s;", strings.Replace(n, "/", "", 1))
		default:
			// the separator must be the "&frasl;" entity, matching the
			// entity-based output of the &frac cases above
			return fmt.Sprintf("<sup>%s</sup>&frasl;<sub>%s</sub>",
				match[1], strings.Replace(match[2], "/", "", 1))
		}
	})
}
|
||||
436
vendor/github.com/a8m/mark/parser.go
generated
vendored
Normal file
436
vendor/github.com/a8m/mark/parser.go
generated
vendored
Normal file
@@ -0,0 +1,436 @@
|
||||
package mark
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// parse holds the state of the parser while it turns lexer tokens into
// a tree of Nodes and, later, into rendered output.
type parse struct {
	Nodes     []Node                  // parsed top-level nodes, in document order
	lex       Lexer                   // token source
	options   *Options                // rendering options; only the root's are consulted (see text())
	tr        *parse                  // parent parser; nil on the root (see root())
	output    string                  // accumulated render() result
	peekCount int                     // number of backed-up tokens currently held in token
	token     [3]item                 // three-token lookahead for parser
	links     map[string]*DefLinkNode // link definitions (Deflink parsing), consumed by RefLinks
	renderFn  map[NodeType]RenderFn   // custom per-node-type render overrides
}
|
||||
|
||||
// Return new parser
|
||||
func newParse(input string, opts *Options) *parse {
|
||||
return &parse{
|
||||
lex: lex(input),
|
||||
options: opts,
|
||||
links: make(map[string]*DefLinkNode),
|
||||
renderFn: make(map[NodeType]RenderFn),
|
||||
}
|
||||
}
|
||||
|
||||
// parse convert the raw text to Nodeparse.
|
||||
func (p *parse) parse() {
|
||||
Loop:
|
||||
for {
|
||||
var n Node
|
||||
switch t := p.peek(); t.typ {
|
||||
case itemEOF, itemError:
|
||||
break Loop
|
||||
case itemNewLine:
|
||||
p.next()
|
||||
case itemHr:
|
||||
n = p.newHr(p.next().pos)
|
||||
case itemHTML:
|
||||
t = p.next()
|
||||
n = p.newHTML(t.pos, t.val)
|
||||
case itemDefLink:
|
||||
n = p.parseDefLink()
|
||||
case itemHeading, itemLHeading:
|
||||
n = p.parseHeading()
|
||||
case itemCodeBlock, itemGfmCodeBlock:
|
||||
n = p.parseCodeBlock()
|
||||
case itemList:
|
||||
n = p.parseList()
|
||||
case itemTable, itemLpTable:
|
||||
n = p.parseTable()
|
||||
case itemBlockQuote:
|
||||
n = p.parseBlockQuote()
|
||||
case itemIndent:
|
||||
space := p.next()
|
||||
// If it isn't followed by itemText
|
||||
if p.peek().typ != itemText {
|
||||
continue
|
||||
}
|
||||
p.backup2(space)
|
||||
fallthrough
|
||||
// itemText
|
||||
default:
|
||||
tmp := p.newParagraph(t.pos)
|
||||
tmp.Nodes = p.parseText(p.next().val + p.scanLines())
|
||||
n = tmp
|
||||
}
|
||||
if n != nil {
|
||||
p.append(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Root getter
|
||||
func (p *parse) root() *parse {
|
||||
if p.tr == nil {
|
||||
return p
|
||||
}
|
||||
return p.tr.root()
|
||||
}
|
||||
|
||||
// Render parse nodes to the wanted output
|
||||
func (p *parse) render() {
|
||||
var output string
|
||||
for i, node := range p.Nodes {
|
||||
// If there's a custom render function, use it instead.
|
||||
if fn, ok := p.renderFn[node.Type()]; ok {
|
||||
output = fn(node)
|
||||
} else {
|
||||
output = node.Render()
|
||||
}
|
||||
p.output += output
|
||||
if output != "" && i != len(p.Nodes)-1 {
|
||||
p.output += "\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// append adds n to the parser's node list.
func (p *parse) append(n Node) {
	p.Nodes = append(p.Nodes, n)
}
|
||||
|
||||
// next returns the next token, consuming a backed-up token first if one
// is pending; otherwise it pulls a fresh token from the lexer.
func (p *parse) next() item {
	if p.peekCount > 0 {
		p.peekCount--
	} else {
		p.token[0] = p.lex.nextItem()
	}
	return p.token[p.peekCount]
}
|
||||
|
||||
// peek returns but does not consume the next token; the token is parked
// in the lookahead buffer for the following next() call.
func (p *parse) peek() item {
	if p.peekCount > 0 {
		return p.token[p.peekCount-1]
	}
	p.peekCount = 1
	p.token[0] = p.lex.nextItem()
	return p.token[0]
}
|
||||
|
||||
// backup backs the input stream up one token (undoes the last next()).
func (p *parse) backup() {
	p.peekCount++
}
|
||||
|
||||
// backup2 backs the input stream up two tokens.
// The zeroth token is already there; t1 becomes the next token returned.
func (p *parse) backup2(t1 item) {
	p.token[1] = t1
	p.peekCount = 2
}
|
||||
|
||||
// parseText
|
||||
func (p *parse) parseText(input string) (nodes []Node) {
|
||||
// Trim whitespaces that not a line-break
|
||||
input = regexp.MustCompile(`(?m)^ +| +(\n|$)`).ReplaceAllStringFunc(input, func(s string) string {
|
||||
if reBr.MatchString(s) {
|
||||
return s
|
||||
}
|
||||
return strings.Replace(s, " ", "", -1)
|
||||
})
|
||||
l := lexInline(input)
|
||||
for token := range l.items {
|
||||
var node Node
|
||||
switch token.typ {
|
||||
case itemBr:
|
||||
node = p.newBr(token.pos)
|
||||
case itemStrong, itemItalic, itemStrike, itemCode:
|
||||
node = p.parseEmphasis(token.typ, token.pos, token.val)
|
||||
case itemLink, itemAutoLink, itemGfmLink:
|
||||
var title, href string
|
||||
var text []Node
|
||||
if token.typ == itemLink {
|
||||
match := reLink.FindStringSubmatch(token.val)
|
||||
text = p.parseText(match[1])
|
||||
href, title = match[2], match[3]
|
||||
} else {
|
||||
var match []string
|
||||
if token.typ == itemGfmLink {
|
||||
match = reGfmLink.FindStringSubmatch(token.val)
|
||||
} else {
|
||||
match = reAutoLink.FindStringSubmatch(token.val)
|
||||
}
|
||||
href = match[1]
|
||||
text = append(text, p.newText(token.pos, match[1]))
|
||||
}
|
||||
node = p.newLink(token.pos, title, href, text...)
|
||||
case itemImage:
|
||||
match := reImage.FindStringSubmatch(token.val)
|
||||
node = p.newImage(token.pos, match[3], match[2], match[1])
|
||||
case itemRefLink, itemRefImage:
|
||||
match := reRefLink.FindStringSubmatch(token.val)
|
||||
text, ref := match[1], match[2]
|
||||
if ref == "" {
|
||||
ref = text
|
||||
}
|
||||
if token.typ == itemRefLink {
|
||||
node = p.newRefLink(token.typ, token.pos, token.val, ref, p.parseText(text))
|
||||
} else {
|
||||
node = p.newRefImage(token.typ, token.pos, token.val, ref, text)
|
||||
}
|
||||
case itemHTML:
|
||||
node = p.newHTML(token.pos, token.val)
|
||||
default:
|
||||
node = p.newText(token.pos, token.val)
|
||||
}
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// parseEmphasis parses one inline emphasis token (strong/em/del/code),
// recursively parsing the inner text into child nodes.
// Assumes typ is one of the four emphasis kinds; re stays nil otherwise
// and FindStringSubmatch would panic.
func (p *parse) parseEmphasis(typ itemType, pos Pos, val string) *EmphasisNode {
	var re *regexp.Regexp
	switch typ {
	case itemStrike:
		re = reStrike
	case itemStrong:
		re = reStrong
	case itemCode:
		re = reCode
	case itemItalic:
		re = reItalic
	}
	node := p.newEmphasis(pos, typ)
	// the inner text sits in the last capture group; fall back to the
	// first group when the last one is empty
	match := re.FindStringSubmatch(val)
	text := match[len(match)-1]
	if text == "" {
		text = match[1]
	}
	node.Nodes = p.parseText(text)
	return node
}
|
||||
|
||||
// parseHeading parses an ATX heading ("## text") or a setext heading
// (text underlined with "=" or "-").
func (p *parse) parseHeading() (node *HeadingNode) {
	token := p.next()
	level := 1
	var text string
	if token.typ == itemHeading {
		// ATX: the level is the number of leading '#'
		match := reHeading.FindStringSubmatch(token.val)
		level, text = len(match[1]), match[2]
	} else {
		match := reLHeading.FindStringSubmatch(token.val)
		// using equal signs for first-level, and dashes for second-level.
		text = match[1]
		if match[2] == "-" {
			level = 2
		}
	}
	node = p.newHeading(token.pos, level, text)
	node.Nodes = p.parseText(text)
	return
}
|
||||
|
||||
func (p *parse) parseDefLink() *DefLinkNode {
|
||||
token := p.next()
|
||||
match := reDefLink.FindStringSubmatch(token.val)
|
||||
name := strings.ToLower(match[1])
|
||||
// name(lowercase), href, title
|
||||
n := p.newDefLink(token.pos, name, match[2], match[3])
|
||||
// store in links
|
||||
links := p.root().links
|
||||
if _, ok := links[name]; !ok {
|
||||
links[name] = n
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// parseCodeBlock parses an indented or fenced (GFM) code block.
func (p *parse) parseCodeBlock() *CodeNode {
	var lang, text string
	token := p.next()
	if token.typ == itemGfmCodeBlock {
		// fenced block: the opening fence carries the language
		codeStart := reGfmCode.FindStringSubmatch(token.val)
		lang = codeStart[3]
		text = token.val[len(codeStart[0]):]
	} else {
		// indented block — presumably reCodeBlock.trim strips the leading
		// indentation from every line; confirm against the lexer's regexes
		text = reCodeBlock.trim(token.val, "")
	}
	return p.newCode(token.pos, lang, text)
}
|
||||
|
||||
func (p *parse) parseBlockQuote() (n *BlockQuoteNode) {
|
||||
token := p.next()
|
||||
// replacer
|
||||
re := regexp.MustCompile(`(?m)^ *> ?`)
|
||||
raw := re.ReplaceAllString(token.val, "")
|
||||
// TODO(a8m): doesn't work right now with defLink(inside the blockQuote)
|
||||
tr := &parse{lex: lex(raw), tr: p}
|
||||
tr.parse()
|
||||
n = p.newBlockQuote(token.pos)
|
||||
n.Nodes = tr.Nodes
|
||||
return
|
||||
}
|
||||
|
||||
// parse list
|
||||
func (p *parse) parseList() *ListNode {
|
||||
token := p.next()
|
||||
list := p.newList(token.pos, isDigit(token.val))
|
||||
Loop:
|
||||
for {
|
||||
switch token = p.peek(); token.typ {
|
||||
case itemLooseItem, itemListItem:
|
||||
list.append(p.parseListItem())
|
||||
default:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// parseListItem parses a single list item, running a child parser over
// its (trimmed) raw content so items can contain nested blocks.
func (p *parse) parseListItem() *ListItemNode {
	token := p.next()
	item := p.newListItem(token.pos)
	token.val = strings.TrimSpace(token.val)
	// task items ("[x] ...") get a checkbox plus inline text
	if p.isTaskItem(token.val) {
		item.Nodes = p.parseTaskItem(token)
		return item
	}
	tr := &parse{lex: lex(token.val), tr: p}
	tr.parse()
	for _, node := range tr.Nodes {
		// tight items (itemListItem) have their paragraph wrapper
		// unwrapped; only loose items keep their paragraphs
		if n, ok := node.(*ParagraphNode); ok && token.typ == itemListItem {
			item.Nodes = append(item.Nodes, n.Nodes...)
		} else {
			item.append(node)
		}
	}
	return item
}
|
||||
|
||||
// parseTaskItem parses a list item as a task item: a checkbox node
// followed by the inline text. Callers must have verified isTaskItem
// first — indexes 1 and 3 assume the "[x] "/"[ ] " prefix is present.
func (p *parse) parseTaskItem(token item) []Node {
	checkbox := p.newCheckbox(token.pos, token.val[1] == 'x')
	token.val = strings.TrimSpace(token.val[3:])
	return append([]Node{checkbox}, p.parseText(token.val)...)
}
|
||||
|
||||
// isTaskItem tests if the given string is list task item.
|
||||
func (p *parse) isTaskItem(s string) bool {
|
||||
if len(s) < 5 || s[0] != '[' || (s[1] != 'x' && s[1] != ' ') || s[2] != ']' {
|
||||
return false
|
||||
}
|
||||
return "" != strings.TrimSpace(s[3:])
|
||||
}
|
||||
|
||||
// parseTable parses a table. Row 1 is the header, row 2 the alignment
// spec, and rows 3+ are data; the alignment spec is applied to every row.
func (p *parse) parseTable() *TableNode {
	table := p.newTable(p.next().pos)
	// Align [ None, Left, Right, ... ]
	// Header [ Cells: [ ... ] ]
	// Data: [ Rows: [ Cells: [ ... ] ] ]
	rows := struct {
		Align  []AlignType
		Header []item
		Cells  [][]item
	}{}
Loop:
	// i counts row markers: 1 = header, 2 = alignment, 3+ = data
	for i := 0; ; {
		switch token := p.next(); token.typ {
		case itemTableRow:
			i++
			if i > 2 {
				rows.Cells = append(rows.Cells, []item{})
			}
		case itemTableCell:
			// Header
			if i == 1 {
				rows.Header = append(rows.Header, token)
				// Alignment
			} else if i == 2 {
				rows.Align = append(rows.Align, parseAlign(token.val))
				// Data
			} else {
				pos := i - 3
				rows.Cells[pos] = append(rows.Cells[pos], token)
			}
		default:
			// first non-table token ends the table; push it back
			p.backup()
			break Loop
		}
	}
	// Transform to nodes
	table.append(p.parseCells(Header, rows.Header, rows.Align))
	// Table body
	for _, row := range rows.Cells {
		table.append(p.parseCells(Data, row, rows.Align))
	}
	return table
}
|
||||
|
||||
// parseCells converts raw cell tokens into a RowNode, applying the
// column alignments collected from the table's alignment row.
// NOTE(review): returns nil when items is empty, and indexes align[i]
// without a bounds check — confirm the lexer guarantees non-empty rows
// with no more cells than alignment columns.
func (p *parse) parseCells(kind int, items []item, align []AlignType) *RowNode {
	var row *RowNode
	for i, item := range items {
		if i == 0 {
			row = p.newRow(item.pos)
		}
		cell := p.newCell(item.pos, kind, align[i])
		cell.Nodes = p.parseText(item.val)
		row.append(cell)
	}
	return row
}
|
||||
|
||||
// scanLines consumes consecutive text/indent lines that belong to the
// same paragraph, stopping (and backing the stream up) at the first
// token that ends it.
func (p *parse) scanLines() (s string) {
	for {
		tkn := p.next()
		if tkn.typ == itemText || tkn.typ == itemIndent {
			s += tkn.val
		} else if tkn.typ == itemNewLine {
			// a newline only continues the paragraph when more text or
			// indentation follows; otherwise push it back and stop
			if t := p.peek().typ; t != itemText && t != itemIndent {
				p.backup2(tkn)
				break
			}
			s += tkn.val
		} else {
			p.backup()
			break
		}
	}
	return
}
|
||||
|
||||
// get align-string and return the align type of it
|
||||
func parseAlign(s string) (typ AlignType) {
|
||||
sfx, pfx := strings.HasSuffix(s, ":"), strings.HasPrefix(s, ":")
|
||||
switch {
|
||||
case sfx && pfx:
|
||||
typ = Center
|
||||
case sfx:
|
||||
typ = Right
|
||||
case pfx:
|
||||
typ = Left
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// isDigit reports whether s begins with a decimal digit rune; used to
// tell ordered lists ("1. x") from unordered ones ("- x").
func isDigit(s string) bool {
	firstRune, _ := utf8.DecodeRuneInString(s)
	return unicode.IsDigit(firstRune)
}
|
||||
Reference in New Issue
Block a user